diff -Nru probert-0.0.14.2build1/bin/probert probert-0.0.15/bin/probert --- probert-0.0.14.2build1/bin/probert 2016-09-19 21:11:09.000000000 -0400 +++ probert-0.0.15/bin/probert 2019-04-03 14:20:21.000000000 -0400 @@ -28,16 +28,10 @@ description='probert - Hardware prober for all', prog='probert') parser.add_argument('--all', action='store_true', - default=True, - dest='probe_all', help='Probe all hardware types.') parser.add_argument('--storage', action='store_true', - default=False, - dest='probe_storage', help='Probe storage hardware.') parser.add_argument('--network', action='store_true', - default=False, - dest='probe_network', help='Probe network hardware.') return parser.parse_args(argv) @@ -49,13 +43,17 @@ logger.info("Starting probert v{}".format(VERSION)) logger.info("Arguments passed: {}".format(sys.argv)) - if opts.probe_all and (opts.probe_storage or opts.probe_network): - opts.probe_all = False + p = prober.Prober() + probe_opts = [opts.network, opts.storage] + if opts.all or not any(probe_opts): + p.probe_all() + if opts.network: + p.probe_network() + if opts.storage: + p.probe_storage() - p = prober.Prober(opts) - p.probe() results = p.get_results() - print(json.dumps(results, indent=4, sort_keys=False)) + print(json.dumps(results, indent=4, sort_keys=True)) if __name__ == '__main__': diff -Nru probert-0.0.14.2build1/debian/changelog probert-0.0.15/debian/changelog --- probert-0.0.14.2build1/debian/changelog 2018-11-03 12:34:52.000000000 -0400 +++ probert-0.0.15/debian/changelog 2019-04-03 14:20:21.000000000 -0400 @@ -1,3 +1,26 @@ +probert (0.0.15) disco; urgency=medium + + [ Ryan Harper ] + * Add probing for advanced storage types (LP: #1821994) + + [ Daniel Watkins ] + * d/control: + - Remove unneeded X-Python3-Version + - Remove hard-coded Depends on python3-all + - Remove Build-Depends on python3-all + - Add Build-Depends on python3-testtools for testing + - Bump Standards-Version to 4.3.0 + - Update priority from extra to optional + - Remove 
python3-yaml from Build-Depends and Depends; it isn't actually + used by the package + - Drop hard-coded Depends now that setup.py correctly expresses + dependencies + * d/rules: + - Re-enable dh_auto_test + - Remove unnecessary override_dh_installinit + + -- Ryan Harper Wed, 03 Apr 2019 13:20:21 -0500 + probert (0.0.14.2build1) disco; urgency=medium * No-change rebuild to build without python3.6 support. diff -Nru probert-0.0.14.2build1/debian/control probert-0.0.15/debian/control --- probert-0.0.14.2build1/debian/control 2017-05-10 05:11:52.000000000 -0400 +++ probert-0.0.15/debian/control 2019-04-03 14:20:21.000000000 -0400 @@ -1,6 +1,6 @@ Source: probert Section: admin -Priority: extra +Priority: optional Maintainer: Ubuntu Developers XSBC-Original-Maintainer: Ryan Harper Build-Depends: debhelper (>= 9), @@ -10,25 +10,27 @@ pep8, pkg-config, pyflakes, - python3-all, - python3-coverage, python3-all-dev, + python3-coverage, python3-flake8, + python3-jsonschema, python3-nose, python3-pyudev, python3-setuptools, - python3-yaml -Standards-Version: 3.9.5 + python3-testtools +Standards-Version: 4.3.0 Homepage: https://github.com/CanonicalLtd/probert -X-Python3-Version: >= 3.3 Vcs-Browser: https://github.com/CanonicalLtd/probert Vcs-Git: https://github.com/CanonicalLtd/probert.git Package: probert Architecture: any -Depends: python3-all, - python3-pyudev, - python3-yaml, +Depends: bcache-tools, + lvm2, + mdadm, + multipath-tools, + util-linux, + zfsutils-linux, ${misc:Depends}, ${python3:Depends}, ${shlibs:Depends} diff -Nru probert-0.0.14.2build1/debian/rules probert-0.0.15/debian/rules --- probert-0.0.14.2build1/debian/rules 2016-07-27 07:12:20.000000000 -0400 +++ probert-0.0.15/debian/rules 2019-04-03 14:20:21.000000000 -0400 @@ -11,12 +11,6 @@ override_dh_python3: dh_python3 --ignore-shebangs -override_dh_installinit: - dh_installinit --no-start - -override_dh_auto_test: - @echo "No tests." 
- get-orig-source: python3 ./setup.py sdist cp dist/*.tar.gz ../probert_${VERS}.orig.tar.gz diff -Nru probert-0.0.14.2build1/probert/bcache.py probert-0.0.15/probert/bcache.py --- probert-0.0.14.2build1/probert/bcache.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/bcache.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,129 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import logging +import os +import subprocess + +log = logging.getLogger('probert.bcache') + + +def superblock_asdict(device=None, data=None): + """ Convert output from bcache-super-show into a dictionary""" + + if not device and not data: + raise ValueError('Supply a device name, or data to parse') + + if not data: + cmd = ['bcache-super-show', device] + result = subprocess.run(cmd, stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + data = result.stdout.decode('utf-8') + bcache_super = {} + for line in data.splitlines(): + if not line: + continue + values = [val for val in line.split('\t') if val] + if len(values) == 2: + bcache_super.update({values[0]: values[1]}) + + return bcache_super + + +def parse_sb_version(sb_version): + """ Convert sb_version string to integer if possible""" + try: + # 'sb.version': '1 [backing device]' + # 'sb.version': '3 [caching device]' + version = int(sb_version.split()[0]) + except (AttributeError, ValueError): + log.warning("Failed to parse bcache 'sb.version' field" + " as integer: %s", sb_version) + return None + + return version + + +def is_backing(device): + """ Test if device is a bcache backing device + + A runtime check for an active bcache backing device is to + examine /sys/class/block//bcache/label + + However if a device is not active then read the superblock + of the device and check that sb.version == 1""" + + sys_block = '/sys/class/block/%s' % os.path.basename(device) + bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label') + return os.path.exists(bcache_sys_attr) + + +def is_caching(device): + """ Test if device is a bcache caching device + + A runtime check for an active bcache backing device is to + examine /sys/class/block//bcache/cache_replacement_policy + + However if a device is not active then read the superblock + of the device and check that sb.version == 3""" + + sys_block = '/sys/class/block/%s' % os.path.basename(device) + bcache_sys_attr = os.path.join(sys_block, 'bcache', + 
'cache_replacement_policy') + return os.path.exists(bcache_sys_attr) + + +def is_bcache_device(device): + return device.get('ID_FS_TYPE') == 'bcache' + + +def probe(context=None): + """Probe the system for bcache devices. Bcache devices + are registered with the kernel upon module load and when + devices are hot/cold plugged. There are two portions to + a bcache, the backing device which holds data and the cache + device which cached data from the backing device. A backing + device encodes a specific cache_set UUID in the backing device + which is used to bind both device in the kernel and create a + new block device, bcacheN. + + For each block device which has a bcache superblock embedded, + extract and examine the superblock to determine which type of + bcache device (backing, caching) and the relevant UUIDs and + build (if possible) the pairing of caches to backing. + + This probe reports the devices separately but enough information + is included to re-assemble joined bcache devices if desired. + """ + backing = {} + caching = {} + bcache = {'backing': backing, 'caching': caching} + if not context: + return bcache + + for device in context.list_devices(subsystem='block'): + if is_bcache_device(device): + devpath = device['DEVNAME'] + sb = superblock_asdict(devpath) + bkey = sb.get('dev.uuid', 'not available') + bconfig = {'blockdev': devpath, 'superblock': sb} + if is_backing(devpath): + backing[bkey] = bconfig + elif is_caching(devpath): + caching[bkey] = bconfig + else: + log.error('bcache.probe: %s is not bcache' % devpath) + + return bcache diff -Nru probert-0.0.14.2build1/probert/dmcrypt.py probert-0.0.15/probert/dmcrypt.py --- probert-0.0.14.2build1/probert/dmcrypt.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/dmcrypt.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,63 @@ +# Copyright 2019 Canonical, Ltd. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import logging +import pyudev +import subprocess + +log = logging.getLogger('probert.dmcrypt') + + +def dmsetup_info(devname): + ''' returns dict of info about device mapper dev. + + {'blkdevname': 'dm-0', + 'blkdevs_used': 'sda5', + 'name': 'sda5_crypt', + 'subsystem': 'CRYPT', + 'uuid': 'CRYPT-LUKS1-2b370697149743b0b2407d11f88311f1-sda5_crypt' + } + ''' + _SEP = '=' + fields = ('name,uuid,blkdevname,blkdevs_used,subsystem'.split(',')) + try: + output = subprocess.check_output( + ['sudo', 'dmsetup', 'info', devname, '-C', '-o', + ','.join(fields), '--noheading', '--separator', _SEP]) + except subprocess.CalledProcessError as e: + log.error('Failed to probe dmsetup info:', e) + return None + values = output.decode('utf-8').strip().split(_SEP) + info = dict(zip(fields, values)) + return info + + +def probe(context=None, report=False): + """ Probing for dm_crypt devices requires running dmsetup info commands + to collect how a particular dm-X device is composed. 
+ """ + # ignore supplied context, we need to read udev after scan/vgchange + context = pyudev.Context() + + crypt_devices = {} + + # look for block devices with DM_UUID and CRYPT; these are crypt devices + for device in context.list_devices(subsystem='block'): + if 'DM_UUID' in device and device['DM_UUID'].startswith('CRYPT'): + devname = device['DEVNAME'] + dm_info = dmsetup_info(devname) + crypt_devices[dm_info['name']] = dm_info + + return crypt_devices diff -Nru probert-0.0.14.2build1/probert/filesystem.py probert-0.0.15/probert/filesystem.py --- probert-0.0.14.2build1/probert/filesystem.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/filesystem.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,51 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import logging +import pyudev + +log = logging.getLogger('probert.filesystems') + + +def get_device_filesystem(device): + # extract ID_FS_* keys into dict, dropping leading ID_FS + return {k.replace('ID_FS_', ''): v + for k, v in device.items() if k.startswith('ID_FS_')} + + +def probe(context=None): + """ Capture detected filesystems found on discovered block devices. 
""" + filesystems = {} + if not context: + context = pyudev.Context() + + for device in context.list_devices(subsystem='block'): + # Ignore block major=1 (ramdisk) and major=7 (loopback) + # these won't ever be used in recreating storage on target systems. + if device['MAJOR'] not in ["1", "7"]: + fs_info = get_device_filesystem(device) + # The ID_FS_ udev values come from libblkid, which contains code to + # recognize lots of different things that block devices or their + # partitions can contain (filesystems, lvm PVs, bcache, ...). We + # only want to report things that are mountable filesystems here, + # which libblkid conveniently tags with ID_FS_USAGE=filesystem. + # Swap is a bit of a special case because it is not a mountable + # filesystem in the usual sense, but subiquity still needs to + # generate mount actions for it. + if fs_info.get("USAGE") == "filesystem" or \ + fs_info.get("TYPE") == "swap": + filesystems[device['DEVNAME']] = fs_info + + return filesystems diff -Nru probert-0.0.14.2build1/probert/__init__.py probert-0.0.15/probert/__init__.py --- probert-0.0.14.2build1/probert/__init__.py 2016-11-08 18:35:29.000000000 -0500 +++ probert-0.0.15/probert/__init__.py 2019-04-03 14:20:21.000000000 -0400 @@ -15,4 +15,4 @@ """ Probert """ -__version__ = "0.0.12" +__version__ = "0.0.15" diff -Nru probert-0.0.14.2build1/probert/lvm.py probert-0.0.15/probert/lvm.py --- probert-0.0.14.2build1/probert/lvm.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/lvm.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,234 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import logging +import json +import os +import pyudev +import subprocess + +from probert.utils import read_sys_block_size + +log = logging.getLogger('probert.lvm') + + +def _lvm_report(cmd, report_key): + """ [pvs --reportformat=json -o foo,bar] report_key='pv' + { + "report": [ + { + "pv": [ + {"pv_name":"/dev/md0", "vg_name":"vg0", + "pv_fmt":"lvm2", "pv_attr":"a--", + "pv_size":"<9.99g", "pv_free":"<6.99g"}, + {"pv_name":"/dev/md1", "vg_name":"vg0", + "pv_fmt":"lvm2", "pv_attr":"a--", + "pv_size":"<9.99g", "pv_free":"<9.99g"} + ] + } + ] + } + """ + def _flatten_list(data): + return [y for x in data for y in x] + + try: + result = subprocess.run(cmd, stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + output = result.stdout.decode('utf-8') + except subprocess.CalledProcessError as e: + log.error('Failed to probe LVM devices on system:', e) + return None + + if not output: + return + + reports = {} + try: + reports = json.loads(output) + except json.decoder.JSONDecodeError as e: + log.error('Failed to load LVM json report:', e) + return None + + return _flatten_list([report.get(report_key) + for report in reports.get('report', []) + if report_key in report]) + + +def probe_pvs_report(): + return _lvm_report(['pvs', '--reportformat=json'], 'pv') + + +def probe_vgs_report(): + report_cmd = ['vgs', '--reportformat=json', '--units=B', + '-o', 'vg_name,pv_name,pv_uuid,vg_size'] + return _lvm_report(report_cmd, 'vg') + + +def probe_lvs_report(): + return _lvm_report('lvs', 'lv') + + +def lvmetad_running(): + return os.path.exists(os.environ.get('LVM_LVMETAD_PIDFILE', + '/run/lvmetad.pid')) + + 
+def lvm_scan(activate=True): + for cmd in [['pvscan'], ['vgscan', '--mknodes']]: + if lvmetad_running(): + cmd.append('--cache') + try: + subprocess.run(cmd, stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL) + except subprocess.CalledProcessError as e: + log.error('Failed lvm_scan command %s: %s', cmd, e) + + +def activate_volgroups(): + """ + Activate available volgroups and logical volumes within. + # found + % vgchange -ay + 1 logical volume(s) in volume group "vg1sdd" now active + + # none found (no output) + % vgchange -ay + """ + + # vgchange handles syncing with udev by default + # see man 8 vgchange and flag --noudevsync + result = subprocess.run(['vgchange', '--activate=y'], check=False, + stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + if result.stdout: + log.info(result.stdout) + + +def extract_lvm_partition(probe_data): + lv_id = "%s/%s" % (probe_data['DM_VG_NAME'], probe_data['DM_LV_NAME']) + return ( + lv_id, {'fullname': lv_id, + 'name': probe_data['DM_LV_NAME'], + 'volgroup': probe_data['DM_VG_NAME'], + 'size': "%sB" % read_sys_block_size(probe_data['DEVNAME'])}) + + +def extract_lvm_volgroup(vg_name, report_data): + """ + [ + {"vg_name":"vg0", "pv_name":"/dev/md0", + "pv_uuid":"p3oDow-dRHp-L8jq-t6gQ-67tv-B8B6-JWLKZP", + "vg_size":"21449670656B"}, + {"vg_name":"vg0", "pv_name":"/dev/md1", + "pv_uuid":"pRR5Zn-c4a9-teVZ-TFaU-yDxf-FSDo-cORcEq", + "vg_size":"21449670656B"} + ] + """ + def _int(size_val): + if size_val and size_val.endswith('B'): + return int(size_val[:-1]) + return 0 + + devices = set() + size = None + for report in report_data: + if report['vg_name'] == vg_name: + vg_size = report['vg_size'] + # set size to the largest size we find + if vg_size: + # unset, take current value + if not size: + size = vg_size + # on set but mismatched values, keep the larger + elif size != vg_size: + if _int(vg_size) > _int(size): + size = vg_size + devices.add(report.get('pv_name')) + + if size is None: + size = '0B' + + return (vg_name, 
{'name': vg_name, + 'devices': list(devices), + 'size': size}) + + +def probe(context=None, report=False): + """ Probing for LVM devices requires initiating a kernel level scan + of block devices to look for physical volumes, volume groups and + logical volumes. Once detected, the prober will activate any + volume groups detected. + + The prober will refresh the udev context which brings in addition + information relating to LVM devices. + + This prober relies on udev detecting devices via the 'DM_UUID' + field and for each of such devices, the prober records the + logical volume. + + For each logical volume, the prober determines the hosting + volume_group and records detailed information about the group + including members. The process is repeated to determine the + underlying physical volumes that are used to construct a + volume group. + + Care is taken to handle scenarios where physical volumes are + not yet allocated to a volume group (such as a linear VG). + + On newer systems (Disco+) the lvm2 software stack provides + a rich reporting data dump in JSON format. On systems with + older LVM2 stacks, the LVM probe may be incomplete. 
+ """ + # scan and activate lvm vgs/lvs + lvm_scan() + activate_volgroups() + + # ignore supplied context, we need to read udev after scan/vgchange + context = pyudev.Context() + + lvols = {} + vgroups = {} + pvols = {} + vg_report = probe_vgs_report() + + for device in context.list_devices(subsystem='block'): + if 'DM_UUID' in device and device['DM_UUID'].startswith('LVM'): + (lv_id, new_lv) = extract_lvm_partition(device) + if lv_id not in lvols: + lvols[lv_id] = new_lv + else: + log.error('Found duplicate logical volume: %s', lv_id) + continue + + vg_name = device['DM_VG_NAME'] + (vg_id, new_vg) = extract_lvm_volgroup(vg_name, vg_report) + if vg_id not in vgroups: + vgroups[vg_id] = new_vg + else: + log.error('Found duplicate volume group: %s', vg_id) + continue + + if vg_id not in pvols: + pvols[vg_id] = new_vg['devices'] + + lvm = {} + if lvols: + lvm.update({'logical_volumes': lvols}) + if pvols: + lvm.update({'physical_volumes': pvols}) + if vgroups: + lvm.update({'volume_groups': vgroups}) + + return lvm diff -Nru probert-0.0.14.2build1/probert/mount.py probert-0.0.15/probert/mount.py --- probert-0.0.14.2build1/probert/mount.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/mount.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,48 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import json +import logging +import subprocess + +log = logging.getLogger('probert.mount') + + +def findmnt(data=None): + if not data: + cmd = ['findmnt', '--bytes', '--json'] + try: + result = subprocess.run(cmd, stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + except (subprocess.CalledProcessError, FileNotFoundError): + return {} + + data = result.stdout.decode('utf-8') + + mounts = {} + try: + mounts = json.loads(data) + except json.decoder.JSONDecodeError as e: + log.error('Failed to load findmnt json output:', e) + + return mounts + + +def probe(context=None): + """The probert uses the util-linux 'findmnt' command which + dumps a JSON tree of detailed information about _all_ + mounts in the current linux system. + """ + return findmnt().get('filesystems', {}) diff -Nru probert-0.0.14.2build1/probert/multipath.py probert-0.0.15/probert/multipath.py --- probert-0.0.14.2build1/probert/multipath.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/multipath.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,86 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from collections import namedtuple +import logging +import subprocess + +MPath = namedtuple("MPath", ('device', 'serial', 'multipath', 'host_wwnn', + 'target_wwnn', 'host_wwpn', 'target_wwpn', + 'host_adapter')) +MMap = namedtuple("MMap", ('multipath', 'sysfs', 'paths')) +MPATH_SHOW = { + 'paths': MPath, + 'maps': MMap, +} + +log = logging.getLogger('probert.multipath') + + +def _extract_mpath_data(cmd, show_verb): + try: + result = subprocess.run(cmd, stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + except (subprocess.CalledProcessError, FileNotFoundError): + return [] + + mptype = MPATH_SHOW[show_verb] + data = result.stdout.decode('utf-8') + result = [] + for line in data.splitlines(): + mp_dict = None + try: + mp_dict = mptype(*line.split())._asdict() + except TypeError as e: + log.debug( + 'Failed to parse multipath %s entry: %s: %s' % (show_verb, + line, e)) + if mp_dict: + result.append(mp_dict) + + return result + + +def multipath_show_paths(): + path_format = "%d %z %m %N %n %R %r %a" + cmd = ['multipathd', 'show', 'paths', 'raw', 'format', path_format] + return _extract_mpath_data(cmd, 'paths') + + +def multipath_show_maps(): + maps_format = "%w %d %N" + cmd = ['multipathd', 'show', 'maps', 'raw', 'format', maps_format] + return _extract_mpath_data(cmd, 'maps') + + +def probe(context=None): + """Query the multipath daemon for multipath maps and paths. + + This data is useful for determining whether a specific block + device is part of a multipath and if so which device-mapper (dm) + blockdevice should be used. + + This probe requires multipath module to be loaded and the multipath + daemon to be running. 
+ """ + results = {} + maps = multipath_show_maps() + if maps: + results.update({'maps': maps}) + paths = multipath_show_paths() + if paths: + results.update({'paths': paths}) + + return results diff -Nru probert-0.0.14.2build1/probert/network.py probert-0.0.15/probert/network.py --- probert-0.0.14.2build1/probert/network.py 2017-02-16 20:05:06.000000000 -0500 +++ probert-0.0.15/probert/network.py 2019-03-27 15:48:07.000000000 -0400 @@ -13,9 +13,14 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +import abc +from collections import OrderedDict +import contextlib import ipaddress +import jsonschema import logging import os +import socket import pyudev @@ -43,28 +48,171 @@ IFA_F_PERMANENT = 0x80 -def _compute_type(iface): +BOND_MODES = [ + "balance-rr", + "active-backup", + "balance-xor", + "broadcast", + "802.3ad", + "balance-tlb", + "balance-alb", +] + +XMIT_HASH_POLICIES = [ + "layer2", + "layer2+3", + "layer3+4", + "encap2+3", + "encap3+4", +] + +LACP_RATES = [ + "slow", + "fast", +] + +# This json schema describes the links as they are serialized onto +# disk by probert --network. It also describes the format of some of +# the attributes of Link instances. 
+link_schema = { + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "link", + "type": "object", + "additionalProperties": False, + "required": ["addresses", "bond", "bridge", "netlink_data", "type", + "udev_data"], + "properties": { + "addresses": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "properties": { + "address": {"type": "string"}, + "ip": {"type": "string"}, + "family": {"type": "integer"}, + "source": {"type": "string"}, + "scope": {"type": "string"}, + }, + }, + }, + "type": { + "type": "string", + # "enum": ["eth", "wlan", "bridge", "vlan"], # there are more + }, + "bond": { + "type": "object", + "additionalProperties": False, + "properties": { + "is_master": {"type": "boolean"}, + "is_slave": {"type": "boolean"}, + "master": { + "oneOf": [ + {"type": "string"}, + {"type": "null"}, + ], + }, + "slaves": { + "type": "array", + "items": {"type": "string"}, + }, + "mode": { + "oneOf": [ + {"type": "string", "enum": BOND_MODES}, + {"type": "null"}, + ], + }, + "xmit_hash_policy": { + "oneOf": [ + {"type": "string", "enum": XMIT_HASH_POLICIES}, + {"type": "null"}, + ], + }, + "lacp_rate": { + "oneOf": [ + {"type": "string", "enum": LACP_RATES}, + {"type": "null"}, + ], + }, + }, + }, + "udev_data": { + "type": "object", + "properties": { + "attrs": { + "type": "object", + "additionalProperties": { + "oneOf": [ + {"type": "string"}, + {"type": "null"}, + ], + }, + }, + }, + "additionalProperties": { + "oneOf": [ + {"type": "string"}, + {"type": "null"}, + ], + }, + }, + "netlink_data": { + "type": "object", + "properties": { + "ifindex": {"type": "integer"}, + "flags": {"type": "integer"}, + "arptype": {"type": "integer"}, + "family": {"type": "integer"}, + "name": {"type": "string"}, + }, + }, + "bridge": { + "type": "object", + "additionalProperties": False, + "properties": { + "is_bridge": {"type": "boolean"}, + "is_port": {"type": "boolean"}, + "interfaces": {"type": "array", "items": {"type": 
"string"}}, + "options": { # /sys/class/net/brX/bridge/ + "type": "object", + "additionalProperties": {"type": "string"}, + }, + }, + }, + "wlan": { + "type": "object", + "additionalProperties": False, + "properties": { + "ssid": {"type": ["null", "string"]}, + "visible_ssids": { + "type": "array", + "items": {"type": "string"}, + }, + "scan_state": {"type": ["null", "string"]}, + }, + }, + }, +} + + +def _compute_type(iface, arptype): if not iface: return '???' sysfs_path = os.path.join('/sys/class/net', iface) if not os.path.exists(sysfs_path): - print('No sysfs path to {}'.format(sysfs_path)) + log.debug('No sysfs path to {}'.format(sysfs_path)) return None DEV_TYPE = '???' - with open(os.path.join(sysfs_path, 'type')) as t: - type_value = t.read().split('\n')[0] - if type_value == '1': + if arptype == 1: DEV_TYPE = 'eth' if os.path.isdir(os.path.join(sysfs_path, 'wireless')) or \ os.path.islink(os.path.join(sysfs_path, 'phy80211')): DEV_TYPE = 'wlan' elif os.path.isdir(os.path.join(sysfs_path, 'bridge')): DEV_TYPE = 'bridge' - elif os.path.isfile(os.path.join('/proc/net/vlan', iface)): - DEV_TYPE = 'vlan' elif os.path.isdir(os.path.join(sysfs_path, 'bonding')): DEV_TYPE = 'bond' elif os.path.isfile(os.path.join(sysfs_path, 'tun_flags')): @@ -73,32 +221,32 @@ os.path.join('/sys/devices/virtual/net', iface)): if iface.startswith('dummy'): DEV_TYPE = 'dummy' - elif type_value == '24': # firewire ;; IEEE 1394 - RFC 2734 + elif arptype == 24: # firewire ;; IEEE 1394 - RFC 2734 DEV_TYPE = 'eth' - elif type_value == '32': # InfiniBand + elif arptype == 32: # InfiniBand if os.path.isdir(os.path.join(sysfs_path, 'bonding')): DEV_TYPE = 'bond' elif os.path.isdir(os.path.join(sysfs_path, 'create_child')): DEV_TYPE = 'ib' else: DEV_TYPE = 'ibchild' - elif type_value == '512': + elif arptype == 512: DEV_TYPE = 'ppp' - elif type_value == '768': + elif arptype == 768: DEV_TYPE = 'ipip' # IPIP tunnel - elif type_value == '769': + elif arptype == 769: DEV_TYPE = 'ip6tnl' # 
IP6IP6 tunnel - elif type_value == '772': + elif arptype == 772: DEV_TYPE = 'lo' - elif type_value == '776': + elif arptype == 776: DEV_TYPE = 'sit' # sit0 device - IPv6-in-IPv4 - elif type_value == '778': + elif arptype == 778: DEV_TYPE = 'gre' # GRE over IP - elif type_value == '783': + elif arptype == 783: DEV_TYPE = 'irda' # Linux-IrDA - elif type_value == '801': + elif arptype == 801: DEV_TYPE = 'wlan_aux' - elif type_value == '65534': + elif arptype == 65534: DEV_TYPE = 'tun' if iface.startswith('ippp') or iface.startswith('isdn'): @@ -113,291 +261,439 @@ return DEV_TYPE -_scope_str = { - 0: 'global', - 200: "site", - 253: "link", - 254: "host", - 255: "nowhere", -} +def _get_bonding(ifname, flags): + def _iface_is_master(): + return bool(flags & IFF_MASTER) != 0 -class Address: + def _iface_is_slave(): + return bool(flags & IFF_SLAVE) != 0 - def __init__(self, netlink_data): - self.address = ipaddress.ip_interface(netlink_data['local'].decode('latin-1')) - self.ip = self.address.ip - self.family = netlink_data['family'] - if netlink_data.get('flags', 0) & IFA_F_PERMANENT: - self.source = 'static' - else: - self.source = 'dhcp' - scope = netlink_data['scope'] - self.scope = str(_scope_str.get(scope)) + def _get_slave_iface_list(): + try: + if _iface_is_master(): + bond = open('/sys/class/net/%s/bonding/slaves' % ifname).read() + return bond.split() + else: + return [] + except IOError: + return [] + def _get_bond_master(): + try: + if _iface_is_slave(): + master = os.readlink('/sys/class/net/%s/master' % ifname) + return os.path.basename(master) + else: + return None + except IOError: + return None -class NetworkInfo: - def __init__(self, netlink_data, udev_data): - self.update_from_netlink_data(netlink_data) - self.udev_data = udev_data + def _get_bond_param(param): + try: + if _iface_is_master(): + bond_param = '/sys/class/net/%s/bonding/%s' % (ifname, param) + with open(bond_param) as bp: + bond_param = bp.read().split() + return bond_param[0] if 
bond_param else None + except IOError: + return None - self.hwaddr = self.udev_data['attrs']['address'] + return { + 'is_master': _iface_is_master(), + 'is_slave': _iface_is_slave(), + 'master': _get_bond_master(), + 'slaves': _get_slave_iface_list(), + 'mode': _get_bond_param('mode'), + 'xmit_hash_policy': _get_bond_param('xmit_hash_policy'), + 'lacp_rate': _get_bond_param('lacp_rate'), + } - self.type = _compute_type(self.name) - self.addresses = {} - self.bond = self._get_bonding() - self.bridge = self._get_bridging() +def _get_bridging(ifname): - # Wifi only things (set from UdevObserver.wlan_event) - self.ssid = None - self.ssids = [] - self.scan_state = None + def _iface_is_bridge(): + bridge_path = os.path.join('/sys/class/net', ifname, 'bridge') + return os.path.exists(bridge_path) - def update_from_netlink_data(self, netlink_data): - self.netlink_data = netlink_data - self.name = self.netlink_data.get('name', '').decode('utf-8', 'replace') - self.flags = self.netlink_data['flags'] - self.ifindex = self.netlink_data['ifindex'] - # This is the logic ip from iproute2 uses to determine whether - # to show NO-CARRIER or not. It only really makes sense for a - # wired connection. 
- self.is_connected = (not (self.flags & IFF_UP)) or (self.flags & IFF_RUNNING) + def _iface_is_bridge_port(): + bridge_port = os.path.join('/sys/class/net', ifname, 'brport') + return os.path.exists(bridge_port) + def _get_bridge_iface_list(): + if _iface_is_bridge(): + bridge_path = os.path.join('/sys/class/net', ifname, 'brif') + return os.listdir(bridge_path) + return [] - def _get_hwvalues(self, keys, missing='Unknown value'): - for key in keys: - try: - return self.udev_data[key] - except KeyError: - pass + def _get_bridge_options(): + skip_attrs = set(['flush', 'bridge']) # needs root access, not useful + + if _iface_is_bridge(): + bridge_path = os.path.join('/sys/class/net', ifname, 'bridge') + elif _iface_is_bridge_port(): + bridge_path = os.path.join('/sys/class/net', ifname, 'brport') + else: + return {} + options = {} + for bridge_attr_name in os.listdir(bridge_path): + if bridge_attr_name in skip_attrs: + continue + bridge_attr_file = os.path.join(bridge_path, bridge_attr_name) + with open(bridge_attr_file) as bridge_attr: + options[bridge_attr_name] = bridge_attr.read().strip() + + return options + + return { + 'is_bridge': _iface_is_bridge(), + 'is_port': _iface_is_bridge_port(), + 'interfaces': _get_bridge_iface_list(), + 'options': _get_bridge_options(), + } + + +def netlink_attr(attr): + def get(obj): + return obj.netlink_data[attr] + return property(get) + + +def udev_attr(keys, missing): + def get(obj): + for k in keys: + if k in obj.udev_data: + return obj.udev_data[k] return missing + return property(get) - @property - def vendor(self): - keys = [ - 'ID_VENDOR_FROM_DATABASE', - 'ID_VENDOR', - 'ID_VENDOR_ID' - ] - return self._get_hwvalues(keys=keys, missing='Unknown Vendor') - @property - def model(self): - keys = [ - 'ID_MODEL_FROM_DATABASE', - 'ID_MODEL', - 'ID_MODEL_ID' - ] - return self._get_hwvalues(keys=keys, missing='Unknown Model') +class Link: - @property - def driver(self): - keys = [ - 'ID_NET_DRIVER', - 'ID_USB_DRIVER', - ] - 
return self._get_hwvalues(keys=keys, missing='Unknown Driver') + @classmethod + def from_probe_data(cls, netlink_data, udev_data): + # This is a bit of a hack, but sometimes the interface has + # already been renamed by udev by the time we get here, so we + # can't use netlink_data['name'] to go poking about in + # /sys/class/net. + name = socket.if_indextoname(netlink_data['ifindex']) + if netlink_data['is_vlan']: + typ = 'vlan' + else: + typ = _compute_type(name, netlink_data['arptype']) + return cls( + addresses={}, + type=typ, + udev_data=udev_data, + netlink_data=netlink_data, + bond=_get_bonding(name, netlink_data['flags']), + bridge=_get_bridging(name)) + + @classmethod + def from_saved_data(cls, link_data): + address_objs = {} + for addr in link_data['addresses']: + a = Address.from_saved_data(addr) + address_objs[str(a.ip)] = a + link_data['addresses'] = address_objs + return cls(**link_data) + + def __init__(self, addresses, type, udev_data, netlink_data, bond, + bridge, wlan=None): + self.addresses = addresses + self.type = type + self.udev_data = udev_data + self.netlink_data = netlink_data + self.bond = bond + self.bridge = bridge + self.wlan = wlan + + def mark_as_wlan(self): + if self.wlan is None: + self.wlan = { + 'visible_ssids': [], + 'ssid': None, + 'scan_state': None, + } - @property - def devpath(self): - keys = ['DEVPATH'] - return self._get_hwvalues(keys=keys, missing='Unknown devpath') + def serialize(self): + r = { + "addresses": [a.serialize() for a in self.addresses.values()], + "udev_data": self.udev_data, + "type": self.type, + "netlink_data": self.netlink_data, + "bond": self.bond, + "bridge": self.bridge, + } + if self.wlan is not None: + r["wlan"] = self.wlan + jsonschema.validate(r, link_schema) + return r + + flags = netlink_attr("flags") + ifindex = netlink_attr("ifindex") + name = netlink_attr("name") + hwaddr = property(lambda self: self.udev_data['attrs']['address']) + + vendor = udev_attr(['ID_VENDOR_FROM_DATABASE', 
'ID_VENDOR', + 'ID_VENDOR_ID'], "Unknown Vendor") + model = udev_attr(['ID_MODEL_FROM_DATABASE', 'ID_MODEL', 'ID_MODEL_ID'], + "Unknown Model") + driver = udev_attr(['ID_NET_DRIVER', 'ID_USB_DRIVER'], "Unknown Driver") + devpath = udev_attr(['DEVPATH'], "Unknown devpath") + + hwaddr = property(lambda self: self.udev_data['attrs']['address']) + + # This is the logic ip from iproute2 uses to determine whether + # to show NO-CARRIER or not. It only really makes sense for a + # wired connection. + is_connected = (property(lambda self: ((not (self.flags & IFF_UP)) or + (self.flags & IFF_RUNNING)))) + is_virtual = ( + property(lambda self: self.devpath.startswith('/devices/virtual/'))) @property - def is_virtual(self): - return self.devpath.startswith('/devices/virtual/') + def ssid(self): + if self.wlan: + return self.wlan['ssid'] + else: + return None - def _iface_is_master(self): - return bool(self.flags & IFF_MASTER) != 0 - def _iface_is_slave(self): - return bool(self.flags & IFF_SLAVE) != 0 +_scope_str = { + 0: 'global', + 200: "site", + 253: "link", + 254: "host", + 255: "nowhere", +} - def _get_slave_iface_list(self): - try: - if self._iface_is_master(): - bond = open('/sys/class/net/%s/bonding/slaves' % self.name).read() - return bond.split() - except IOError: - return [] - def _get_bond_mode(self, ): - try: - if self._iface_is_master(): - bond_mode = \ - open('/sys/class/net/%s/bonding/mode' % self.name).read() - return bond_mode.split() - except IOError: - return None +class Address: - def _get_bonding(self): - ''' return bond structure for iface - 'bond': { - 'is_master': [True|False] - 'is_slave': [True|False] - 'slaves': [] - 'mode': in BONDING_MODES.keys() or BONDING_MODES.values() - } - ''' - is_master = self._iface_is_master() - is_slave = self._iface_is_slave() - slaves = self._get_slave_iface_list() - mode = self._get_bond_mode() - if mode: - mode_name = mode[0] - else: - mode_name = None - bond = { - 'is_master': is_master, - 'is_slave': is_slave, - 
'slaves': slaves, - 'mode': mode_name + def __init__(self, address, family, source, scope): + self.address = ipaddress.ip_interface(address) + self.ip = self.address.ip + self.family = family + self.source = source + self.scope = scope + + def serialize(self): + return { + 'source': self.source, + 'family': self.family, + 'address': str(self.address), + 'scope': self.scope, } - return bond - def _iface_is_bridge(self, ): - bridge_path = os.path.join('/sys/class/net', self.name, 'bridge') - return os.path.exists(bridge_path) + @classmethod + def from_probe_data(cls, netlink_data): + address = netlink_data['local'].decode('latin-1') + family = netlink_data['family'] + if netlink_data.get('flags', 0) & IFA_F_PERMANENT: + source = 'static' + else: + source = 'dhcp' + scope = netlink_data['scope'] + scope = str(_scope_str.get(scope, scope)) + return cls(address, family, source, scope) - def _iface_is_bridge_port(self): - bridge_port = os.path.join('/sys/class/net', self.name, 'brport') - return os.path.exists(bridge_port) + @classmethod + def from_saved_data(cls, link_data): + return Address(**link_data) - def _get_bridge_iface_list(self): - if self._iface_is_bridge(): - bridge_path = os.path.join('/sys/class/net', self.name, 'brif') - return os.listdir(bridge_path) - return [] +class NetworkObserver(abc.ABC): + """A NetworkObserver observes the network state. - def _get_bridge_options(self): - invalid_attrs = ['flush', 'bridge'] # needs root access, not useful + It calls methods on a NetworkEventReceiver in response to changes. 
+ """ - options = {} - if self._iface_is_bridge(): - bridge_path = os.path.join('/sys/class/net', self.name, 'bridge') - elif self._iface_is_bridge_port(): - bridge_path = os.path.join('/sys/class/net', self.name, 'brport') - else: - return options + @abc.abstractmethod + def start(self): + pass - for bridge_attr_name in [attr for attr in os.listdir(bridge_path) - if attr not in invalid_attrs]: - bridge_attr_file = os.path.join(bridge_path, bridge_attr_name) - with open(bridge_attr_file) as bridge_attr: - options[bridge_attr_name] = bridge_attr.read().strip() + @abc.abstractmethod + def data_ready(self, fd): + pass - return options - def _get_bridging(self): - ''' return bridge structure for iface - 'bridge': { - 'is_bridge': [True|False], - 'is_port': [True|False], - 'interfaces': [], - 'options': { # /sys/class/net/brX/bridge/ - 'sysfs_key': sysfs_value - }, - } - ''' - is_bridge = self._iface_is_bridge() - is_port = self._iface_is_bridge_port() - interfaces = self._get_bridge_iface_list() - options = self._get_bridge_options() - bridge = { - 'is_bridge': is_bridge, - 'is_port': is_port, - 'interfaces': interfaces, - 'options': options, - } - return bridge +class NetworkEventReceiver(abc.ABC): + """NetworkEventReceiver has methods called on it in response to network + changes.""" + + @abc.abstractmethod + def new_link(self, ifindex, link): + pass + + @abc.abstractmethod + def update_link(self, ifindex): + pass + @abc.abstractmethod + def del_link(self, ifindex): + pass + + @abc.abstractmethod + def route_change(self, action, data): + pass -class Network: - def __init__(self): +class TrivialEventReceiver(NetworkEventReceiver): + + def new_link(self, ifindex, link): pass - def probe(self): - results = {} - observer = UdevObserver() - observer.start() - for l in observer.links.values(): - results[l.name] = { - 'udev_data' : l.udev_data, - 'hwaddr' : l.hwaddr, - 'type' : l.type, - 'ip' : l.ip, - 'ip_sources' : l.ip_sources, - } - if l.type == 'wlan': - 
results[l.name]['ssid'] = l.ssid.decode("utf-8") - results[l.name]['ssids'] = l.ssids - results[l.name]['scan_state'] = l.scan_state - if l.type == 'bridge': - results[l.name]['bridge'] = l.bridge - if l.type == 'bond': - results[l.name]['bond'] = l.bond + def update_link(self, ifindex): + pass - print(results) - return results + def del_link(self, ifindex): + pass + + def route_change(self, action, data): + pass + +# Coalescing netlink events +# +# If the client of this library delays calling UdevObserver.data_ready +# until the udev queue is idle (which is a good idea, but cannot be +# implemented here because delaying inherently depends on the event +# loop the client is using), several netlink events might be seen for +# any interface -- the poster child for this being when an interface +# is renamed by a udev rule. The netlink data that comes with the NEW +# event in this case can be out of date by the time the event is +# processed, so what the @coalesce generator does is to collapse a +# series of calls for one object into one, e.g. NEW + CHANGE becomes +# NEW but with the data from the change event, NEW + DEL is dropped +# entirely, etc. @nocoalesce doesn't combine any events but makes sure +# that those events are not processed wildly out of order with the +# events that are coalesced. + + +def coalesce(*keys): + # "keys" defines which events are coalesced, ifindex is enough for + # link events but ifindex + address is needed for address events. 
+ def decorator(func): + def w(self, action, data): + log.debug('event for %s: %s %s', func.__name__, action, data) + key = (func.__name__,) + for k in keys: + key += (data[k],) + if key in self._calls: + prev_meth, prev_action, prev_data = self._calls[key] + if action == 'NEW': + # this clearly shouldn't happen, but take the new data + # just in case + self._calls[key] = (func, action, data) + elif action == 'CHANGE': + # If the object appeared and then changed before we + # looked at it all, just pretend it was a NEW object + # with the changed data. (the other cases for + # prev_action work out ok, although DEL followed by + # CHANGE is obviously not something we expect) + self._calls[key] = (func, prev_action, data) + elif action == 'DEL': + if prev_action == 'NEW': + # link disappeared before we did anything with it. + # forget about it. + del self._calls[key] + else: + # Otherwise just pass on the DEL and forget the + # previous action whatever it was. + self._calls[key] = (func, action, data) + else: + self._calls[key] = (func, action, data) + return w + return decorator + + +def nocoalesce(func): + def w(self, action, data): + self._calls[object()] = (func, action, data) + return w + + +@contextlib.contextmanager +def CoalescedCalls(obj): + obj._calls = OrderedDict() + try: + yield + finally: + for meth, action, data in obj._calls.values(): + meth(obj, action, data) + obj._calls = None -class UdevObserver: +class UdevObserver(NetworkObserver): + """Use udev/netlink to observe network changes.""" - def __init__(self): - self.links = {} + def __init__(self, receiver=None): + self._links = {} self.context = pyudev.Context() + if receiver is None: + receiver = TrivialEventReceiver() + assert isinstance(receiver, NetworkEventReceiver) + self.receiver = receiver + self._calls = None def start(self): self.rtlistener = _rtnetlink.listener(self) - self.rtlistener.start() + with CoalescedCalls(self): + self.rtlistener.start() - self._fdmap = { + self._fdmap = { 
self.rtlistener.fileno(): self.rtlistener.data_ready, - } + } try: self.wlan_listener = _nl80211.listener(self) self.wlan_listener.start() self._fdmap.update({ self.wlan_listener.fileno(): self.wlan_listener.data_ready, - }) + }) except RuntimeError: log.debug('could not start wlan_listener') return list(self._fdmap) def data_ready(self, fd): - self._fdmap[fd]() + with CoalescedCalls(self): + self._fdmap[fd]() + @coalesce('ifindex') def link_change(self, action, data): log.debug('link_change %s %s', action, data) for k, v in data.items(): - if isinstance(data, bytes): - data[k] = data.decode('utf-8', 'replace') + if isinstance(v, bytes): + data[k] = v.decode('utf-8', 'replace') ifindex = data['ifindex'] if action == 'DEL': - if ifindex in self.links: - del self.links[ifindex] - self.del_link(ifindex) + if ifindex in self._links: + del self._links[ifindex] + self.receiver.del_link(ifindex) return if action == 'CHANGE': - if ifindex in self.links: - dev = self.links[ifindex] + if ifindex in self._links: + dev = self._links[ifindex] # Trigger a scan when a wlan device goes up # Not sure if this is required as devices seem to scan as soon # as they go up? (in which case this fails with EBUSY, so it's # just spam in the logs). - if dev.type == 'wlan' and (not (dev.flags & IFF_UP)) and (data['flags'] & IFF_UP): - try: - self.wlan_listener.trigger_scan(ifindex) - except RuntimeError: - log.exception('on-up trigger_scan failed') - dev.update_from_netlink_data(data) - self.update_link(ifindex) + if dev.type == 'wlan': + if (not (dev.flags & IFF_UP)) and (data['flags'] & IFF_UP): + try: + self.trigger_scan(ifindex) + except RuntimeError: + log.exception('on-up trigger_scan failed') + dev.netlink_data = data + # If a device appears and is immediately renamed, the + # initial _compute_type can fail to find the sysfs + # directory. Have another go now. 
+ if dev.type is None: + dev.type = _compute_type(dev.name) + dev.bond = _get_bonding(dev.name, dev.netlink_data['flags']) + self.receiver.update_link(ifindex) return udev_devices = list(self.context.list_devices(IFINDEX=str(ifindex))) if len(udev_devices) == 0: @@ -406,67 +702,125 @@ udev_device = udev_devices[0] udev_data = dict(udev_device) udev_data['attrs'] = udev_get_attributes(udev_device) - link = NetworkInfo(data, udev_data) - self.links[data['ifindex']] = link - self.new_link(ifindex, link) + link = Link.from_probe_data(data, udev_data) + self._links[ifindex] = link + self.receiver.new_link(ifindex, link) + @coalesce('ifindex', 'local') def addr_change(self, action, data): log.debug('addr_change %s %s', action, data) - link = self.links.get(data['ifindex']) + link = self._links.get(data['ifindex']) if link is None: return ip = data['local'].decode('latin-1') if action == 'DEL': link.addresses.pop(ip, None) + self.receiver.update_link(data['ifindex']) return - link.addresses[ip] = Address(data) + link.addresses[ip] = Address.from_probe_data(data) + self.receiver.update_link(data['ifindex']) + @nocoalesce def route_change(self, action, data): log.debug('route_change %s %s', action, data) + for k, v in data.items(): + if isinstance(v, bytes): + data[k] = v.decode('utf-8', 'replace') + self.receiver.route_change(action, data) + + def trigger_scan(self, ifindex): + self.wlan_listener.trigger_scan(ifindex) def wlan_event(self, arg): log.debug('wlan_event %s', arg) ifindex = arg['ifindex'] - if ifindex < 0 or ifindex not in self.links: + if ifindex < 0 or ifindex not in self._links: return - link = self.links[ifindex] + link = self._links[ifindex] + link.mark_as_wlan() if arg['cmd'] == 'TRIGGER_SCAN': - link.scan_state = 'scanning' + link.wlan['scan_state'] = 'scanning' if arg['cmd'] == 'NEW_SCAN_RESULTS' and 'ssids' in arg: ssids = set() for (ssid, status) in arg['ssids']: + ssid = ssid.decode('utf-8', 'replace') ssids.add(ssid) if status != "no status": - 
link.ssid = ssid - link.ssids = sorted(ssids) - link.scan_state = None - if arg['cmd'] == 'NEW_INTERFACE' or arg['cmd'] == 'ASSOCIATE': - if len(arg.get('ssids', [])) > 0: - link.ssid = arg['ssids'][0][0] + link.wlan['ssid'] = ssid + link.wlan['visible_ssids'] = sorted(ssids) + link.wlan['scan_state'] = None if arg['cmd'] == 'NEW_INTERFACE': if link.flags & IFF_UP: try: - self.wlan_listener.trigger_scan(ifindex) - except RuntimeError: # Can't trigger a scan as non-root, that's OK. + self.trigger_scan(ifindex) + except RuntimeError: + # Can't trigger a scan as non-root, that's OK. log.exception('initial trigger_scan failed') else: try: self.rtlistener.set_link_flags(ifindex, IFF_UP) except RuntimeError: log.exception('set_link_flags failed') + if arg['cmd'] == 'NEW_INTERFACE' or arg['cmd'] == 'ASSOCIATE': + if len(arg.get('ssids', [])) > 0: + link.wlan['ssid'] = ( + arg['ssids'][0][0].decode('utf-8', 'replace')) if arg['cmd'] == 'DISCONNECT': - link.ssid = None + link.wlan['ssid'] = None - def new_link(self, ifindex, link): - pass - def update_link(self, ifindex): +class StoredDataObserver: + """A cheaty observer that just pretends the network is in some + pre-arranged state.""" + + def __init__(self, saved_data, receiver): + self.saved_data = saved_data + for data in self.saved_data['links']: + jsonschema.validate(data, link_schema) + self.receiver = receiver + + def start(self): + for data in self.saved_data['links']: + link = Link.from_saved_data(data) + self.receiver.new_link(link.ifindex, link) + for data in self.saved_data['routes']: + self.receiver.route_change("NEW", data) + return [] + + def trigger_scan(self, ifindex): pass - def del_link(self, ifindex): + def data_ready(self, fd): pass +class NetworkProber: + + def probe(self): + class CollectingReceiver(TrivialEventReceiver): + def __init__(self): + self.all_links = set() + self.route_data = [] + + def new_link(self, ifindex, link): + self.all_links.add(link) + + def route_change(self, action, data): + 
self.route_data.append(data) + collector = CollectingReceiver() + observer = UdevObserver(collector) + observer.start() + results = { + 'links': [], + 'routes': [], + } + for link in collector.all_links: + results['links'].append(link.serialize()) + for route_data in collector.route_data: + results['routes'].append(route_data) + return results + + if __name__ == '__main__': import pprint import select diff -Nru probert-0.0.14.2build1/probert/prober.py probert-0.0.15/probert/prober.py --- probert-0.0.14.2build1/probert/prober.py 2016-12-21 16:06:10.000000000 -0500 +++ probert-0.0.15/probert/prober.py 2019-04-03 14:20:21.000000000 -0400 @@ -13,46 +13,24 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from probert.storage import Storage -from probert.network import Network - class Prober(): - def __init__(self, options, results={}): - self.options = options - self.results = results - - ''' build a list of probe_ methods of this class, - excluding probe_all so we don't recurse. - This allows probe_all method to call all probe_ - methods as we add it without maintaining a list - in the code. 
- ''' - exclude = ['probe_all'] - self.probes = [getattr(self, fn) for fn in - filter(lambda x: callable(getattr(self, x)) and - x.startswith('probe_') and - x not in exclude, dir(self))] - - def probe(self): - # find out what methods to call by looking options - for fn in [x for x in dir(self.options) - if self.options.__getattribute__(x) is True]: - getattr(self, fn)() + def __init__(self): + self._results = {} def probe_all(self): - for fn in self.probes: - fn() + self.probe_storage() + self.probe_network() def probe_storage(self): - storage = Storage() - results = storage.probe() - self.results['storage'] = results + from probert.storage import Storage + self._storage = Storage() + self._results['storage'] = self._storage.probe() def probe_network(self): - network = Network() - results = network.probe() - self.results['network'] = results + from probert.network import NetworkProber + self._network = NetworkProber() + self._results['network'] = self._network.probe() def get_results(self): - return self.results + return self._results diff -Nru probert-0.0.14.2build1/probert/raid.py probert-0.0.15/probert/raid.py --- probert-0.0.14.2build1/probert/raid.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/raid.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,126 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import logging +import pyudev +import subprocess + +from probert.utils import (read_sys_block_size, + udev_get_attributes) + +log = logging.getLogger('probert.raid') + +SUPPORTED_RAID_TYPES = ['raid0', 'raid1', 'raid5', 'raid6', 'raid10'] + + +def mdadm_assemble(scan=True, ignore_errors=True): + cmd = ['mdadm', '--detail', '--scan', '-v'] + try: + subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + except subprocess.CalledProcessError as e: + log.error('Failed mdadm_assemble command %s: %s', cmd, e) + except FileNotFoundError as e: + log.error('Failed mdadm_assemble, mdadm command not found: %s', e) + + return + + +def get_mdadm_array_spares(md_device, detail): + + def role_key_to_dev(rolekey): + # MD_DEVICE_dev_dm_5_ROLE=spare -> MD_DEVICE_dev_dm_5_DEV + devname_mangled = rolekey.split('MD_DEVICE_')[1].split('_ROLE')[0] + return 'MD_DEVICE_%s_DEV' % devname_mangled + + def keymatch(key, data, role): + prefix = key.startswith('MD_DEVICE_') + suffix = key.endswith('_ROLE') + matches = data.get(key) == role + return (prefix and suffix and matches) + + def get_dev_from_key(key, data): + return data.get(role_key_to_dev(key)) + + return [get_dev_from_key(key, detail) for key in detail.keys() + if keymatch(key, detail, 'spare')] + + +def get_mdadm_array_members(md_device, detail): + ''' extract array devices and spares from mdadm --detail --export output + + MD_LEVEL=raid5 + MD_DEVICES=3 + MD_METADATA=1.2 + MD_UUID=7fe1895e:34dcb6dc:d1bcbb9c:f3e05134 + MD_NAME=s1lp6:raid5-2406-2407-2408-2409 + MD_DEVICE_ev_dm_5_ROLE=spare + MD_DEVICE_ev_dm_5_DEV=/dev/dm-5 + MD_DEVICE_ev_dm_3_ROLE=1 + MD_DEVICE_ev_dm_3_DEV=/dev/dm-3 + MD_DEVICE_ev_dm_4_ROLE=2 + MD_DEVICE_ev_dm_4_DEV=/dev/dm-4 + MD_DEVICE_ev_dm_2_ROLE=0 + MD_DEVICE_ev_dm_2_DEV=/dev/dm-2 + + returns (['/dev/dm2', '/dev/dm-3', '/dev/dm-4'], ['/dev/dm-5']) + ''' + md_device_keys = [key for key in detail.keys() + if key.startswith('MD_DEVICE_') and key.endswith('_DEV')] + spares = 
sorted(get_mdadm_array_spares(md_device, detail)) + devices = sorted([detail[key] for key in md_device_keys + if detail[key] not in spares]) + return (devices, spares) + + +def extract_mdadm_raid_name(conf): + ''' return the raid array name, removing homehost if present. + + MD_NAME=s1lp6:raid5-2406-2407-2408-2409 + + returns 'raid5-2406-2407-2408-2409' + ''' + raid_name = conf.get('MD_NAME') + if ':' in raid_name: + _, raid_name = raid_name.split(':') + return raid_name + + +def probe(context=None, report=False): + """Initiate an mdadm assemble to awaken existing MDADM devices. + For each md block device, extract required information needed + to describe the array for recreation or reuse as needed. + + mdadm tooling provides information about the raid type, + the members, the size, the name, uuids, metadata version. + """ + mdadm_assemble() + + # ignore passed context, must read udev after assembling mdadm devices + context = pyudev.Context() + + raids = {} + for device in context.list_devices(subsystem='block'): + if 'MD_NAME' in device: + devname = device['DEVNAME'] + attrs = udev_get_attributes(device) + attrs['size'] = str(read_sys_block_size(devname)) + devices, spares = get_mdadm_array_members(devname, device) + cfg = dict(device) + cfg.update({'raidlevel': device['MD_LEVEL'], + 'devices': devices, + 'spare_devices': spares}) + raids[devname] = cfg + + return raids diff -Nru probert-0.0.14.2build1/probert/_rtnetlinkmodule.c probert-0.0.15/probert/_rtnetlinkmodule.c --- probert-0.0.14.2build1/probert/_rtnetlinkmodule.c 2018-10-14 17:27:03.000000000 -0400 +++ probert-0.0.15/probert/_rtnetlinkmodule.c 2019-03-27 15:48:07.000000000 -0400 @@ -5,6 +5,7 @@ #include #include #include +#include #include #define NL_CB_me NL_CB_DEFAULT @@ -43,12 +44,18 @@ return; } PyObject *data; + + int is_vlan; + + is_vlan = rtnl_link_is_vlan(link); + data = Py_BuildValue( - "{si sI sI si}", + "{si sI sI si sN}", "ifindex", rtnl_link_get_ifindex(link), "flags", 
rtnl_link_get_flags(link), "arptype", rtnl_link_get_arptype(link), - "family", rtnl_link_get_family(link)); + "family", rtnl_link_get_family(link), + "is_vlan", PyBool_FromLong(is_vlan)); if (data == NULL) { goto exit; } @@ -60,6 +67,21 @@ } Py_DECREF(ob); } + if (is_vlan) { + PyObject* v; + v = PyLong_FromLong(rtnl_link_vlan_get_id(link)); + if (v == NULL || PyDict_SetItemString(data, "vlan_id", v) < 0) { + Py_XDECREF(v); + goto exit; + } + Py_DECREF(v); + v = PyLong_FromLong(rtnl_link_get_link(link)); + if (v == NULL || PyDict_SetItemString(data, "vlan_link", v) < 0) { + Py_XDECREF(v); + goto exit; + } + Py_DECREF(v); + } PyObject *r = PyObject_CallMethod(listener->observer, "link_change", "sO", act2str(act), data); Py_XDECREF(r); @@ -364,13 +386,51 @@ Py_RETURN_NONE; } +static PyObject* +listener_unset_link_flags(PyObject *self, PyObject* args, PyObject* kw) +{ + int ifindex, flags; + char *kwlist[] = {"ifindex", "flags", 0}; + + if (!PyArg_ParseTupleAndKeywords(args, kw, "ii:unset_link_flags", kwlist, &ifindex, &flags)) + return NULL; + struct Listener* listener = (struct Listener*)self; + struct rtnl_link *link = rtnl_link_get(listener->link_cache, ifindex); + if (link == NULL) { + PyErr_SetString(PyExc_RuntimeError, "link not found"); + return NULL; + } + struct nl_sock* sk = nl_socket_alloc(); + if (sk == NULL) { + rtnl_link_put(link); + PyErr_SetString(PyExc_MemoryError, "nl_socket_alloc() failed"); + return NULL; + } + int r = nl_connect(sk, NETLINK_ROUTE); + if (r < 0) { + rtnl_link_put(link); + nl_socket_free(sk); + PyErr_Format(PyExc_RuntimeError, "nl_connect failed %d", r); + return NULL; + } + rtnl_link_unset_flags(link, flags); + r = rtnl_link_change(sk, link, link, 0); + rtnl_link_put(link); + nl_socket_free(sk); + if (r < 0) { + PyErr_Format(PyExc_RuntimeError, "rtnl_link_change failed %d", r); + return NULL; + } + Py_RETURN_NONE; +} static PyMethodDef ListenerMethods[] = { {"start", listener_start, METH_NOARGS, "XXX."}, {"fileno", listener_fileno, 
METH_NOARGS, "XXX."}, {"data_ready", listener_data_ready, METH_NOARGS, "XXX."}, {"set_link_flags", (PyCFunction)listener_set_link_flags, METH_VARARGS|METH_KEYWORDS, "XXX."}, + {"unset_link_flags", (PyCFunction)listener_unset_link_flags, METH_VARARGS|METH_KEYWORDS, "XXX."}, {}, }; diff -Nru probert-0.0.14.2build1/probert/storage.py probert-0.0.15/probert/storage.py --- probert-0.0.14.2build1/probert/storage.py 2016-11-20 20:43:53.000000000 -0500 +++ probert-0.0.15/probert/storage.py 2019-04-03 14:20:21.000000000 -0400 @@ -13,12 +13,14 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +import json import logging -import os -import re import pyudev +import subprocess -from probert.utils import udev_get_attributes +from probert.utils import udev_get_attributes, read_sys_block_size +from probert import (bcache, dmcrypt, filesystem, lvm, mount, multipath, + raid, zfs) log = logging.getLogger('probert.storage') @@ -42,132 +44,141 @@ self.type = self.raw['DEVTYPE'] self.size = int(self.raw['attrs']['size']) - def _get_hwvalues(self, keys, missing='Unknown value'): + def _get_hwvalues(self, keys): for key in keys: try: return self.raw[key] except KeyError: - log.debug('Failed to get key ' - '{} from interface {}'.format(key, self.name)) - pass + log.debug( + 'Failed to get key {} from interface {}'.format(key, + self.name)) - return missing + return None @property def vendor(self): ''' Some disks don't have ID_VENDOR_* instead the vendor is encoded in the model: SanDisk_A223JJ3J3 ''' - keys = [ - 'ID_VENDOR_FROM_DATABASE', - 'ID_VENDOR', - 'ID_VENDOR_ID' - ] - v = self._get_hwvalues(keys=keys, missing='Unknown Vendor') - if v == 'Unknown Vendor': - v = self.model.split('_')[0] + v = self._get_hwvalues(['ID_VENDOR_FROM_DATABASE', 'ID_VENDOR', + 'ID_VENDOR_ID']) + if v is None: + v = self.model + if v is not None: + return v.split('_')[0] return v @property def model(self): - keys = [ - 
'ID_MODEL_FROM_DATABASE', - 'ID_MODEL', - 'ID_MODEL_ID' - ] - return self._get_hwvalues(keys=keys, missing='Unknown Model') + return self._get_hwvalues(['ID_MODEL_FROM_DATABASE', 'ID_MODEL', + 'ID_MODEL_ID']) @property def serial(self): - keys = [ - 'ID_SERIAL', - 'ID_SERIAL_SHORT' - ] - return self._get_hwvalues(keys=keys, missing='Unknown Serial') + return self._get_hwvalues(['ID_SERIAL', 'ID_SERIAL_SHORT']) @property def devpath(self): - keys = ['DEVPATH'] - return self._get_hwvalues(keys=keys, missing='Unknown devpath') + return self._get_hwvalues(['DEVPATH']) @property def is_virtual(self): return self.devpath.startswith('/devices/virtual/') +def blockdev_probe(context=None): + """ Non-class method for extracting relevant block + devices from pyudev.Context(). + """ + def _extract_partition_table(devname): + cmd = ['sfdisk', '--bytes', '--json', devname] + try: + result = subprocess.run(cmd, stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + output = result.stdout.decode('utf-8') + except subprocess.CalledProcessError as e: + log.error('Failed to probe partition table on %s:%s', devname, e) + return None + if not output: + return None + ptable = {} + try: + ptable = json.loads(output) + except json.decoder.JSONDecodeError as e: + log.error('Failed to load sfdisk json output:', e) + return ptable + + if not context: + context = pyudev.Context() + + blockdev = {} + for device in context.list_devices(subsystem='block'): + if device['MAJOR'] not in ["1", "7"]: + attrs = udev_get_attributes(device) + # update the size attr as it may only be the number + # of blocks rather than size in bytes. 
+ attrs['size'] = \ + str(read_sys_block_size(device['DEVNAME'])) + blockdev[device['DEVNAME']] = dict(device) + blockdev[device['DEVNAME']].update({'attrs': attrs}) + # include partition table info if present + ptable = _extract_partition_table(device['DEVNAME']) + if ptable: + blockdev[device['DEVNAME']].update(ptable) + + return blockdev + + class Storage(): + """ The Storage class includes a map of storage types that + probert knows how to extract required information needed + for installation and use. Each storage module included + provides a probe method which will prepare and probe the + environment for the specific type of storage devices. + + The result of each probe is collected into a dictionary + which is collected in the class .results attribute. + + The probe is non-destructive and read-only; however a + probe module may load additional modules if they are not + present. + """ + probe_map = { + 'bcache': bcache.probe, + 'blockdev': blockdev_probe, + 'dmcrypt': dmcrypt.probe, + 'filesystem': filesystem.probe, + 'lvm': lvm.probe, + 'mount': mount.probe, + 'multipath': multipath.probe, + 'raid': raid.probe, + 'zfs': zfs.probe + } + def __init__(self, results={}): self.results = results self.context = pyudev.Context() - def get_devices_by_key(self, keyname, value): - try: - storage = self.results.get('storage') - return [device for device in storage.keys() - if storage[device][keyname] == value] - except (KeyError, AttributeError): - return [] - - def get_devices(self): - try: - return self.results.get('storage').keys() - except (KeyError, AttributeError): - return [] - - def get_partitions(self, device): - ''' /dev/sda ''' - try: - partitions = self.get_devices_by_key('DEVTYPE', 'partition') - return [part for part in partitions - if part.startswith(device)] - except (KeyError, AttributeError): - return [] + def _get_probe_types(self): + return {ptype for ptype, pfunc in self.probe_map.items() if pfunc} - def get_disks(self): - try: - storage = 
self.results.get('storage') - return [disk for disk in self.get_devices_by_key('MAJOR', '8') - if storage[disk]['DEVTYPE'] == 'disk'] - except (KeyError, AttributeError): - return [] - - def get_device_size(self, device): - try: - hwinfo = self.results.get('storage').get(device) - return hwinfo.get('attrs').get('size') - except (KeyError, AttributeError): - return "0" - - def _get_device_size(self, device, is_partition=False): - ''' device='/dev/sda' ''' - device_dir = os.path.join('/sys/class/block', os.path.basename(device)) - blockdev_size = os.path.join(device_dir, 'size') - with open(blockdev_size) as d: - size = int(d.read().strip()) - - logsize_base = device_dir - if not os.path.exists(os.path.join(device_dir, 'queue')): - parent_dev = os.path.basename(re.split('[\d+]', device)[0]) - logsize_base = os.path.join('/sys/class/block', parent_dev) - - logical_size = os.path.join(logsize_base, 'queue', - 'logical_block_size') - if os.path.exists(logical_size): - with open(logical_size) as s: - size *= int(s.read().strip()) - - return size - - def probe(self): - storage = {} - for device in self.context.list_devices(subsystem='block'): - if device['MAJOR'] not in ["1", "7"]: - attrs = udev_get_attributes(device) - # update the size attr as it may only be the number - # of blocks rather than size in bytes. 
-                attrs['size'] = \
-                    str(self._get_device_size(device['DEVNAME']))
-                storage[device['DEVNAME']] = dict(device)
-                storage[device['DEVNAME']].update({'attrs': attrs})
+    def probe(self, probe_types=None):
+        default_probes = self._get_probe_types()
+        if not probe_types:
+            to_probe = default_probes
+        else:
+            to_probe = probe_types.intersection(default_probes)
+
+        if len(to_probe) == 0:
+            not_avail = probe_types.difference(default_probes)
+            print('Requested probes not available: %s' % probe_types)
+            print('Valid probe types: %s' % default_probes)
+            print('Unavailable probe types: %s' % not_avail)
+            return self.results
+
+        probed_data = {}
+        for ptype in to_probe:
+            pfunc = self.probe_map[ptype]
+            probed_data[ptype] = pfunc(context=self.context)
 
-        self.results = storage
-        return storage
+        self.results = probed_data
+        return probed_data
diff -Nru probert-0.0.14.2build1/probert/tests/fakes.py probert-0.0.15/probert/tests/fakes.py
--- probert-0.0.14.2build1/probert/tests/fakes.py	2016-07-27 07:12:20.000000000 -0400
+++ probert-0.0.15/probert/tests/fakes.py	2019-03-27 16:38:00.000000000 -0400
@@ -1,5 +1,4 @@
 import os
-import yaml
 
 TOP_DIR = os.path.join('/'.join(__file__.split('/')[:-3]))
 TEST_DATA = os.path.join(TOP_DIR, 'probert', 'tests', 'data')
diff -Nru probert-0.0.14.2build1/probert/tests/test_network.py probert-0.0.15/probert/tests/test_network.py
--- probert-0.0.14.2build1/probert/tests/test_network.py	2016-07-27 07:12:20.000000000 -0400
+++ probert-0.0.15/probert/tests/test_network.py	1969-12-31 19:00:00.000000000 -0500
@@ -1,122 +0,0 @@
-import testtools
-import json
-
-from probert.network import Network, NetworkInfo
-from probert.tests.fakes import FAKE_PROBE_ALL_JSON
-
-
-class ProbertTestNetwork(testtools.TestCase):
-    def setUp(self):
-        super(ProbertTestNetwork, self).setUp()
-        self.results = json.load(open(FAKE_PROBE_ALL_JSON))
-        self.network = Network(results=self.results)
-
-    def test_network_init(self):
-        self.assertNotEqual(None, self.network)
-
-    def 
test_network_get_interfaces(self): - ifaces = self.results['network'].keys() - self.assertEqual(sorted(ifaces), sorted(self.network.get_interfaces())) - - def test_network_get_interfaces_no_nics(self): - ifaces = [] - n = Network() - self.assertEqual(ifaces, n.get_interfaces()) - - def test_network_get_ips(self): - for iface in self.network.get_interfaces(): - ip = self.results['network'][iface]['ip'] - self.assertEqual(ip, self.network.get_ips(iface)) - - def test_network_get_ips_no_ips(self): - n = Network() - self.assertEqual([], n.get_ips('noiface')) - - def test_network_get_hwaddr(self): - for iface in self.network.get_interfaces(): - hwaddr = \ - self.results['network'][iface]['hardware']['attrs']['address'] - self.assertEqual(hwaddr, self.network.get_hwaddr(iface)) - - def test_network_get_iface_type(self): - # TODO: mock out open/read of sysfs - # and use _get_iface_type() - self.assertEqual('eth', self.network.get_iface_type('eth0')) - - # needs mocking of pyudev.Context() - # and return mock data - #def test_network_probe(self): - -class ProbertTestNetworkInfo(testtools.TestCase): - ''' properties: - .name = eth7 - .type = eth - .vendor = Innotec - .model = SuperSonicEtherRocket - .driver = sser - .devpath = /devices - .hwaddr = aa:bb:cc:dd:ee:ff - .addr = 10.2.7.2 - .netmask = 255.255.255.0 - .broadcast = 10.2.7.255 - .addr6 = - .is_virtual = - .raw = {raw dictionary} - .ip = {ip dict} - .bond = {bond dict} - .bridge = {bridge_dict} - ''' - def setUp(self): - super(ProbertTestNetworkInfo, self).setUp() - self.results = json.load(open(FAKE_PROBE_ALL_JSON)) - - def test_networkinfo_init(self): - probe_data = { - 'em1': { - 'bond': { - 'is_slave': False, - 'is_master': False, - 'slaves': [], - 'mode': None, - }, - "bridge": { - "interfaces": [], - "is_bridge": False, - "is_port": False, - "options": {} - }, - 'hardware': { - 'attrs': { - 'address': '00:11:22:33:44:55', - } - }, - 'type': 'eth', - 'ip' : {}, - } - } - ni = NetworkInfo(probe_data) - 
self.assertNotEqual(ni, None) - - def test_networkinfo_attributes(self): - eth0 = {'eth0': self.results.get('network').get('eth0')} - ni = NetworkInfo(probe_data=eth0) - props = { - 'name': 'eth0', - 'type': 'eth', - 'vendor': 'ASIX Electronics Corp.', - 'model': 'AX88179', - 'driver': 'ax88179_178a', - 'devpath': '/devices/pci0000:00/0000:00:14.0/usb3/3-2/3-2.1/3-2.1.1/3-2.1.1:1.0/net/eth0', - 'hwaddr': '00:0a:cd:26:45:33', - 'addr': '192.168.11.58', - 'netmask': '255.255.255.0', - 'broadcast': '192.168.11.255', - 'is_virtual': False, - 'raw': eth0.get('eth0'), - 'bond': eth0.get('eth0').get('bond'), - 'bridge': eth0.get('eth0').get('bridge'), - 'ip': eth0.get('eth0').get('ip'), - } - for (prop, value) in props.items(): - self.assertEqual(getattr(ni, prop), value) - diff -Nru probert-0.0.14.2build1/probert/tests/test_prober.py probert-0.0.15/probert/tests/test_prober.py --- probert-0.0.14.2build1/probert/tests/test_prober.py 2016-07-27 07:12:20.000000000 -0400 +++ probert-0.0.15/probert/tests/test_prober.py 2019-03-27 16:38:00.000000000 -0400 @@ -1,56 +1,49 @@ import testtools import json import argparse - -from mock import patch +from unittest.mock import patch from probert.prober import Prober from probert.storage import Storage -from probert.network import Network +from probert.network import NetworkProber from probert.tests.fakes import FAKE_PROBE_ALL_JSON class ProbertTestProber(testtools.TestCase): - def setUp(self): - super(ProbertTestProber, self).setUp() - self.results = json.load(open(FAKE_PROBE_ALL_JSON)) - self.options = argparse.Namespace(probe_all=True, - probe_network=False, - probe_storage=False) def test_prober_init(self): - p = Prober(options=self.options, results=self.results) + p = Prober() self.assertNotEqual(p, None) @patch.object(Prober, 'probe_all') def test_prober_probe_all(self, _probe_all): - p = Prober(options=self.options, results=self.results) - p.probe() + p = Prober() + p.probe_all() self.assertTrue(_probe_all.called) 
@patch.object(Prober, 'probe_network') @patch.object(Prober, 'probe_storage') def test_prober_probe_all_invoke_others(self, _storage, _network): - p = Prober(options=self.options, results=self.results) - p.probe() + p = Prober() + p.probe_all() self.assertTrue(_storage.called) self.assertTrue(_network.called) def test_prober_get_results(self): - p = Prober(options=self.options, results=self.results) - self.assertEqual(self.results, p.get_results()) + p = Prober() + self.assertEqual({}, p.get_results()) - @patch.object(Network, 'probe') + @patch.object(NetworkProber, 'probe') @patch.object(Storage, 'probe') def test_prober_probe_all_check_results(self, _storage, _network): - p = Prober(options=self.options, results=self.results) + p = Prober() results = { 'storage': {'lambic': 99}, 'network': {'saison': 99}, } _storage.return_value = results['storage'] _network.return_value = results['network'] - p.probe() + p.probe_all() self.assertTrue(_storage.called) self.assertTrue(_network.called) self.assertEqual(results, p.get_results()) diff -Nru probert-0.0.14.2build1/probert/tests/test_storage.py probert-0.0.15/probert/tests/test_storage.py --- probert-0.0.14.2build1/probert/tests/test_storage.py 2016-07-27 07:12:20.000000000 -0400 +++ probert-0.0.15/probert/tests/test_storage.py 2019-04-03 14:20:21.000000000 -0400 @@ -8,76 +8,13 @@ class ProbertTestStorage(testtools.TestCase): def setUp(self): super(ProbertTestStorage, self).setUp() - self.results = json.load(open(FAKE_PROBE_ALL_JSON)) + with open(FAKE_PROBE_ALL_JSON) as f: + self.results = json.load(f) self.storage = Storage(results=self.results) def test_storage_init(self): self.assertNotEqual(None, self.storage) - def test_storage_get_devices(self): - storage_keys = self.results.get('storage').keys() - self.assertEqual(sorted(self.storage.get_devices()), - sorted(storage_keys)) - - def test_storage_get_devices_no_storage(self): - s = Storage() - self.assertEqual([], s.get_devices()) - - def 
test_storage_get_devices_by_key(self): - key = 'DEVTYPE' - val = 'partition' - plist_1 = self.storage.get_devices_by_key(key, val) - plist_2 = [p for p in self.results['storage'].keys() - if self.results['storage'][p][key] == val] - self.assertEqual(sorted(plist_1), sorted(plist_2)) - - def test_storage_get_devices_by_key_invalid_key(self): - key = 'lactobacillus' - val = 'sourbeer' - plist_1 = self.storage.get_devices_by_key(key, val) - plist_2 = [] - self.assertEqual(sorted(plist_1), sorted(plist_2)) - - def test_storage_get_devices_by_key_invalid_value(self): - key = 'DEVTYPE' - val = 'supercomputer' - plist_1 = self.storage.get_devices_by_key(key, val) - plist_2 = [] - self.assertEqual(sorted(plist_1), sorted(plist_2)) - - def test_storage_get_partitions(self): - device = '/dev/sda' - plist_1 = self.storage.get_partitions(device) - plist_2 = [p for p in - self.storage.get_devices_by_key('DEVTYPE', 'partition') - if p.startswith(device)] - self.assertEqual(sorted(plist_1), sorted(plist_2)) - - def test_storage_get_partitions_no_parts(self): - results = {'storage': {'/dev/sda': { 'DEVTYPE': 'disk', 'MAJOR': '8'}}} - s = Storage(results=results) - device = '/dev/sda' - self.assertEqual([], s.get_partitions(device)) - - def test_storage_get_disk_no_disk(self): - s = Storage() - self.assertEqual([], s.get_disks()) - - def test_storage_get_disks(self): - disks = [d for d in self.results['storage'].keys() - if self.results['storage'][d]['MAJOR'] == '8' and - self.results['storage'][d]['DEVTYPE'] == 'disk'] - self.assertEqual(sorted(self.storage.get_disks()), - sorted(disks)) - - def test_storage_get_device_size(self): - disk = self.storage.get_disks().pop() - size = self.results['storage'][disk]['attrs']['size'] - self.assertEqual(self.storage.get_device_size(disk), size) - - #TODO: - # def test_storage_probe() - class ProbertTestStorageInfo(testtools.TestCase): ''' properties: @@ -92,7 +29,8 @@ ''' def setUp(self): super(ProbertTestStorageInfo, self).setUp() - 
self.results = json.load(open(FAKE_PROBE_ALL_JSON)) + with open(FAKE_PROBE_ALL_JSON) as f: + self.results = json.load(f) def test_storageinfo_init(self): probe_data = { diff -Nru probert-0.0.14.2build1/probert/utils.py probert-0.0.15/probert/utils.py --- probert-0.0.14.2build1/probert/utils.py 2016-12-21 16:06:10.000000000 -0500 +++ probert-0.0.15/probert/utils.py 2019-04-03 14:20:21.000000000 -0400 @@ -223,3 +223,27 @@ for iface in ifaces.keys(): if 'auto' not in ifaces[iface]: ifaces[iface]['auto'] = False + + +def read_sys_block_size(device): + device_dir = os.path.join('/sys/class/block', os.path.basename(device)) + blockdev_size = os.path.join(device_dir, 'size') + with open(blockdev_size) as d: + size = int(d.read().strip()) + + logsize_base = device_dir + if not os.path.exists(os.path.join(device_dir, 'queue')): + parent_dev = os.path.basename(re.split('[\d+]', device)[0]) + logsize_base = os.path.join('/sys/class/block', parent_dev) + + logical_size = os.path.join(logsize_base, 'queue', 'logical_block_size') + if os.path.exists(logical_size): + with open(logical_size) as s: + size *= int(s.read().strip()) + + return size + + +def read_sys_block_slaves(device): + device_dir = os.path.join('/sys/class/block', os.path.basename(device)) + return os.listdir(os.path.join(device_dir, 'slaves')) diff -Nru probert-0.0.14.2build1/probert/zfs.py probert-0.0.15/probert/zfs.py --- probert-0.0.14.2build1/probert/zfs.py 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/probert/zfs.py 2019-04-03 14:20:21.000000000 -0400 @@ -0,0 +1,209 @@ +# Copyright 2019 Canonical, Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from collections import namedtuple +import logging +import operator +import os +import re +import subprocess +from functools import reduce + + +log = logging.getLogger('probert.zfs') +ZfsListEntry = namedtuple('ZfsListEntry', + ('name', 'used', 'avail', 'refer', 'mountpoint')) + + +def parse_zdb_output(data): + """ Parse structured zdb output into a dictionary. + + hogshead: + version: 5000 + name: 'hogshead' + vdev_tree: + type: 'root' + id: 0 + guid: 12392392111803944759 + children[0]: + type: 'raidz' + ashift: 12 + children[0]: + type: 'disk' + id: 0 + guid: 13921270083288950156 + path: '/dev/disk/by-id/usb-ST4000VN_000-1H4168-0:0-part1' + whole_disk: 1 + DTL: 140 + create_txg: 4 + com.delphix:vdev_zap_leaf: 231 + children[1]: + type: 'disk' + id: 1 + guid: 2635788368927674810 + path: '/dev/disk/by-id/usb-ST4000VN_000-1H4168-0:1-part1' + whole_disk: 1 + DTL: 139 + create_txg: 4 + com.delphix:vdev_zap_leaf: 232 + """ + + def get_from_dict(datadict, maplist): + return reduce(operator.getitem, maplist, datadict) + + def set_in_dict(datadict, maplist, value): + get_from_dict(datadict, maplist[:-1])[maplist[-1]] = value + + def parse_line_key_value(line): + """ use ': ' token to split line into key, value pairs + + com.delphi:vdev_zap_top: 230 + ^^ + `- span() = (24, 26) + key = 'com.delphi:vdev_zap_top' + value = '230' + """ + match = re.search(r': ', line) + if match: + tok_start, tok_end = match.span() + key, value = (line[:tok_start], line[tok_end:]) + else: + key, value = line.split(':') + + return (key.lstrip(), value.replace("'", "")) + + # for each line in zdb output, calculate the 
nested level
+    # based on indentation. Add key/value pairs for each line
+    # and generate a list of keys to calculate where in the root
+    # dictionary to set the value.
+    root = {}
+    lvl_tok = 4
+    prev_item = []
+    for line in data.splitlines():
+        current_level = int((len(line) - len(line.lstrip(' '))) / lvl_tok)
+        prev_level = len(prev_item) - 1
+        key, value = parse_line_key_value(line)
+        # TODO: handle children[N] keyname and convert to list
+        if current_level == 0:
+            root[key] = {}
+            prev_item = [(current_level, key)]
+        else:
+            new_item_path = [item[1]
+                             for item in prev_item[0: current_level]] + [key]
+            if value:
+                set_in_dict(root, new_item_path, value)
+            else:
+                set_in_dict(root, new_item_path, {})
+            # we've dropped down a level, replace prev level key w/new key
+            if current_level == prev_level:
+                prev_item.pop()
+            prev_item.append((current_level, key))
+
+    return root
+
+
+def zdb_asdict(data=None):
+    """ Convert output from zdb into a dictionary"""
+    if not data:
+        cmd = ['zdb']
+        # exported, altroot, and uncached pools need -e
+        if not os.path.exists('/etc/zfs/zpool.cache'):
+            cmd.append('-e')
+        try:
+            result = subprocess.run(cmd, stdout=subprocess.PIPE,
+                                    stderr=subprocess.DEVNULL)
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            return {}
+
+        data = result.stdout.decode('utf-8')
+
+    return parse_zdb_output(data)
+
+
+def zfs_list_filesystems(raw_output=False):
+    cmd = ['zfs', 'list', '-Hp', '-t', 'filesystem']
+    try:
+        result = subprocess.run(cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.DEVNULL)
+    except subprocess.CalledProcessError:
+        return []
+
+    data = result.stdout.decode('utf-8')
+    if raw_output:
+        return data
+
+    # NAME, USED, AVAIL, REFER, MOUNTPOINT
+    zfs_entries = []
+    for line in data.splitlines():
+        (name, used, avail, refer, mpoint) = line.split('\t')
+        if mpoint == 'none':
+            mpoint = None
+        zfs_entries.append(ZfsListEntry(name, used, avail, refer, mpoint))
+
+    return zfs_entries
+
+
+def zfs_get_properties(zfs_name, raw_output=False):
+    if not zfs_name:
+        raise ValueError('Invalid zfs_name parameter: "%s"' % zfs_name)
+
+    cmd = ['zfs', 'get', 'all', '-Hp', zfs_name]
+    try:
+        result = subprocess.run(cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.DEVNULL)
+    except subprocess.CalledProcessError:
+        return []
+
+    data = result.stdout.decode('utf-8')
+    if raw_output:
+        return data
+
+    # NAME, PROPERTY, VALUE, SOURCE
+    zprops = {}
+    for line in data.splitlines():
+        (name, prop, value, source) = line.split('\t')
+        zprops[prop] = {'value': value, 'source': source}
+
+    return {zfs_name: {'properties': zprops}}
+
+
+def is_zfs_device(device):
+    return device.get('ID_FS_TYPE') == 'zfs_member'
+
+
+def probe(context=None):
+    """The ZFS prober examines the ZFS Debugger (zdb) output which
+    produces pseudo-json output. This is converted to a dictionary
+    where for each zpool, we can extract the datasets and determine
+    which vdevs (linux block devices) are used to construct the
+    zpool.
+
+    For each zpool and dataset, the prober further extracts all of
+    the pool and filesystem (dataset) properties and captures
+    the values and whether they are local changes or defaults.
+
+    The resulting output includes the converted zdb dump and
+    a tree of datasets and their properties.
+ """ + zdb = zdb_asdict() + zpools = {} + for zpool, zdb_dump in zdb.items(): + datasets = {} + zlf = zfs_list_filesystems() + for zfs_entry in zlf: + datasets[zfs_entry.name] = zfs_get_properties(zfs_entry.name) + zpools[zpool] = {'zdb': zdb_dump, 'datasets': datasets} + + return {'zpools': zpools} Binary files /tmp/alezjyzP6u/probert-0.0.14.2build1/probert-dbgsym_0.0.15_amd64.ddeb and /tmp/WRjdRk_v9g/probert-0.0.15/probert-dbgsym_0.0.15_amd64.ddeb differ diff -Nru probert-0.0.14.2build1/requirements.txt probert-0.0.15/requirements.txt --- probert-0.0.14.2build1/requirements.txt 1969-12-31 19:00:00.000000000 -0500 +++ probert-0.0.15/requirements.txt 2019-03-27 16:13:44.000000000 -0400 @@ -0,0 +1,2 @@ +pyudev==0.21.0 +jsonschema==2.6.0 diff -Nru probert-0.0.14.2build1/setup.py probert-0.0.15/setup.py --- probert-0.0.14.2build1/setup.py 2016-11-08 18:35:29.000000000 -0500 +++ probert-0.0.15/setup.py 2019-03-27 16:38:00.000000000 -0400 @@ -41,6 +41,11 @@ 'extra_link_args': subprocess.check_output(['pkg-config', '--libs', package]).decode('utf8').split(), } + +def read_requirement(): + return [req.strip() for req in open('requirements.txt')] + + setup(name='probert', version=probert.__version__, description="Hardware probing tool", @@ -61,5 +66,6 @@ **pkgconfig("libnl-genl-3.0")), ], packages=find_packages(), + install_requires=read_requirement(), include_package_data=True, )