diff -Nru sosreport-4.5.6/.cirrus.yml sosreport-4.7.0/.cirrus.yml --- sosreport-4.5.6/.cirrus.yml 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/.cirrus.yml 2024-02-20 00:39:46.000000000 +0000 @@ -8,6 +8,7 @@ DEBIAN_NAME: "debian-11" + UBUNTU_LATEST_NAME: "ubuntu-23.10" UBUNTU_NAME: "ubuntu-22.04" UBUNTU_PRIOR_NAME: "ubuntu-20.04" UBUNTU_PRIOR2_NAME: "ubuntu-18.04" @@ -22,15 +23,17 @@ UBUNTU_PROJECT: "ubuntu-os-cloud" # Images exist on GCP already - CENTOS_9_IMAGE_NAME: "centos-stream-9-v20221102" - CENTOS_8_IMAGE_NAME: "centos-stream-8-v20230306" - DEBIAN_IMAGE_NAME: "debian-11-bullseye-v20230306" + CENTOS_9_IMAGE_NAME: "centos-stream-9-v20230809" + CENTOS_8_IMAGE_NAME: "centos-stream-8-v20230809" + DEBIAN_IMAGE_NAME: "debian-11-bullseye-v20230809" FEDORA_IMAGE_NAME: "fedora-cloud-base-gcp-38-1-6-x86-64" FEDORA_PRIOR_IMAGE_NAME: "fedora-cloud-base-gcp-37-1-7-x86-64" - UBUNTU_IMAGE_NAME: "ubuntu-2204-jammy-v20230302" - UBUNTU_PRIOR_IMAGE_NAME: "ubuntu-2004-focal-v20230302" - UBUNTU_PRIOR2_IMAGE_NAME: "ubuntu-1804-bionic-v20230324" - UBUNTU_SNAP_IMAGE_NAME: "ubuntu-2204-jammy-v20230302" + UBUNTU_DEB_IMAGE_NAME: "ubuntu-minimal-2310-mantic-amd64-v20231030" + UBUNTU_LATEST_IMAGE_NAME: "ubuntu-2310-mantic-amd64-v20231031" + UBUNTU_IMAGE_NAME: "ubuntu-2204-jammy-v20231030" + UBUNTU_PRIOR_IMAGE_NAME: "ubuntu-2004-focal-v20231101" + UBUNTU_PRIOR2_IMAGE_NAME: "ubuntu-1804-bionic-v20230605" + UBUNTU_SNAP_IMAGE_NAME: "ubuntu-2204-jammy-v20231030" # Curl-command prefix for downloading task artifacts, simply add the # the url-encoded task name, artifact name, and path as a suffix. @@ -115,6 +118,27 @@ path: ./sos_${BUILD_NAME}.rpm type: application/octet-stream +# Make sure a user can manually build a deb from the checkout +deb_build_task: + alias: "deb_build" + name: "deb Build From Checkout" + gce_instance: + image_project: "${UBUNTU_PROJECT}" + image_name: "${UBUNTU_DEB_IMAGE_NAME}" + type: e2-medium + setup_script: | + apt update --allow-releaseinfo-change + apt -y install devscripts equivs python3-pip + mk-build-deps + apt -y install ./sosreport-build-deps*.deb + pip3 install avocado-framework==94.0 --break-system-packages + main_script: | + dpkg-buildpackage -b -us -uc -rfakeroot -m --build-by="noreply@canonical.com" + prep_artifacts_script: mv ../*.deb ./sos_cirrus.deb + packages_artifacts: + path: ./sos_cirrus.deb + type: application/octet-stream + # Make sure a user can manually build a snap from the checkout snap_build_task: alias: "snap_build" @@ -146,6 +170,7 @@ depends_on: - rpm_build - snap_build + - deb_build gce_instance: *standardvm matrix: - env: *centos9 @@ -156,24 +181,38 @@ PROJECT: ${UBUNTU_PROJECT} BUILD_NAME: ${UBUNTU_NAME} VM_IMAGE_NAME: ${UBUNTU_IMAGE_NAME} + PKG: "snap" - env: &ubuntuprior PROJECT: ${UBUNTU_PROJECT} BUILD_NAME: ${UBUNTU_PRIOR_NAME} VM_IMAGE_NAME: ${UBUNTU_PRIOR_IMAGE_NAME} + PKG: "snap" - env: &ubuntuprior2 PROJECT: ${UBUNTU_PROJECT} BUILD_NAME: ${UBUNTU_PRIOR2_NAME} VM_IMAGE_NAME: ${UBUNTU_PRIOR2_IMAGE_NAME} + PKG: "snap" + - env: &ubuntu-latest + PROJECT: ${UBUNTU_PROJECT} + BUILD_NAME: ${UBUNTU_LATEST_NAME} + VM_IMAGE_NAME: ${UBUNTU_LATEST_IMAGE_NAME} + PKG: "deb" setup_script: &setup | if [ $(command -v apt) ]; then - echo "$ARTCURL/snap%20Build%20From%20Checkout/packages/sosreport_test_amd64.snap" - $ARTCURL/snap%20Build%20From%20Checkout/packages/sosreport_test_amd64.snap apt -y purge sosreport apt update --allow-releaseinfo-change apt -y install python3-pip snapd - systemctl start snapd - snap install ./sosreport_test_amd64.snap --classic 
--dangerous - snap alias sosreport.sos sos + if [ ${PKG} == "snap" ] ; then + echo "$ARTCURL/snap%20Build%20From%20Checkout/packages/sosreport_test_amd64.snap" + $ARTCURL/snap%20Build%20From%20Checkout/packages/sosreport_test_amd64.snap + systemctl start snapd + snap install ./sosreport_test_amd64.snap --classic --dangerous + snap alias sosreport.sos sos + elif [ ${PKG} == "deb" ]; then + echo "$ARTCURL/deb%20Build%20From%20Checkout/packages/sos_cirrus.deb" + $ARTCURL/deb%20Build%20From%20Checkout/packages/sos_cirrus.deb + apt -y install ./sos_cirrus.deb + fi fi if [ $(command -v dnf) ]; then echo "$ARTCURL/rpm%20Build%20From%20Checkout%20-%20${BUILD_NAME}/packages/sos_${BUILD_NAME}.rpm" @@ -182,7 +221,9 @@ dnf -y install python3-pip ethtool dnf -y install ./sos_${BUILD_NAME}.rpm fi - pip3 install avocado-framework==94.0 + PIP_EXTRA="" + [[ $(pip3 install --help | grep break-system) ]] && PIP_EXTRA="--break-system-packages" + pip3 install avocado-framework==94.0 ${PIP_EXTRA} # run the unittests separately as they require a different PYTHONPATH in # order for the imports to work properly under avocado unittest_script: PYTHONPATH=. avocado run tests/unittests/ @@ -205,6 +246,7 @@ - env: *centos8 - env: *fedora - env: *ubuntu + - env: *ubuntu-latest setup_script: *setup install_pexpect_script: | if [ $(command -v apt) ]; then @@ -230,18 +272,18 @@ matrix: - env: <<: *centos8 - FOREMAN_VER: "2.5" + FOREMAN_VER: "3.3" - env: <<: *centos8 - FOREMAN_VER: "3.1" + FOREMAN_VER: "3.5" - env: <<: *centos8 - FOREMAN_VER: "3.4" + FOREMAN_VER: "3.7" - env: PROJECT: ${DEBIAN_PROJECT} VM_IMAGE_NAME: ${DEBIAN_IMAGE_NAME} BUILD_NAME: ${DEBIAN_NAME} - FOREMAN_VER: "3.4" + FOREMAN_VER: "3.7" setup_script: *setup foreman_setup_script: ./tests/test_data/foreman_setup.sh main_script: PYTHONPATH=tests/ avocado run -p TESTLOCAL=true --test-runner=runner -t foreman tests/product_tests/foreman/ diff -Nru sosreport-4.5.6/debian/changelog sosreport-4.7.0/debian/changelog --- sosreport-4.5.6/debian/changelog 2023-10-04 04:41:30.000000000 +0100 +++ sosreport-4.7.0/debian/changelog 2024-02-20 06:44:29.000000000 +0000 @@ -1,3 +1,31 @@ +sosreport (4.7.0-0ubuntu0~20.04.1) focal; urgency=medium + + * New 4.7.0 upstream release. (LP: #2054395) + + * For more details, full release note is available here: + - https://github.com/sosreport/sos/releases/tag/4.7.0 + + * d/control: + - Add 'python3-packaging' as part of the runtime depends. + - Add 'python3-packaging' as part of the build depends. + - Add 'python3-yaml' as part of the build depends. + - Add 'X-Python3-Version: >= 3.6' to ensure we use the python + revision that is supported. 
(LP: #2038648) + - Add 'python3-boto3' to Suggests + + * Former patches, now fixed: + - d/p/0002-obfuscate-netplan-ssid-password.patch + + * Remaining patches: + - d/p/0001-debian-change-tmp-dir-location.patch + - d/p/0002-debian-remove-magic-stderr.patch + + * New patches: + - d/p/0003-collect-refactor-_format_version.patch + - d/p/0004-pacemaker-Use-pep440-formatted-version-on-comparison.patch + + -- Arif Ali Tue, 20 Feb 2024 06:44:29 +0000 + sosreport (4.5.6-0ubuntu1~20.04.2) focal; urgency=medium * d/tests/simple.sh: diff -Nru sosreport-4.5.6/debian/control sosreport-4.7.0/debian/control --- sosreport-4.5.6/debian/control 2023-06-13 05:48:11.000000000 +0100 +++ sosreport-4.7.0/debian/control 2024-02-20 06:44:29.000000000 +0000 @@ -13,13 +13,17 @@ python3-setuptools, python3-sphinx, python3-pexpect, + python3-packaging, + python3-yaml, Homepage: https://github.com/sosreport/sos Vcs-Browser: https://salsa.debian.org/sosreport-team/sosreport Vcs-Git: https://salsa.debian.org/sosreport-team/sosreport.git +X-Python3-Version: >= 3.6 Package: sosreport Architecture: any -Depends: ${python3:Depends}, ${misc:Depends}, python3-pexpect +Depends: ${python3:Depends}, ${misc:Depends}, python3-pexpect, python3-packaging +Suggests: python3-boto3 Description: Set of tools to gather troubleshooting data from a system Sos is a set of tools that gathers information about system hardware and configuration. The information can then be used for diff -Nru sosreport-4.5.6/debian/patches/0002-obfuscate-netplan-ssid-password.patch sosreport-4.7.0/debian/patches/0002-obfuscate-netplan-ssid-password.patch --- sosreport-4.5.6/debian/patches/0002-obfuscate-netplan-ssid-password.patch 2023-10-04 04:40:56.000000000 +0100 +++ sosreport-4.7.0/debian/patches/0002-obfuscate-netplan-ssid-password.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -Description: Obfuscate passwords in netplan yaml files -Author: Arif Ali -Origin: upstream -Bug: https://github.com/sosreport/sos/issues/3365 ---- -This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ -Index: sosreport-4.5.6/sos/report/plugins/networking.py -=================================================================== ---- sosreport-4.5.6.orig/sos/report/plugins/networking.py -+++ sosreport-4.5.6/sos/report/plugins/networking.py -@@ -299,5 +299,13 @@ class UbuntuNetworking(Networking, Ubunt - self.add_cmd_output("/usr/sbin/traceroute -n %s" % self.trace_host, - priority=100) - -+ def postproc(self): -+ -+ self.do_path_regex_sub( -+ "/etc/netplan", -+ r"(\s+password:).*", -+ r"\1 ******" -+ ) -+ - - # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/debian/patches/0003-collect-refactor-_format_version.patch sosreport-4.7.0/debian/patches/0003-collect-refactor-_format_version.patch --- sosreport-4.5.6/debian/patches/0003-collect-refactor-_format_version.patch 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/debian/patches/0003-collect-refactor-_format_version.patch 2024-02-20 06:44:29.000000000 +0000 @@ -0,0 +1,74 @@ +From 7e81246fcfd35e17a69bc3d40400cd123e8ca7bf Mon Sep 17 00:00:00 2001 +From: David Negreira +Date: Sat, 2 Mar 2024 17:05:12 +0100 +Subject: [PATCH] [collect] refactor _format_version + +Ensure that we format the package versions to the pep440 standard. +This is necessary as we are using `parse_version()` from the `packaging` +package to verify that the sos collector node version is equal or bigger +than the versions we are collecting from the nodes. 
If we pass the wrong +format to `parse_version()`, we are unable to do the comparison and +unable to run `sos report` on those nodes. +This addresses the issue of the Ubuntu packaging version naming with `+` +and `~` signals as well as generic versioning schemes. + +Resolves: #3544 + +Signed-off-by: David Negreira +Signed-off-by: Arif Ali +Bug: https://github.com/sosreport/sos/issues/3544 +Origin: upstream, https://github.com/sosreport/sos/commit/7e81246fcfd35e17a69bc3d40400cd123e8ca7bf +--- + sos/collector/sosnode.py | 30 +++++++++++++++++------------- + 1 file changed, 17 insertions(+), 13 deletions(-) + +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 878d3b3a..07a71865 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -412,25 +412,29 @@ class SosNode(): + :returns: True if installed version is at least ``ver``, else False + :rtype: ``bool`` + """ +- def _format_version(ver): +- # format the version we're checking to a standard form of X.Y.Z-R ++ def _format_version_to_pep440(ver): ++ """ Convert the version into a PEP440 compliant version scheme.""" ++ public_version_re = re.compile( ++ r"^([0-9][0-9.]*(?:(?:a|b|rc|.post|.dev)[0-9]+)*)\+?" ++ ) + try: +- _fver = ver.split('-')[0] +- _rel = '' +- if '-' in ver: +- _rel = '-' + ver.split('-')[-1].split('.')[0] +- if len(_fver.split('.')) == 2: +- _fver += '.0' +- +- return _fver + _rel ++ _, public, local = public_version_re.split(ver, maxsplit=1) ++ if not local: ++ return ver ++ sanitized_local = re.sub("[+~]+", ".", local).strip("-") ++ pep440_version = f"{public}+{sanitized_local}" ++ return pep440_version + except Exception as err: +- self.log_debug("Unable to format '%s': %s" % (ver, err)) ++ self.log_debug(f"Unable to format {ver} to pep440 format: " ++ f"{err}") + return ver + +- _ver = _format_version(ver) ++ _ver = _format_version_to_pep440(ver) ++ _node_formatted_version = _format_version_to_pep440( ++ self.sos_info['version']) + + try: +- _node_ver = parse_version(self.sos_info['version']) ++ _node_ver = parse_version(_node_formatted_version) + _test_ver = parse_version(_ver) + return _node_ver >= _test_ver + except Exception as err: +-- +2.40.1 + diff -Nru sosreport-4.5.6/debian/patches/0004-pacemaker-Use-pep440-formatted-version-on-comparison.patch sosreport-4.7.0/debian/patches/0004-pacemaker-Use-pep440-formatted-version-on-comparison.patch --- sosreport-4.5.6/debian/patches/0004-pacemaker-Use-pep440-formatted-version-on-comparison.patch 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/debian/patches/0004-pacemaker-Use-pep440-formatted-version-on-comparison.patch 2024-02-20 06:44:29.000000000 +0000 @@ -0,0 +1,185 @@ +From b3920dd305656dba02cae6f71ee2088fa609bfb2 Mon Sep 17 00:00:00 2001 +From: Ponnuvel Palaniyappan +Date: Tue, 5 Mar 2024 21:03:40 +0000 +Subject: [PATCH] [pacemaker] Use pep440 formatted version on comparison + +There are couple of instances (both on pacemaker) of +`parse_version` being used to compare the package +versions. In cases, notably on Ubuntu, where the version +comform to PEP440, this fails. So we now convert those +to PEP440 format before comparing. + +Fixes #3548. 
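As a quick sketch of the conversion these two patches introduce (mirroring the format_version_to_pep440 helper added to sos/utilities.py below; the version strings are illustrative):

    import re
    from packaging.version import parse as parse_version

    # Capture the PEP440 "public" part; whatever remains (e.g. a Debian
    # revision such as "-0ubuntu1~20.04.2") becomes the local segment.
    public_version_re = re.compile(
        r"^([0-9][0-9.]*(?:(?:a|b|rc|.post|.dev)[0-9]+)*)\+?")

    def to_pep440(ver):
        _, public, local = public_version_re.split(ver, maxsplit=1)
        if not local:
            return ver
        # '+' and '~' are not valid in a PEP440 local segment; map to '.'
        return f"{public}+{re.sub('[+~]+', '.', local).strip('-')}"

    assert to_pep440("4.5.6-0ubuntu1~20.04.2") == "4.5.6+0ubuntu1.20.04.2"
    assert parse_version(to_pep440("4.7.0-0ubuntu1")) > \
        parse_version(to_pep440("4.5.6-0ubuntu1~20.04.2"))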
+ +Signed-off-by: Ponnuvel Palaniyappan +Signed-off-by: Arif Ali +Bug: https://github.com/sosreport/sos/issues/3548 +Origin: upstream, https://github.com/sosreport/sos/commit/b3920dd305656dba02cae6f71ee2088fa609bfb2 +--- + sos/collector/clusters/juju.py | 7 ++++--- + sos/collector/clusters/pacemaker.py | 4 ++-- + sos/collector/sosnode.py | 28 +++------------------------- + sos/report/plugins/pacemaker.py | 4 ++-- + sos/utilities.py | 26 ++++++++++++++++++++++++++ + 5 files changed, 37 insertions(+), 32 deletions(-) + +diff --git a/sos/collector/clusters/juju.py b/sos/collector/clusters/juju.py +index be69759a..a8ef68fb 100644 +--- a/sos/collector/clusters/juju.py ++++ b/sos/collector/clusters/juju.py +@@ -13,7 +13,7 @@ import json + import re + + from sos.collector.clusters import Cluster +-from sos.utilities import parse_version ++from sos.utilities import sos_parse_version + from sos.utilities import sos_get_command_output + + +@@ -161,12 +161,13 @@ class juju(Cluster): + def _get_juju_version(self): + """Grab the version of juju""" + res = sos_get_command_output("juju version") +- return res['output'].split("-")[0] ++ return res['output'] + + def _execute_juju_status(self, model_name): + model_option = f"-m {model_name}" if model_name else "" + format_option = "--format json" +- if parse_version(self._get_juju_version()) > parse_version("3"): ++ juju_version = self._get_juju_version() ++ if sos_parse_version(juju_version) > sos_parse_version("3"): + format_option += " --no-color" + status_cmd = f"{self.cmd} status {model_option} {format_option}" + res = self.exec_primary_cmd(status_cmd) +diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py +index c83f8c3c..bd3a832b 100644 +--- a/sos/collector/clusters/pacemaker.py ++++ b/sos/collector/clusters/pacemaker.py +@@ -11,7 +11,7 @@ + import re + + from sos.collector.clusters import Cluster +-from sos.utilities import parse_version ++from sos.utilities import sos_parse_version + from xml.etree import ElementTree + + +@@ -63,7 +63,7 @@ class pacemaker(Cluster): + _ver = self.exec_primary_cmd('crm_mon --version') + if _ver['status'] == 0: + cver = _ver['output'].split()[1].split('-')[0] +- if not parse_version(cver) > parse_version('2.0.3'): ++ if not sos_parse_version(cver) > sos_parse_version('2.0.3'): + xmlopt = '--as-xml' + else: + return +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 07a71865..f315c58a 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -26,7 +26,7 @@ from sos.collector.exceptions import (CommandTimeoutException, + ConnectionException, + UnsupportedHostException, + InvalidTransportException) +-from sos.utilities import parse_version ++from sos.utilities import sos_parse_version + + TRANSPORTS = { + 'local': LocalTransport, +@@ -412,31 +412,9 @@ class SosNode(): + :returns: True if installed version is at least ``ver``, else False + :rtype: ``bool`` + """ +- def _format_version_to_pep440(ver): +- """ Convert the version into a PEP440 compliant version scheme.""" +- public_version_re = re.compile( +- r"^([0-9][0-9.]*(?:(?:a|b|rc|.post|.dev)[0-9]+)*)\+?" 
+- ) +- try: +- _, public, local = public_version_re.split(ver, maxsplit=1) +- if not local: +- return ver +- sanitized_local = re.sub("[+~]+", ".", local).strip("-") +- pep440_version = f"{public}+{sanitized_local}" +- return pep440_version +- except Exception as err: +- self.log_debug(f"Unable to format {ver} to pep440 format: " +- f"{err}") +- return ver +- +- _ver = _format_version_to_pep440(ver) +- _node_formatted_version = _format_version_to_pep440( +- self.sos_info['version']) +- + try: +- _node_ver = parse_version(_node_formatted_version) +- _test_ver = parse_version(_ver) +- return _node_ver >= _test_ver ++ _node_ver = self.sos_info['version'] ++ return sos_parse_version(_node_ver) >= sos_parse_version(ver) + except Exception as err: + self.log_error("Error checking sos version: %s" % err) + return False +diff --git a/sos/report/plugins/pacemaker.py b/sos/report/plugins/pacemaker.py +index cf11e219..113691e1 100644 +--- a/sos/report/plugins/pacemaker.py ++++ b/sos/report/plugins/pacemaker.py +@@ -8,7 +8,7 @@ + + from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin, + UbuntuPlugin, PluginOpt) +-from sos.utilities import parse_version ++from sos.utilities import sos_parse_version + from datetime import datetime, timedelta + import re + +@@ -55,7 +55,7 @@ class Pacemaker(Plugin): + ]) + + pcs_version = '.'.join(pcs_pkg['version']) +- if parse_version(pcs_version) > parse_version('0.10.8'): ++ if sos_parse_version(pcs_version) > sos_parse_version('0.10.8'): + self.add_cmd_output("pcs property config --all") + else: + self.add_cmd_output("pcs property list --all") +diff --git a/sos/utilities.py b/sos/utilities.py +index ce371b0a..c8f11993 100644 +--- a/sos/utilities.py ++++ b/sos/utilities.py +@@ -73,6 +73,32 @@ __all__ = [ + ] + + ++def format_version_to_pep440(ver): ++ """ Convert the version into a PEP440 compliant version scheme.""" ++ public_version_re = re.compile( ++ r"^([0-9][0-9.]*(?:(?:a|b|rc|.post|.dev)[0-9]+)*)\+?" ++ ) ++ try: ++ _, public, local = public_version_re.split(ver, maxsplit=1) ++ if not local: ++ return ver ++ sanitized_local = re.sub("[+~]+", ".", local).strip("-") ++ pep440_version = f"{public}+{sanitized_local}" ++ return pep440_version ++ except Exception as err: ++ log.debug(f"Unable to format {ver} to pep440 format: {err}") ++ return ver ++ ++ ++def sos_parse_version(ver, pep440=True): ++ """ Converts the version to PEP440 format before parsing """ ++ if pep440: ++ ver_pep440 = format_version_to_pep440(ver) ++ return parse_version(ver_pep440) ++ ++ return parse_version(ver) ++ ++ + def tail(filename, number_of_bytes): + """Returns the last number_of_bytes of filename""" + with open(filename, "rb") as f: +-- +2.40.1 + diff -Nru sosreport-4.5.6/debian/patches/series sosreport-4.7.0/debian/patches/series --- sosreport-4.5.6/debian/patches/series 2023-10-04 04:39:31.000000000 +0100 +++ sosreport-4.7.0/debian/patches/series 2024-02-20 06:44:29.000000000 +0000 @@ -1,3 +1,4 @@ 0001-debian-change-tmp-dir-location.patch 0002-debian-remove-magic-stderr.patch -0002-obfuscate-netplan-ssid-password.patch +0003-collect-refactor-_format_version.patch +0004-pacemaker-Use-pep440-formatted-version-on-comparison.patch diff -Nru sosreport-4.5.6/docs/conf.py sosreport-4.7.0/docs/conf.py --- sosreport-4.5.6/docs/conf.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/docs/conf.py 2024-02-20 00:39:46.000000000 +0000 @@ -59,9 +59,9 @@ # built documents. # # The short X.Y version. 
-version = '4.5.6' +version = '4.7.0' # The full version, including alpha/beta/rc tags. -release = '4.5.6' +release = '4.7.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff -Nru sosreport-4.5.6/.gitignore sosreport-4.7.0/.gitignore --- sosreport-4.5.6/.gitignore 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/.gitignore 2024-02-20 00:39:46.000000000 +0000 @@ -3,6 +3,7 @@ *swp* *.pyc *.sw* +*.DS_Store tags buildjar/ gpgkeys/rhsupport.* @@ -21,3 +22,9 @@ # Pycharm .idea/ + +# debian files +debian/sosreport* +debian/files +debian/.debhelper +debian/debhelper-build-stamp diff -Nru sosreport-4.5.6/man/en/sos-report.1 sosreport-4.7.0/man/en/sos-report.1 --- sosreport-4.5.6/man/en/sos-report.1 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/man/en/sos-report.1 2024-02-20 00:39:46.000000000 +0000 @@ -439,7 +439,7 @@ If --batch is used, this prompt will not occur, so any uploads are likely to fail unless this option is used. -Note that this will result in the plaintext string appearing in `ps` output that may +Note that this may result in the plaintext string appearing in `ps` output that may be collected by sos and be in the archive. If a password must be provided by you for uploading, it is strongly recommended to not use --batch and enter the password when prompted rather than using this option. @@ -498,9 +498,3 @@ .fi .SH AUTHORS & CONTRIBUTORS See \fBAUTHORS\fR file in the package documentation. -.nf -.SH TRANSLATIONS -.nf -Translations are handled by transifex (https://fedorahosted.org/transifex/) -.fi -.fi diff -Nru sosreport-4.5.6/.packit.yaml sosreport-4.7.0/.packit.yaml --- sosreport-4.5.6/.packit.yaml 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/.packit.yaml 2024-02-20 00:39:46.000000000 +0000 @@ -19,6 +19,8 @@ - fedora-development-aarch64 - fedora-development-ppc64le - fedora-development-s390x + - centos-stream-8 + - centos-stream-9 notifications: pull_request: diff -Nru sosreport-4.5.6/README.md sosreport-4.7.0/README.md --- sosreport-4.5.6/README.md 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/README.md 2024-02-20 00:39:46.000000000 +0000 @@ -1,4 +1,5 @@ -[![Build Status](https://api.cirrus-ci.com/github/sosreport/sos.svg?branch=main)](https://cirrus-ci.com/github/sosreport/sos) [![Documentation Status](https://readthedocs.org/projects/sos/badge/?version=main)](https://sos.readthedocs.io/en/main/?badge=main) +[![Build Status](https://api.cirrus-ci.com/github/sosreport/sos.svg?branch=main)](https://cirrus-ci.com/github/sosreport/sos) [![Documentation Status](https://readthedocs.org/projects/sos/badge/?version=main)](https://sos.readthedocs.io/en/main/?badge=main) [![sosreport](https://snapcraft.io/sosreport/badge.svg)](https://snapcraft.io/sosreport) + # SoS @@ -82,7 +83,7 @@ and run ``` -python3 setup.py build_sphinx -a +sphinx-build -b html docs ``` @@ -149,6 +150,12 @@ # sudo apt install sosreport ``` +### Snap Installation + +``` +# snap install sosreport --classic +``` + [0]: https://github.com/sosreport/sos/wiki/Contribution-Guidelines [1]: https://github.com/sosreport/sos/wiki/How-to-Write-a-Plugin [2]: https://github.com/sosreport/sos/wiki/How-to-Write-a-Policy diff -Nru sosreport-4.5.6/requirements.txt sosreport-4.7.0/requirements.txt --- sosreport-4.5.6/requirements.txt 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/requirements.txt 2024-02-20 00:39:46.000000000 +0000 @@ -3,5 +3,5 @@ Sphinx>=1.3.5 pexpect>=4.0.0 pyyaml -setuptools +packaging diff -Nru 
sosreport-4.5.6/setup.py sosreport-4.7.0/setup.py --- sosreport-4.5.6/setup.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/setup.py 2024-02-20 00:39:46.000000000 +0000 @@ -7,7 +7,9 @@ setup( name='sos', version=VERSION, - install_requires=['pexpect', 'pyyaml'], + # to avoid a packaging dependency on older RHELs + # we only declare it on recent Python versions + install_requires=['pexpect', 'pyyaml', 'packaging;python_version>="3.11"'], description=( 'A set of tools to gather troubleshooting information from a system' ), diff -Nru sosreport-4.5.6/snap/snapcraft.yaml sosreport-4.7.0/snap/snapcraft.yaml --- sosreport-4.5.6/snap/snapcraft.yaml 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/snap/snapcraft.yaml 2024-02-20 00:39:46.000000000 +0000 @@ -37,6 +37,7 @@ - wheel - python_magic - packaging + - boto3==1.26.155 apps: sos: diff -Nru sosreport-4.5.6/sos/archive.py sosreport-4.7.0/sos/archive.py --- sosreport-4.5.6/sos/archive.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/archive.py 2024-02-20 00:39:46.000000000 +0000 @@ -390,14 +390,14 @@ # on file content. dest = self.check_path(dest, P_FILE, force=True) - f = codecs.open(dest, mode, encoding='utf-8') - if isinstance(content, bytes): - content = content.decode('utf8', 'ignore') - f.write(content) - if os.path.exists(src): - self._copy_attributes(src, dest) - self.log_debug("added string at '%s' to FileCacheArchive '%s'" - % (src, self._archive_root)) + with codecs.open(dest, mode, encoding='utf-8') as f: + if isinstance(content, bytes): + content = content.decode('utf8', 'ignore') + f.write(content) + if os.path.exists(src): + self._copy_attributes(src, dest) + self.log_debug("added string at '%s' to FileCacheArchive '%s'" + % (src, self._archive_root)) def add_binary(self, content, dest): with self._path_lock: @@ -405,8 +405,8 @@ if not dest: return - f = codecs.open(dest, 'wb', encoding=None) - f.write(content) + with codecs.open(dest, 'wb', encoding=None) as f: + f.write(content) self.log_debug("added binary content at '%s' to archive '%s'" % (dest, self._archive_root)) @@ -559,6 +559,43 @@ self._archive_root = _new_root self._archive_name = os.path.join(self._tmp_dir, self.name()) + def do_file_sub(self, path, regexp, subst): + """Apply a regexp substitution to a file in the archive. 
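A hedged usage sketch for this new method (the path and pattern are examples, modeled on the _obfuscate_upload_passwords caller added to sos/component.py further below):

    # 'archive' is assumed to be an existing FileCacheArchive instance
    n = archive.do_file_sub("sos_logs/sos.log",
                            r"(--upload-pass[\s=]+)\S+", r"\1********")
    # n is the number of substitutions written back into the archive (0 if
    # the pattern never matched, in which case the file is left untouched)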
+ + :param path: Path in the archive where the file can be found + :type path: ``str`` + + :param regexp: A regex to match the contents of the file + :type regexp: ``str`` or compiled ``re`` object + + :param subst: The substitution string to be used to replace matches + within the file + :type subst: ``str`` + + :returns: Number of replacements made + :rtype: ``int`` + """ + common_flags = re.IGNORECASE | re.MULTILINE + if hasattr(regexp, "pattern"): + pattern = regexp.pattern + flags = regexp.flags | common_flags + else: + pattern = regexp + flags = common_flags + + content = "" + with self.open_file(path) as readable: + content = readable.read() + if not isinstance(content, str): + content = content.decode('utf8', 'ignore') + result, replacements = re.subn(pattern, subst, content, + flags=flags) + if replacements: + self.add_string(result, path) + else: + replacements = 0 + return replacements + def finalize(self, method): self.log_info("finalizing archive '%s' using method '%s'" % (self._archive_root, method)) diff -Nru sosreport-4.5.6/sos/cleaner/archives/__init__.py sosreport-4.7.0/sos/cleaner/archives/__init__.py --- sosreport-4.5.6/sos/cleaner/archives/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/archives/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -25,6 +25,12 @@ def extract_archive(archive_path, tmpdir): archive = tarfile.open(archive_path) path = os.path.join(tmpdir, 'cleaner') + # set extract filter since python 3.12 (see PEP-706 for more) + # Because python 3.10 and 3.11 raises false alarms as exceptions + # (see #3330 for examples), we can't use data filter but must + # fully trust the archive (legacy behaviour) + archive.extraction_filter = getattr(tarfile, 'fully_trusted_filter', + (lambda member, path: member)) archive.extractall(path) archive.close() return os.path.join(path, archive.name.split('/')[-1].split('.tar')[0]) diff -Nru sosreport-4.5.6/sos/cleaner/__init__.py sosreport-4.7.0/sos/cleaner/__init__.py --- sosreport-4.5.6/sos/cleaner/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -428,6 +428,7 @@ "representative and keep the mapping file private") self.cleanup() + return None def rebuild_nested_archive(self): """Handles repacking the nested tarball, now containing only obfuscated @@ -525,15 +526,14 @@ """ try: hash_size = 1024**2 # Hash 1MiB of content at a time. 
- archive_fp = open(archive_path, 'rb') - digest = hashlib.new(self.hash_name) - while True: - hashdata = archive_fp.read(hash_size) - if not hashdata: - break - digest.update(hashdata) - archive_fp.close() - return digest.hexdigest() + '\n' + with open(archive_path, 'rb') as archive_fp: + digest = hashlib.new(self.hash_name) + while True: + hashdata = archive_fp.read(hash_size) + if not hashdata: + break + digest.update(hashdata) + return digest.hexdigest() + '\n' except Exception as err: self.log_debug("Could not generate new checksum: %s" % err) return None @@ -751,7 +751,7 @@ """ if not filename: # the requested file doesn't exist in the archive - return + return None subs = 0 if not short_name: short_name = filename.split('/')[-1] diff -Nru sosreport-4.5.6/sos/cleaner/mappings/hostname_map.py sosreport-4.7.0/sos/cleaner/mappings/hostname_map.py --- sosreport-4.5.6/sos/cleaner/mappings/hostname_map.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/mappings/hostname_map.py 2024-02-20 00:39:46.000000000 +0000 @@ -43,6 +43,8 @@ strip_exts = ('.yaml', '.yml', '.crt', '.key', '.pem', '.log', '.repo', '.rules', '.conf', '.cfg') + ignore_short_items = True + match_full_words_only = True host_count = 0 domain_count = 0 _domains = {} @@ -215,6 +217,7 @@ if all([h.isupper() for h in host]): _fqdn = _fqdn.upper() return _fqdn + return None def sanitize_short_name(self, hostname): """Obfuscate the short name of the host with an incremented counter diff -Nru sosreport-4.5.6/sos/cleaner/mappings/__init__.py sosreport-4.7.0/sos/cleaner/mappings/__init__.py --- sosreport-4.5.6/sos/cleaner/mappings/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/mappings/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -25,6 +25,8 @@ # used for filename obfuscations in parser.parse_string_for_keys() skip_keys = [] compile_regexes = True + ignore_short_items = False + match_full_words_only = False def __init__(self): self.dataset = {} @@ -36,11 +38,13 @@ """Some items need to be completely ignored, for example link-local or loopback addresses should not be obfuscated """ - if not item or item in self.skip_keys or item in self.dataset.values(): + if not item or item in self.skip_keys or item in self.dataset.values()\ + or (self.ignore_short_items and len(item) <= 3): return True for skip in self.ignore_matches: if re.match(skip, item, re.I): return True + return False def add(self, item): """Add a particular item to the map, generating an obfuscated pair @@ -94,7 +98,11 @@ :returns: A compiled regex pattern for the item :rtype: ``re.Pattern`` """ - return re.compile(re.escape(item), re.I) + if self.match_full_words_only: + item = rf'(?=\b|_|-){re.escape(item)}(?=\b|_|-)' + else: + item = re.escape(item) + return re.compile(item, re.I) def sanitize_item(self, item): """Perform the obfuscation relevant to the item being added to the map. diff -Nru sosreport-4.5.6/sos/cleaner/mappings/keyword_map.py sosreport-4.7.0/sos/cleaner/mappings/keyword_map.py --- sosreport-4.5.6/sos/cleaner/mappings/keyword_map.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/mappings/keyword_map.py 2024-02-20 00:39:46.000000000 +0000 @@ -21,6 +21,7 @@ is an incrementing integer. 
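The intent of match_full_words_only above can be sketched in isolation (the keyword "admin" is a made-up example):

    import re

    item = "admin"
    # Same lookaround form the mappings hunk introduces: only match the
    # item as a standalone word or delimited by '_'/'-', case-insensitively.
    pat = re.compile(rf'(?=\b|_|-){re.escape(item)}(?=\b|_|-)', re.I)

    assert pat.search("admin-node")      # delimited: gets obfuscated
    assert not pat.search("badminton")   # substring of a longer word: kept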
""" + match_full_words_only = True word_count = 0 def sanitize_item(self, item): diff -Nru sosreport-4.5.6/sos/cleaner/mappings/mac_map.py sosreport-4.7.0/sos/cleaner/mappings/mac_map.py --- sosreport-4.5.6/sos/cleaner/mappings/mac_map.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/mappings/mac_map.py 2024-02-20 00:39:46.000000000 +0000 @@ -77,3 +77,4 @@ # match 48-bit IPv4 MAC addresses if re.match('([0-9a-fA-F][:_]?){12}', item): return self.mac_template % hextets + return None diff -Nru sosreport-4.5.6/sos/cleaner/mappings/username_map.py sosreport-4.7.0/sos/cleaner/mappings/username_map.py --- sosreport-4.5.6/sos/cleaner/mappings/username_map.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/mappings/username_map.py 2024-02-20 00:39:46.000000000 +0000 @@ -20,6 +20,8 @@ Note that this specifically obfuscates user_names_ and not UIDs. """ + ignore_short_items = True + match_full_words_only = True name_count = 0 def sanitize_item(self, username): diff -Nru sosreport-4.5.6/sos/cleaner/parsers/__init__.py sosreport-4.7.0/sos/cleaner/parsers/__init__.py --- sosreport-4.5.6/sos/cleaner/parsers/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/cleaner/parsers/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -151,7 +151,8 @@ if self.compile_regexes: for item, reg in self.mapping.compiled_regexes: if reg.search(string_data): - string_data = reg.sub(self.mapping.get(item), string_data) + string_data = reg.sub(self.mapping.get(item.lower()), + string_data) else: for k, ob in sorted(self.mapping.dataset.items(), reverse=True, key=lambda x: len(x[0])): diff -Nru sosreport-4.5.6/sos/collector/clusters/__init__.py sosreport-4.7.0/sos/collector/clusters/__init__.py --- sosreport-4.5.6/sos/collector/clusters/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/clusters/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -294,7 +294,8 @@ """ return node.address == self.primary.address - def exec_primary_cmd(self, cmd, need_root=False, timeout=180): + def exec_primary_cmd(self, cmd, need_root=False, timeout=180, + use_shell='auto'): """Used to retrieve command output from a (primary) node in a cluster :param cmd: The command to run @@ -306,12 +307,14 @@ :param timeout: Amount of time to allow cmd to run in seconds :type timeout: ``int`` + :param use_shell: Does the command required execution within a shell? 
+ :type use_shell: ``auto`` or ``bool`` + :returns: The output and status of `cmd` :rtype: ``dict`` """ - pty = self.primary.local is False - res = self.primary.run_command(cmd, get_pty=pty, need_root=need_root, - timeout=timeout) + res = self.primary.run_command(cmd, need_root=need_root, + use_shell=use_shell, timeout=timeout) if res['output']: res['output'] = res['output'].replace('Password:', '') return res diff -Nru sosreport-4.5.6/sos/collector/clusters/juju.py sosreport-4.7.0/sos/collector/clusters/juju.py --- sosreport-4.5.6/sos/collector/clusters/juju.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/clusters/juju.py 2024-02-20 00:39:46.000000000 +0000 @@ -13,6 +13,8 @@ import re from sos.collector.clusters import Cluster +from sos.utilities import parse_version +from sos.utilities import sos_get_command_output def _parse_option_string(strings=None): @@ -156,9 +158,16 @@ return index + def _get_juju_version(self): + """Grab the version of juju""" + res = sos_get_command_output("juju version") + return res['output'].split("-")[0] + def _execute_juju_status(self, model_name): model_option = f"-m {model_name}" if model_name else "" format_option = "--format json" + if parse_version(self._get_juju_version()) > parse_version("3"): + format_option += " --no-color" status_cmd = f"{self.cmd} status {model_option} {format_option}" res = self.exec_primary_cmd(status_cmd) if not res["status"] == 0: diff -Nru sosreport-4.5.6/sos/collector/clusters/ocp.py sosreport-4.7.0/sos/collector/clusters/ocp.py --- sosreport-4.5.6/sos/collector/clusters/ocp.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/clusters/ocp.py 2024-02-20 00:39:46.000000000 +0000 @@ -93,7 +93,8 @@ self.log_debug("Locating 'oc' failed: %s" % _oc_path['output']) if self.get_option('kubeconfig'): - self._oc_cmd += " --config %s" % self.get_option('kubeconfig') + self._oc_cmd += " --kubeconfig " \ + f"{self.get_option('kubeconfig')}" self.log_debug("oc base command set to %s" % self._oc_cmd) return self._oc_cmd @@ -127,7 +128,7 @@ collection via a container image """ if not self.set_transport_type() == 'oc': - return + return None out = self.exec_primary_cmd(self.fmt_oc_cmd("auth can-i '*' '*'")) self.oc_cluster_admin = out['status'] == 0 diff -Nru sosreport-4.5.6/sos/collector/clusters/saltstack.py sosreport-4.7.0/sos/collector/clusters/saltstack.py --- sosreport-4.5.6/sos/collector/clusters/saltstack.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/collector/clusters/saltstack.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,82 @@ +# Copyright Red Hat 2022, Trevor Benson + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +import json +from shlex import quote +from sos.collector.clusters import Cluster + + +class saltstack(Cluster): + """ + The saltstack cluster profile is intended to be used on saltstack + clusters (Salt Project). 
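How this profile turns salt-run output into a node list can be sketched as follows (the output string and minion names are made up):

    import json

    # salt-run prints Python-repr style dicts; swapping the quotes makes
    # the text valid JSON, exactly as _parse_manage_status does below.
    output = "{'down': [], 'up': ['db1.example.com', 'web1.example.com']}"
    nodes = []
    for _, minions in json.loads(output.replace("'", '"')).items():
        nodes.extend(minions)
    assert nodes == ["db1.example.com", "web1.example.com"]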
+ """ + + cluster_name = "Saltstack" + packages = ("salt-master",) + sos_plugins = ["saltmaster"] + strict_node_list = True + option_list = [ + ("compound", "", "Filter node list to those matching compound"), + ("glob", "", "Filter node list to those matching glob pattern"), + ("grain", "", "Filter node list to those with matching grain"), + ("minion_id_unresolvable", False, "Returns the FQDN grain of each" + " minion in the node list when the minion ID is not a hostname."), + ("nodegroup", "", "Filter node list to those matching nodegroup"), + ("pillar", "", "Filter node list to those with matching pillar"), + ("subnet", "", "Filter node list to those in subnet"), + ] + targeted = False + + node_cmd = "salt-run --out=pprint manage.status" + + def _parse_manage_status(self, output: str) -> list: + nodes = [] + salt_json_output = json.loads(output.replace("'", '"')) + for _, value in salt_json_output.items(): + nodes.extend(value) + return nodes + + def _get_hostnames_from_grain(self, manage_status: dict) -> list: + hostnames = [] + fqdn_cmd = "salt --out=newline_values_only {minion} grains.get fqdn" + for status, minions in manage_status.items(): + if status == "down": + self.log_warn(f"Node(s) {minions} are status down.") + hostnames.extend(minions) + else: + for minion in minions: + node_cmd = fqdn_cmd.format(minion=minion) + hostnames.append( + self.exec_primary_cmd(node_cmd)["output"].strip() + ) + return hostnames + + def _get_nodes(self) -> list: + res = self.exec_primary_cmd(self.node_cmd) + if res["status"] != 0: + raise Exception("Node enumeration did not return usable output") + if self.get_option("minion_id_unresolvable"): + status = json.loads(res["output"].replace("'", '"')) + return self._get_hostnames_from_grain(status) + return self._parse_manage_status(res["output"]) + + def get_nodes(self): + # Default to all online nodes + for option in self.option_list: + if option[0] != "minion_id_unresolvable": + opt = self.get_option(option[0]) + if opt: + self.node_cmd += f" tgt={quote(opt)} tgt_type={option[0]}" + break + return self._get_nodes() + + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/collector/__init__.py sosreport-4.7.0/sos/collector/__init__.py --- sosreport-4.5.6/sos/collector/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -134,7 +134,13 @@ 'upload_pass': None, 'upload_method': 'auto', 'upload_no_ssl_verify': False, - 'upload_protocol': 'auto' + 'upload_protocol': 'auto', + 'upload_s3_endpoint': None, + 'upload_s3_region': None, + 'upload_s3_bucket': None, + 'upload_s3_access_key': None, + 'upload_s3_secret_key': None, + 'upload_s3_object_prefix': None } def __init__(self, parser, parsed_args, cmdline_args): @@ -237,7 +243,12 @@ def _import_modules(self, modname): """Import and return all found classes in a module""" mod_short_name = modname.split('.')[2] - module = __import__(modname, globals(), locals(), [mod_short_name]) + try: + module = __import__(modname, globals(), locals(), [mod_short_name]) + except ImportError as e: + print(f'Error while trying to load module {modname}: ' + f' {e.__class__.__name__}') + raise e modules = inspect.getmembers(module, inspect.isclass) for mod in modules: if mod[0] in ('SosHost', 'Cluster'): @@ -435,8 +446,21 @@ action='store_true', help="Disable SSL verification for upload url" ) + collect_grp.add_argument("--upload-s3-endpoint", default=None, + help="Endpoint to upload to for S3 bucket") + 
collect_grp.add_argument("--upload-s3-region", default=None, + help="Region for the S3 bucket") + collect_grp.add_argument("--upload-s3-bucket", default=None, + help="Name of the S3 bucket to upload to") + collect_grp.add_argument("--upload-s3-access-key", default=None, + help="Access key for the S3 bucket") + collect_grp.add_argument("--upload-s3-secret-key", default=None, + help="Secret key for the S3 bucket") + collect_grp.add_argument("--upload-s3-object-prefix", default=None, + help="Prefix for the S3 object/key") collect_grp.add_argument("--upload-protocol", default='auto', - choices=['auto', 'https', 'ftp', 'sftp'], + choices=['auto', 'https', 'ftp', 'sftp', + 's3'], help="Manually specify the upload protocol") # Group the cleaner options together @@ -598,6 +622,7 @@ return True else: return False + self.exit(f"Unknown option type: {cli.opt_type}") def log_info(self, msg): """Log info messages to both console and log file""" @@ -1264,7 +1289,8 @@ msg = 'No sosreports were collected, nothing to archive...' self.exit(msg, 1) - if self.opts.upload and self.policy.get_upload_url(): + if (self.opts.upload and self.policy.get_upload_url()) or \ + self.opts.upload_s3_endpoint: try: self.policy.upload_archive(arc_name) self.ui_log.info("Uploaded archive successfully") @@ -1354,6 +1380,7 @@ self.archive.add_final_manifest_data( self.opts.compression_type ) + self._obfuscate_upload_passwords() if do_clean: _dir = os.path.join(self.tmpdir, self.archive._name) cleaner.obfuscate_file( diff -Nru sosreport-4.5.6/sos/collector/sosnode.py sosreport-4.7.0/sos/collector/sosnode.py --- sosreport-4.5.6/sos/collector/sosnode.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/sosnode.py 2024-02-20 00:39:46.000000000 +0000 @@ -213,6 +213,7 @@ self.log_error("Could not create container on host: %s" % res['output']) raise Exception + return False def get_container_auth(self): """Determine what the auth string should be to pull the image used to @@ -331,6 +332,7 @@ self._load_sos_plugins(sosinfo['output']) if self.check_sos_version('3.6'): self._load_sos_presets() + return None def _load_sos_presets(self): cmd = '%s --list-presets' % self.sos_bin @@ -441,20 +443,25 @@ return False return self.host.package_manager.pkg_by_name(pkg) is not None - def run_command(self, cmd, timeout=180, get_pty=False, need_root=False, + def run_command(self, cmd, timeout=180, use_shell='auto', need_root=False, use_container=False, env=None): """Runs a given cmd, either via the SSH session or locally - Arguments: - cmd - the full command to be run - timeout - time in seconds to wait for the command to complete - get_pty - If a shell is absolutely needed to run a command, set - this to True - need_root - if a command requires root privileges, setting this to - True tells sos-collector to format the command with - sudo or su - as appropriate and to input the password - use_container - Run this command in a container *IF* the host is - containerized + :param cmd: The full command to be run + :type cmd: ``str`` + + :param timeout: Time in seconds to wait for `cmd` to complete + :type timeout: ``int`` + + :param use_shell: If a shell is needed to run `cmd`, set to True + :type use_shell: ``bool`` or ``auto`` for transport-determined + + :param use_container: Run this command in a container *IF* the host + is a containerized host + :type use_container: ``bool`` + + :param env: Pass environment variables to set for this `cmd` + :type env: ``dict`` """ if not self.connected and not self.local: self.log_debug('Node is 
disconnected, attempting to reconnect') @@ -470,15 +477,11 @@ cmd = self.host.format_container_command(cmd) if need_root: cmd = self._format_cmd(cmd) - - if 'atomic' in cmd: - get_pty = True - if env: _cmd_env = self.env_vars env = _cmd_env.update(env) return self._transport.run_command(cmd, timeout, need_root, env, - get_pty) + use_shell) def sosreport(self): """Run an sos report on the node, then collect it""" @@ -777,7 +780,8 @@ checksum = False res = self.run_command(self.sos_cmd, timeout=self.opts.timeout, - get_pty=True, need_root=True, + use_shell=True, + need_root=True, use_container=True, env=self.sos_env_vars) if res['status'] == 0: diff -Nru sosreport-4.5.6/sos/collector/transports/control_persist.py sosreport-4.7.0/sos/collector/transports/control_persist.py --- sosreport-4.5.6/sos/collector/transports/control_persist.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/transports/control_persist.py 2024-02-20 00:39:46.000000000 +0000 @@ -176,6 +176,7 @@ return False self.log_debug("Control socket not present when attempting to " "terminate session") + return False @property def connected(self): diff -Nru sosreport-4.5.6/sos/collector/transports/__init__.py sosreport-4.7.0/sos/collector/transports/__init__.py --- sosreport-4.5.6/sos/collector/transports/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/transports/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -102,6 +102,7 @@ section.add_text( 'Detailed information not available for this transport' ) + return None @classmethod def display_self_help(cls, section): @@ -193,8 +194,16 @@ raise NotImplementedError("Transport %s does not define disconnect" % self.name) + @property + def _need_shell(self): + """ + Transports may override this to control when/if commands executed over + the transport needs to utilize a shell on the remote host. + """ + return False + def run_command(self, cmd, timeout=180, need_root=False, env=None, - get_pty=False): + use_shell='auto'): """Run a command on the node, returning its output and exit code. This should return the exit code of the command being executed, not the exit code of whatever mechanism the transport uses to execute that @@ -204,26 +213,25 @@ :type cmd: ``str`` :param timeout: The maximum time in seconds to allow the cmd to run - :type timeout: ``int`` - - :param get_pty: Does ``cmd`` require a pty? - :type get_pty: ``bool`` + :type timeout: ``int``` :param need_root: Does ``cmd`` require root privileges? - :type neeed_root: ``bool`` + :type need_root: ``bool`` :param env: Specify env vars to be passed to the ``cmd`` :type env: ``dict`` - :param get_pty: Does ``cmd`` require execution with a pty? - :type get_pty: ``bool`` + :param use_shell: Does ``cmd`` require execution within a shell? + :type use_shell: ``bool`` or ``auto`` for transport-determined :returns: Output of ``cmd`` and the exit code :rtype: ``dict`` with keys ``output`` and ``status`` """ self.log_debug('Running command %s' % cmd) - if get_pty: + if (use_shell is True or + (self._need_shell if use_shell == 'auto' else False)): cmd = "/bin/bash -c %s" % quote(cmd) + self.log_debug(f"Shell requested, command is now {cmd}") # currently we only use/support the use of pexpect for handling the # execution of these commands, as opposed to directly invoking # subprocess.Popen() in conjunction with tools like sshpass. 
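The three-valued use_shell contract above condenses to the following standalone sketch (the function name is a stand-in, not sos API):

    # use_shell is True, False, or 'auto' (defer to the transport's
    # _need_shell property); mirrors the condition added in run_command.
    def wants_shell(use_shell, transport_need_shell):
        return use_shell is True or (use_shell == 'auto'
                                     and transport_need_shell)

    assert wants_shell(True, False)        # caller forces a shell
    assert not wants_shell(False, True)    # caller explicitly declines one
    assert wants_shell('auto', True)       # transport gets the final say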
@@ -299,6 +307,11 @@ return {'status': result.exitstatus, 'output': out} elif index == 1: raise CommandTimeoutException(cmd) + # if we somehow manage to flow to this point, use this bogus exit code + # as a signal to debugging efforts that whatever went sideways did so + # as part of the above block + self.log_debug(f"Unexpected index {index} from pexpect: {result}") + return {'status': 999, 'output': ''} def _send_pexpect_password(self, index, result): """Handle password prompts for sudo and su usage for non-root SSH users diff -Nru sosreport-4.5.6/sos/collector/transports/oc.py sosreport-4.7.0/sos/collector/transports/oc.py --- sosreport-4.5.6/sos/collector/transports/oc.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/transports/oc.py 2024-02-20 00:39:46.000000000 +0000 @@ -208,15 +208,15 @@ return super(OCTransport, self)._format_cmd_for_exec(cmd) def run_command(self, cmd, timeout=180, need_root=False, env=None, - get_pty=False): + use_shell=False): # debug pod setup is slow, extend all timeouts to account for this if timeout: timeout += 10 - # since we always execute within a bash shell, force disable get_pty + # since we always execute within a bash shell, force disable use_shell # to avoid double-quoting return super(OCTransport, self).run_command(cmd, timeout, need_root, - env, False) + env, use_shell=False) def _disconnect(self): if os.path.exists(self.pod_tmp_conf): diff -Nru sosreport-4.5.6/sos/collector/transports/saltstack.py sosreport-4.7.0/sos/collector/transports/saltstack.py --- sosreport-4.5.6/sos/collector/transports/saltstack.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/collector/transports/saltstack.py 2024-02-20 00:39:46.000000000 +0000 @@ -33,14 +33,14 @@ def _convert_output_json(self, json_output): return list(json.loads(json_output).values())[0] - def run_command( - self, cmd, timeout=180, need_root=False, env=None, get_pty=False): + def run_command(self, cmd, timeout=180, need_root=False, env=None, + use_shell=False): """ Run a command on the remote host using SaltStack Master. If the output is json, convert it to a string. """ ret = super(SaltStackMaster, self).run_command( - cmd, timeout, need_root, env, get_pty) + cmd, timeout, need_root, env, use_shell) with contextlib.suppress(Exception): ret['output'] = self._convert_output_json(ret['output']) return ret @@ -106,6 +106,8 @@ self.log_info("Transport is locally supported and service running. 
") cmd = "echo Connected" result = self.run_command(cmd, timeout=180) + if result['status'] == 1: + raise ConnectionException(self.address) return result['status'] == 0 def _disconnect(self): diff -Nru sosreport-4.5.6/sos/component.py sosreport-4.7.0/sos/component.py --- sosreport-4.5.6/sos/component.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/component.py 2024-02-20 00:39:46.000000000 +0000 @@ -69,6 +69,15 @@ "verbosity": 0 } + # files in collected archive that might contain upload password + files_with_upload_passwd = [ + "sos_logs/sos.log", + "sos_reports/manifest.json", + "sos_commands/process/ps_*", + "sos_commands/selinux/ps_*", + "sos_commands/systemd/systemctl_status_--all", + ] + def __init__(self, parser, parsed_args, cmdline_args): self.parser = parser self.args = parsed_args @@ -85,6 +94,10 @@ except Exception: pass + self.opts = SoSOptions(arg_defaults=self._arg_defaults) + if self.load_policy: + self.load_local_policy() + # update args from component's arg_defaults definition self._arg_defaults.update(self.arg_defaults) self.opts = self.load_options() # lgtm [py/init-calls-subclass] @@ -108,9 +121,6 @@ self.tempfile_util = TempFileUtil(self.tmpdir) self._setup_logging() - if self.load_policy: - self.load_local_policy() - if self.manifest is not None: self.manifest.add_field('version', __version__) self.manifest.add_field('cmdline', ' '.join(self.cmdline)) @@ -182,6 +192,7 @@ opts = [o for o in self.opts.dict().keys() if o.startswith('list')] if opts: return any([getattr(self.opts, opt) for opt in opts]) + return False @classmethod def add_parser_options(cls, parser): @@ -264,6 +275,28 @@ opts = self.apply_options_from_cmdline(opts) + # user specified command line preset + self.preset = None + if hasattr(opts, 'preset'): + if opts.preset != self._arg_defaults["preset"]: + self.preset = self.policy.find_preset(opts.preset) + if not self.preset: + sys.stderr.write("Unknown preset: '%s'\n" % opts.preset) + self.preset = self.policy.probe_preset() + opts.list_presets = True + + # --preset=auto + if not self.preset: + self.preset = self.policy.probe_preset() + # now merge preset options to opts + opts.merge(self.preset.opts) + # re-apply any cmdline overrides to the preset + opts = self.apply_options_from_cmdline(opts) + + if hasattr(self.preset.opts, 'verbosity') and \ + self.preset.opts.verbosity > 0: + self.set_loggers_verbosity(self.preset.opts.verbosity) + return opts def cleanup(self): @@ -343,6 +376,23 @@ self.archive.set_debug(self.opts.verbosity > 2) + def _obfuscate_upload_passwords(self): + # obfuscate strings like: + # --upload-pass=PASSWORD + # --upload-pass PASSWORD + # --upload-url https://user:PASSWORD@some.url + # in both sos_logs/sos.log and in sos_reports/manifest.json + # and several sos_commands/* places from plugins's collected data + _arc_path = self.archive.get_archive_path() + for path in self.files_with_upload_passwd: + for f in Path(_arc_path).glob(path): + # get just the relative path that archive works with + f = os.path.relpath(f, _arc_path) + for re in [r"(--upload-pass[\s=]+)\S+", + r"(--upload-url[\s=]+\S+://.*:)([^@]*)", + r"(--upload-s3-secret-key[\s=]+)\S+"]: + self.archive.do_file_sub(f, re, r"\1********") + def add_ui_log_to_stdout(self): ui_console = logging.StreamHandler(sys.stdout) ui_console.setFormatter(logging.Formatter('%(message)s')) @@ -352,15 +402,13 @@ self.ui_log.addHandler(ui_console) def set_loggers_verbosity(self, verbosity): - if verbosity: - if self.flog: - self.flog.setLevel(logging.DEBUG) - if 
self.opts.verbosity > 1: + if getattr(self, 'flog', None) and verbosity: + self.flog.setLevel(logging.DEBUG) + if getattr(self, 'console', None): + if verbosity and self.opts.verbosity > 1: self.console.setLevel(logging.DEBUG) else: self.console.setLevel(logging.WARNING) - else: - self.console.setLevel(logging.WARNING) def _setup_logging(self): """Creates the log handler that shall be used by all components and any diff -Nru sosreport-4.5.6/sos/help/__init__.py sosreport-4.7.0/sos/help/__init__.py --- sosreport-4.5.6/sos/help/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/help/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -126,6 +126,7 @@ _transport = self.opts.topic.split('.')[-1] if _transport in TRANSPORTS: return TRANSPORTS[_transport] + return None def _get_collect_cluster(self): from sos.collector import SoSCollector @@ -135,6 +136,7 @@ for cluster in clusters: if cluster[0] == self.opts.topic.split('.')[-1]: return cluster[1] + return None def _get_plugin_variant(self): mod = importlib.import_module('sos.' + self.opts.topic) @@ -145,6 +147,7 @@ if plugin.__subclasses__(): cls = self.policy.match_plugin(plugin.__subclasses__()) return cls + return None def _get_policy_by_name(self): _topic = self.opts.topic.split('.')[-1] @@ -157,6 +160,7 @@ _p = policy.__name__.lower().replace('policy', '') if _p == _topic: return policy + return None def display_self_help(self): """Displays the help information for this component directly, that is diff -Nru sosreport-4.5.6/sos/__init__.py sosreport-4.7.0/sos/__init__.py --- sosreport-4.5.6/sos/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -14,7 +14,7 @@ This module houses the i18n setup and message function. The default is to use gettext to internationalize messages. """ -__version__ = "4.5.6" +__version__ = "4.7.0" import os import sys diff -Nru sosreport-4.5.6/sos/policies/auth/__init__.py sosreport-4.7.0/sos/policies/auth/__init__.py --- sosreport-4.5.6/sos/policies/auth/__init__.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/policies/auth/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,210 @@ +# Copyright (C) 2023 Red Hat, Inc., Jose Castillo + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +import logging +try: + import requests + REQUESTS_LOADED = True +except ImportError: + REQUESTS_LOADED = False +import time +from datetime import datetime, timedelta + +DEVICE_AUTH_CLIENT_ID = "sos-tools" +GRANT_TYPE_DEVICE_CODE = "urn:ietf:params:oauth:grant-type:device_code" + +logger = logging.getLogger("sos") + + +class DeviceAuthorizationClass: + """ + Device Authorization Class + """ + + def __init__(self, client_identifier_url, token_endpoint): + + self._access_token = None + self._access_expires_at = None + self.__device_code = None + + self.client_identifier_url = client_identifier_url + self.token_endpoint = token_endpoint + self._use_device_code_grant() + + def _use_device_code_grant(self): + """ + Start the device auth flow. In the future we will + store the tokens in an in-memory keyring. 
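For orientation, the two HTTP exchanges behind this flow (RFC 8628) look roughly like this; the endpoint URLs are placeholders, not sos defaults:

    import requests

    # 1. Request a device code plus a URL the user must visit to approve.
    res = requests.post("https://sso.example.com/device/auth",
                        data="client_id=sos-tools",
                        headers={"content-type":
                                 "application/x-www-form-urlencoded"})
    grant = res.json()  # user_code, device_code, interval,
                        # verification_uri_complete, ...

    # 2. Poll the token endpoint until approval; an HTTP 400 carrying
    #    "authorization_pending" or "slow_down" just means keep waiting.
    token = requests.post("https://sso.example.com/token", data={
        "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
        "client_id": "sos-tools",
        "device_code": grant["device_code"],
    })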
+ + """ + + self._request_device_code() + print( + "Please visit the following URL to authenticate this" + f" device: {self._verification_uri_complete}" + ) + self.poll_for_auth_completion() + + def _request_device_code(self): + """ + Initialize new Device Authorization Grant attempt by + requesting a new device code. + + """ + data = "client_id={}".format(DEVICE_AUTH_CLIENT_ID) + headers = {'content-type': 'application/x-www-form-urlencoded'} + if not REQUESTS_LOADED: + raise Exception("python3-requests is not installed and is required" + " for obtaining device auth token.") + try: + res = requests.post( + self.client_identifier_url, + data=data, + headers=headers) + res.raise_for_status() + response = res.json() + self._user_code = response.get("user_code") + self._verification_uri = response.get("verification_uri") + self._interval = response.get("interval") + self.__device_code = response.get("device_code") + self._verification_uri_complete = response.get( + "verification_uri_complete") + except requests.HTTPError as e: + raise requests.HTTPError("HTTP request failed " + "while attempting to acquire the tokens." + f"Error returned was {res.status_code} " + f"{e}") + + def poll_for_auth_completion(self): + """ + Continuously poll OIDC token endpoint until the user is successfully + authenticated or an error occurs. + + """ + token_data = {'grant_type': GRANT_TYPE_DEVICE_CODE, + 'client_id': DEVICE_AUTH_CLIENT_ID, + 'device_code': self.__device_code} + + if not REQUESTS_LOADED: + raise Exception("python3-requests is not installed and is required" + " for obtaining device auth token.") + while self._access_token is None: + time.sleep(self._interval) + try: + check_auth_completion = requests.post(self.token_endpoint, + data=token_data) + + status_code = check_auth_completion.status_code + + if status_code == 200: + logger.info("The SSO authentication is successful") + self._set_token_data(check_auth_completion.json()) + if status_code not in [200, 400]: + raise Exception(status_code, check_auth_completion.text) + if status_code == 400 and \ + check_auth_completion.json()['error'] not in \ + ("authorization_pending", "slow_down"): + raise Exception(status_code, check_auth_completion.text) + except requests.exceptions.RequestException as e: + logger.error(f"Error was found while posting a request: {e}") + + def _set_token_data(self, token_data): + """ + Set the class attributes as per the input token_data received. + In the future we will persist the token data in a local, + in-memory keyring, to avoid visting the browser frequently. + :param token_data: Token data containing access_token, refresh_token + and their expiry etc. + """ + self._access_token = token_data.get("access_token") + self._access_expires_at = datetime.utcnow() + \ + timedelta(seconds=token_data.get("expires_in")) + self._refresh_token = token_data.get("refresh_token") + self._refresh_expires_in = token_data.get("refresh_expires_in") + if self._refresh_expires_in == 0: + self._refresh_expires_at = datetime.max + else: + self._refresh_expires_at = datetime.utcnow() + \ + timedelta(seconds=self._refresh_expires_in) + + def get_access_token(self): + """ + Get the valid access_token at any given time. 
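The validity checks below apply a 180-second safety margin, i.e. (the timestamps here are arbitrary):

    from datetime import datetime, timedelta

    expires_at = datetime.utcnow() + timedelta(seconds=600)
    # Treat the token as expired 180s early so it cannot lapse mid-request.
    assert expires_at - timedelta(seconds=180) > datetime.utcnow()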
+ :return: Access_token + :rtype: string + """ + if self.is_access_token_valid(): + return self._access_token + else: + if self.is_refresh_token_valid(): + self._use_refresh_token_grant() + return self._access_token + else: + self._use_device_code_grant() + return self._access_token + + def is_access_token_valid(self): + """ + Check the validity of access_token. We are considering it invalid 180 + sec. prior to its exact expiry time. + :return: True/False + + """ + return self._access_token and self._access_expires_at and \ + self._access_expires_at - timedelta(seconds=180) > \ + datetime.utcnow() + + def is_refresh_token_valid(self): + """ + Check the validity of refresh_token. We are considering it invalid + 180 sec. prior to its exact expiry time. + + :return: True/False + + """ + return self._refresh_token and self._refresh_expires_at and \ + self._refresh_expires_at - timedelta(seconds=180) > \ + datetime.utcnow() + + def _use_refresh_token_grant(self, refresh_token=None): + """ + Fetch the new access_token and refresh_token using the existing + refresh_token and persist it. + :param refresh_token: optional param for refresh_token + + """ + if not REQUESTS_LOADED: + raise Exception("python3-requests is not installed and is required" + " for obtaining device auth token.") + refresh_token_data = {'client_id': DEVICE_AUTH_CLIENT_ID, + 'grant_type': 'refresh_token', + 'refresh_token': self._refresh_token if not + refresh_token else refresh_token} + + refresh_token_res = requests.post(self.token_endpoint, + data=refresh_token_data) + + if refresh_token_res.status_code == 200: + self._set_token_data(refresh_token_res.json()) + + elif refresh_token_res.status_code == 400 and 'invalid' in\ + refresh_token_res.json()['error']: + logger.warning("Problem while fetching the new tokens from refresh" + " token grant - {} {}."
+ " New Device code will be requested !".format + (refresh_token_res.status_code, + refresh_token_res.json()['error'])) + self._use_device_code_grant() + else: + raise Exception( + "Something went wrong while using the " + "Refresh token grant for fetching tokens:" + f" Returned status code {refresh_token_res.status_code}" + f" and error {refresh_token_res.json()['error']}") diff -Nru sosreport-4.5.6/sos/policies/distros/__init__.py sosreport-4.7.0/sos/policies/distros/__init__.py --- sosreport-4.5.6/sos/policies/distros/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/policies/distros/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -20,6 +20,7 @@ from sos.policies.runtimes.crio import CrioContainerRuntime from sos.policies.runtimes.podman import PodmanContainerRuntime from sos.policies.runtimes.docker import DockerContainerRuntime +from sos.policies.runtimes.lxd import LxdContainerRuntime from sos.utilities import (shell_out, is_executable, bold, sos_get_command_output) @@ -31,6 +32,12 @@ except ImportError: REQUESTS_LOADED = False +try: + import boto3 + BOTO3_LOADED = True +except ImportError: + BOTO3_LOADED = False + # Container environment variables for detecting if we're in a container ENV_CONTAINER = 'container' ENV_HOST_SYSROOT = 'HOST' @@ -55,11 +62,23 @@ _upload_user = None _upload_password = None _upload_method = None + _upload_s3_endpoint = 'https://s3.amazonaws.com' + _upload_s3_bucket = None + _upload_s3_access_key = None + _upload_s3_secret_key = None + _upload_s3_region = None + _upload_s3_object_prefix = '' default_container_runtime = 'docker' _preferred_hash_name = None upload_url = None upload_user = None upload_password = None + upload_s3_endpoint = None + upload_s3_bucket = None + upload_s3_access_key = None + upload_s3_secret_key = None + upload_s3_region = None + upload_s3_object_prefix = None # collector-focused class attrs containerized = False container_image = None @@ -95,7 +114,8 @@ _crun = [ PodmanContainerRuntime(policy=self), DockerContainerRuntime(policy=self), - CrioContainerRuntime(policy=self) + CrioContainerRuntime(policy=self), + LxdContainerRuntime(policy=self), ] for runtime in _crun: if runtime.check_is_active(): @@ -290,6 +310,13 @@ self.upload_password = cmdline_opts.upload_pass self.upload_archive_name = '' + self.upload_s3_endpoint = cmdline_opts.upload_s3_endpoint + self.upload_s3_region = cmdline_opts.upload_s3_region + self.upload_s3_access_key = cmdline_opts.upload_s3_access_key + self.upload_s3_bucket = cmdline_opts.upload_s3_bucket + self.upload_s3_object_prefix = cmdline_opts.upload_s3_object_prefix + self.upload_s3_secret_key = cmdline_opts.upload_s3_secret_key + # set or query for case id if not cmdline_opts.batch and not \ cmdline_opts.quiet: @@ -312,9 +339,15 @@ cmdline_opts.quiet: try: # Policies will need to handle the prompts for user information - if cmdline_opts.upload and self.get_upload_url(): + if cmdline_opts.upload and self.get_upload_url() and \ + not cmdline_opts.upload_protocol == 's3': self.prompt_for_upload_user() self.prompt_for_upload_password() + elif cmdline_opts.upload_protocol == 's3': + self.prompt_for_upload_s3_bucket() + self.prompt_for_upload_s3_endpoint() + self.prompt_for_upload_s3_access_key() + self.prompt_for_upload_s3_secret_key() self.ui_log.info('') except KeyboardInterrupt: raise @@ -352,6 +385,58 @@ except Exception as err: self.soslog.error(f"Error setting report niceness to 19: {err}") + def prompt_for_upload_s3_access_key(self): + """Should be overridden by policies to 
determine if an access key needs + to be provided for upload or not + """ + if not self.get_upload_s3_access_key(): + + msg = ( + "Please provide the upload access key for bucket" + f" {self.get_upload_s3_bucket()} via endpoint" + f" {self.get_upload_s3_endpoint()}: " + ) + self.upload_s3_access_key = input(_(msg)) + + def prompt_for_upload_s3_secret_key(self): + """Should be overridden by policies to determine if a secret key needs + to be provided for upload or not + """ + if not self.get_upload_s3_secret_key(): + msg = ( + "Please provide the upload secret key for bucket" + f" {self.get_upload_s3_bucket()} via endpoint" + f" {self.get_upload_s3_endpoint()}: " + ) + self.upload_s3_secret_key = getpass(msg) + + def prompt_for_upload_s3_bucket(self): + """Should be overridden by policies to determine if a bucket needs to + be provided for upload or not + """ + if not self.upload_s3_bucket: + if self.upload_url and self.upload_url.startswith('s3://'): + self.upload_s3_bucket = self.upload_url[5:] + else: + user_input = input(_("Please provide the upload bucket: ")) + self.upload_s3_bucket = user_input.strip('/') + return self.upload_s3_bucket + + def prompt_for_upload_s3_endpoint(self): + """Should be overridden by policies to determine if an endpoint needs + to be provided for upload or not + """ + default_endpoint = self._upload_s3_endpoint + if not self.upload_s3_endpoint: + msg = ( + "Please provide the upload endpoint for bucket" + f" {self.get_upload_s3_bucket()}" + f" (default: {default_endpoint}): " + ) + user_input = input(_(msg)) + self.upload_s3_endpoint = user_input or default_endpoint + return self.upload_s3_endpoint + def prompt_for_upload_user(self): """Should be overridden by policies to determine if a user needs to be provided or not @@ -375,7 +460,7 @@ Entry point for sos attempts to upload the generated archive to a policy or user specified location. - Curerntly there is support for HTTPS, SFTP, and FTP. HTTPS uploads are + Currently there is support for HTTPS, SFTP, and FTP. HTTPS uploads are preferred for policy-defined defaults. 
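The S3 helpers in this hunk resolve each credential in a fixed order: environment variable first, then the value supplied or prompted on the command line, then the policy default. A standalone sketch of that order, with a hypothetical helper name but the variable names taken from the diff:

    import os

    def resolve_s3_access_key(user_value, policy_default=None):
        # SOSUPLOADS3ACCESSKEY wins, then the user-supplied value, then
        # whatever the policy class hard-codes (None when unset)
        return os.getenv('SOSUPLOADS3ACCESSKEY') or user_value or policy_default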
Policies that need to override uploading methods should override the @@ -436,7 +521,8 @@ prots = { 'ftp': self.upload_ftp, 'sftp': self.upload_sftp, - 'https': self.upload_https + 'https': self.upload_https, + 's3': self.upload_s3 } if self.commons['cmdlineopts'].upload_protocol in prots.keys(): return prots[self.commons['cmdlineopts'].upload_protocol] @@ -456,7 +542,7 @@ :param password: Password for `user` to use for upload :type password: ``str`` - :returns: The user/password auth suitable for use in reqests calls + :returns: The user/password auth suitable for use in requests calls :rtype: ``requests.auth.HTTPBasicAuth()`` """ if not user: @@ -466,6 +552,73 @@ return requests.auth.HTTPBasicAuth(user, password) + def get_upload_s3_access_key(self): + """Helper function to determine if we should use the policy default + upload access key or one provided by the user + + :returns: The access_key to use for upload + :rtype: ``str`` + """ + return (os.getenv('SOSUPLOADS3ACCESSKEY', None) or + self.upload_s3_access_key or + self._upload_s3_access_key) + + def get_upload_s3_endpoint(self): + """Helper function to determine if we should use the policy default + upload endpoint or one provided by the user + + :returns: The S3 Endpoint to use for upload + :rtype: ``str`` + """ + if not self.upload_s3_endpoint: + self.prompt_for_upload_s3_endpoint() + return self.upload_s3_endpoint + + def get_upload_s3_region(self): + """Helper function to determine if we should use the policy default + upload region or one provided by the user + + :returns: The S3 region to use for upload + :rtype: ``str`` + """ + return self.upload_s3_region or self._upload_s3_region + + def get_upload_s3_bucket(self): + """Helper function to determine if we should use the policy default + upload bucket or one provided by the user + + :returns: The S3 bucket to use for upload + :rtype: ``str`` + """ + if self.upload_url and self.upload_url.startswith('s3://'): + bucket_and_prefix = self.upload_url[5:].split('/', 1) + self.upload_s3_bucket = bucket_and_prefix[0] + if len(bucket_and_prefix) > 1: + self.upload_s3_object_prefix = bucket_and_prefix[1] + if not self.upload_s3_bucket: + self.prompt_for_upload_s3_bucket() + return self.upload_s3_bucket or self._upload_s3_bucket + + def get_upload_s3_object_prefix(self): + """Helper function to determine if we should use the policy default + upload object prefix or one provided by the user + + :returns: The S3 object prefix to use for upload + :rtype: ``str`` + """ + return self.upload_s3_object_prefix or self._upload_s3_object_prefix + + def get_upload_s3_secret_key(self): + """Helper function to determine if we should use the policy default + upload secret key or one provided by the user + + :returns: The S3 secret key to use for upload + :rtype: ``str`` + """ + return (os.getenv('SOSUPLOADS3SECRETKEY', None) or + self.upload_s3_secret_key or + self._upload_s3_secret_key) + def get_upload_url(self): """Helper function to determine if we should use the policy default upload url or one provided by the user @@ -473,6 +626,14 @@ :returns: The URL to use for upload :rtype: ``str`` """ + if not self.upload_url and ( + self.upload_s3_bucket and + self.upload_s3_access_key and + self.upload_s3_secret_key + ): + bucket = self.get_upload_s3_bucket() + prefix = self.get_upload_s3_object_prefix() + self._upload_url = f"s3://{bucket}/{prefix}" return self.upload_url or self._upload_url def get_upload_url_string(self): @@ -760,6 +921,71 @@ except IOError: raise Exception("could not open archive 
file") + def upload_s3(self, endpoint=None, region=None, bucket=None, prefix=None, + access_key=None, secret_key=None): + """Attempts to upload the archive to an S3 bucket. + + :param endpoint: The S3 endpoint to upload to + :type endpoint: str + + :param region: The S3 region to upload to + :type region: str + + :param bucket: The name of the S3 bucket to upload to + :type bucket: str + + :param prefix: The prefix for the S3 object/key + :type prefix: str + + :param access_key: The access key for the S3 bucket + :type access_key: str + + :param secret_key: The secret key for the S3 bucket + :type secret_key: str + + :returns: True if upload is successful + :rtype: bool + + :raises: Exception if upload is unsuccessful + """ + if not BOTO3_LOADED: + raise Exception("Unable to upload due to missing python boto3 " + "library") + + if not endpoint: + endpoint = self.get_upload_s3_endpoint() + if not region: + region = self.get_upload_s3_region() + + if not bucket: + bucket = self.get_upload_s3_bucket().strip('/') + + if not prefix: + prefix = self.get_upload_s3_object_prefix() + if prefix != '' and prefix.startswith('/'): + prefix = prefix[1:] + if prefix != '' and not prefix.endswith('/'): + prefix = f'{prefix}/' if prefix else '' + + if not access_key: + access_key = self.get_upload_s3_access_key() + + if not secret_key: + secret_key = self.get_upload_s3_secret_key() + + s3_client = boto3.client('s3', endpoint_url=endpoint, + region_name=region, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + try: + key = prefix + self.upload_archive_name.split('/')[-1] + s3_client.upload_file(self.upload_archive_name, + bucket, key) + return True + except Exception as e: + raise Exception(f"Failed to upload to S3: {str(e)}") from e + def set_sos_prefix(self): """If sosreport commands need to always be prefixed with something, for example running in a specific container image, then it should be diff -Nru sosreport-4.5.6/sos/policies/distros/redhat.py sosreport-4.7.0/sos/policies/distros/redhat.py --- sosreport-4.5.6/sos/policies/distros/redhat.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/policies/distros/redhat.py 2024-02-20 00:39:46.000000000 +0000 @@ -12,13 +12,15 @@ import os import sys import re +from sos.policies.auth import DeviceAuthorizationClass from sos.report.plugins import RedHatPlugin -from sos.presets.redhat import (RHEL_PRESETS, ATOMIC_PRESETS, RHV, RHEL, - CB, RHOSP, RHOCP, RH_CFME, RH_SATELLITE, - ATOMIC) +from sos.presets.redhat import (RHEL_PRESETS, RHV, RHEL, CB, RHOSP, + RHOCP, RH_CFME, RH_SATELLITE, AAPEDA) from sos.policies.distros import LinuxPolicy, ENV_HOST_SYSROOT from sos.policies.package_managers.rpm import RpmPackageManager +from sos.policies.package_managers.flatpak import FlatpakPackageManager +from sos.policies.package_managers import MultiPackageManager from sos.utilities import bold from sos import _sos as _ @@ -30,7 +32,6 @@ OS_RELEASE = "/etc/os-release" RHEL_RELEASE_STR = "Red Hat Enterprise Linux" -ATOMIC_RELEASE_STR = "Atomic" class RedHatPolicy(LinuxPolicy): @@ -49,6 +50,10 @@ default_container_runtime = 'podman' sos_pkg_name = 'sos' sos_bin_path = '/usr/sbin' + client_identifier_url = "https://sso.redhat.com/auth/"\ + "realms/redhat-external/protocol/openid-connect/auth/device" + token_endpoint = "https://sso.redhat.com/auth/realms/"\ + "redhat-external/protocol/openid-connect/token" def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -57,8 +62,11 @@ remote_exec=remote_exec) self.usrmove = 
False - self.package_manager = RpmPackageManager(chroot=self.sysroot, - remote_exec=remote_exec) + self.package_manager = MultiPackageManager( + primary=RpmPackageManager, + fallbacks=[FlatpakPackageManager], + chroot=self.sysroot, + remote_exec=remote_exec) self.valid_subclasses += [RedHatPlugin] @@ -223,6 +231,7 @@ """ + disclaimer_text + "%(vendor_text)s\n") _upload_url = RH_SFTP_HOST _upload_method = 'post' + _device_token = None def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -261,24 +270,23 @@ def prompt_for_upload_user(self): if self.commons['cmdlineopts'].upload_user: - return - # Not using the default, so don't call this prompt for RHCP - if self.commons['cmdlineopts'].upload_url: - super(RHELPolicy, self).prompt_for_upload_user() - return - if not self.get_upload_user(): - if self.case_id: - self.upload_user = input(_( - "Enter your Red Hat Customer Portal username for " - "uploading [empty for anonymous SFTP]: ") - ) - else: # no case id provided => failover to SFTP - self.upload_url = RH_SFTP_HOST - self.ui_log.info("No case id provided, uploading to SFTP") - self.upload_user = input(_( - "Enter your Red Hat Customer Portal username for " - "uploading to SFTP [empty for anonymous]: ") - ) + self.ui_log.info( + _("The option --upload-user has been deprecated in favour" + " of device authorization in RHEL") + ) + if not self.case_id: + # no case id provided => failover to SFTP + self.upload_url = RH_SFTP_HOST + self.ui_log.info("No case id provided, uploading to SFTP") + + def prompt_for_upload_password(self): + # With OIDC we don't ask for user/pass anymore + if self.commons['cmdlineopts'].upload_pass: + self.ui_log.info( + _("The option --upload-pass has been deprecated in favour" + " of device authorization in RHEL") + ) + return def get_upload_url(self): if self.upload_url: @@ -287,10 +295,42 @@ return self.commons['cmdlineopts'].upload_url elif self.commons['cmdlineopts'].upload_protocol == 'sftp': return RH_SFTP_HOST + elif not self.commons['cmdlineopts'].case_id: + self.ui_log.info("No case id provided, uploading to SFTP") + return RH_SFTP_HOST else: rh_case_api = "/support/v1/cases/%s/attachments" return RH_API_HOST + rh_case_api % self.case_id + def _get_upload_https_auth(self): + str_auth = "Bearer {}".format(self._device_token) + return {'Authorization': str_auth} + + def _upload_https_post(self, archive, verify=True): + """If upload_https() needs to use requests.post(), use this method. + + Policies should override this method instead of the base upload_https() + + :param archive: The open archive file object + """ + files = { + 'file': (archive.name.split('/')[-1], archive, + self._get_upload_headers()) + } + # Get the access token at this point. With this, + # we cover the cases where report generation takes + # longer than the token timeout + RHELAuth = DeviceAuthorizationClass( + self.client_identifier_url, + self.token_endpoint + ) + self._device_token = RHELAuth.get_access_token() + self.ui_log.info("Device authorized correctly. Uploading file to " + f"{self.get_upload_url_string()}") + return requests.post(self.get_upload_url(), files=files, + headers=self._get_upload_https_auth(), + verify=verify) + def _get_upload_headers(self): if self.get_upload_url().startswith(RH_API_HOST): return {'isPrivate': 'false', 'cache-control': 'no-cache'} @@ -327,15 +367,38 @@ " for obtaining SFTP auth token.") _token = None _user = None + + # We may have a device token already if we attempted + # to upload via http but the upload failed. 
So + # lets check first if there isn't one. + if not self._device_token: + try: + RHELAuth = DeviceAuthorizationClass( + self.client_identifier_url, + self.token_endpoint + ) + except Exception as e: + # We end up here if the user cancels the device + # authentication in the web interface + if "end user denied" in str(e): + self.ui_log.info( + "Device token authorization " + "has been cancelled by the user." + ) + else: + self._device_token = RHELAuth.get_access_token() + if self._device_token: + self.ui_log.info("Device authorized correctly. Uploading file to" + f" {self.get_upload_url_string()}") + url = RH_API_HOST + '/support/v2/sftp/token' - # we have a username and password, but we need to reset the password - # to be the token returned from the auth endpoint - if self.get_upload_user() and self.get_upload_password(): - auth = self.get_upload_https_auth() - ret = requests.post(url, auth=auth, timeout=10) + ret = None + if self._device_token: + headers = self._get_upload_https_auth() + ret = requests.post(url, headers=headers, timeout=10) if ret.status_code == 200: # credentials are valid - _user = self.get_upload_user() + _user = json.loads(ret.text)['username'] _token = json.loads(ret.text)['token'] else: self.ui_log.debug( @@ -346,8 +409,7 @@ "Unable to retrieve Red Hat auth token using provided " "credentials. Will try anonymous." ) - # we either do not have a username or password/token, or both - if not _token: + else: adata = {"isAnonymous": True} anon = requests.post(url, data=json.dumps(adata), timeout=10) if anon.status_code == 200: @@ -363,7 +425,6 @@ f"DEBUG: anonymous request failed (status: " f"{anon.status_code}): {anon.json()}" ) - if _user and _token: return super(RHELPolicy, self).upload_sftp(user=_user, password=_token) @@ -375,17 +436,18 @@ """ try: if self.upload_url and self.upload_url.startswith(RH_API_HOST) and\ - (not self.get_upload_user() or not self.get_upload_password()): + (not self.get_upload_user() or + not self.get_upload_password()): self.upload_url = RH_SFTP_HOST uploaded = super(RHELPolicy, self).upload_archive(archive) - except Exception: + except Exception as e: uploaded = False if not self.upload_url.startswith(RH_API_HOST): raise else: self.ui_log.error( - _(f"Upload to Red Hat Customer Portal failed. Trying " - f"{RH_SFTP_HOST}") + _(f"Upload to Red Hat Customer Portal failed due to " + f"{e}. Trying {RH_SFTP_HOST}") ) self.upload_url = RH_SFTP_HOST uploaded = super(RHELPolicy, self).upload_archive(archive) @@ -418,6 +480,10 @@ if self.pkg_by_name("ovirt-engine") is not None or \ self.pkg_by_name("vdsm") is not None: return self.find_preset(RHV) + for pkg in ['automation-eda-controller', + 'automation-eda-controller-server']: + if self.pkg_by_name(pkg) is not None: + return self.find_preset(AAPEDA) # Vanilla RHEL is default return self.find_preset(RHEL) @@ -429,73 +495,6 @@ vendor_urls = [('Community Website', 'https://www.centos.org/')] -class RedHatAtomicPolicy(RHELPolicy): - distro = "Red Hat Atomic Host" - msg = _("""\ -This command will collect diagnostic and configuration \ -information from this %(distro)s system. - -An archive containing the collected information will be \ -generated in %(tmpdir)s and may be provided to a %(vendor)s \ -support representative. 
-""" + disclaimer_text + "%(vendor_text)s\n") - - containerzed = True - container_runtime = 'docker' - container_image = 'registry.access.redhat.com/rhel7/support-tools' - sos_path_strip = '/host' - container_version_command = 'rpm -q sos' - - def __init__(self, sysroot=None, init=None, probe_runtime=True, - remote_exec=None): - super(RedHatAtomicPolicy, self).__init__(sysroot=sysroot, init=init, - probe_runtime=probe_runtime, - remote_exec=remote_exec) - self.register_presets(ATOMIC_PRESETS) - - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - atomic = False - if ENV_HOST_SYSROOT not in os.environ: - return atomic - host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE - if not os.path.exists(host_release): - return False - try: - for line in open(host_release, "r").read().splitlines(): - atomic |= ATOMIC_RELEASE_STR in line - except IOError: - pass - return atomic - - def probe_preset(self): - if self.pkg_by_name('atomic-openshift'): - return self.find_preset(RHOCP) - - return self.find_preset(ATOMIC) - - def create_sos_container(self, image=None, auth=None, force_pull=False): - _cmd = ("{runtime} run -di --name {name} --privileged --ipc=host" - " --net=host --pid=host -e HOST=/host -e NAME={name} -e " - "IMAGE={image} {pull} -v /run:/run -v /var/log:/var/log -v " - "/etc/machine-id:/etc/machine-id -v " - "/etc/localtime:/etc/localtime -v /:/host {auth} {image}") - _image = image or self.container_image - _pull = '--pull=always' if force_pull else '' - return _cmd.format(runtime=self.container_runtime, - name=self.sos_container_name, - image=_image, - pull=_pull, - auth=auth or '') - - def set_cleanup_cmd(self): - return 'docker rm --force sos-collector-tmp' - - class RedHatCoreOSPolicy(RHELPolicy): """ Red Hat CoreOS is a containerized host built upon Red Hat Enterprise Linux @@ -551,8 +550,9 @@ return coreos host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE try: - for line in open(host_release, 'r').read().splitlines(): - coreos |= 'Red Hat Enterprise Linux CoreOS' in line + with open(host_release, 'r') as hfile: + for line in hfile.read().splitlines(): + coreos |= 'Red Hat Enterprise Linux CoreOS' in line except IOError: pass return coreos @@ -580,12 +580,6 @@ return 'podman rm --force %s' % self.sos_container_name -class CentOsAtomicPolicy(RedHatAtomicPolicy): - distro = "CentOS Atomic Host" - vendor = "CentOS" - vendor_urls = [('Community Website', 'https://www.centos.org/')] - - class FedoraPolicy(RedHatPolicy): """ The policy for Fedora based systems, regardless of spin/edition. 
This diff -Nru sosreport-4.5.6/sos/policies/distros/ubuntu.py sosreport-4.7.0/sos/policies/distros/ubuntu.py --- sosreport-4.5.6/sos/policies/distros/ubuntu.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/policies/distros/ubuntu.py 2024-02-20 00:39:46.000000000 +0000 @@ -42,9 +42,13 @@ chroot=self.sysroot, remote_exec=remote_exec) - if self.package_manager.pkg_by_name( - 'sosreport')['pkg_manager'] == 'snap': - self.sos_bin_path = '/snap/bin' + try: + if self.package_manager.pkg_by_name( + 'sosreport')['pkg_manager'] == 'snap': + self.sos_bin_path = '/snap/bin' + except TypeError: + # Use the default sos_bin_path + pass self.valid_subclasses += [UbuntuPlugin] @@ -70,7 +74,7 @@ lines = fp.readlines() for line in lines: if "DISTRIB_RELEASE" in line: - return int(line.split("=")[1].strip()) + return float(line.split("=")[1].strip()) return False except (IOError, ValueError): return False diff -Nru sosreport-4.5.6/sos/policies/__init__.py sosreport-4.7.0/sos/policies/__init__.py --- sosreport-4.5.6/sos/policies/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/policies/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -225,7 +225,7 @@ * name - the short hostname of the system * label - the label given by --label - * case - the case id given by --case-id or --ticker-number + * case - the case id given by --case-id * rand - a random string of 7 alpha characters Note that if a datestamp is needed, the substring should be set @@ -467,6 +467,7 @@ self.ui_log.info( _("\nPlease send this file to your support representative.\n") ) + return None def get_msg(self): """This method is used to prepare the preamble text to display to diff -Nru sosreport-4.5.6/sos/policies/package_managers/flatpak.py sosreport-4.7.0/sos/policies/package_managers/flatpak.py --- sosreport-4.5.6/sos/policies/package_managers/flatpak.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/policies/package_managers/flatpak.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,29 @@ +# Copyright 2023 Red Hat, Inc. Jose Castillo + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. 
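The FlatpakPackageManager defined next is consumed by the MultiPackageManager chain that redhat.py now builds, with RPM as the primary and Flatpak as a fallback. A rough sketch of what such a fallback lookup implies, using a hypothetical stand-in class rather than the real implementation in sos.policies.package_managers:

    class MultiLookupSketch:
        # Illustrative only: the first manager that knows the package wins.
        def __init__(self, primary, fallbacks):
            self.managers = [primary] + list(fallbacks)

        def pkg_by_name(self, name):
            for mgr in self.managers:
                pkg = mgr.pkg_by_name(name)  # assumed to return None on a miss
                if pkg:
                    return pkg
            return None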
+ +from sos.policies.package_managers import PackageManager + + +class FlatpakPackageManager(PackageManager): + """Package Manager for Flatpak distributions + """ + + query_command = 'flatpak list --columns=name,version,branch' + query_path_command = '' + files_command = '' + verify_command = '' + verify_filter = '' + + def _parse_pkg_list(self, pkg_list): + for line in pkg_list.splitlines(): + pkg = line.split("\t") + yield (pkg[0], pkg[1], pkg[2]) + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/policies/package_managers/__init__.py sosreport-4.7.0/sos/policies/package_managers/__init__.py --- sosreport-4.5.6/sos/policies/package_managers/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/policies/package_managers/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -72,7 +72,7 @@ return self.__class__.__name__.lower().split('package')[0] def exec_cmd(self, command, timeout=30, need_root=False, env=None, - get_pty=False, chroot=None): + use_shell=False, chroot=None): """ Runs a package manager command, either via sos_get_command_output() if local, or via a SoSTransport's run_command() if this needs to be run @@ -90,9 +90,9 @@ :param env: Environment variables to set :type env: ``dict`` with keys being env vars to define - :param get_pty: If running remotely, does the command require - obtaining a pty? - :type get_pty: ``bool`` + :param use_shell: If running remotely, does the command require + obtaining a shell? + :type use_shell: ``bool`` :param chroot: If necessary, chroot command execution to here :type chroot: ``None`` or ``str`` @@ -101,7 +101,7 @@ :rtype: ``str`` """ if self.remote_exec: - ret = self.remote_exec(command, timeout, need_root, env, get_pty) + ret = self.remote_exec(command, timeout, need_root, env, use_shell) else: ret = sos_get_command_output(command, timeout, chroot=chroot, env=env) @@ -206,20 +206,6 @@ return self.packages[pkg] return None - def pkg_nvra(self, pkg): - """Get the name, version, release, and architecture for a package - - :param pkg: The name of the package - :type pkg: ``str`` - - :returns: name, version, release, and arch of the package - :rtype: ``tuple`` - """ - fields = pkg.split("-") - version, release, arch = fields[-3:] - name = "-".join(fields[:-3]) - return (name, version, release, arch) - def all_files(self): """ Get a list of files known by the package manager diff -Nru sosreport-4.5.6/sos/policies/runtimes/docker.py sosreport-4.7.0/sos/policies/runtimes/docker.py --- sosreport-4.5.6/sos/policies/runtimes/docker.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/policies/runtimes/docker.py 2024-02-20 00:39:46.000000000 +0000 @@ -18,9 +18,9 @@ name = 'docker' binary = 'docker' - def check_is_active(self, sysroot=None): + def check_is_active(self): # the daemon must be running - if (is_executable('docker', sysroot) and + if (is_executable('docker', self.policy.sysroot) and (self.policy.init_system.is_running('docker') or self.policy.init_system.is_running('snap.docker.dockerd'))): self.active = True @@ -28,6 +28,6 @@ return False def check_can_copy(self): - return self.check_is_active(sysroot=self.policy.sysroot) + return self.active # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/policies/runtimes/lxd.py sosreport-4.7.0/sos/policies/runtimes/lxd.py --- sosreport-4.5.6/sos/policies/runtimes/lxd.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/policies/runtimes/lxd.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,151 @@ +# Copyright (C) 2023 Canonical Ltd., Arif Ali + +# This 
file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +import json + +from sos.policies.runtimes import ContainerRuntime +from sos.utilities import sos_get_command_output +from sos.utilities import is_executable + + +class LxdContainerRuntime(ContainerRuntime): + """Runtime class to use for systems running LXD""" + + name = 'lxd' + binary = 'lxc' + + def check_is_active(self): + # the daemon must be running + if (is_executable('lxc', self.policy.sysroot) and + (self.policy.init_system.is_running('lxd') or + self.policy.init_system.is_running('snap.lxd.daemon'))): + self.active = True + return True + return False + + def get_containers(self, get_all=False): + """Get a list of containers present on the system. + + :param get_all: If set, include stopped containers as well + :type get_all: ``bool`` + """ + containers = [] + + _cmd = f"{self.binary} list --format json" + if self.active: + out = sos_get_command_output(_cmd, chroot=self.policy.sysroot) + + if out["status"] == 0: + out_json = json.loads(out["output"]) + + for container in out_json: + if container['status'] == 'Running' or get_all: + # takes the form (container_id, container_name) + containers.append( + (container['expanded_config']['volatile.uuid'], + container['name'])) + + return containers + + def get_images(self): + """Get a list of images present on the system + + :returns: A list of 2-tuples containing (image_name, image_id) + :rtype: ``list`` + """ + images = [] + if self.active: + out = sos_get_command_output( + f"{self.binary} image list --format json", + chroot=self.policy.sysroot + ) + if out['status'] == 0: + out_json = json.loads(out["output"]) + for ent in out_json: + # takes the form (image_name, image_id) + if 'update_source' in ent: + images.append(( + ent['update_source']['alias'], + ent['fingerprint'])) + return images + + def get_volumes(self): + """Get a list of container volumes present on the system + + :returns: A list of volume IDs on the system + :rtype: ``list`` + """ + vols = [] + stg_pool = "default" + if self.active: + + # first get the default storage pool + out = sos_get_command_output( + f"{self.binary} profile list --format json", + chroot=self.policy.sysroot + ) + if out['status'] == 0: + out_json = json.loads(out['output']) + for profile in out_json: + if (profile['name'] == 'default' and + 'root' in profile['devices']): + stg_pool = profile['devices']['root']['pool'] + break + + out = sos_get_command_output( + f"{self.binary} storage volume list {stg_pool} --format json", + chroot=self.policy.sysroot + ) + if out['status'] == 0: + out_json = json.loads(out['output']) + for ent in out_json: + vols.append(ent['name']) + return vols + + def get_logs_command(self, container): + """Get the command string used to dump container logs from the + runtime + + :param container: The name or ID of the container to get logs for + :type container: ``str`` + + :returns: Formatted runtime command to get logs from `container` + :type: ``str`` + """ + return f"{self.binary} info {container} --show-log" + + def get_copy_command(self, container, path, dest, sizelimit=None): + """Generate the command string used to copy a file out of a container + by way of the runtime. 
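For get_containers() above, the relevant pieces of the `lxc list --format json` output are each entry's status, name, and the volatile.uuid key under expanded_config. A trimmed sample (shape assumed for illustration) and the extraction it feeds:

    import json

    sample = ('[{"name": "c1", "status": "Running", '
              '"expanded_config": {"volatile.uuid": "abc-123"}}]')  # invented
    running = [(c['expanded_config']['volatile.uuid'], c['name'])
               for c in json.loads(sample) if c['status'] == 'Running']
    # -> [('abc-123', 'c1')]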
+ + :param container: The name or ID of the container + :type container: ``str`` + + :param path: The path to copy from the container. Note that at + this time, no supported runtime supports globbing + :type path: ``str`` + + :param dest: The destination on the *host* filesystem to write + the file to + :type dest: ``str`` + + :param sizelimit: Limit the collection to the last X bytes of the + file at PATH + :type sizelimit: ``int`` + + :returns: Formatted runtime command to copy a file from a container + :rtype: ``str`` + """ + if sizelimit: + return f"{self.run_cmd} {container} tail -c {sizelimit} {path}" + return f"{self.binary} file pull {container}{path} {dest}" + + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/presets/redhat/__init__.py sosreport-4.7.0/sos/presets/redhat/__init__.py --- sosreport-4.5.6/sos/presets/redhat/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/presets/redhat/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -53,6 +53,17 @@ RH_SATELLITE_DESC = "Red Hat Satellite" SAT_OPTS = SoSOptions(log_size=100, plugopts=['apache.log=on']) +AAPEDA = 'aap_eda' +AAPEDA_DESC = 'Ansible Automation Platform Event Driven Controller' +AAPEDA_OPTS = SoSOptions( + enable_plugins=['containers_common'], + plugopts=[ + 'containers_common.rootlessusers=eda' + ]) +AAPEDA_NOTE = ('Collects \'eda\' user output for the containers_common plugin.' + ' If you need more users, do not forget to add \'eda\' ' + 'to your own list for the \'rootlessusers\' option.') + CB = "cantboot" CB_DESC = "For use when normal system startup fails" CB_OPTS = SoSOptions( @@ -66,6 +77,8 @@ NOTE_SIZE_TIME = "This preset may increase report size and run time" RHEL_PRESETS = { + AAPEDA: PresetDefaults(name=AAPEDA, desc=AAPEDA_DESC, opts=AAPEDA_OPTS, + note=AAPEDA_NOTE), RHV: PresetDefaults(name=RHV, desc=RHV_DESC, note=NOTE_TIME, opts=_opts_verify), RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC), @@ -80,14 +93,4 @@ } -ATOMIC = "atomic" -ATOMIC_RELEASE_STR = "Atomic" -ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host" - -ATOMIC_PRESETS = { - ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME, - opts=_opts_verify) -} - - # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/__init__.py sosreport-4.7.0/sos/report/__init__.py --- sosreport-4.5.6/sos/report/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -129,6 +129,12 @@ 'upload_method': 'auto', 'upload_no_ssl_verify': False, 'upload_protocol': 'auto', + 'upload_s3_endpoint': None, + 'upload_s3_region': None, + 'upload_s3_bucket': None, + 'upload_s3_access_key': None, + 'upload_s3_secret_key': None, + 'upload_s3_object_prefix': None, 'add_preset': '', 'del_preset': '' } @@ -142,7 +148,6 @@ self.archive = None self._args = args self.sysroot = "/" - self.preset = None self.estimated_plugsizes = {} self.print_header() @@ -153,25 +158,6 @@ # add a manifest section for report self.report_md = self.manifest.components.add_section('report') - # user specified command line preset - if self.opts.preset != self.arg_defaults["preset"]: - self.preset = self.policy.find_preset(self.opts.preset) - if not self.preset: - sys.stderr.write("Unknown preset: '%s'\n" % self.opts.preset) - self.preset = self.policy.probe_preset() - self.opts.list_presets = True - - # --preset=auto - if not self.preset: - self.preset = self.policy.probe_preset() - # now merge preset options to self.opts - self.opts.merge(self.preset.opts) - # re-apply any cmdline 
overrides to the preset - self.opts = self.apply_options_from_cmdline(self.opts) - if hasattr(self.preset.opts, 'verbosity') and \ - self.preset.opts.verbosity > 0: - self.set_loggers_verbosity(self.preset.opts.verbosity) - self._set_directories() msg = "default" @@ -332,8 +318,21 @@ report_grp.add_argument("--upload-no-ssl-verify", default=False, action='store_true', help="Disable SSL verification for upload url") + report_grp.add_argument("--upload-s3-endpoint", default=None, + help="Endpoint to upload to for S3 bucket") + report_grp.add_argument("--upload-s3-region", default=None, + help="Region to upload to for S3 bucket") + report_grp.add_argument("--upload-s3-bucket", default=None, + help="Name of the S3 bucket to upload to") + report_grp.add_argument("--upload-s3-access-key", default=None, + help="Access key for the S3 bucket") + report_grp.add_argument("--upload-s3-secret-key", default=None, + help="Secret key for the S3 bucket") + report_grp.add_argument("--upload-s3-object-prefix", default=None, + help="Prefix for the S3 object/key") report_grp.add_argument("--upload-protocol", default='auto', - choices=['auto', 'https', 'ftp', 'sftp'], + choices=['auto', 'https', 'ftp', 'sftp', + 's3'], help="Manually specify the upload protocol") # Group to make add/del preset exclusive @@ -612,10 +611,18 @@ filt_devs = ['bonding_masters'] _eth_devs = [] if not namespace: - _eth_devs = [ - dev for dev in listdir('/sys/class/net', self.opts.sysroot) - if dev not in filt_devs - ] + try: + # Override checking sysroot here, as network devices will not + # be under the sysroot in live environments or in containers + # that are correctly setup to collect from the host + _eth_devs = [ + dev for dev in listdir('/sys/class/net', None) + if dev not in filt_devs + ] + except Exception as err: + self.soslog.warning( + f'Failed to manually determine network devices: {err}' + ) else: try: _nscmd = "ip netns exec %s ls /sys/class/net" % namespace @@ -1377,6 +1384,7 @@ self.handle_exception(plugname, "collect") except Exception: self.handle_exception(plugname, "collect") + return None def ui_progress(self, status_line): if self.opts.verbosity == 0 and not self.opts.batch: @@ -1526,6 +1534,8 @@ self._add_sos_logs() if self.manifest is not None: self.archive.add_final_manifest_data(self.opts.compression_type) + # Hide upload passwords in the log files + self._obfuscate_upload_passwords() # Now, separately clean the log files that cleaner also wrote to if do_clean: _dir = os.path.join(self.tmpdir, self.archive._name) @@ -1681,7 +1691,8 @@ self.policy.display_results(archive, directory, checksum, map_file=map_file) - if self.opts.upload or self.opts.upload_url: + if (self.opts.upload or self.opts.upload_url + or self.opts.upload_s3_endpoint): if not self.opts.build: try: self.policy.upload_archive(archive) @@ -1800,12 +1811,14 @@ self.list_presets() raise SystemExit if self.opts.add_preset: - return self.add_preset(self.opts.add_preset) + self.add_preset(self.opts.add_preset) + raise SystemExit if self.opts.del_preset: - return self.del_preset(self.opts.del_preset) + self.del_preset(self.opts.del_preset) + raise SystemExit # verify that at least one plug-in is enabled if not self.verify_plugins(): - return False + raise SystemExit self.batch() self.prework() diff -Nru sosreport-4.5.6/sos/report/plugins/aap_eda.py sosreport-4.7.0/sos/report/plugins/aap_eda.py --- sosreport-4.5.6/sos/report/plugins/aap_eda.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/aap_eda.py 2024-02-20 
00:39:46.000000000 +0000 @@ -0,0 +1,51 @@ +# Copyright (c) 2023 Rudnei Bertol Jr + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, RedHatPlugin + + +class AAPEDAControllerPlugin(Plugin, RedHatPlugin): + short_desc = 'AAP EDA Controller plugin' + plugin_name = 'aap_eda' + profiles = ('sysmgmt', 'ansible') + packages = ('automation-eda-controller', + 'automation-eda-controller-server') + + def setup(self): + self.add_copy_spec([ + "/etc/ansible-automation-platform/", + "/var/log/ansible-automation-platform/eda/worker.log*", + "/var/log/ansible-automation-platform/eda/scheduler.log*", + "/var/log/ansible-automation-platform/eda/gunicorn.log*", + "/var/log/ansible-automation-platform/eda/activation.log*", + ]) + + self.add_forbidden_path([ + "/etc/ansible-automation-platform/eda/SECRET_KEY", + "/etc/ansible-automation-platform/eda/server.cert", + "/etc/ansible-automation-platform/eda/server.key", + ]) + + self.add_cmd_output([ + "aap-eda-manage --version", # eda version + "ls -alhR /etc/ansible-automation-platform/", + "ls -alhR /var/log/ansible-automation-platform/", + ]) + + self.add_cmd_output("su - eda -c 'export'", + suggest_filename="eda_export") + + def postproc(self): + self.do_path_regex_sub( + "/etc/ansible-automation-platform/eda/environment", + r"(EDA_SECRET_KEY|EDA_DB_PASSWORD)(\s*)(=|:)(\s*)(.*)", + r'\1\2\3\4********') + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/aide.py sosreport-4.7.0/sos/report/plugins/aide.py --- sosreport-4.5.6/sos/report/plugins/aide.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/aide.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,56 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. 
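As a quick check of the aap_eda postproc above: the substitution keeps the key and separator (capture groups 1-4) and masks only the value. A standalone run against an invented input line:

    import re

    line = "EDA_DB_PASSWORD = s3cretvalue"
    print(re.sub(r"(EDA_SECRET_KEY|EDA_DB_PASSWORD)(\s*)(=|:)(\s*)(.*)",
                 r"\1\2\3\4********", line))
    # prints: EDA_DB_PASSWORD = ********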
+ +from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin + + +class Aide(Plugin): + + short_desc = 'Advanced Intrusion Detection Environment' + + plugin_name = "aide" + profiles = ('system', 'security') + + packages = ('aide',) + conf_file = "/etc/aide/aide.conf" + + def setup(self): + self.add_cmd_output(f"aide -c {self.conf_file} --config-check") + + if self.get_option("all_logs"): + self.add_copy_spec([ + "/var/log/aide/", + ]) + else: + self.add_copy_spec([ + "/var/log/aide/aide.log" + ]) + + +class RedHatAide(Aide, RedHatPlugin): + conf_file = "/etc/aide.conf" + + def setup(self): + super(RedHatAide, self).setup() + self.add_copy_spec([ + "/etc/aide.conf", + "/etc/logrotate.d/aide" + ]) + + +class DebianAide(Aide, DebianPlugin, UbuntuPlugin): + conf_file = "/etc/aide/aide.conf" + + def setup(self): + super(DebianAide, self).setup() + self.add_copy_spec([ + "/etc/aide/", + "/etc/default/aide" + ]) + +# vim: et ts=4 sw=4 diff -Nru sosreport-4.5.6/sos/report/plugins/ansible.py sosreport-4.7.0/sos/report/plugins/ansible.py --- sosreport-4.5.6/sos/report/plugins/ansible.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ansible.py 2024-02-20 00:39:46.000000000 +0000 @@ -29,7 +29,11 @@ "ansible --version" ]) - # let rhui plugin collects the RHUI specific files - self.add_forbidden_path("/etc/ansible/facts.d/rhui_*.fact") + # don't generically collect potentially sensitive files and dirs + self.add_forbidden_path([ + "/etc/ansible/facts.d/", + "/etc/ansible/roles/", + "/etc/ansible/hosts", + ]) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/arcconf.py sosreport-4.7.0/sos/report/plugins/arcconf.py --- sosreport-4.5.6/sos/report/plugins/arcconf.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/arcconf.py 2024-02-20 00:39:46.000000000 +0000 @@ -10,6 +10,8 @@ # This sosreport plugin is meant for SAS adapters. # This plugin logs information on each adapter it finds. +import re + from sos.report.plugins import Plugin, IndependentPlugin @@ -21,11 +23,34 @@ commands = ("arcconf",) def setup(self): + # Get the list of available adapters + listarcconf = self.collect_cmd_output("arcconf list") - # get list of adapters - self.add_cmd_output([ - "arcconf getconfig 1", - "arcconf list", - "arcconf GETLOGS 1 UART" - ]) + # Parse the 'arcconf list' output and extract controller IDs.
+ # For each Controller ID found in 'arcconf list', add commands + # for getconfig and GETLOGS + # + # Sample 'arcconf list' output: + # + # Controller information + # ------------------------------------------------------------- + # Controller ID : Status, Slot, Mode, Name, SerialNumber, WWN + # ------------------------------------------------------------- + # Controller 1: : Optimal, Slot XXXX, XXXX, XXXX, XXXX, XXXX + # ------------------------------------------------------------- + # Controller 2: : Optimal, Slot XXXX, XXXX, XXXX, XXXX, XXXX + + if listarcconf['status'] == 0: + for line in listarcconf['output'].splitlines(): + try: + match = re.match(r"^[\s]*Controller (\d)+", line).group(0) + controller_id = match.split()[1] + if controller_id: + # Add new commands with Controller ID + self.add_cmd_output([ + f"arcconf getconfig {controller_id}", + f"arcconf GETLOGS {controller_id} UART", + ]) + except AttributeError: + continue # vim: et ts=4 sw=4 diff -Nru sosreport-4.5.6/sos/report/plugins/atomichost.py sosreport-4.7.0/sos/report/plugins/atomichost.py --- sosreport-4.5.6/sos/report/plugins/atomichost.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/atomichost.py 1970-01-01 01:00:00.000000000 +0100 @@ -1,40 +0,0 @@ -# Copyright (C) 2015 Red Hat, Inc. - -# This file is part of the sos project: https://github.com/sosreport/sos -# -# This copyrighted material is made available to anyone wishing to use, -# modify, copy, or redistribute it subject to the terms and conditions of -# version 2 of the GNU General Public License. -# -# See the LICENSE file in the source distribution for further information. - -from sos.report.plugins import Plugin, RedHatPlugin, PluginOpt - - -class AtomicHost(Plugin, RedHatPlugin): - - short_desc = 'Atomic Host' - - plugin_name = "atomichost" - profiles = ('container',) - option_list = [ - PluginOpt("info", default=False, - desc="gather atomic info for each image") - ] - - def check_enabled(self): - return self.policy.in_container() - - def setup(self): - self.add_cmd_output("atomic host status") - - if self.get_option('info'): - # The 'docker images' command may include duplicate rows of - # output (repeated "IMAGE ID" values). Use a set to filter - # these out and only obtain 'docker info' data once per image - # identifier. 
- images = self.exec_cmd("docker images -q") - for image in set(images['output'].splitlines()): - self.add_cmd_output("atomic info {0}".format(image)) - -# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/auditd.py sosreport-4.7.0/sos/report/plugins/auditd.py --- sosreport-4.5.6/sos/report/plugins/auditd.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/auditd.py 2024-02-20 00:39:46.000000000 +0000 @@ -29,7 +29,7 @@ ]) self.add_cmd_output( - "ausearch --input-logs -m avc,user_avc,fanotify -ts today" + "ausearch -i --input-logs -m avc,user_avc,fanotify -ts today" ) self.add_cmd_output("auditctl -l", tags="auditctl_rules") self.add_cmd_output("auditctl -s", tags="auditctl_status") diff -Nru sosreport-4.5.6/sos/report/plugins/autofs.py sosreport-4.7.0/sos/report/plugins/autofs.py --- sosreport-4.5.6/sos/report/plugins/autofs.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/autofs.py 2024-02-20 00:39:46.000000000 +0000 @@ -40,6 +40,7 @@ *self.files) for i in debugout: return i[1] + return None def setup(self): self.add_copy_spec("/etc/auto*") diff -Nru sosreport-4.5.6/sos/report/plugins/candlepin.py sosreport-4.7.0/sos/report/plugins/candlepin.py --- sosreport-4.5.6/sos/report/plugins/candlepin.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/candlepin.py 2024-02-20 00:39:46.000000000 +0000 @@ -30,7 +30,9 @@ self.dbpasswd = "" cfg_file = "/etc/candlepin/candlepin.conf" try: - for line in open(cfg_file).read().splitlines(): + with open(cfg_file, 'r') as cfile: + candle_lines = cfile.read().splitlines() + for line in candle_lines: # skip empty lines and lines with comments if not line or line[0] == '#': continue diff -Nru sosreport-4.5.6/sos/report/plugins/canonical_livepatch_onprem.py sosreport-4.7.0/sos/report/plugins/canonical_livepatch_onprem.py --- sosreport-4.5.6/sos/report/plugins/canonical_livepatch_onprem.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/canonical_livepatch_onprem.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,42 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, UbuntuPlugin + + +class CanonicaLivepatchOnprem(Plugin, UbuntuPlugin): + + short_desc = 'Canonical Livepatch Onprem Service' + + plugin_name = 'canonical_livepatch_onprem' + profiles = ('services',) + services = ("livepatch-server",) + + def setup(self): + self.add_copy_spec([ + "/etc/livepatchd.yaml", + ]) + + def postproc(self): + onprem_conf = "/etc/livepatchd.yaml" + protect_keys = [ + "username", + "password", + "token", + "connection_string", + ] + + # Redact simple yaml style "key: value". 
+ keys_regex = r"(^(-|\s)*(%s)\s*:\s*)(.*)" % "|".join(protect_keys) + sub_regex = r"\1*********" + self.do_path_regex_sub(onprem_conf, keys_regex, sub_regex) + + # Redact conf + self.do_file_private_sub(onprem_conf) + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/ceph_common.py sosreport-4.7.0/sos/report/plugins/ceph_common.py --- sosreport-4.5.6/sos/report/plugins/ceph_common.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ceph_common.py 2024-02-20 00:39:46.000000000 +0000 @@ -27,7 +27,6 @@ 'libcephfs1', 'ceph-fs-common', 'calamari-server', - 'librados2' ) services = ( diff -Nru sosreport-4.5.6/sos/report/plugins/ceph_mds.py sosreport-4.7.0/sos/report/plugins/ceph_mds.py --- sosreport-4.5.6/sos/report/plugins/ceph_mds.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ceph_mds.py 2024-02-20 00:39:46.000000000 +0000 @@ -46,6 +46,8 @@ "client ls", "config diff", "config show", + "counter dump", + "counter schema", "damage ls", "dump loads", "dump tree", @@ -57,13 +59,13 @@ "get subtrees", "objecter_requests", "ops", + "perf dump", "perf histogram dump", "perf histogram schema", "perf schema", - "perf dump", + "session ls", "status", "version", - "session ls" ] mds_ids = [] diff -Nru sosreport-4.5.6/sos/report/plugins/ceph_mgr.py sosreport-4.7.0/sos/report/plugins/ceph_mgr.py --- sosreport-4.5.6/sos/report/plugins/ceph_mgr.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ceph_mgr.py 2024-02-20 00:39:46.000000000 +0000 @@ -1,3 +1,5 @@ +# Copyright (C) 2023 Canonical Ltd., Nikhil Kshirsagar + # This file is part of the sos project: https://github.com/sosreport/sos # # This copyrighted material is made available to anyone wishing to use, @@ -36,84 +38,117 @@ plugin_name = 'ceph_mgr' profiles = ('storage', 'virt', 'container', 'ceph') - files = ('/var/lib/ceph/mgr/*', '/var/lib/ceph/*/mgr*') + files = ('/var/lib/ceph/mgr/*', '/var/lib/ceph/*/mgr*', + '/var/snap/microceph/common/data/mgr/*') containers = ('ceph-(.*-)?mgr.*',) def setup(self): + microceph_pkg = self.policy.package_manager.pkg_by_name('microceph') - self.add_file_tags({ - '/var/log/ceph/(.*/)?ceph-mgr.*.log': 'ceph_mgr_log', - }) - - self.add_forbidden_path([ - "/etc/ceph/*keyring*", - "/var/lib/ceph/**/*keyring*", - "/var/lib/ceph/**/osd*", - "/var/lib/ceph/**/mon*", - # Excludes temporary ceph-osd mount location like - # /var/lib/ceph/tmp/mnt.XXXX from sos collection. 
- "/var/lib/ceph/**/tmp/*mnt*", - "/etc/ceph/*bindpass*", - ]) - - self.add_copy_spec([ - "/var/log/ceph/**/ceph-mgr*.log", - "/var/lib/ceph/**/mgr*", - "/var/lib/ceph/**/bootstrap-mgr/", - "/run/ceph/**/ceph-mgr*", - ]) - - # more commands to be added later ceph_mgr_cmds = ([ "balancer status", - "orch host ls", - "orch device ls", - "orch ls", - "orch ls --export", - "orch ps", - "orch status --detail", - "orch upgrade status", - "log last cephadm" + "healthcheck history ls", + "log last cephadm", + "mgr dump", + "mgr metadata", + "mgr module ls", + "mgr stat", + "mgr versions" ]) - self.add_cmd_output( - [f"ceph {cmd}" for cmd in ceph_mgr_cmds]) - # get ceph_cmds again as json for easier automation parsing - self.add_cmd_output( - [f"ceph {cmd} --format json-pretty" for cmd in ceph_mgr_cmds], - subdir="json_output", - ) + # if orchestrator is configured + orch_configured = self.exec_cmd('ceph orch status') + if orch_configured['status'] == 0: + ceph_mgr_cmds += ([ + "orch host ls", + "orch device ls", + "orch ls", + "orch ls --export", + "orch ps", + "orch status --detail", + "orch upgrade status" + ]) cmds = [ "config diff", "config show", + "counter dump", + "counter schema", "dump_cache", "dump_mempools", "dump_osd_network", "mds_requests", "mds_sessions", "objecter_requests", - "mds_requests", - "mds_sessions", "perf dump", "perf histogram dump", "perf histogram schema", "perf schema", "status", - "version" + "version", ] + directory = '' + if not microceph_pkg: + directory = '/var/run/ceph' + self.add_file_tags({ + '/var/log/ceph/(.*/)?ceph-mgr.*.log': 'ceph_mgr_log', + }) + + self.add_forbidden_path([ + "/etc/ceph/*keyring*", + "/var/lib/ceph/**/*keyring*", + "/var/lib/ceph/**/osd*", + "/var/lib/ceph/**/mon*", + # Excludes temporary ceph-osd mount location like + # /var/lib/ceph/tmp/mnt.XXXX from sos collection. 
+ "/var/lib/ceph/**/tmp/*mnt*", + "/etc/ceph/*bindpass*", + ]) + + self.add_copy_spec([ + "/var/log/ceph/**/ceph-mgr*.log", + "/var/lib/ceph/**/mgr*", + "/var/lib/ceph/**/bootstrap-mgr/", + "/run/ceph/**/ceph-mgr*", + ]) + + else: + directory = '/var/snap/microceph' + self.add_file_tags({ + '/var/snap/microceph/common/logs/ceph-mgr.*.log': + 'ceph_mgr_log', + }) + + self.add_forbidden_path([ + "/var/snap/microceph/common/**/*keyring*", + ]) + + self.add_copy_spec([ + "/var/snap/microceph/common/logs/ceph-mgr*.log", + ]) + + self.add_cmd_output( + [f"ceph {cmd}" for cmd in ceph_mgr_cmds]) + + # get ceph_cmds again as json for easier automation parsing + self.add_cmd_output( + [f"ceph {cmd} --format json-pretty" for cmd in ceph_mgr_cmds], + subdir="json_output", + ) + self.add_cmd_output([ - f"ceph daemon {m} {cmd}" for m in self.get_socks() for cmd in cmds] + f"ceph daemon {m} {cmd}" for m in self.get_socks(directory) + for cmd in cmds] ) - def get_socks(self): + def get_socks(self, directory): """ Find any available admin sockets under /var/run/ceph (or subdirs for later versions of Ceph) which can be used for ceph daemon commands """ ceph_sockets = [] - for rdir, dirs, files in os.walk('/var/run/ceph/'): + for rdir, _, files in os.walk(directory): for file in files: if file.startswith('ceph-mgr') and file.endswith('.asok'): ceph_sockets.append(self.path_join(rdir, file)) diff -Nru sosreport-4.5.6/sos/report/plugins/ceph_mon.py sosreport-4.7.0/sos/report/plugins/ceph_mon.py --- sosreport-4.5.6/sos/report/plugins/ceph_mon.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ceph_mon.py 2024-02-20 00:39:46.000000000 +0000 @@ -86,29 +86,29 @@ # The ceph_mon plugin will collect all the "ceph ..." commands # which typically require the keyring. 
- "ceph mon stat", - "ceph quorum_status", - "ceph-disk list", - "ceph versions", - "ceph features", - "ceph insights", - "ceph crash stat", "ceph config dump", - "ceph config log", "ceph config generate-minimal-conf", + "ceph config log", "ceph config-key dump", - "ceph osd metadata", - "ceph osd erasure-code-profile ls", - "ceph osd crush dump", - "ceph osd crush show-tunables", - "ceph osd crush tree --show-shadow", + "ceph crash stat", + "ceph features", + "ceph insights", + "ceph log last 10000 debug audit", + "ceph log last 10000 debug cluster", "ceph mgr dump", "ceph mgr metadata", "ceph mgr module ls", "ceph mgr services", "ceph mgr versions", - "ceph log last 10000 debug cluster", - "ceph log last 10000 debug audit" + "ceph mon stat", + "ceph osd crush dump", + "ceph osd crush show-tunables", + "ceph osd crush tree --show-shadow", + "ceph osd erasure-code-profile ls", + "ceph osd metadata", + "ceph quorum_status", + "ceph versions", + "ceph-disk list", ]) crashes = self.collect_cmd_output('ceph crash ls') @@ -119,26 +119,26 @@ self.add_cmd_output(f"ceph crash info {cid}") ceph_cmds = [ - "mon dump", - "status", "device ls", - "df", "df detail", - "fs ls", + "df", "fs dump", - "pg dump", - "pg stat", - "time-sync-status", - "osd stat", + "fs ls", + "mds stat", + "mon dump", + "osd blocked-by", "osd df tree", - "osd dump", "osd df", + "osd dump", + "osd numa-status", "osd perf", - "osd blocked-by", - "osd pool ls detail", "osd pool autoscale-status", - "mds stat", - "osd numa-status" + "osd pool ls detail", + "osd stat", + "pg dump", + "pg stat", + "status", + "time-sync-status", ] self.add_cmd_output("ceph health detail --format json-pretty", @@ -176,7 +176,7 @@ def get_ceph_ids(self): ceph_ids = [] # ceph version 14 correlates to RHCS 4 - if self.ceph_version == 14 or self.ceph_version == 15: + if self.ceph_version in (14, 15): # Get the ceph user processes out = self.exec_cmd('ps -u ceph -o args') diff -Nru sosreport-4.5.6/sos/report/plugins/ceph_osd.py sosreport-4.7.0/sos/report/plugins/ceph_osd.py --- sosreport-4.5.6/sos/report/plugins/ceph_osd.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ceph_osd.py 2024-02-20 00:39:46.000000000 +0000 @@ -1,3 +1,5 @@ +# Copyright (C) 2023 Canonical Ltd., Nikhil Kshirsagar +# # This file is part of the sos project: https://github.com/sosreport/sos # # This copyrighted material is made available to anyone wishing to use, @@ -35,8 +37,46 @@ '/var/snap/microceph/common/data/osd/*') def setup(self): + directory = '' microceph_pkg = self.policy.package_manager.pkg_by_name('microceph') + cmds = [ + # will work pre quincy + "bluestore bluefs available", + "dump_reservations", + # will work quincy onward + "bluefs stats", + "bluestore bluefs device info", + "config diff", + "config show", + "counter dump", + "counter schema", + "dump_blocked_ops", + "dump_blocklist", + "dump_historic_ops_by_duration", + "dump_historic_slow_ops", + "dump_mempools", + "dump_op_pq_state", + "dump_ops_in_flight", + "dump_osd_network", + "dump_pgstate_history", + "dump_recovery_reservations", + "dump_scrubs", + "dump_watchers", + "get_mapped_pools", + "list_devices", + "list_unfound", + "log dump", + "objecter_requests", + "ops", + "perf dump", + "perf histogram dump", + "perf schema", + "status", + "version", + ] + if not microceph_pkg: + directory = '/var/run/ceph' self.add_file_tags({ "/var/log/ceph/(.*/)?ceph-(.*-)?osd.*.log": 'ceph_osd_log', }) @@ -63,34 +103,8 @@ "ceph-volume lvm list" ]) - cmds = [ - "bluestore bluefs available", - 
"config diff", - "config show", - "dump_blacklist", - "dump_blocked_ops", - "dump_historic_ops_by_duration", - "dump_historic_slow_ops", - "dump_mempools", - "dump_ops_in_flight", - "dump_op_pq_state", - "dump_osd_network", - "dump_reservations", - "dump_watchers", - "log dump", - "perf dump", - "perf histogram dump", - "objecter_requests", - "ops", - "status", - "version", - ] - - self.add_cmd_output( - [f"ceph daemon {i} {c}" for i in self.get_socks() for c in cmds] - ) - else: + directory = '/var/snap/microceph' # Only collect microceph files, don't run any commands self.add_forbidden_path([ "/var/snap/microceph/common/**/*keyring*", @@ -103,15 +117,21 @@ "/var/snap/microceph/common/logs/*ceph-osd*.log", ]) - def get_socks(self): + # common add_cmd_output for ceph and microceph + self.add_cmd_output([ + f"ceph daemon {i} {c}" for i in + self.get_socks(directory) for c in cmds] + ) + + def get_socks(self, directory): """ Find any available admin sockets under /var/run/ceph (or subdirs for later versions of Ceph) which can be used for ceph daemon commands """ ceph_sockets = [] - for rdir, dirs, files in os.walk('/var/run/ceph/'): + for rdir, _, files in os.walk(directory): for file in files: - if file.endswith('.asok'): + if file.endswith('.asok') and 'osd' in file: ceph_sockets.append(self.path_join(rdir, file)) return ceph_sockets diff -Nru sosreport-4.5.6/sos/report/plugins/cockpit.py sosreport-4.7.0/sos/report/plugins/cockpit.py --- sosreport-4.5.6/sos/report/plugins/cockpit.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/cockpit.py 2024-02-20 00:39:46.000000000 +0000 @@ -16,15 +16,17 @@ short_desc = 'Cockpit Web Service' plugin_name = 'cockpit' - packages = ('cockpit-ws', 'cockpit-system') + packages = ('cockpit-ws', 'cockpit-system', + 'cockpit-bridge') services = ('cockpit',) def setup(self): + self.add_forbidden_path('/etc/cockpit/ws-certs.d/') self.add_copy_spec([ - '/etc/cockpit/cockpit.conf', + '/etc/cockpit/', '/etc/pam.d/cockpit' ]) - self.add_cmd_output('remotectl certificate') + self.add_cmd_output('cockpit-bridge --packages') # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/containerd.py sosreport-4.7.0/sos/report/plugins/containerd.py --- sosreport-4.5.6/sos/report/plugins/containerd.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/containerd.py 2024-02-20 00:39:46.000000000 +0000 @@ -14,11 +14,12 @@ short_desc = 'Containerd containers' plugin_name = 'containerd' profiles = ('container',) - packages = ('containerd',) + packages = ('containerd', 'containerd.io',) def setup(self): self.add_copy_spec([ "/etc/containerd/", + "/etc/cni/net.d/", ]) self.add_cmd_output('containerd config dump') diff -Nru sosreport-4.5.6/sos/report/plugins/convert2rhel.py sosreport-4.7.0/sos/report/plugins/convert2rhel.py --- sosreport-4.5.6/sos/report/plugins/convert2rhel.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/convert2rhel.py 2024-02-20 00:39:46.000000000 +0000 @@ -23,6 +23,7 @@ "/var/log/convert2rhel/convert2rhel.log", "/var/log/convert2rhel/archive/convert2rhel-*.log", "/var/log/convert2rhel/rpm_va.log", + "/var/log/convert2rhel/convert2rhel-pre-conversion.*", ]) diff -Nru sosreport-4.5.6/sos/report/plugins/coredump.py sosreport-4.7.0/sos/report/plugins/coredump.py --- sosreport-4.5.6/sos/report/plugins/coredump.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/coredump.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,43 @@ +# Copyright (C) 2023 
Red Hat, Inc., Jose Castillo + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt + + +class Coredump(Plugin, IndependentPlugin): + + short_desc = 'Retrieve coredump information' + + plugin_name = "coredump" + profiles = ('system', 'debug') + packages = ('systemd-udev', 'systemd-coredump') + + option_list = [ + PluginOpt("detailed", default=False, + desc="collect detailed information for every report") + ] + + def setup(self): + self.add_copy_spec([ + "/etc/systemd/coredump.conf", + "/etc/systemd/coredump.conf.d/", + "/run/systemd/coredump.conf.d/", + "/usr/lib/systemd/coredump.conf.d/" + ]) + + self.add_cmd_output("coredumpctl dump") + + coredump_list = self.collect_cmd_output("coredumpctl list") + if self.get_option("detailed") and coredump_list['status'] == 0: + for line in coredump_list["output"].splitlines()[1:]: + self.add_cmd_output("coredumpctl info " + f"{line.split()[4]}") + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/discovery.py sosreport-4.7.0/sos/report/plugins/discovery.py --- sosreport-4.5.6/sos/report/plugins/discovery.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/discovery.py 2024-02-20 00:39:46.000000000 +0000 @@ -16,16 +16,16 @@ short_desc = 'Discovery inspection and reporting tool' plugin_name = 'discovery' packages = ('discovery', 'discovery-tools',) + containers = ('dsc-db', 'discovery', 'discovery-toolbox') def setup(self): self.add_copy_spec([ "/root/discovery/db/volume/data/userdata/pg_log/", "/root/discovery/server/volumes/log/app.log", - "/root/discovery/server/volumes/log/discovery-server.log" + "/root/discovery/server/volumes/log/discovery-server.log", + "/var/lib/containers/storage/volumes/dsc-data/_data/userdata/log/", + "/var/discovery/server/volumes/log/", ]) - self.add_container_logs([ - 'discovery', - 'dsc-db' - ]) + self.add_container_logs(list(self.containers)) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/filesys.py sosreport-4.7.0/sos/report/plugins/filesys.py --- sosreport-4.5.6/sos/report/plugins/filesys.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/filesys.py 2024-02-20 00:39:46.000000000 +0000 @@ -44,7 +44,8 @@ "/proc/self/mountstats", "/proc/[0-9]*/mountinfo", "/etc/mtab", - "/etc/fstab" + "/etc/fstab", + "/run/mount/utab", ]) self.add_cmd_output("mount -l", root_symlink="mount", tags="mount") @@ -56,7 +57,21 @@ "lslocks" ]) - self.add_forbidden_path('/proc/fs/panfs') + self.add_forbidden_path([ + # cifs plugin + '/proc/fs/cifs', + # lustre plugin + '/proc/fs/ldiskfs', + '/proc/fs/lustre', + # nfs plugin + '/proc/fs/nfsd', + '/proc/fs/nfsfs', + # panfs (from Panasas company) provides statistics which can be + # very large (100s of GB) + '/proc/fs/panfs', + # xfs plugin + '/proc/fs/xfs' + ]) if self.get_option('lsof'): self.add_cmd_output("lsof -b +M -n -l -P", root_symlink="lsof", diff -Nru sosreport-4.5.6/sos/report/plugins/firewall_tables.py sosreport-4.7.0/sos/report/plugins/firewall_tables.py --- sosreport-4.5.6/sos/report/plugins/firewall_tables.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/firewall_tables.py 2024-02-20 
00:39:46.000000000 +0000 @@ -56,7 +56,7 @@ nft_pred = SoSPredicate(self, kmods=['nf_tables', 'nfnetlink'], required={'kmods': 'all'}) - return self.collect_cmd_output("nft list ruleset", pred=nft_pred, + return self.collect_cmd_output("nft -a list ruleset", pred=nft_pred, changes=True) def setup(self): @@ -75,9 +75,10 @@ # collect iptables -t for any existing table, if we can't read the # tables, collect 2 default ones (mangle, filter) # do collect them only when relevant nft list ruleset exists - default_ip_tables = "mangle\nfilter\n" + default_ip_tables = "mangle\nfilter\nnat\n" try: - ip_tables_names = open("/proc/net/ip_tables_names").read() + with open('/proc/net/ip_tables_names', 'r') as ifile: + ip_tables_names = ifile.read() except IOError: ip_tables_names = default_ip_tables for table in ip_tables_names.splitlines(): @@ -85,7 +86,8 @@ self.collect_iptable(table) # collect the same for ip6tables try: - ip_tables_names = open("/proc/net/ip6_tables_names").read() + with open('/proc/net/ip6_tables_names', 'r') as ipfile: + ip_tables_names = ipfile.read() except IOError: ip_tables_names = default_ip_tables for table in ip_tables_names.splitlines(): diff -Nru sosreport-4.5.6/sos/report/plugins/flatpak.py sosreport-4.7.0/sos/report/plugins/flatpak.py --- sosreport-4.5.6/sos/report/plugins/flatpak.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/flatpak.py 2024-02-20 00:39:46.000000000 +0000 @@ -19,6 +19,7 @@ packages = ("flatpak",) def setup(self): + env = {"GVFS_REMOTE_VOLUME_MONITOR_IGNORE": "1"} self.add_cmd_output([ "flatpak --version", "flatpak --default-arch", @@ -31,8 +32,8 @@ "flatpak list --runtime --show-details", "flatpak list --app --show-details", "flatpak history --columns=all", - ]) + ], env=env) if self.get_option("verify"): - self.add_cmd_output("flatpak repair --dry-run") + self.add_cmd_output("flatpak repair --dry-run", env=env) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/foreman.py sosreport-4.7.0/sos/report/plugins/foreman.py --- sosreport-4.5.6/sos/report/plugins/foreman.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/foreman.py 2024-02-20 00:39:46.000000000 +0000 @@ -25,8 +25,8 @@ profiles = ('sysmgmt',) packages = ('foreman',) option_list = [ - PluginOpt('months', default=1, - desc='number of months for dynflow output'), + PluginOpt('days', default=14, + desc='number of days for dynflow output'), PluginOpt('proxyfeatures', default=False, desc='collect features of smart proxies'), PluginOpt('puma-gc', default=False, @@ -43,7 +43,9 @@ self.dbhost = "localhost" self.dbpasswd = "" try: - for line in open("/etc/foreman/database.yml").read().splitlines(): + with open('/etc/foreman/database.yml', 'r') as dfile: + foreman_lines = dfile.read().splitlines() + for line in foreman_lines: # skip empty lines and lines with comments if not line or line[0] == '#': continue @@ -177,9 +179,9 @@ self.add_cmd_output(_cmd, suggest_filename='foreman_db_tables_sizes', env=self.env) - months = '%s months' % self.get_option('months') + days = '%s days' % self.get_option('days') - # Construct the DB queries, using the months option to limit the range + # Construct the DB queries, using the days option to limit the range # of entries returned scmd = ( @@ -188,7 +190,7 @@ ) authcmd = ( - 'select type,name,host,port,account,base_dn,attr_login,' + 'select id,type,name,host,port,account,base_dn,attr_login,' 'onthefly_register,tls from auth_sources' ) @@ -196,7 +198,7 @@ 'select dynflow_execution_plans.* from 
foreman_tasks_tasks join ' 'dynflow_execution_plans on (foreman_tasks_tasks.external_id = ' 'dynflow_execution_plans.uuid::varchar) where foreman_tasks_tasks.' - 'started_at > NOW() - interval %s' % quote(months) + 'started_at > NOW() - interval %s' % quote(days) ) dactioncmd = ( @@ -204,7 +206,7 @@ 'dynflow_actions on (foreman_tasks_tasks.external_id = ' 'dynflow_actions.execution_plan_uuid::varchar) where ' 'foreman_tasks_tasks.started_at > NOW() - interval %s' - % quote(months) + % quote(days) ) dstepscmd = ( @@ -212,7 +214,7 @@ 'dynflow_steps on (foreman_tasks_tasks.external_id = ' 'dynflow_steps.execution_plan_uuid::varchar) where ' 'foreman_tasks_tasks.started_at > NOW() - interval %s' - % quote(months) + % quote(days) ) # counts of fact_names prefixes/types: much of one type suggests @@ -230,6 +232,7 @@ foremandb = { 'foreman_settings_table': scmd, + 'foreman_schema_migrations': 'select * from schema_migrations', 'foreman_auth_table': authcmd, 'dynflow_schema_info': 'select * from dynflow_schema_info', 'audits_table_count': 'select count(*) from audits', @@ -303,10 +306,6 @@ def postproc(self): self.do_path_regex_sub( - "/var/log/%s*/foreman-ssl_access_ssl.log*" % self.apachepkg, - r"(.*\?(passw|cred|token|secret|key).*=)(.*) (HTTP.*(.*))", - r"\1******** \4") - self.do_path_regex_sub( r"/etc/foreman/(.*)((conf)(.*)?)", r"((\:|\s*)(passw|cred|token|secret|key).*(\:\s|=))(.*)", r"\1********") @@ -335,8 +334,7 @@ self.pumactl = "scl enable tfm '%s'" % self.pumactl super(RedHatForeman, self).setup() - self.add_cmd_output_scl('tfm', 'gem list', - suggest_filename='scl enable tfm gem list') + self.add_cmd_output('gem list') class DebianForeman(Foreman, DebianPlugin, UbuntuPlugin): diff -Nru sosreport-4.5.6/sos/report/plugins/frr.py sosreport-4.7.0/sos/report/plugins/frr.py --- sosreport-4.5.6/sos/report/plugins/frr.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/frr.py 2024-02-20 00:39:46.000000000 +0000 @@ -29,7 +29,11 @@ containers = ('frr',) def setup(self): - self.add_copy_spec("/etc/frr/") + var_ansible_gen = "/var/lib/config-data/ansible-generated/frr" + self.add_copy_spec([ + "/etc/frr/", + var_ansible_gen + "/etc/frr/", + ]) if self.container_exists('frr'): subcmds = [ diff -Nru sosreport-4.5.6/sos/report/plugins/fwupd.py sosreport-4.7.0/sos/report/plugins/fwupd.py --- sosreport-4.5.6/sos/report/plugins/fwupd.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/fwupd.py 2024-02-20 00:39:46.000000000 +0000 @@ -6,7 +6,7 @@ # # See the LICENSE file in the source distribution for further information. 
-from sos.report.plugins import Plugin, IndependentPlugin +from sos.report.plugins import Plugin, IndependentPlugin, SoSPredicate class Fwupd(Plugin, IndependentPlugin): @@ -19,6 +19,7 @@ packages = ('fwupd',) def setup(self): + self.set_cmd_predicate(SoSPredicate(self, services=["fwupd"])) self.add_cmd_output([ "fwupdmgr get-approved-firmware", "fwupdmgr get-devices --no-unreported-check", diff -Nru sosreport-4.5.6/sos/report/plugins/grafana.py sosreport-4.7.0/sos/report/plugins/grafana.py --- sosreport-4.5.6/sos/report/plugins/grafana.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/grafana.py 2024-02-20 00:39:46.000000000 +0000 @@ -19,29 +19,52 @@ packages = ('grafana',) + def _is_snap_installed(self): + grafana_pkg = self.policy.package_manager.pkg_by_name('grafana') + if grafana_pkg: + return grafana_pkg['pkg_manager'] == 'snap' + return False + def setup(self): - if self.get_option("all_logs"): - self.add_copy_spec("/var/log/grafana/*.log*") + self._is_snap = self._is_snap_installed() + if self._is_snap: + grafana_cli = "grafana.grafana-cli" + log_path = "/var/snap/grafana/common/data/log/" + config_path = "/var/snap/grafana/current/conf/grafana.ini" + + self.add_cmd_output("snap info grafana") else: - self.add_copy_spec("/var/log/grafana/*.log") + grafana_cli = "grafana-cli" + log_path = "/var/log/grafana/" + config_path = "/etc/grafana/" self.add_cmd_output([ - "grafana-cli plugins ls", - "grafana-cli plugins list-remote", - "grafana-cli -v", - "grafana-server -v", + f'{grafana_cli} plugins ls', + f'{grafana_cli} plugins list-remote', + f'{grafana_cli} -v', + 'grafana-server -v', ]) + log_file_pattern = "*.log*" if self.get_option("all_logs") else "*.log" + self.add_copy_spec([ - "/etc/grafana/", + log_path + log_file_pattern, + config_path, "/etc/sysconfig/grafana-server", ]) def postproc(self): protect_keys = [ - "admin_password", "secret_key" + "admin_password", + "secret_key", + "password", + "client_secret", ] + inifile = ( + "/var/snap/grafana/current/conf/grafana.ini" + if self._is_snap + else "/etc/grafana/grafana.ini" + ) regexp = r"(^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys) - self.do_path_regex_sub("/etc/grafana/grafana.ini", - regexp, r"\1*********") + self.do_path_regex_sub(inifile, regexp, r"\1*********") diff -Nru sosreport-4.5.6/sos/report/plugins/greenboot.py sosreport-4.7.0/sos/report/plugins/greenboot.py --- sosreport-4.5.6/sos/report/plugins/greenboot.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/greenboot.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,26 @@ +# Copyright 2023 Red Hat, Inc. Evgeny Slutsky +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, RedHatPlugin + + +class Greenboot(Plugin, RedHatPlugin): + """The greenboot plugin collects systemd service logs and configuration. 
+ """ + + short_desc = 'Greenboot' + plugin_name = 'greenboot' + services = (plugin_name, 'greenboot-healthcheck', + 'greenboot-task-runner', 'redboot-task-runner',) + profiles = ('system',) + + def setup(self): + self.add_copy_spec([ + "/etc/greenboot/greenboot.conf", + ]) diff -Nru sosreport-4.5.6/sos/report/plugins/haproxy.py sosreport-4.7.0/sos/report/plugins/haproxy.py --- sosreport-4.5.6/sos/report/plugins/haproxy.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/haproxy.py 2024-02-20 00:39:46.000000000 +0000 @@ -48,11 +48,12 @@ matched = None provision_ip = None try: - for line in open("/etc/haproxy/haproxy.cfg").read().splitlines(): - if matched: - provision_ip = line.split()[1] - break - matched = match(r".*haproxy\.stats.*", line) + with open("/etc/haproxy/haproxy.cfg", 'r') as hfile: + for line in hfile.read().splitlines(): + if matched: + provision_ip = line.split()[1] + break + matched = match(r".*haproxy\.stats.*", line) except IOError: # fallback when the cfg file is not accessible pass diff -Nru sosreport-4.5.6/sos/report/plugins/hardware.py sosreport-4.7.0/sos/report/plugins/hardware.py --- sosreport-4.5.6/sos/report/plugins/hardware.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/hardware.py 2024-02-20 00:39:46.000000000 +0000 @@ -21,6 +21,8 @@ self.add_copy_spec("/proc/interrupts", tags='interrupts') self.add_copy_spec([ + "/proc/device-tree/compatible", + "/proc/device-tree/model", "/proc/irq", "/proc/dma", "/proc/devices", diff -Nru sosreport-4.5.6/sos/report/plugins/hpssm.py sosreport-4.7.0/sos/report/plugins/hpssm.py --- sosreport-4.5.6/sos/report/plugins/hpssm.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/hpssm.py 2024-02-20 00:39:46.000000000 +0000 @@ -45,7 +45,7 @@ ["%s %s" % (cmd, subcmd) for subcmd in subcmds] ) - pattern = re.compile("^HP.*Smart Array (.*) in Slot ([0123456789])") + pattern = re.compile("^HP[E] (.*) in Slot ([0123456789]+)") config_detail_cmd = cmd + ' ctrl all show config detail' config_detail = self.collect_cmd_output(config_detail_cmd) ctrl_slots = [] diff -Nru sosreport-4.5.6/sos/report/plugins/infinidat.py sosreport-4.7.0/sos/report/plugins/infinidat.py --- sosreport-4.5.6/sos/report/plugins/infinidat.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/infinidat.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,44 @@ +# Copyright (C) 2024 Alejandro Santoyo +# +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, IndependentPlugin + + +class InfinidatStorage(Plugin, IndependentPlugin): + + short_desc = 'Infinidat Storage plugin' + plugin_name = 'infinidat' + profiles = ('storage',) + packages = ('host-power-tools',) + + def setup(self): + # Get infinidat logs + if not self.get_option("all_logs"): + self.add_copy_spec([ + "/var/log/infinihost.latest*.log", + "/var/log/infinihost.usage*.log", + ]) + else: + self.add_copy_spec([ + "/var/log/infinihost*.log", + "/var/log/buildout.*.log", + ]) + + # Get info from the infinidat boxes, etc. 
+ self.add_cmd_output([ + "infinihost volume list", + "infinihost connectivity list", + "infinihost system list", + "infinihost pool list", + "infinihost snapshot list", + "infinihost --version" + ]) + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/__init__.py sosreport-4.7.0/sos/report/plugins/__init__.py --- sosreport-4.5.6/sos/report/plugins/__init__.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/__init__.py 2024-02-20 00:39:46.000000000 +0000 @@ -61,6 +61,7 @@ for t in _types: if t[0](st.st_mode): return t[1] + return '' _certmatch = re.compile("-*BEGIN.*?-*END", re.DOTALL) @@ -187,6 +188,10 @@ return all(items) elif required == 'none': return not any(items) + raise ValueError( + f"predicate requires must be 'any', 'all', or 'none' " + f"not {required}" + ) def _failed_or_forbidden(self, test, item): """Helper to direct failed predicates to provide the proper messaging @@ -453,7 +458,7 @@ if type('') in self.val_type: self.value = str(val) return - if not any([type(val) == _t for _t in self.val_type]): + if not any([type(val) is _t for _t in self.val_type]): valid = [] for t in self.val_type: if t is None: @@ -1274,28 +1279,15 @@ """ try: path = self._get_dest_for_srcpath(srcpath) - common_flags = re.IGNORECASE | re.MULTILINE - if hasattr(regexp, "pattern"): - pattern = regexp.pattern - flags = regexp.flags | common_flags - else: - pattern = regexp - flags = common_flags self._log_debug("substituting scrpath '%s'" % srcpath) self._log_debug("substituting '%s' for '%s' in '%s'" - % (subst, pattern, path)) + % (subst, + regexp.pattern if hasattr(regexp, "pattern") + else regexp, + path)) if not path: return 0 - readable = self.archive.open_file(path) - content = readable.read() - if not isinstance(content, str): - content = content.decode('utf8', 'ignore') - result, replacements = re.subn(pattern, subst, content, - flags=flags) - if replacements: - self.archive.add_string(result, self.strip_sysroot(srcpath)) - else: - replacements = 0 + replacements = self.archive.do_file_sub(path, regexp, subst) except (OSError, IOError) as e: # if trying to regexp a nonexisting file, dont log it as an # error to stdout @@ -1458,11 +1450,11 @@ saved for use later in preparing a report. 
""" if self._timeout_hit: - return + return None if self._is_forbidden_path(srcpath): self._log_debug("skipping forbidden path '%s'" % srcpath) - return '' + return None if not dest: dest = srcpath @@ -1474,19 +1466,19 @@ st = os.lstat(srcpath) except (OSError, IOError): self._log_info("failed to stat '%s'" % srcpath) - return + return None if stat.S_ISLNK(st.st_mode): self._copy_symlink(srcpath) - return + return None else: if stat.S_ISDIR(st.st_mode) and os.access(srcpath, os.R_OK): # copy empty directory if not self.listdir(srcpath): self.archive.add_dir(dest) - return + return None self._copy_dir(srcpath) - return + return None # handle special nodes (block, char, fifo, socket) if not (stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode)): @@ -1494,7 +1486,7 @@ self._log_debug("creating %s node at archive:'%s'" % (ntype, dest)) self._copy_node(dest, st) - return + return None # if we get here, it's definitely a regular file (not a symlink or dir) self._log_debug("copying path '%s' to archive:'%s'" % (srcpath, dest)) @@ -1512,7 +1504,7 @@ 'symlink': "no" }) - return + return None def add_forbidden_path(self, forbidden): """Specify a path, or list of paths, to not copy, even if it's part of @@ -1695,7 +1687,7 @@ if not self.test_predicate(pred=pred): self._log_info("skipped copy spec '%s' due to predicate (%s)" % (copyspecs, self.get_predicate(pred=pred))) - return + return None if sizelimit is None: sizelimit = self.get_option("log_size") @@ -1723,11 +1715,12 @@ mangled to _conf or similar. """ if fname.startswith(('/proc', '/sys')): - return + return None _fname = fname.split('/')[-1] _fname = _fname.replace('-', '_') if _fname.endswith(('.conf', '.log', '.txt')): return _fname.replace('.', '_') + return None for copyspec in copyspecs: if not (copyspec and len(copyspec)): @@ -1890,6 +1883,7 @@ 'files_copied': _manifest_files, 'tags': _spec_tags }) + return None def add_device_cmd(self, cmds, devices, timeout=None, sizelimit=None, chroot=True, runat=None, env=None, binary=False, @@ -2322,7 +2316,7 @@ """ if self._timeout_hit: - return + return None if timeout is None: timeout = self.cmdtimeout @@ -3168,17 +3162,16 @@ :type subdir: ``str`` :param tags: Tags to be added to this file in the manifest - :type tags: ``str`` or ``list`` of ``str``s + :type tags: ``str`` or ``list`` of ``str`` """ try: start = time() _pfname = self._make_command_filename(fname, subdir=subdir) self.archive.check_path(_pfname, P_FILE) _name = self.archive.dest_path(_pfname) - _file = open(_name, 'w') - self._log_debug(f"manual collection file opened: {_name}") - yield _file - _file.close() + with open(_name, 'w') as _file: + self._log_debug(f"manual collection file opened: {_name}") + yield _file end = time() run = end - start self._log_info(f"manual collection '{fname}' finished in {run}") @@ -3447,10 +3440,10 @@ try: cmd_line_paths = glob.glob(cmd_line_glob) for path in cmd_line_paths: - f = open(self.path_join(path), 'r') - cmd_line = f.read().strip() - if process in cmd_line: - status = True + with open(self.path_join(path), 'r') as pfile: + cmd_line = pfile.read().strip() + if process in cmd_line: + status = True except IOError: return False return status @@ -3570,20 +3563,6 @@ to match these against all found SCLs on the system. SCLs that do match class.files or class.packages are then accessible via self.scls_matched when the plugin is invoked. 
- - Additionally, this plugin class provides "add_cmd_output_scl" (run - a command in context of given SCL), and "add_copy_spec_scl" and - "add_copy_spec_limit_scl" (copy package from file system of given SCL). - - For example, you can implement a plugin that will list all global npm - packages in every SCL that contains "npm" package: - - class SCLNpmPlugin(Plugin, SCLPlugin): - packages = ("%(scl_name)s-npm",) - - def setup(self): - for scl in self.scls_matched: - self.add_cmd_output_scl(scl, "npm ls -g --json") """ @property @@ -3602,19 +3581,6 @@ scl_cmd = "scl enable %s \"%s\"" % (scl, cmd) return scl_cmd - def add_cmd_output_scl(self, scl, cmds, **kwargs): - """Same as add_cmd_output, except that it wraps command in - "scl enable" call and sets proper PATH. - """ - if scl not in self.scls_matched: - return - if isinstance(cmds, str): - cmds = [cmds] - scl_cmds = [] - for cmd in cmds: - scl_cmds.append(self.convert_cmd_scl(scl, cmd)) - self.add_cmd_output(scl_cmds, **kwargs) - # config files for Software Collections are under /etc/${prefix}/${scl} and # var files are under /var/${prefix}/${scl} where the ${prefix} is distro # specific path. So we need to insert the paths after the appropriate root diff -Nru sosreport-4.5.6/sos/report/plugins/insights.py sosreport-4.7.0/sos/report/plugins/insights.py --- sosreport-4.5.6/sos/report/plugins/insights.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/insights.py 2024-02-20 00:39:46.000000000 +0000 @@ -25,16 +25,18 @@ plugin_name = 'insights' packages = ('redhat-access-insights', 'insights-client') profiles = ('system', 'sysmgmt') - config = ( + config_and_status = ( '/etc/insights-client/insights-client.conf', '/etc/insights-client/.registered', '/etc/insights-client/tags.yaml', '/etc/insights-client/malware-detection-config.yml', - '/etc/redhat-access-insights/redhat-access-insights.conf' + '/etc/redhat-access-insights/redhat-access-insights.conf', + '/etc/insights-client/.lastupload', + '/etc/insights-client/machine-id', ) def setup(self): - self.add_copy_spec(self.config) + self.add_copy_spec(self.config_and_status) self.add_copy_spec('/var/lib/insights') # Legacy log file location @@ -50,8 +52,11 @@ timeout=30 ) + for _dir in ["/etc/rhsm", "/sys/kernel", "/var/lib/sss"]: + self.add_cmd_output(f"/bin/ls -lanR {_dir}", cmd_as_tag=True) + def postproc(self): - for conf in self.config: + for conf in self.config_and_status: self.do_file_sub( conf, r'(password[\t\ ]*=[\t\ ]*)(.+)', r'\1********' ) diff -Nru sosreport-4.5.6/sos/report/plugins/ipa.py sosreport-4.7.0/sos/report/plugins/ipa.py --- sosreport-4.5.6/sos/report/plugins/ipa.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ipa.py 2024-02-20 00:39:46.000000000 +0000 @@ -37,14 +37,17 @@ def ca_installed(self): # Follow the same checks as IPA CA installer code - if self.path_exists("%s/conf/ca/CS.cfg" % self.pki_tomcat_dir_v4) \ - or self.path_exists("%s/conf/CS.cfg" % self.pki_tomcat_dir_v3): - return True + return any( + self.path_exists(path) for path in [ + f"{self.pki_tomcat_dir_v4}/conf/ca/CS.cfg", + f"{self.pki_tomcat_dir_v3}/conf/CS.cfg" + ] + ) def ipa_server_installed(self): - if self.is_installed("ipa-server") \ - or self.is_installed("freeipa-server"): - return True + return any( + self.is_installed(pkg) for pkg in ['ipa-server', 'freeipa-server'] + ) def retrieve_pki_logs(self, ipa_version): if ipa_version == "v4": diff -Nru sosreport-4.5.6/sos/report/plugins/kernelrt.py sosreport-4.7.0/sos/report/plugins/kernelrt.py 
--- sosreport-4.5.6/sos/report/plugins/kernelrt.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/kernelrt.py 2024-02-20 00:39:46.000000000 +0000 @@ -9,7 +9,7 @@ # # See the LICENSE file in the source distribution for further information. -from sos.report.plugins import Plugin, RedHatPlugin +from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate class KernelRT(Plugin, RedHatPlugin): @@ -36,6 +36,12 @@ # note: rhbz#1059685 'tuna - NameError: global name 'cgroups' is not # defined this command throws an exception on versions prior to # 0.10.4-5. - self.add_cmd_output('tuna -CP') + co = {'cmd': 'tuna --help', 'output': '-P'} + option_present = self.test_predicate( + self, pred=SoSPredicate(self, cmd_outputs=co) + ) + self.add_cmd_output( + f"tuna {'-CP' if option_present else 'show_threads -C'}" + ) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/kubernetes.py sosreport-4.7.0/sos/report/plugins/kubernetes.py --- sosreport-4.5.6/sos/report/plugins/kubernetes.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/kubernetes.py 2024-02-20 00:39:46.000000000 +0000 @@ -9,7 +9,8 @@ # # See the LICENSE file in the source distribution for further information. -from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin, PluginOpt +from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin, + UbuntuPlugin, PluginOpt) from fnmatch import translate import re @@ -175,12 +176,12 @@ # like "pass", "pwd", "key" or "token" env_regexp = r'(?P{\s*"name":\s*[^,]*' \ r'(pass|pwd|key|token|cred|PASS|PWD|KEY)[^,]*,\s*"value":)[^}]*' - self.do_cmd_output_sub('kubectl', env_regexp, + self.do_cmd_output_sub(self.kube_cmd, env_regexp, r'\g "********"') # Next, we need to handle the private keys and certs in some # output that is not hit by the previous iteration. - self.do_cmd_private_sub('kubectl') + self.do_cmd_private_sub(self.kube_cmd) class RedHatKubernetes(Kubernetes, RedHatPlugin): @@ -205,12 +206,13 @@ super(RedHatKubernetes, self).setup() -class UbuntuKubernetes(Kubernetes, UbuntuPlugin): +class UbuntuKubernetes(Kubernetes, UbuntuPlugin, DebianPlugin): packages = ('kubernetes',) files = ( '/root/cdk/cdk_addons_kubectl_config', - '/etc/kubernetes/admin.conf' + '/etc/kubernetes/admin.conf', + '/var/snap/microk8s/current/credentials/client.config', ) services = ( @@ -227,6 +229,9 @@ for svc in self.services: self.add_journal(units=svc) + if self.is_installed('microk8s'): + self.kube_cmd = 'microk8s kubectl' + super(UbuntuKubernetes, self).setup() diff -Nru sosreport-4.5.6/sos/report/plugins/landscape.py sosreport-4.7.0/sos/report/plugins/landscape.py --- sosreport-4.5.6/sos/report/plugins/landscape.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/landscape.py 2024-02-20 00:39:46.000000000 +0000 @@ -7,6 +7,7 @@ # See the LICENSE file in the source distribution for further information. from sos.report.plugins import Plugin, UbuntuPlugin +import os class Landscape(Plugin, UbuntuPlugin): @@ -19,6 +20,33 @@ packages = ('landscape-client', 'landscape-server') def setup(self): + + vars_all = [p in os.environ for p in [ + 'LANDSCAPE_API_KEY', + 'LANDSCAPE_API_SECRET', + 'LANDSCAPE_API_URI', + ]] + + if not (all(vars_all)): + self.soslog.warning("Not all environment variables set. 
" + "Source the environment file for the user " + "intended to connect to the Landscape " + "environment so that the landscape-api " + "commands can be used.") + else: + self.add_cmd_output([ + "landscape-api get-distributions", + "landscape-api get-apt-sources", + "landscape-api get-repository-profiles", + "landscape-api get activites --limit 100", + ]) + self.add_cmd_output([ + "landscape-api --json get-distributions", + "landscape-api --json get-apt-sources", + "landscape-api --json get-repository-profiles", + "landscape-api --json get activites --limit 100", + ]) + self.add_copy_spec([ "/etc/default/landscape-client", "/etc/default/landscape-server", diff -Nru sosreport-4.5.6/sos/report/plugins/libvirt.py sosreport-4.7.0/sos/report/plugins/libvirt.py --- sosreport-4.5.6/sos/report/plugins/libvirt.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/libvirt.py 2024-02-20 00:39:46.000000000 +0000 @@ -15,7 +15,7 @@ short_desc = 'libvirt virtualization API' plugin_name = 'libvirt' - profiles = ('system', 'virt') + profiles = ('system', 'virt', 'openstack_edpm') def setup(self): libvirt_keytab = "/etc/libvirt/krb5.tab" @@ -43,6 +43,9 @@ "/etc/libvirt/qemu-lockd.conf", "/etc/libvirt/virtlockd.conf", "/var/lib/libvirt/dnsmasq/*", + "/var/lib/libvirt/qemu/snapshot/*/*.xml", + "/var/lib/openstack/config/libvirt", + "/var/lib/openstack/containers/libvirt*.json", ]) if not self.get_option("all_logs"): @@ -57,10 +60,15 @@ "/var/log/containers/libvirt/lxc/*.log", "/var/log/containers/libvirt/swtpm/libvirt/qemu/*.log", "/var/log/containers/libvirt/uml/*.log", + "/var/log/containers/qemu/*.log", + "/var/log/containers/libvirt/*.log", ]) else: - self.add_copy_spec("/var/log/libvirt") - self.add_copy_spec("/var/log/containers/libvirt") + self.add_copy_spec([ + "/var/log/libvirt", + "/var/log/containers/qemu/", + "/var/log/containers/libvirt/", + ]) if self.path_exists(self.path_join(libvirt_keytab)): self.add_cmd_output("klist -ket %s" % libvirt_keytab) @@ -69,9 +77,10 @@ # get details of processes of KVM hosts for pidfile in glob.glob("/run/libvirt/*/*.pid"): - pid = open(pidfile).read().splitlines()[0] - for pf in ["environ", "cgroup", "maps", "numa_maps", "limits"]: - self.add_copy_spec("/proc/%s/%s" % (pid, pf)) + with open(pidfile, 'r') as pfile: + pid = pfile.read().splitlines()[0] + for pf in ["environ", "cgroup", "maps", "numa_maps", "limits"]: + self.add_copy_spec("/proc/%s/%s" % (pid, pf)) self.add_file_tags({ "/run/libvirt/qemu/*.xml": "var_qemu_xml", diff -Nru sosreport-4.5.6/sos/report/plugins/logrotate.py sosreport-4.7.0/sos/report/plugins/logrotate.py --- sosreport-4.5.6/sos/report/plugins/logrotate.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/logrotate.py 2024-02-20 00:39:46.000000000 +0000 @@ -17,6 +17,7 @@ profiles = ('system',) var_puppet_gen = "/var/lib/config-data/puppet-generated/crond" + var_ansible_gen = "/var/lib/config-data/ansible-generated/crond" def setup(self): self.add_cmd_output("logrotate --debug /etc/logrotate.conf", @@ -26,7 +27,9 @@ "/var/lib/logrotate.status", "/var/lib/logrotate/logrotate.status", self.var_puppet_gen + "/etc/logrotate-crond.conf", - self.var_puppet_gen + "/var/spool/cron/root" + self.var_puppet_gen + "/var/spool/cron/root", + self.var_ansible_gen + "/etc/logrotate-crond.conf", + self.var_ansible_gen + "/var/spool/cron/root" ]) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/logs.py sosreport-4.7.0/sos/report/plugins/logs.py --- 
sosreport-4.5.6/sos/report/plugins/logs.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/logs.py 2024-02-20 00:39:46.000000000 +0000 @@ -51,6 +51,7 @@ "/var/log/secure*", "/var/log/udev", "/var/log/dist-upgrade", + "/var/log/auth.log", ]) self.add_cmd_output("journalctl --disk-usage") diff -Nru sosreport-4.5.6/sos/report/plugins/lustre.py sosreport-4.7.0/sos/report/plugins/lustre.py --- sosreport-4.5.6/sos/report/plugins/lustre.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/lustre.py 2024-02-20 00:39:46.000000000 +0000 @@ -7,6 +7,7 @@ # See the LICENSE file in the source distribution for further information. from sos.report.plugins import Plugin, RedHatPlugin +import re class Lustre(Plugin, RedHatPlugin): @@ -35,20 +36,18 @@ "lnetctl net show -v" ]) + # Grab almost everything + output = self.exec_cmd("lctl list_param -R *")['output'] + exclude = (".*@.*|.*dump_page_cache|peers|.*quota.*|osd-.*|.*osc.*|" + "mgs.*|.*mgc_irstate|ldlm.*state|.*job_stats|.*exports.*") + params = [item for item in output.splitlines() + if not re.match(exclude, item)] + self.get_params("all", params) + self.get_params( "basic", - ["version", "health_check", "debug", "timeout"] - ) - self.get_params("lnet", ["peers", "routes", "routers", "nis"]) - self.get_params( - "ldlm-lru", - ["ldlm.namespaces.*.lru_max_age", "ldlm.namespaces.*.lru_size"] + ["version", "health_check", "debug"] ) - self.get_params("ldlm-states", ["*.*.state"]) - self.get_params("jobid", ["jobid_name", "jobid_var"]) - self.get_params("job-stats", ["*.*.job_stats"]) - self.get_params("server_uuids", ["*.*.*server_uuid"]) - self.get_params("mgc_irstate", ["mgc.*.ir_state"]) # Client Specific self.add_cmd_output([ @@ -63,13 +62,12 @@ ]) # Server Specific - self.get_params("osd", ["osd-*.*.{mntdev,files*," + - "kbytes*,blocksize,brw_stats}"]) - self.get_params("quota", ["osd-*.*.quota_slave." + - "{info,limit_*,acct_*}"]) + self.get_params( + "osd", + ["osd-*.*.{mntdev,files*,kbytes*,blocksize,brw_stats}"] + ) + self.get_params("quota", ["osd-*.*.quota_slave.{info,limit_*,acct_*}"]) self.get_params("mgs", ["mgs.MGS.ir_timeout", "mgs.MGS.live.*"]) - self.get_params("exports", ["*.*.exports.*.*"]) - self.get_params("mntdev", ["osd*.*.mntdev"]) # mb_groups can be VERY large, and provide minimal debug usefulness self.add_forbidden_path("*/mb_groups") @@ -78,8 +76,10 @@ "/proc/fs/ldiskfs", ]) - # Grab emergency ring buffer dumps + # Grab emergency ring buffer dumps and other largish info if self.get_option("all_logs"): self.add_copy_spec("/tmp/lustre-log.*") + self.get_params("job-stats", ["*.*.job_stats"]) + self.get_params("peers", ["peers"]) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/lvm2.py sosreport-4.7.0/sos/report/plugins/lvm2.py --- sosreport-4.5.6/sos/report/plugins/lvm2.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/lvm2.py 2024-02-20 00:39:46.000000000 +0000 @@ -21,7 +21,9 @@ PluginOpt('lvmdump', default=False, desc='collect an lvmdump tarball'), PluginOpt('lvmdump-am', default=False, desc=('attempt to collect lvmdump with advanced options and ' - 'raw metadata')) + 'raw metadata')), + PluginOpt('metadata', default=False, + desc=('attempt to collect headers and metadata via pvck')) ] def do_lvmdump(self, metadata=False): @@ -39,6 +41,24 @@ self.add_cmd_output(cmd, chroot=self.tmp_in_sysroot()) + def get_pvck_output(self): + """ Collects the output of the command pvck for each block device + present in the system. 
+ """ + + block_list = self.exec_cmd( + 'pvs -o pv_name --no-headings' + ) + if block_list['status'] == 0: + for line in block_list['output'].splitlines(): + cmds = [ + f"pvck --dump headers {line}", + f"pvck --dump metadata {line}", + f"pvck --dump metadata_all {line} -v", + f"pvck --dump metadata_search {line} -v" + ] + self.add_cmd_output(cmds, subdir="metadata") + def setup(self): # When running LVM2 comamnds: # - use nolocking if supported, else locking_type 0 (no locks) @@ -92,4 +112,7 @@ elif self.get_option('lvmdump-am'): self.do_lvmdump(metadata=True) + if self.get_option('metadata'): + self.get_pvck_output() + # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/lxd.py sosreport-4.7.0/sos/report/plugins/lxd.py --- sosreport-4.5.6/sos/report/plugins/lxd.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/lxd.py 2024-02-20 00:39:46.000000000 +0000 @@ -18,31 +18,16 @@ profiles = ('container',) packages = ('lxd',) commands = ('lxc', 'lxd',) + services = ('snap.lxd.daemon', 'snap.lxd.activate') def setup(self): - lxd_kmods = [ - 'bpfilter', - 'ebtable_filter', - 'ebtables', - 'ip6table_filter', - 'ip6table_mangle', - 'ip6table_nat', - 'ip6table_raw', - 'ip6_tables', - 'iptable_filter', - 'iptable_mangle', - 'iptable_nat', - 'iptable_raw', - 'nf_nat', - 'nf_tables', - ] - - lxd_pred = SoSPredicate(self, kmods=lxd_kmods, - required={'kmods': 'all'}) - lxd_pkg = self.policy.package_manager.pkg_by_name('lxd') if lxd_pkg and lxd_pkg['pkg_manager'] == 'snap': + + lxd_pred = SoSPredicate(self, services=['snap.lxd.daemon'], + required={'services': 'all'}) + self.add_cmd_output("lxd.buginfo", pred=lxd_pred) self.add_copy_spec([ @@ -62,6 +47,8 @@ '/var/snap/lxd/common/lxd/logs/**', ]) else: + lxd_pred = SoSPredicate(self, services=['lxd'], + required={'services': 'all'}) self.add_copy_spec([ "/etc/default/lxd-bridge", "/var/log/lxd/*" @@ -79,4 +66,7 @@ "find /var/lib/lxd -maxdepth 2 -type d -ls", ], suggest_filename='var-lxd-dirs.txt') + def postproc(self): + self.do_cmd_private_sub('lxd.buginfo') + # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/maas.py sosreport-4.7.0/sos/report/plugins/maas.py --- sosreport-4.5.6/sos/report/plugins/maas.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/maas.py 2024-02-20 00:39:46.000000000 +0000 @@ -28,8 +28,14 @@ 'maas-rackd', 'maas-regiond', 'maas-syslog', - # For the snap: + # MAAS 3.5 deb: + 'maas-temporal', + 'maas-apiserver', + 'maas-agent', + # For the pre-3.5 snap: 'snap.maas.supervisor', + # MAAS 3.5 snap uses `snap.maas.pebble` service, but it's not + # included here to prevent automatic journald log collection. ) option_list = [ @@ -69,8 +75,20 @@ 'snap info maas', 'maas status' ]) + + if self.is_service("snap.maas.pebble"): + # Because `snap.maas.pebble` is not in the services + # tuple to prevent timeouts caused by log collection, + # service status and logs are collected here. 
+ self.add_service_status("snap.maas.pebble") + since = self.get_option("since") or "-1days" + self.add_journal(units="snap.maas.pebble", since=since) + # Don't send secrets - self.add_forbidden_path("/var/snap/maas/current/bind/session.key") + self.add_forbidden_path([ + "/var/snap/maas/current/bind/session.key", + "/var/snap/maas/current/http/certs/regiond-proxy-key.pem", + ]) self.add_copy_spec([ "/var/snap/maas/common/log", "/var/snap/maas/common/snap_mode", @@ -80,7 +98,7 @@ "/var/snap/maas/current/supervisord", "/var/snap/maas/current/preseeds", "/var/snap/maas/current/proxy", - "/var/snap/maas/current/rsyslog", + "/var/snap/maas/current/syslog", ]) else: self.add_copy_spec([ diff -Nru sosreport-4.5.6/sos/report/plugins/mellanox_firmware.py sosreport-4.7.0/sos/report/plugins/mellanox_firmware.py --- sosreport-4.5.6/sos/report/plugins/mellanox_firmware.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/mellanox_firmware.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,141 @@ +# Copyright (C) 2023 Nvidia Corporation, Alin Serdean + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, IndependentPlugin +import os +import time + + +class MellanoxFirmware(Plugin, IndependentPlugin): + + short_desc = 'Nvidia(Mellanox) firmware tools output' + + plugin_name = "mellanox_firmware" + profiles = ('hardware', 'system') + packages = ('mst', 'mstflint') + + MLNX_STRING = "Mellanox Technologies" + + def check_enabled(self): + """ + Checks if this plugin should be executed at all. + We will only enable the plugin if there is a + Mellanox Technologies network adapter + """ + lspci = self.exec_cmd("lspci -D -d 15b3::0200") + return lspci['status'] == 0 and self.MLNX_STRING in lspci['output'] + + def collect(self): + if not self.get_option('allow_system_changes'): + self._log_info("Skipping mst/mlx cable commands as system changes" + "would be made. Use --allow-system-changes to" + "enable this collection.") + return + + """ + Run only if mft package is installed. + flint is available from the mft package. 
+ """ + co = self.exec_cmd('flint --version') + if co['status'] != 0: + return + + co = self.collect_cmd_output('mst start') + if co['status'] != 0: + return + + self.collect_cmd_output('mst cable add') + self.collect_cmd_output("mst status -v", timeout=10) + self.collect_cmd_output("mlxcables", timeout=10) + co = os.listdir("/dev/mst") + mlxcables = [] + for device in co: + if 'cable' in device: + mlxcables.append(device) + for mlxcable in mlxcables: + self.collect_cmd_output(f"mlxcables -d {mlxcable} --DDM", + timeout=10) + self.collect_cmd_output(f"mlxcables -d {mlxcable} --dump", + timeout=10) + self.collect_cmd_output("mst stop", changes=True) + + def setup(self): + # Get all devices which have the vendor Mellanox Technologies + devices = [] + device_list = self.collect_cmd_output('lspci -D -d 15b3::0200') + """ + Will return a string of the following format: + 0000:08:00.0 Ethernet controller: Mellanox Technologies MT2892 + Family + """ + if device_list['status'] != 0: + # bail out if there no Mellanox PCI devices + return + + for line in device_list["output"].splitlines(): + """ + Should return 0000:08:00.0 + from the following string + 0000:08:00.0 Ethernet controller: Mellanox Technologies MT2892 + Family + """ + devices.append(line[0:8]+'00.0') + + devices = set(devices) + + """ + # Mft package is present if OFED is installed + # mstflint package is part of the distro and can be installed. + """ + commands = [] + + # mft package is installed if flint command is available + co = self.exec_cmd('flint --version') + if co['status'] != 0: + """ + mstflint package commands + the commands do not support position independent arguments + """ + commands = [ + ["mstconfig -d ", " -e q"], + ["mstflint -d ", " dc"], + ["mstflint -d ", " q"], + ["mstreg -d ", " --reg_name ROCE_ACCL --get"], + ["mstlink -d ", ""], + ] + else: + """ + mft package commands + the commands do not support position independent arguments + """ + commands = [ + ["mlxdump -d ", " pcie_uc --all"], + ["mstconfig -d ", " -e q"], + ["flint -d ", " dc"], + ["flint -d ", " q"], + ["mlxreg -d ", " --reg_name ROCE_ACCL --get"], + ["mlxlink -d ", ""], + ["fwtrace -d ", " -i all --tracer_mode FIFO"], + ] + for device in devices: + for command in commands: + self.add_cmd_output(f"{command[0]} {device} " + f"{command[1]}", timeout=30) + + """ + Dump the output of the mstdump command three times + waiting for one second. 
This output is useful to check + if certain registers changed + """ + for i in range(3): + self.add_cmd_output(f"mstdump {device}") + time.sleep(1) + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/microk8s.py sosreport-4.7.0/sos/report/plugins/microk8s.py --- sosreport-4.5.6/sos/report/plugins/microk8s.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/microk8s.py 2024-02-20 00:39:46.000000000 +0000 @@ -40,6 +40,9 @@ 'status', 'version' ] + self.add_copy_spec( + "/var/snap/microk8s/current/credentials/client.config" + ) self.add_cmd_output([ f"microk8s {subcmd}" for subcmd in microk8s_subcmds @@ -49,4 +52,17 @@ rsub = r'(certificate-authority-data:|token:)\s.*' self.do_cmd_output_sub("microk8s", rsub, r'\1 "**********"') + protect_keys = [ + "certificate-authority-data", + "client-certificate-data", + "client-key-data", + ] + + key_regex = fr'(^\s*({"|".join(protect_keys)})\s*:\s*)(.*)' + + self.do_path_regex_sub( + "/var/snap/microk8s/current/credentials/client.config", + key_regex, r"\1*********" + ) + # vim: set et ts=4 sw=4 diff -Nru sosreport-4.5.6/sos/report/plugins/microshift.py sosreport-4.7.0/sos/report/plugins/microshift.py --- sosreport-4.5.6/sos/report/plugins/microshift.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/microshift.py 2024-02-20 00:39:46.000000000 +0000 @@ -26,10 +26,8 @@ short_desc = 'Microshift' plugin_name = 'microshift' plugin_timeout = 900 - packages = ('microshift', 'microshift-selinux', 'microshift-networking', - 'microshift-greenboot') - services = (plugin_name, 'greenboot-healthcheck', - 'greenboot-task-runner', 'redboot-task-runner') + packages = ('microshift', 'microshift-selinux', 'microshift-networking',) + services = (plugin_name,) profiles = (plugin_name,) localhost_kubeconfig = '/var/lib/microshift/resources/kubeadmin/kubeconfig' @@ -148,9 +146,14 @@ Output format for this function is based on `oc adm inspect` command, which is used to retrieve all API resources from the cluster. 
""" + self.add_journal('microshift-etcd.scope') + + self.add_copy_spec('/etc/microshift') + if self.path_exists('/var/lib/microshift-backups'): self.add_copy_spec(['/var/lib/microshift-backups/*/version', '/var/lib/microshift-backups/*.json']) + self.add_copy_spec(['/var/log/kube-apiserver/*.log']) self.add_cmd_output([ 'microshift version', diff -Nru sosreport-4.5.6/sos/report/plugins/mssql.py sosreport-4.7.0/sos/report/plugins/mssql.py --- sosreport-4.5.6/sos/report/plugins/mssql.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/mssql.py 2024-02-20 00:39:46.000000000 +0000 @@ -43,19 +43,20 @@ sqlagent_errorlogfile = '/var/opt/mssql/log/sqlagentstartup.log' kerberoskeytabfile = None try: - for line in open(mssql_conf).read().splitlines(): - if line.startswith('['): - section = line - continue - words = line.split('=') - if words[0].strip() == 'errorlogfile': - if section == '[filelocation]': - errorlogfile = words[1].strip() - elif section == '[sqlagent]': - sqlagent_errorlogfile = words[1].strip() - elif words[0].strip() == 'kerberoskeytabfile': - if section == '[network]': - kerberoskeytabfile = words[1].strip() + with open(mssql_conf, 'r') as mfile: + for line in mfile.read().splitlines(): + if line.startswith('['): + section = line + continue + words = line.split('=') + if words[0].strip() == 'errorlogfile': + if section == '[filelocation]': + errorlogfile = words[1].strip() + elif section == '[sqlagent]': + sqlagent_errorlogfile = words[1].strip() + elif words[0].strip() == 'kerberoskeytabfile': + if section == '[network]': + kerberoskeytabfile = words[1].strip() except IOError as ex: self._log_error('Could not open conf file %s: %s' % (mssql_conf, ex)) diff -Nru sosreport-4.5.6/sos/report/plugins/networking.py sosreport-4.7.0/sos/report/plugins/networking.py --- sosreport-4.5.6/sos/report/plugins/networking.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/networking.py 2024-02-20 00:39:46.000000000 +0000 @@ -36,6 +36,10 @@ # switch to enable netstat "wide" (non-truncated) output mode ns_wide = "-W" + # list of kernel modules needed by ss_cmd, this may vary by distro version + ss_kmods = ['tcp_diag', 'udp_diag', 'inet_diag', 'unix_diag', + 'netlink_diag', 'af_packet_diag', 'xsk_diag'] + # list of ethtool short options, used in add_copy_spec and add_cmd_tags # do NOT add there "e" (see eepromdump plugopt) ethtool_shortopts = "acdgiklmPST" @@ -133,10 +137,8 @@ self.add_cmd_output(ip_macsec_show_cmd, pred=macsec_pred, changes=True) ss_cmd = "ss -peaonmi" - ss_pred = SoSPredicate(self, kmods=[ - 'tcp_diag', 'udp_diag', 'inet_diag', 'unix_diag', 'netlink_diag', - 'af_packet_diag', 'xsk_diag' - ], required={'kmods': 'all'}) + ss_pred = SoSPredicate(self, kmods=self.ss_kmods, + required={'kmods': 'all'}) self.add_cmd_output(ss_cmd, pred=ss_pred, changes=True) # Get ethtool output for every device that does not exist in a @@ -282,6 +284,17 @@ trace_host = "archive.ubuntu.com" def setup(self): + + ubuntu_ss_kmods = dict.fromkeys([22.04, 23.10], + ['tcp_diag', 'udp_diag', + 'inet_diag', 'unix_diag', + 'netlink_diag', + 'af_packet_diag', 'xsk_diag', + 'mptcp_diag', 'raw_diag']) + + if self.policy.dist_version() in ubuntu_ss_kmods: + self.ss_kmods = ubuntu_ss_kmods[self.policy.dist_version()] + super(UbuntuNetworking, self).setup() self.add_copy_spec([ @@ -299,5 +312,13 @@ self.add_cmd_output("/usr/sbin/traceroute -n %s" % self.trace_host, priority=100) + def postproc(self): + + self.do_path_regex_sub( + "/etc/netplan", + 
r"(\s+password:).*", + r"\1 ******" + ) + # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/networkmanager.py sosreport-4.7.0/sos/report/plugins/networkmanager.py --- sosreport-4.5.6/sos/report/plugins/networkmanager.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/networkmanager.py 2024-02-20 00:39:46.000000000 +0000 @@ -22,11 +22,17 @@ self.add_copy_spec([ "/etc/NetworkManager/NetworkManager.conf", "/etc/NetworkManager/system-connections", - "/etc/NetworkManager/dispatcher.d" + "/etc/NetworkManager/dispatcher.d", + "/etc/NetworkManager/conf.d", + "/usr/lib/NetworkManager/conf.d", + "/run/NetworkManager/conf.d", + "/var/lib/NetworkManager/NetworkManager-intern.conf" ]) self.add_journal(units="NetworkManager") + self.add_cmd_output("NetworkManager --print-config") + # There are some incompatible changes in nmcli since # the release of NetworkManager >= 0.9.9. In addition, # NetworkManager >= 0.9.9 will use the long names of diff -Nru sosreport-4.5.6/sos/report/plugins/nfs.py sosreport-4.7.0/sos/report/plugins/nfs.py --- sosreport-4.5.6/sos/report/plugins/nfs.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/nfs.py 2024-02-20 00:39:46.000000000 +0000 @@ -33,7 +33,6 @@ ]) self.add_cmd_output([ - "rpcinfo -p localhost", "nfsstat -o all", "exportfs -v", "nfsdclnts", diff -Nru sosreport-4.5.6/sos/report/plugins/nvidia.py sosreport-4.7.0/sos/report/plugins/nvidia.py --- sosreport-4.5.6/sos/report/plugins/nvidia.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/nvidia.py 2024-02-20 00:39:46.000000000 +0000 @@ -30,6 +30,7 @@ 'nvlink -e' ] + self.add_service_status("nvidia-persistenced") self.add_cmd_output(["nvidia-smi %s" % cmd for cmd in subcmds]) query = ('gpu_name,gpu_bus_id,vbios_version,temperature.gpu,' diff -Nru sosreport-4.5.6/sos/report/plugins/nvme.py sosreport-4.7.0/sos/report/plugins/nvme.py --- sosreport-4.5.6/sos/report/plugins/nvme.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/nvme.py 2024-02-20 00:39:46.000000000 +0000 @@ -26,7 +26,13 @@ kernel_mods = ('nvme', 'nvme_core') def setup(self): - self.add_copy_spec("/etc/nvme/*") + self.add_copy_spec([ + "/etc/nvme/*", + "/sys/class/nvme-fabrics/ctl/nvme*", + "/sys/class/nvme-subsystem/nvme-subsys*/*", + "/sys/module/nvme_core/parameters/*", + ]) + self.add_cmd_output([ "nvme list", "nvme list-subsys", diff -Nru sosreport-4.5.6/sos/report/plugins/omnipath_client.py sosreport-4.7.0/sos/report/plugins/omnipath_client.py --- sosreport-4.5.6/sos/report/plugins/omnipath_client.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/omnipath_client.py 2024-02-20 00:39:46.000000000 +0000 @@ -14,11 +14,11 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-from sos.report.plugins import Plugin, RedHatPlugin +from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin from os.path import join -class OmnipathClient(Plugin, RedHatPlugin): +class OmnipathClient(Plugin, RedHatPlugin, UbuntuPlugin): short_desc = 'OmniPath Tools and Fast Fabric Client' diff -Nru sosreport-4.5.6/sos/report/plugins/omnipath_manager.py sosreport-4.7.0/sos/report/plugins/omnipath_manager.py --- sosreport-4.5.6/sos/report/plugins/omnipath_manager.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/omnipath_manager.py 2024-02-20 00:39:46.000000000 +0000 @@ -14,10 +14,10 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -from sos.report.plugins import Plugin, RedHatPlugin +from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin -class OmnipathManager(Plugin, RedHatPlugin): +class OmnipathManager(Plugin, RedHatPlugin, UbuntuPlugin): short_desc = 'OmniPath Fabric Manager' @@ -25,6 +25,7 @@ profiles = ('hardware',) packages = ('opa-fm',) + services = ('opa-fm',) def setup(self): diff -Nru sosreport-4.5.6/sos/report/plugins/opencontrail.py sosreport-4.7.0/sos/report/plugins/opencontrail.py --- sosreport-4.5.6/sos/report/plugins/opencontrail.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/opencontrail.py 2024-02-20 00:39:46.000000000 +0000 @@ -16,7 +16,10 @@ plugin_name = 'opencontrail' profiles = ("network",) packages = ('opencontrail',) - containers = ('opencontrail.*',) + containers = ( + 'opencontrail.*', + 'vrouter.*', + ) def setup(self): # assuming the container names will start with "opencontrail" diff -Nru sosreport-4.5.6/sos/report/plugins/openshift_ovn.py sosreport-4.7.0/sos/report/plugins/openshift_ovn.py --- sosreport-4.5.6/sos/report/plugins/openshift_ovn.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openshift_ovn.py 2024-02-20 00:39:46.000000000 +0000 @@ -16,18 +16,35 @@ """ short_desc = 'Openshift OVN' plugin_name = "openshift_ovn" - containers = ('ovnkube-master', 'ovnkube-node', 'ovn-ipsec') + containers = ('ovnkube-master', 'ovnkube-node', 'ovn-ipsec', + 'ovnkube-controller') profiles = ('openshift',) def setup(self): + all_logs = self.get_option("all_logs") + self.add_copy_spec([ "/var/lib/ovn/etc/ovnnb_db.db", "/var/lib/ovn/etc/ovnsb_db.db", - "/var/lib/openvswitch/etc/keys", - "/var/log/openvswitch/libreswan.log", - "/var/log/openvswitch/ovs-monitor-ipsec.log" - ]) + "/var/lib/openvswitch/etc/keys" + ], sizelimit=300) + + # Collect ovn interconnect specific db files if exists. + self.add_copy_spec([ + "/var/lib/ovn-ic/etc/ovnnb_db.db", + "/var/lib/ovn-ic/etc/ovnsb_db.db" + ], sizelimit=300) + + # Collect libovsdb logs in case of ovn interconnect setup. + if not all_logs: + self.add_copy_spec([ + "/var/lib/ovn-ic/etc/libovsdb.log", + "/var/lib/ovn-ic/etc/libovsdb*log.gz" + ], sizelimit=100) + else: + self.add_copy_spec("/var/lib/ovn-ic/etc/libovsdb*log*") + # The ovn cluster/status is not valid anymore for interconnect setup. self.add_cmd_output([ 'ovn-appctl -t /var/run/ovn/ovnnb_db.ctl ' + 'cluster/status OVN_Northbound', @@ -39,6 +56,14 @@ 'ct-zone-list'], container='ovnkube-node') self.add_cmd_output([ + 'ovs-appctl -t /var/run/ovn/ovn-controller.*.ctl ' + + 'ct-zone-list'], + container='ovnkube-controller') + # Collect ovs ct-zone-list directly on host for interconnect setup. 
+ self.add_cmd_output([ + 'ovs-appctl -t /var/run/ovn-ic/ovn-controller.*.ctl ' + + 'ct-zone-list']) + self.add_cmd_output([ 'ovs-appctl -t ovs-monitor-ipsec tunnels/show', 'ipsec status', 'certutil -L -d sql:/etc/ipsec.d'], diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_cinder.py sosreport-4.7.0/sos/report/plugins/openstack_cinder.py --- sosreport-4.5.6/sos/report/plugins/openstack_cinder.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_cinder.py 2024-02-20 00:39:46.000000000 +0000 @@ -13,6 +13,7 @@ # See the LICENSE file in the source distribution for further information. from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin +import os class OpenStackCinder(Plugin): @@ -48,6 +49,66 @@ "cinder-manage " + cinder_config + " db version", suggest_filename="cinder_db_version" ) + self.add_cmd_output( + f"cinder-manage {cinder_config} backup list" + ) + self.add_cmd_output( + f"cinder-manage {cinder_config} config list" + ) + self.add_cmd_output( + f"cinder-manage {cinder_config} host list" + ) + self.add_cmd_output( + f"cinder-status {cinder_config} upgrade check" + ) + + vars_all = [p in os.environ for p in [ + 'OS_USERNAME', 'OS_PASSWORD']] + + vars_any = [p in os.environ for p in [ + 'OS_TENANT_NAME', 'OS_PROJECT_NAME']] + + if not (all(vars_all) and any(vars_any)): + self.soslog.warning("Not all environment variables set. " + "Source the environment file for the user " + "intended to connect to the OpenStack " + "environment.") + else: + list_cmds = [ + "backend pool", + "group type", + "message", + "qos", + "service", + "type", + ] + + for cmd in list_cmds: + self.add_cmd_output(f"openstack volume {cmd} list") + + list_cmds_projects = [ + "backup", + "group", + "group snapshot", + "snapshot", + "transfer request", + "", + ] + + for cmd in list_cmds_projects: + self.add_cmd_output( + f"openstack volume {cmd} list --all-projects" + ) + + # get details for each volume + cmd = "openstack volume list -f value --all-projects" + res = self.exec_cmd(cmd) + if res['status'] == 0: + cinder_volumes = res['output'] + for volume in cinder_volumes.splitlines(): + volume = volume.split()[0] + cmd = f"openstack volume show {volume}" + self.add_cmd_output(cmd) self.add_forbidden_path('/etc/cinder/volumes') self.add_copy_spec([ diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_designate.py sosreport-4.7.0/sos/report/plugins/openstack_designate.py --- sosreport-4.5.6/sos/report/plugins/openstack_designate.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_designate.py 2024-02-20 00:39:46.000000000 +0000 @@ -17,6 +17,7 @@ profiles = ('openstack', 'openstack_controller') var_puppet_gen = "/var/lib/config-data/puppet-generated/designate" + var_ansible_gen = "/var/lib/config-data/ansible-generated" def setup(self): # collect current pool config @@ -32,6 +33,9 @@ "/etc/designate/*", self.var_puppet_gen + "/etc/designate/designate.conf", self.var_puppet_gen + "/etc/designate/pools.yaml", + self.var_ansible_gen + "/designate/etc/designate/named.conf", + self.var_ansible_gen + "/designate/etc/designate/named/*", + self.var_ansible_gen + "/unbound/*" ]) # logs @@ -39,11 +43,15 @@ self.add_copy_spec([ "/var/log/designate/*", "/var/log/containers/designate/*", + "/var/log/containers/designate-bind/*", + "/var/log/containers/unbound/*" ]) else: self.add_copy_spec([ "/var/log/designate/*.log", - "/var/log/containers/designate/*.log" + "/var/log/containers/designate/*.log", + 
"/var/log/containers/designate-bind/*.log", + "/var/log/containers/unbound/*.log" ]) subcmds = [ diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_edpm.py sosreport-4.7.0/sos/report/plugins/openstack_edpm.py --- sosreport-4.5.6/sos/report/plugins/openstack_edpm.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_edpm.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright (C) 2023 Red Hat, Inc., Roberto Alfieri + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, RedHatPlugin + + +class OpenStackEDPM(Plugin, RedHatPlugin): + + short_desc = 'Installation information from OpenStack EDPM deployment' + + plugin_name = 'openstack_edpm' + profiles = ('openstack', 'openstack_edpm') + services = ('edpm-container-shutdown') + + def setup(self): + # Notes: recursion is max 2 for edpm-config + # Those directories are present on all OpenStack nodes + self.edpm_log_paths = [ + '/var/lib/edpm-config/' + ] + self.add_copy_spec(self.edpm_log_paths) + + def postproc(self): + # Ensures we do not leak passwords from the edpm related locations + # Other locations don't have sensitive data. + regexp = r'(".*(key|password|pass|secret|database_connection))' \ + r'([":\s]+)(.*[^"])([",]+)' + for path in self.edpm_log_paths: + self.do_path_regex_sub(path, regexp, r'\1\3*********\5') + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_glance.py sosreport-4.7.0/sos/report/plugins/openstack_glance.py --- sosreport-4.5.6/sos/report/plugins/openstack_glance.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_glance.py 2024-02-20 00:39:46.000000000 +0000 @@ -70,7 +70,16 @@ "intended to connect to the OpenStack " "environment.") else: - self.add_cmd_output("openstack image list --long") + res = self.collect_cmd_output( + "openstack image list --long" + ) + + if res['status'] == 0: + glance_images = res['output'] + for image in glance_images.splitlines()[3:-1]: + image = image.split()[1] + cmd = f"openstack image show {image}" + self.add_cmd_output(cmd) self.add_file_tags({ "/etc/glance/glance-api.conf": "glance_api_conf", @@ -117,7 +126,7 @@ 'python-glance', 'python3-glance', ) - service_name = 'glance-api.service' + service_name = 'apache2.service' class RedHatGlance(OpenStackGlance, RedHatPlugin): diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_heat.py sosreport-4.7.0/sos/report/plugins/openstack_heat.py --- sosreport-4.5.6/sos/report/plugins/openstack_heat.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_heat.py 2024-02-20 00:39:46.000000000 +0000 @@ -52,7 +52,21 @@ "intended to connect to the OpenStack " "environment.") else: - self.add_cmd_output("openstack stack list") + self.add_cmd_output("openstack stack list --all-projects " + "--nested") + + res = self.collect_cmd_output( + "openstack stack list --all-projects" + ) + + if res['status'] == 0: + heat_stacks = res['output'] + for stack in heat_stacks.splitlines()[3:-1]: + stack = stack.split()[1] + cmd = f"openstack stack show {stack}" + self.add_cmd_output(cmd) + cmd = f"openstack stack resource list {stack} -n 10" + self.add_cmd_output(cmd) if 
self.get_option("all_logs"): self.add_copy_spec([ diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_masakarimonitors.py sosreport-4.7.0/sos/report/plugins/openstack_masakarimonitors.py --- sosreport-4.5.6/sos/report/plugins/openstack_masakarimonitors.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_masakarimonitors.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,57 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, UbuntuPlugin + + +class OpenStackMasakariMonitors(Plugin, UbuntuPlugin): + + short_desc = 'OpenStack Masakari Monitors' + plugin_name = "openstack_masakarimonitors" + profiles = ('openstack', 'openstack_controller') + + packages = ('masakari-monitors-common', ) + + services = ( + 'masakari-host-monitor', + 'masakari-instance-monitor', + 'masakari-process-monitor', + ) + + config_dir = "/etc/masakarimonitors" + + def setup(self): + + self.add_copy_spec([ + self.config_dir, + ]) + + if self.get_option("all_logs"): + self.add_copy_spec([ + "/var/log/masakarimonitors/*", + ]) + else: + self.add_copy_spec([ + "/var/log/masakarimonitors/*.log", + ]) + + self.add_file_tags({ + f"{self.config_dir}/masakarimonitors.conf": "masakarimonitors_conf" + }) + + def postproc(self): + protect_keys = [".*password.*"] + + self.do_path_regex_sub( + f"{self.config_dir}/*", + r"(^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys), + r"\1*********" + ) + + +# vim: et ts=4 sw=4 diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_masakari.py sosreport-4.7.0/sos/report/plugins/openstack_masakari.py --- sosreport-4.5.6/sos/report/plugins/openstack_masakari.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_masakari.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,74 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. 
+ +from sos.report.plugins import Plugin, UbuntuPlugin + + +class OpenStackMasakari(Plugin, UbuntuPlugin): + + short_desc = 'OpenStack Masakari' + plugin_name = "openstack_masakari" + profiles = ('openstack', 'openstack_controller') + + packages = ( + 'masakari-engine', + 'masakari-api', + 'python3-masakari', + ) + + services = ('masakari-engine', ) + + config_dir = "/etc/masakari" + + def setup(self): + + masakari_cmd = "masakari-manage --config-dir "\ + f"{self.config_dir} db version" + self.add_cmd_output( + masakari_cmd, + suggest_filename="masakari_db_version" + ) + + self.add_copy_spec([ + self.config_dir, + ]) + + if self.get_option("all_logs"): + self.add_copy_spec([ + "/var/log/masakari/*", + "/var/log/apache2/masakari*", + ]) + else: + self.add_copy_spec([ + "/var/log/masakari/*.log", + "/var/log/apache2/masakari*.log", + ]) + + self.add_file_tags({ + f"{self.config_dir}/masakari.conf": "masakari_conf" + }) + + def postproc(self): + protect_keys = [".*password.*", "transport_url", + "memcache_secret_key", "rabbit_password"] + connection_keys = ["connection", "sql_connection"] + + self.do_path_regex_sub( + f"{self.config_dir}/*", + r"(^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys), + r"\1*********" + ) + self.do_path_regex_sub( + f"{self.config_dir}/*", + r"(^\s*(%s)\s*=\s*(.*)://(\w*):)(.*)(@(.*))" % + "|".join(connection_keys), + r"\1*********\6" + ) + + +# vim: et ts=4 sw=4 diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_neutron.py sosreport-4.7.0/sos/report/plugins/openstack_neutron.py --- sosreport-4.5.6/sos/report/plugins/openstack_neutron.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_neutron.py 2024-02-20 00:39:46.000000000 +0000 @@ -17,7 +17,8 @@ short_desc = 'OpenStack Networking' plugin_name = "openstack_neutron" - profiles = ('openstack', 'openstack_controller', 'openstack_compute') + profiles = ('openstack', 'openstack_controller', + 'openstack_compute', 'openstack_edpm') var_puppet_gen = "/var/lib/config-data/puppet-generated/neutron" @@ -54,14 +55,26 @@ "the environment file for the user intended " "to connect to the OpenStack environment.") else: - self.add_cmd_output("openstack subnet list") - self.add_cmd_output("openstack port list") - self.add_cmd_output("openstack router list") - self.add_cmd_output("openstack network agent list") - self.add_cmd_output("openstack network list") - self.add_cmd_output("openstack extension list") - self.add_cmd_output("openstack floating ip list") - self.add_cmd_output("openstack security group list") + cmds = [ + "subnet", + "port", + "router", + "network agent", + "network", + "extension", + "floating ip", + "security group", + ] + + for cmd in cmds: + res = self.collect_cmd_output(f"openstack {cmd} list") + + if res['status'] == 0: + neutron_items = res['output'] + for item in neutron_items.splitlines()[3:-1]: + item = item.split()[1] + show_cmd = f"openstack {cmd} show {item}" + self.add_cmd_output(show_cmd) self.add_file_tags({ ".*/etc/neutron/plugins/ml2/ml2_conf.ini": "neutronml2_conf", @@ -125,9 +138,15 @@ class RedHatNeutron(OpenStackNeutron, RedHatPlugin): packages = ('openstack-selinux',) + var_ansible_gen = "/var/lib/config-data/ansible-generated/" def setup(self): super(RedHatNeutron, self).setup() - self.add_copy_spec("/etc/sudoers.d/neutron-rootwrap") + self.add_copy_spec([ + "/etc/sudoers.d/neutron-rootwrap", + self.var_ansible_gen + "/neutron-dhcp-agent/", + self.var_ansible_gen + "/neutron-dhcp-ovn/", + self.var_ansible_gen + "/neutron-sriov-agent/" + ]) # vim: set
et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_nova.py sosreport-4.7.0/sos/report/plugins/openstack_nova.py --- sosreport-4.5.6/sos/report/plugins/openstack_nova.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_nova.py 2024-02-20 00:39:46.000000000 +0000 @@ -22,7 +22,8 @@ short_desc = 'OpenStack Nova' plugin_name = "openstack_nova" - profiles = ('openstack', 'openstack_controller', 'openstack_compute') + profiles = ('openstack', 'openstack_controller', + 'openstack_compute', 'openstack_edpm') containers = ('.*nova_api',) var_puppet_gen = "/var/lib/config-data/puppet-generated/nova" @@ -70,24 +71,25 @@ "intended to connect to the OpenStack " "environment.") else: - self.add_cmd_output("nova service-list") + self.add_cmd_output("openstack compute service list") self.add_cmd_output("openstack flavor list --long") - self.add_cmd_output("nova network-list") - self.add_cmd_output("nova list --all-tenants") - self.add_cmd_output("nova agent-list") + self.add_cmd_output("openstack compute agent list") self.add_cmd_output("nova version-list") - self.add_cmd_output("nova hypervisor-list") + self.add_cmd_output("openstack hypervisor list") self.add_cmd_output("openstack quota show") self.add_cmd_output("openstack hypervisor stats show") + + res = self.collect_cmd_output( + "openstack server list --all-projects" + ) + # get details for each nova instance - cmd = "openstack server list -f value" - nova_instances = self.exec_cmd(cmd)['output'] - for instance in nova_instances.splitlines(): - instance = instance.split()[0] - cmd = "openstack server show %s" % (instance) - self.add_cmd_output( - cmd, - suggest_filename="instance-" + instance + ".log") + if res['status'] == 0: + nova_instances = res['output'] + for instance in nova_instances.splitlines()[3:-1]: + instance = instance.split()[1] + cmd = f"openstack server show {instance}" + self.add_cmd_output(cmd) if self.get_option("all_logs"): self.add_copy_spec([ @@ -150,7 +152,8 @@ "xenapi_connection_password", "password", "host_password", "vnc_password", "admin_password", "connection_password", "memcache_secret_key", "s3_secret_key", - "metadata_proxy_shared_secret", "fixed_key", "transport_url" + "metadata_proxy_shared_secret", "fixed_key", "transport_url", + "rbd_secret_uuid" ] connection_keys = ["connection", "sql_connection"] @@ -215,15 +218,24 @@ "/etc/polkit-1/localauthority/50-local.d/50-nova.pkla", "/etc/sudoers.d/nova", "/etc/security/limits.d/91-nova.conf", - "/etc/sysconfig/openstack-nova-novncproxy" + "/etc/sysconfig/openstack-nova-novncproxy", + "/var/lib/openstack/config/nova", + "/var/lib/openstack/containers/nova*.json" ]) + if self.get_option("all_logs"): self.add_copy_spec([ "/var/log/httpd/placement*", + "/var/log/containers/nova/*" ]) else: self.add_copy_spec([ "/var/log/httpd/placement*.log", + "/var/log/containers/nova/*.log" ]) + self.add_forbidden_path([ + "/var/lib/openstack/config/nova/ssh-privatekey" + ]) + # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/openstack_placement.py sosreport-4.7.0/sos/report/plugins/openstack_placement.py --- sosreport-4.5.6/sos/report/plugins/openstack_placement.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openstack_placement.py 2024-02-20 00:39:46.000000000 +0000 @@ -9,6 +9,7 @@ # See the LICENSE file in the source distribution for further information. 
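The glance, heat, neutron and nova hunks above all slice command results with splitlines()[3:-1] and split()[1]. That works because, without "-f value", the openstack client prints an ASCII table: lines 0 to 2 are the top border and header, the final line is the bottom border, and in each data row the first token is "|" and the second is the ID. A toy illustration with fabricated output:

    # fabricated "openstack server list" table, for illustration only
    sample = (
        "+----------+--------+\n"
        "| ID       | Name   |\n"
        "+----------+--------+\n"
        "| aaa-111  | vm-one |\n"
        "| bbb-222  | vm-two |\n"
        "+----------+--------+"
    )

    for row in sample.splitlines()[3:-1]:
        instance = row.split()[1]  # token 0 is '|', token 1 is the ID column
        print(f"openstack server show {instance}")
    # prints: openstack server show aaa-111
    #         openstack server show bbb-222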
from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin +import os class OpenStackPlacement(Plugin): @@ -39,6 +40,37 @@ suggest_filename="placement-manage_db_version" ) + vars_all = [p in os.environ for p in [ + 'OS_USERNAME', 'OS_PASSWORD']] + + vars_any = [p in os.environ for p in [ + 'OS_TENANT_NAME', 'OS_PROJECT_NAME']] + + if not (all(vars_all) and any(vars_any)): + self.soslog.warning("Not all environment variables set. " + "Source the environment file for the user " + "intended to connect to the OpenStack " + "environment.") + else: + res = self.collect_cmd_output( + "openstack resource provider list" + ) + + if res['status'] == 0: + resource_provider_list = res['output'] + for provider in resource_provider_list.splitlines()[3:-1]: + res_provider = provider.split()[1] + sub_cmds = [ + "inventory", + "trait", + "aggregate", + ] + self.add_cmd_output([ + f"openstack resource provider {sub_cmd} list " + f"{res_provider}" + for sub_cmd in sub_cmds + ]) + if self.get_option("all_logs"): self.add_copy_spec([ "/var/log/placement/", diff -Nru sosreport-4.5.6/sos/report/plugins/opensvc.py sosreport-4.7.0/sos/report/plugins/opensvc.py --- sosreport-4.5.6/sos/report/plugins/opensvc.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/opensvc.py 2024-02-20 00:39:46.000000000 +0000 @@ -53,6 +53,7 @@ "om net status --verbose --color=no", "om mon --color=no", "om daemon dns dump --color=no", + "om daemon relay status --color=no", "om daemon status --format flat_json --color=no" ]) self.get_status('vol') diff -Nru sosreport-4.5.6/sos/report/plugins/openvswitch.py sosreport-4.7.0/sos/report/plugins/openvswitch.py --- sosreport-4.5.6/sos/report/plugins/openvswitch.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/openvswitch.py 2024-02-20 00:39:46.000000000 +0000 @@ -102,7 +102,10 @@ "dpdk_nic_bind --status", "dpdk-devbind.py --status", "driverctl list-devices", + "driverctl -v list-devices", "driverctl list-overrides", + "driverctl -v list-overrides", + "driverctl list-persisted", # Capture a list of all bond devices "ovs-appctl bond/list", # Capture more details from bond devices @@ -129,6 +132,8 @@ # Capture OVS datapath list "ovs-vsctl -t 5 list datapath", # Capture DPDK queue to pmd mapping + "ovs-appctl dpif-netdev/pmd-rxq-show -secs 5", + "ovs-appctl dpif-netdev/pmd-rxq-show -secs 30", "ovs-appctl dpif-netdev/pmd-rxq-show", # Capture DPDK pmd stats "ovs-appctl dpif-netdev/pmd-stats-show", @@ -149,7 +154,15 @@ # Capture dpif implementations "ovs-appctl dpif-netdev/dpif-impl-get", # Capture miniflow extract implementations - "ovs-appctl dpif-netdev/miniflow-parser-get" + "ovs-appctl dpif-netdev/miniflow-parser-get", + # Capture DPDK pmd sleep config + "ovs-appctl dpif-netdev/pmd-sleep-show", + # Capture additional DPDK info + "ovs-appctl dpdk/lcore-list", + "ovs-appctl dpdk/log-list", + "ovs-appctl dpdk/get-malloc-stats", + # Capture dpdk mempool info + "ovs-appctl netdev-dpdk/get-mempool-info" ]) # Capture DPDK and other parameters self.add_cmd_output("ovs-vsctl -t 5 get Open_vSwitch . 
other_config", diff -Nru sosreport-4.5.6/sos/report/plugins/ovn_central.py sosreport-4.7.0/sos/report/plugins/ovn_central.py --- sosreport-4.5.6/sos/report/plugins/ovn_central.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ovn_central.py 2024-02-20 00:39:46.000000000 +0000 @@ -51,12 +51,12 @@ if res['status'] != 0: self._log_error("Could not retrieve DB schema file from " "container %s" % self._container_name) - return + return None try: db = json.loads(res['output']) except Exception: self._log_error("Cannot parse JSON file %s" % filename) - return + return None else: try: with open(self.path_join(filename), 'r') as f: @@ -65,16 +65,17 @@ except Exception: self._log_error( "Cannot parse JSON file %s" % filename) - return + return None except IOError as ex: self._log_error( "Could not open DB schema file %s: %s" % (filename, ex)) - return + return None try: return [table for table in dict.keys( db['tables']) if table not in skip] except AttributeError: self._log_error("DB schema %s has no 'tables' key" % filename) + return None def add_database_output(self, tables, cmds, ovn_cmd): if not tables: diff -Nru sosreport-4.5.6/sos/report/plugins/ovn_host.py sosreport-4.7.0/sos/report/plugins/ovn_host.py --- sosreport-4.5.6/sos/report/plugins/ovn_host.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ovn_host.py 2024-02-20 00:39:46.000000000 +0000 @@ -24,7 +24,7 @@ short_desc = 'OVN Controller' plugin_name = "ovn_host" - profiles = ('network', 'virt') + profiles = ('network', 'virt', 'openstack_edpm') def setup(self): if os.environ.get('OVS_RUNDIR'): @@ -56,6 +56,22 @@ class RedHatOVNHost(OVNHost, RedHatPlugin): packages = ('openvswitch-ovn-host', 'ovn.*-host', ) + var_ansible_gen = "/var/lib/config-data/ansible-generated/ovn-bgp-agent" + + def setup(self): + super(RedHatOVNHost, self).setup() + self.add_copy_spec([ + self.var_ansible_gen, + ]) + + if self.get_option("all_logs"): + self.add_copy_spec([ + "/var/log/containers/ovn-bgp-agent/", + ]) + else: + self.add_copy_spec([ + "/var/log/containers/ovn-bgp-agent/*.log", + ]) class DebianOVNHost(OVNHost, DebianPlugin, UbuntuPlugin): diff -Nru sosreport-4.5.6/sos/report/plugins/pacemaker.py sosreport-4.7.0/sos/report/plugins/pacemaker.py --- sosreport-4.5.6/sos/report/plugins/pacemaker.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/pacemaker.py 2024-02-20 00:39:46.000000000 +0000 @@ -8,6 +8,7 @@ from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin, PluginOpt) +from sos.utilities import parse_version from datetime import datetime, timedelta import re @@ -42,13 +43,23 @@ ]) def setup_pcs(self): + pcs_pkg = self.policy.package_manager.pkg_by_name('pcs') + if pcs_pkg is None: + return + self.add_copy_spec("/var/log/pcsd/pcsd.log") self.add_cmd_output([ "pcs stonith sbd status --full", "pcs stonith sbd watchdog list", "pcs stonith history show", - "pcs property list --all" ]) + + pcs_version = '.'.join(pcs_pkg['version']) + if parse_version(pcs_version) > parse_version('0.10.8'): + self.add_cmd_output("pcs property config --all") + else: + self.add_cmd_output("pcs property list --all") + self.add_cmd_output("pcs config", tags="pcs_config") self.add_cmd_output("pcs quorum status", tags="pcs_quorum_status") self.add_cmd_output("pcs status --full", tags="pcs_status") diff -Nru sosreport-4.5.6/sos/report/plugins/pam.py sosreport-4.7.0/sos/report/plugins/pam.py --- sosreport-4.5.6/sos/report/plugins/pam.py 2023-07-20 22:38:49.000000000 +0100 
+++ sosreport-4.7.0/sos/report/plugins/pam.py 2024-02-20 00:39:46.000000000 +0000 @@ -41,6 +41,7 @@ def setup(self): super(RedHatPam, self).setup() + self.add_cmd_output(["authselect current"]) class DebianPam(Pam, DebianPlugin, UbuntuPlugin): diff -Nru sosreport-4.5.6/sos/report/plugins/pcp.py sosreport-4.7.0/sos/report/plugins/pcp.py --- sosreport-4.5.6/sos/report/plugins/pcp.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/pcp.py 2024-02-20 00:39:46.000000000 +0000 @@ -47,9 +47,8 @@ def pcp_parse_conffile(self): try: - pcpconf = open(self.pcp_conffile, "r") - lines = pcpconf.readlines() - pcpconf.close() + with open(self.pcp_conffile, "r") as pcpconf: + lines = pcpconf.readlines() except IOError: return False env_vars = {} diff -Nru sosreport-4.5.6/sos/report/plugins/powerpc.py sosreport-4.7.0/sos/report/plugins/powerpc.py --- sosreport-4.5.6/sos/report/plugins/powerpc.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/powerpc.py 2024-02-20 00:39:46.000000000 +0000 @@ -40,6 +40,7 @@ "/proc/version", "/dev/nvram", "/var/lib/lsvpd/", + "/var/log/lp_diag.log", "/etc/ct_node_id" ]) self.add_cmd_output([ @@ -72,6 +73,7 @@ "/var/log/drmgr", "/var/log/drmgr.0", "/var/log/hcnmgr", + "/var/log/rtas_errd.log", "/var/ct/IBM.DRM.stderr", "/var/ct/IW/log/mc/IBM.DRM/trace*" ]) @@ -115,6 +117,9 @@ "/var/log/opal-prd", "/var/log/opal-prd.log*" ]) + self.add_cmd_output([ + "opal-prd --expert-mode run nvdimm_info" + ]) if self.path_isdir("/var/log/dump"): self.add_cmd_output("ls -l /var/log/dump") diff -Nru sosreport-4.5.6/sos/report/plugins/pulpcore.py sosreport-4.7.0/sos/report/plugins/pulpcore.py --- sosreport-4.5.6/sos/report/plugins/pulpcore.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/pulpcore.py 2024-02-20 00:39:46.000000000 +0000 @@ -45,34 +45,35 @@ return val try: - # split the lines to "one option per line" format - for line in open("/etc/pulp/settings.py").read() \ - .replace(',', ',\n').replace('{', '{\n') \ - .replace('}', '\n}').splitlines(): - # skip empty lines and lines with comments - if not line or line[0] == '#': - continue - if line.startswith("DATABASES"): - databases_scope = True - continue - # example HOST line to parse: - # 'HOST': 'localhost', - pattern = r"\s*['|\"]%s['|\"]\s*:\s*\S+" - if databases_scope and match(pattern % 'HOST', line): - self.dbhost = separate_value(line) - if databases_scope and match(pattern % 'PORT', line): - self.dbport = separate_value(line) - if databases_scope and match(pattern % 'NAME', line): - self.dbname = separate_value(line) - if databases_scope and match(pattern % 'PASSWORD', line): - self.dbpasswd = separate_value(line) - # if line contains closing '}' database_scope end - if databases_scope and '}' in line: - databases_scope = False - if line.startswith("STATIC_ROOT = "): - self.staticroot = separate_value(line, sep='=') - if line.startswith("CHUNKED_UPLOAD_DIR = "): - self.uploaddir = separate_value(line, sep='=') + with open("/etc/pulp/settings.py", 'r') as pfile: + # split the lines to "one option per line" format + for line in pfile.read() \ + .replace(',', ',\n').replace('{', '{\n') \ + .replace('}', '\n}').splitlines(): + # skip empty lines and lines with comments + if not line or line[0] == '#': + continue + if line.startswith("DATABASES"): + databases_scope = True + continue + # example HOST line to parse: + # 'HOST': 'localhost', + pattern = r"\s*['|\"]%s['|\"]\s*:\s*\S+" + if databases_scope and match(pattern % 'HOST', line): + self.dbhost = 
separate_value(line) + if databases_scope and match(pattern % 'PORT', line): + self.dbport = separate_value(line) + if databases_scope and match(pattern % 'NAME', line): + self.dbname = separate_value(line) + if databases_scope and match(pattern % 'PASSWORD', line): + self.dbpasswd = separate_value(line) + # if line contains closing '}' database_scope end + if databases_scope and '}' in line: + databases_scope = False + if line.startswith("STATIC_ROOT = "): + self.staticroot = separate_value(line, sep='=') + if line.startswith("CHUNKED_UPLOAD_DIR = "): + self.uploaddir = separate_value(line, sep='=') except IOError: # fallback when the cfg file is not accessible pass @@ -143,29 +144,18 @@ return _dbcmd % (self.dbhost, self.dbport, self.dbname, quote(query)) def postproc(self): - # TODO obfuscate from /etc/pulp/settings.py : + # obfuscate from /etc/pulp/settings.py and "dynaconf list": # SECRET_KEY = "eKfeDkTnvss7p5WFqYdGPWxXfHnsbDBx" # 'PASSWORD': 'tGrag2DmtLqKLTWTQ6U68f6MAhbqZVQj', + # AUTH_LDAP_BIND_PASSWORD = 'ouch-a-secret' # the PASSWORD can be also in an one-liner list, so detect its value # in non-greedy manner till first ',' or '}' - self.do_path_regex_sub( - "/etc/pulp/settings.py", - r"(SECRET_KEY\s*=\s*)(.*)", - r"\1********") - self.do_path_regex_sub( - "/etc/pulp/settings.py", - r"(PASSWORD\S*\s*:\s*)(.*?)(,|\})", - r"\1********\3") - # apply the same for "dynaconf list" output that prints settings.py - # in a pythonic format - self.do_cmd_output_sub( - "dynaconf list", - r"(SECRET_KEY\s*)'(.*)'", - r"\1********") - self.do_cmd_output_sub( - "dynaconf list", - r"(PASSWORD\S*\s*:\s*)(.*)", - r"\1********") + key_pass_re = r"((?:SECRET_KEY|AUTH_LDAP_BIND_PASSWORD)" \ + r"(?:\<.+\>)?(\s*=)?|(password|PASSWORD)" \ + r"(\"|'|:)+)\s*(\S*)" + repl = r"\1 ********" + self.do_path_regex_sub("/etc/pulp/settings.py", key_pass_re, repl) + self.do_cmd_output_sub("dynaconf list", key_pass_re, repl) # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/pulp.py sosreport-4.7.0/sos/report/plugins/pulp.py --- sosreport-4.5.6/sos/report/plugins/pulp.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/pulp.py 2024-02-20 00:39:46.000000000 +0000 @@ -45,7 +45,9 @@ self.messaging_cert_file = "" in_messaging_section = False try: - for line in open("/etc/pulp/server.conf").read().splitlines(): + with open("/etc/pulp/server.conf", 'r') as pfile: + pulp_lines = pfile.read().splitlines() + for line in pulp_lines: if match(r"^\s*seeds:\s+\S+:\S+", line): uri = line.split()[1].split(',')[0].split(':') self.dbhost = uri[0] @@ -168,10 +170,13 @@ repl = r"\1********" self.do_path_regex_sub("/etc/pulp(.*)(.json$)", jreg, repl) - # obfuscate SECRET_KEY = .. and 'PASSWORD': .. in dynaconf list output - # and also in settings.py + # obfuscate SECRET_KEY = .., 'PASSWORD': .., + # and AUTH_LDAP_BIND_PASSWORD = .. 
+ # in dynaconf list output and also in settings.py # count with option that PASSWORD is with(out) quotes or in capitals - key_pass_re = r"(SECRET_KEY\s*=|(password|PASSWORD)(\"|'|:)+)\s*(\S*)" + key_pass_re = r"((?:SECRET_KEY|AUTH_LDAP_BIND_PASSWORD)" \ + r"(?:\<.+\>)?(\s*=)?|(password|PASSWORD)" \ + r"(\"|'|:)+)\s*(\S*)" repl = r"\1 ********" self.do_path_regex_sub("/etc/pulp/settings.py", key_pass_re, repl) self.do_cmd_output_sub("dynaconf list", key_pass_re, repl) diff -Nru sosreport-4.5.6/sos/report/plugins/release.py sosreport-4.7.0/sos/report/plugins/release.py --- sosreport-4.5.6/sos/report/plugins/release.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/release.py 2024-02-20 00:39:46.000000000 +0000 @@ -6,7 +6,7 @@ # # See the LICENSE file in the source distribution for further information. -from sos.report.plugins import Plugin, RedHatPlugin,\ +from sos.report.plugins import Plugin, RedHatPlugin, \ DebianPlugin, UbuntuPlugin, CosPlugin diff -Nru sosreport-4.5.6/sos/report/plugins/rhui.py sosreport-4.7.0/sos/report/plugins/rhui.py --- sosreport-4.5.6/sos/report/plugins/rhui.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/rhui.py 2024-02-20 00:39:46.000000000 +0000 @@ -28,6 +28,7 @@ "/var/cache/rhui/*", "/root/.rhui/*", "/var/log/rhui/*", + "/var/log/rhui-installer/*", ]) # skip collecting certificate keys self.add_forbidden_path("/etc/pki/rhui/**/*.key") diff -Nru sosreport-4.5.6/sos/report/plugins/rpmostree.py sosreport-4.7.0/sos/report/plugins/rpmostree.py --- sosreport-4.5.6/sos/report/plugins/rpmostree.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/rpmostree.py 2024-02-20 00:39:46.000000000 +0000 @@ -22,7 +22,7 @@ self.add_copy_spec('/etc/ostree/remotes.d/') subcmds = [ - 'status -v', + 'status --json', 'kargs', 'db list', 'db diff', diff -Nru sosreport-4.5.6/sos/report/plugins/saltmaster.py sosreport-4.7.0/sos/report/plugins/saltmaster.py --- sosreport-4.5.6/sos/report/plugins/saltmaster.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/saltmaster.py 2024-02-20 00:39:46.000000000 +0000 @@ -5,6 +5,8 @@ # version 2 of the GNU General Public License. # # See the LICENSE file in the source distribution for further information. +import glob +import yaml from sos.report.plugins import Plugin, IndependentPlugin @@ -26,7 +28,35 @@ self.add_copy_spec("/etc/salt") self.add_forbidden_path("/etc/salt/pki/*/*.pem") - self.add_cmd_output("salt-key --list all") + + self.add_pillar_roots() + self.add_cmd_output([ + "salt-master --version", + "systemctl --full status salt-master", + "systemctl --full status salt-api", + "salt-key --list all", + "salt-run jobs.list_jobs --out=yaml", + "salt-run manage.list_state --out=yaml", + "salt-run manage.list_not_state --out=yaml", + "salt-run manage.joined --out=yaml", + ], timeout=30) + + def add_pillar_roots(self): + cfgs = glob.glob("/etc/salt/master.d/*conf") + main_cfg = "/etc/salt/master" + + if self.path_exists(main_cfg): + cfgs.append(main_cfg) + + all_pillar_roots = [] + for cfg in cfgs: + with open(cfg, "r") as f: + cfg_pillar_roots = ( + yaml.safe_load(f).get("pillar_roots", {}).get("base", []) + ) + all_pillar_roots.extend(cfg_pillar_roots) + + self.add_copy_spec(all_pillar_roots) def postproc(self): regexp = r'(^\s+.*(pass|secret|(?<![A-Z])key).*:\s+)(.*)' diff -Nru sosreport-4.5.6/sos/report/plugins/sos_extras.py sosreport-4.7.0/sos/report/plugins/sos_extras.py --- sosreport-4.5.6/sos/report/plugins/sos_extras.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/sos_extras.py 2024-02-20 00:39:46.000000000 +0000 @@ -56,27 +56,28 @@ _file = self.path_join(path, f) self._log_warn("Collecting data from extras file %s" % _file) try: - for line in open(_file).read().splitlines(): - # ignore empty lines or comments - if len(line.split()) == 0 or line.startswith('#'): - continue - # lines starting by ':' specify file pattern to collect - # optionally followed by sizelimit - if line.startswith(':'): - words = line.split() - limit = None - if len(words) > 1: - try: - limit = int(words[1]) - except ValueError: - self._log_warn("Can't decode integer" " sizelimit on line '%s'" " in file %s, using" " default."
- % (line, _file)) - self.add_copy_spec(words[0][1:], sizelimit=limit) - else: - # command to execute - self.add_cmd_output(line, subdir=f) + with open(_file, 'r') as sfile: + for line in sfile.read().splitlines(): + # ignore empty lines or comments + if len(line.split()) == 0 or line.startswith('#'): + continue + # lines starting by ':' specify file pattern to + # collect optionally followed by sizelimit + if line.startswith(':'): + words = line.split() + limit = None + if len(words) > 1: + try: + limit = int(words[1]) + except ValueError: + self._log_warn( + f"Can't decode size limit on line" + f"{line} in {_file}, using default" + ) + self.add_copy_spec(words[0][1:], + sizelimit=limit) + else: + # command to execute + self.add_cmd_output(line, subdir=f) except IOError: self._log_warn("unable to read extras file %s" % _file) diff -Nru sosreport-4.5.6/sos/report/plugins/targetcli.py sosreport-4.7.0/sos/report/plugins/targetcli.py --- sosreport-4.5.6/sos/report/plugins/targetcli.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/targetcli.py 2024-02-20 00:39:46.000000000 +0000 @@ -23,9 +23,13 @@ "targetcli ls", "targetcli status", ]) + sys_conf_dir = '/sys/kernel/config/target' + self.add_forbidden_path([ + self.path_join(sys_conf_dir, '**/password*'), + ]) self.add_service_status("target") self.add_journal(units="targetcli") - self.add_copy_spec("/sys/kernel/config/target") + self.add_copy_spec(sys_conf_dir) self.add_copy_spec("/etc/target") # vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/tpm2.py sosreport-4.7.0/sos/report/plugins/tpm2.py --- sosreport-4.5.6/sos/report/plugins/tpm2.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/tpm2.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,32 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, IndependentPlugin + + +class TPM2(Plugin, IndependentPlugin): + """ + Collects information about TPM2 module installed on host system. + + This plugin will capture data using tpm2_tools + """ + + short_desc = 'Trusted Platform Module 2.0' + plugin_name = 'tpm2' + profiles = ('security', 'system', 'storage', 'hardware') + packages = ('tpm2-tools',) + + def setup(self): + self.add_cmd_output([ + 'tpm2_getcap properties-variable', + 'tpm2_getcap properties-fixed', + 'tpm2_nvreadpublic', + 'tpm2_readclock' + ]) + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/ubuntu.py sosreport-4.7.0/sos/report/plugins/ubuntu.py --- sosreport-4.5.6/sos/report/plugins/ubuntu.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ubuntu.py 2024-02-20 00:39:46.000000000 +0000 @@ -6,7 +6,7 @@ # # See the LICENSE file in the source distribution for further information. 
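The rewritten sos_extras loop above parses a small directive format: within each file under the extras directory, a line starting with ":" names a file pattern to copy, optionally followed by an integer size limit, while any other non-comment line is executed and its output collected via add_cmd_output(). A hypothetical extras file (the path and tool names are invented for illustration) that this parser would accept:

    # /etc/sos/extras.d/exampletool -- hypothetical example only
    # ':' prefix => copy spec; the optional second word is a size limit
    :/var/log/exampletool/*.log 50
    :/etc/exampletool/
    # any other non-comment line is run and its output collected
    exampletool-cli status --verbose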
-from sos.report.plugins import Plugin, UbuntuPlugin +from sos.report.plugins import Plugin, UbuntuPlugin, SoSPredicate from sos.utilities import is_executable @@ -30,8 +30,11 @@ ua_tools_status = 'pro status' else: ua_tools_status = 'ubuntu-advantage status' - self.add_cmd_output(ua_tools_status) - self.add_cmd_output("%s --format json" % ua_tools_status) + ua_pred = SoSPredicate(self, kmods=['tls']) + self.add_cmd_output(ua_tools_status, + pred=ua_pred, changes=True) + self.add_cmd_output("%s --format json" % ua_tools_status, + pred=ua_pred, changes=True) if not self.get_option("all_logs"): self.add_copy_spec([ diff -Nru sosreport-4.5.6/sos/report/plugins/ultrapath.py sosreport-4.7.0/sos/report/plugins/ultrapath.py --- sosreport-4.5.6/sos/report/plugins/ultrapath.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/ultrapath.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,40 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, RedHatPlugin + + +class UltraPath(Plugin, RedHatPlugin): + + short_desc = 'HUAWEI UltraPath' + + plugin_name = 'ultrapath' + profiles = ('storage', 'hardware') + packages = ('UltraPath',) + kernel_mods = ('nxup', 'nxupext_a') + + def setup(self): + """ Huawei UltraPath specific information - commands + """ + self.add_cmd_output([ + "upadm show version", + "upadm show connectarray", + "upadm show option", + "upadm show upconfig", + "upadm show diskarray", + "upadmin show vlun", + ]) + + result = self.collect_cmd_output('upadm show path') + if result['status'] == 0: + for line in result['output'].splitlines(): + if line.startswith("Array ID :"): + self.add_cmd_output("upadm show lun array=%s" % + line.split(':')[1].strip()) + +# vim: set et ts=4 sw=4 : diff -Nru sosreport-4.5.6/sos/report/plugins/vectordev.py sosreport-4.7.0/sos/report/plugins/vectordev.py --- sosreport-4.5.6/sos/report/plugins/vectordev.py 1970-01-01 01:00:00.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/vectordev.py 2024-02-20 00:39:46.000000000 +0000 @@ -0,0 +1,42 @@ +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, IndependentPlugin + + +class VectorDev(Plugin, IndependentPlugin): + + short_desc = 'A tool for building observability pipelines' + + plugin_name = "vectordev" + profiles = ('observability',) + files = ('/etc/vector/',) + + def setup(self): + self.add_copy_spec([ + "/etc/vector/" + ]) + + def postproc(self): + + vector_config_path = "/etc/vector/*" + protect_keys = [ + "auth.password", + "auth.token", + "tls.key_pass", + ] + + # Redact yaml and ini style "key (:|=) value". 
+ keys_regex = r"(^\s*(%s)\s*(:|=)\s*)(.*)" % "|".join(protect_keys) + sub_regex = r"\1*********" + self.do_path_regex_sub(vector_config_path, keys_regex, sub_regex) + # Redact certificates + self.do_file_private_sub(vector_config_path) + + +# vim: et ts=4 sw=4 diff -Nru sosreport-4.5.6/sos/report/plugins/virsh.py sosreport-4.7.0/sos/report/plugins/virsh.py --- sosreport-4.5.6/sos/report/plugins/virsh.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/virsh.py 2024-02-20 00:39:46.000000000 +0000 @@ -50,7 +50,8 @@ # get network, pool and nwfilter elements for k in ['net', 'nwfilter', 'pool']: - k_list = self.collect_cmd_output('%s %s-list' % (cmd, k), + k_list = self.collect_cmd_output('%s %s-list %s' % (cmd, k, '--all' + if k in ['net', 'pool'] else ''), foreground=True) if k_list['status'] == 0: k_lines = k_list['output'].splitlines() diff -Nru sosreport-4.5.6/sos/report/plugins/zvm.py sosreport-4.7.0/sos/report/plugins/zvm.py --- sosreport-4.5.6/sos/report/plugins/zvm.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/report/plugins/zvm.py 2024-02-20 00:39:46.000000000 +0000 @@ -6,7 +6,7 @@ # # See the LICENSE file in the source distribution for further information. -from sos.report.plugins import Plugin, IndependentPlugin, SoSPredicate +from sos.report.plugins import Plugin, IndependentPlugin from sos.utilities import is_executable @@ -18,9 +18,6 @@ def setup(self): - zvm_pred = SoSPredicate(self, kmods=['vmcp', 'cpint']) - self.set_cmd_predicate(zvm_pred) - self.vm_cmd = None for cmd in self.commands: if is_executable(cmd): diff -Nru sosreport-4.5.6/sos/utilities.py sosreport-4.7.0/sos/utilities.py --- sosreport-4.5.6/sos/utilities.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos/utilities.py 2024-02-20 00:39:46.000000000 +0000 @@ -23,9 +23,9 @@ from collections import deque try: - from pkg_resources import parse_version as version_parse -except SyntaxError: - from packaging.version import parse as version_parse + from packaging.version import parse as parse_version +except ImportError: + from pkg_resources import parse_version # try loading magic>=0.4.20 which implements detect_from_filename method magic_mod = False @@ -46,6 +46,32 @@ TIMEOUT_DEFAULT = 300 +__all__ = [ + 'TIMEOUT_DEFAULT', + 'ImporterHelper', + 'SoSTimeoutError', + 'TempFileUtil', + 'bold', + 'file_is_binary', + 'fileobj', + 'find', + 'get_human_readable', + 'grep', + 'import_module', + 'is_executable', + 'listdir', + 'parse_version', + 'path_exists', + 'path_isdir', + 'path_isfile', + 'path_islink', + 'path_join', + 'recursive_dict_values_by_key', + 'shell_out', + 'sos_get_command_output', + 'tail', +] + def tail(filename, number_of_bytes): """Returns the last number_of_bytes of filename""" @@ -282,7 +308,12 @@ be subclasses of the specified superclass or superclasses. 
If superclasses is plural it must be a tuple of classes.""" module_name = module_fqname.rpartition(".")[-1] - module = __import__(module_fqname, globals(), locals(), [module_name]) + try: + module = __import__(module_fqname, globals(), locals(), [module_name]) + except ImportError as e: + print(f'Error while trying to load module {module_fqname}: ' + f' {e.__class__.__name__}') + raise e modules = [class_ for cname, class_ in inspect.getmembers(module, inspect.isclass) if class_.__module__ == module_fqname] @@ -413,12 +444,6 @@ return [d for d in _items if d not in _filt] -def parse_version(version): - """Parse the version string - """ - return version_parse(version) - - class FakeReader(): """Used as a replacement AsyncReader for when we are writing directly to disk, and allows us to keep more simplified flows for executing, @@ -533,8 +558,7 @@ pnames = self._get_plugins_from_list(py_files) if pnames: return pnames - else: - return [] + return [] def get_modules(self): """Returns the list of importable modules in the configured python diff -Nru sosreport-4.5.6/sos.spec sosreport-4.7.0/sos.spec --- sosreport-4.5.6/sos.spec 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/sos.spec 2024-02-20 00:39:46.000000000 +0000 @@ -1,6 +1,6 @@ Summary: A set of tools to gather troubleshooting information from a system Name: sos -Version: 4.5.6 +Version: 4.7.0 Release: 1%{?dist} Source0: https://github.com/sosreport/sos/archive/%{name}-%{version}.tar.gz License: GPL-2.0-or-later @@ -10,7 +10,11 @@ BuildRequires: python3-setuptools Requires: python3-rpm Requires: python3-pexpect +%if 0%{?rhel} && 0%{?rhel} < 10 Requires: python3-setuptools +%else +Requires: python3-packaging +%endif Recommends: python3-magic # Mandatory just for uploading to a SFTP server: Recommends: python3-requests @@ -18,6 +22,8 @@ Obsoletes: sos-collector <= 1.9 # For the _tmpfilesdir macro. 
BuildRequires: systemd +# Mandatory just for uploading to an S3 bucket: +Recommends: python3-boto3 %description Sos is a set of tools that gathers information about system @@ -86,6 +92,15 @@ %config(noreplace) %{_sysconfdir}/sos/sos.conf %changelog +* Mon Feb 19 2024 Jake Hunsaker = 4.7.0 +- New upstream release + +* Wed Jan 10 2024 Pavel Moravec = 4.6.1 +- New upstream release + +* Thu Aug 17 2023 Jake Hunsaker = 4.6.0 +- New upstream release + * Thu Jul 20 2023 Jake Hunsaker = 4.5.6 - New upstream release diff -Nru sosreport-4.5.6/tests/report_tests/timeout/timeout_tests.py sosreport-4.7.0/tests/report_tests/timeout/timeout_tests.py --- sosreport-4.5.6/tests/report_tests/timeout/timeout_tests.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/tests/report_tests/timeout/timeout_tests.py 2024-02-20 00:39:46.000000000 +0000 @@ -20,7 +20,7 @@ def test_correct_plugin_timeout(self): man = self.get_plugin_manifest('timeout_test') - self.assertEquals(man['timeout'], 10) + self.assertEqual(man['timeout'], 10) def test_plugin_timed_out(self): self.assertSosLogNotContains('collected plugin \'timeout_test\' in') @@ -41,9 +41,9 @@ def test_correct_plugin_timeout(self): man = self.get_plugin_manifest('timeout_test') - self.assertEquals(man['timeout'], 100) + self.assertEqual(man['timeout'], 100) hman = self.get_plugin_manifest('host') - self.assertEquals(hman['timeout'], 300) + self.assertEqual(hman['timeout'], 300) def test_plugin_completed(self): self.assertSosLogContains('collected plugin \'timeout_test\' in') @@ -64,6 +64,6 @@ def test_correct_plugin_timeout(self): man = self.get_plugin_manifest('timeout_test') - self.assertEquals(man['timeout'], 60) + self.assertEqual(man['timeout'], 60) hman = self.get_plugin_manifest('host') - self.assertEquals(hman['timeout'], 30) + self.assertEqual(hman['timeout'], 30) diff -Nru sosreport-4.5.6/tests/sos_tests.py sosreport-4.7.0/tests/sos_tests.py --- sosreport-4.5.6/tests/sos_tests.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/tests/sos_tests.py 2024-02-20 00:39:46.000000000 +0000 @@ -390,11 +390,16 @@ raise return _archive - def grep_for_content(self, search): + def grep_for_content(self, search, regexp=False): """Call out to grep for finding a specific string 'search' in any place in the archive + + :param search: string to search + :param regexp: use regular expression search (default False + means "grep -F") """ - cmd = "grep -ril '%s' %s" % (search, self.archive_path) + fixed_opt = "" if regexp else "F" + cmd = "grep -ril%s '%s' %s" % (fixed_opt, search, self.archive_path) try: out = process.run(cmd) rc = out.exit_status diff -Nru sosreport-4.5.6/tests/test_data/foreman_setup.sh sosreport-4.7.0/tests/test_data/foreman_setup.sh --- sosreport-4.5.6/tests/test_data/foreman_setup.sh 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/tests/test_data/foreman_setup.sh 2024-02-20 00:39:46.000000000 +0000 @@ -10,7 +10,7 @@ else dnf -y install https://yum.puppet.com/puppet6-release-el-8.noarch.rpm dnf -y install https://yum.theforeman.org/releases/$FOREMAN_VER/el8/x86_64/foreman-release.rpm - dnf -y module enable ruby:2.7 + dnf -y module enable ruby:2.7 postgresql:12 foreman:el8 fi dnf -y install foreman-installer && SUCCESS=1 elif grep -iq debian /etc/os-release; then diff -Nru sosreport-4.5.6/tests/unittests/archive_tests.py sosreport-4.7.0/tests/unittests/archive_tests.py --- sosreport-4.5.6/tests/unittests/archive_tests.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/tests/unittests/archive_tests.py 2024-02-20 
00:39:46.000000000 +0000 @@ -92,7 +92,7 @@ self.tf.add_string('this is my content', 'tests/string_test.txt') afp = self.tf.open_file('tests/string_test.txt') - self.assertEquals('this is my content', afp.read()) + self.assertEqual('this is my content', afp.read()) def test_rewrite_file(self): """Test that re-writing a file with add_string() modifies the content. @@ -101,7 +101,7 @@ self.tf.add_string('this is my new content', 'tests/string_test.txt') afp = self.tf.open_file('tests/string_test.txt') - self.assertEquals('this is my new content', afp.read()) + self.assertEqual('this is my new content', afp.read()) def test_make_link(self): self.tf.add_file('tests/ziptest') diff -Nru sosreport-4.5.6/tests/unittests/cleaner_tests.py sosreport-4.7.0/tests/unittests/cleaner_tests.py --- sosreport-4.5.6/tests/unittests/cleaner_tests.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/tests/unittests/cleaner_tests.py 2024-02-20 00:39:46.000000000 +0000 @@ -50,12 +50,12 @@ def test_mac_map_skip_ignores(self): _test = self.mac_map.get('ff:ff:ff:ff:ff:ff') - self.assertEquals(_test, 'ff:ff:ff:ff:ff:ff') + self.assertEqual(_test, 'ff:ff:ff:ff:ff:ff') def test_mac_map_avoid_duplicate_obfuscation(self): _test = self.mac_map.get('ab:cd:ef:fe:dc:ba') _dup = self.mac_map.get(_test) - self.assertEquals(_test, _dup) + self.assertEqual(_test, _dup) def test_ip_map_obfuscate_v4_with_cidr(self): _test = self.ip_map.get('192.168.1.0/24') @@ -77,7 +77,7 @@ def test_ip_skip_ignores(self): _test = self.ip_map.get('127.0.0.1') - self.assertEquals(_test, '127.0.0.1') + self.assertEqual(_test, '127.0.0.1') def test_hostname_obfuscate_domain_options(self): _test = self.host_map.get('www.redhat.com') @@ -164,6 +164,7 @@ self.kw_parser.generate_item_regexes() self.uname_parser = SoSUsernameParser(config={}) self.uname_parser.mapping.add('DOMAIN\myusername') + self.uname_parser.mapping.add('foo') def test_ip_parser_valid_ipv4_line(self): line = 'foobar foo 10.0.0.1/24 barfoo bar' @@ -246,6 +247,11 @@ _test = self.kw_parser.parse_line(line)[0] self.assertNotEqual(line, _test) + def test_keyword_parser_fullword_only(self): + line = 'notfoobar and foobars line' + _test = self.kw_parser.parse_line(line)[0] + self.assertEqual(line, _test) + def test_keyword_parser_no_change_by_default(self): line = 'this is my foobar test line' _test = self.kw_parser_none.parse_line(line)[0] @@ -280,6 +286,11 @@ _test = self.uname_parser.parse_line(line)[0] self.assertNotEqual(line, _test) + def test_too_short_username(self): + line = "but foo is too short username" + _test = self.uname_parser.parse_line(line)[0] + self.assertEqual(line, _test) + class PrepperTests(unittest.TestCase): """ diff -Nru sosreport-4.5.6/tests/unittests/juju/juju_cluster_tests.py sosreport-4.7.0/tests/unittests/juju/juju_cluster_tests.py --- sosreport-4.5.6/tests/unittests/juju/juju_cluster_tests.py 2023-07-20 22:38:49.000000000 +0100 +++ sosreport-4.7.0/tests/unittests/juju/juju_cluster_tests.py 2024-02-20 00:39:46.000000000 +0000 @@ -40,6 +40,10 @@ } +def get_juju_version(): + return "2.9.45" + + def test_parse_option_string(): result = _parse_option_string(" a,b,c") assert result == ["a", "b", "c"] @@ -68,10 +72,16 @@ assert nodes == [] @patch( + "sos.collector.clusters.juju.juju._get_juju_version", + side_effect=get_juju_version, + ) + @patch( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) - def test_get_nodes_app_filter(self, mock_exec_primary_cmd): + def test_get_nodes_app_filter( + self, mock_exec_primary_cmd, 
mock_get_juju_version
+    ):
         """Application filter."""
         mock_opts = MockOptions()
         mock_opts.cluster_options.append(
@@ -96,10 +106,16 @@
     )

     @patch(
+        "sos.collector.clusters.juju.juju._get_juju_version",
+        side_effect=get_juju_version,
+    )
+    @patch(
         "sos.collector.clusters.juju.juju.exec_primary_cmd",
         side_effect=get_juju_status,
     )
-    def test_get_nodes_app_regex_filter(self, mock_exec_primary_cmd):
+    def test_get_nodes_app_regex_filter(
+        self, mock_exec_primary_cmd, mock_get_juju_version
+    ):
         """Application filter."""
         mock_opts = MockOptions()
         mock_opts.cluster_options.append(
@@ -124,11 +140,15 @@
     )

     @patch(
+        "sos.collector.clusters.juju.juju._get_juju_version",
+        side_effect=get_juju_version,
+    )
+    @patch(
         "sos.collector.clusters.juju.juju.exec_primary_cmd",
         side_effect=get_juju_status,
     )
     def test_get_nodes_model_filter_multiple_models(
-        self, mock_exec_primary_cmd
+        self, mock_exec_primary_cmd, mock_get_juju_version
     ):
         """Multiple model filter."""
         mock_opts = MockOptions()
@@ -171,10 +191,16 @@
     )

     @patch(
+        "sos.collector.clusters.juju.juju._get_juju_version",
+        side_effect=get_juju_version,
+    )
+    @patch(
         "sos.collector.clusters.juju.juju.exec_primary_cmd",
         side_effect=get_juju_status,
     )
-    def test_get_nodes_model_filter(self, mock_exec_primary_cmd):
+    def test_get_nodes_model_filter(
+        self, mock_exec_primary_cmd, mock_get_juju_version
+    ):
         """Model filter."""
         mock_opts = MockOptions()
         mock_opts.cluster_options.append(
@@ -213,10 +239,16 @@
     )

     @patch(
+        "sos.collector.clusters.juju.juju._get_juju_version",
+        side_effect=get_juju_version,
+    )
+    @patch(
         "sos.collector.clusters.juju.juju.exec_primary_cmd",
         side_effect=get_juju_status,
     )
-    def test_get_nodes_unit_filter(self, mock_exec_primary_cmd):
+    def test_get_nodes_unit_filter(
+        self, mock_exec_primary_cmd, mock_get_juju_version
+    ):
         """Node filter."""
         mock_opts = MockOptions()
         mock_opts.cluster_options.append(
@@ -238,10 +270,16 @@
         assert nodes == [":0", ":2"]

     @patch(
+        "sos.collector.clusters.juju.juju._get_juju_version",
+        side_effect=get_juju_version,
+    )
+    @patch(
         "sos.collector.clusters.juju.juju.exec_primary_cmd",
         side_effect=get_juju_status,
     )
-    def test_get_nodes_machine_filter(self, mock_exec_primary_cmd):
+    def test_get_nodes_machine_filter(
+        self, mock_exec_primary_cmd, mock_get_juju_version
+    ):
         """Machine filter."""
         mock_opts = MockOptions()
         mock_opts.cluster_options.append(
@@ -264,10 +302,14 @@
         assert nodes == [":0", ":2"]

     @patch(
+        "sos.collector.clusters.juju.juju._get_juju_version",
+        side_effect=get_juju_version,
+    )
+    @patch(
         "sos.collector.clusters.juju.juju.exec_primary_cmd",
         side_effect=get_juju_status,
     )
-    def test_subordinates(self, mock_exec_primary_cmd):
+    def test_subordinates(self, mock_exec_primary_cmd, mock_get_juju_version):
         """Subordinate filter."""
         mock_opts = MockOptions()
         mock_opts.cluster_options.append(
diff -Nru sosreport-4.5.6/tests/unittests/option_tests.py sosreport-4.7.0/tests/unittests/option_tests.py
--- sosreport-4.5.6/tests/unittests/option_tests.py 2023-07-20 22:38:49.000000000 +0100
+++ sosreport-4.7.0/tests/unittests/option_tests.py 2024-02-20 00:39:46.000000000 +0000
@@ -45,10 +45,10 @@
         self.plugin = MockPlugin(self.commons)

     def test_simple_lookup(self):
-        self.assertEquals(self.plugin.get_option('test_option'), 'foobar')
+        self.assertEqual(self.plugin.get_option('test_option'), 'foobar')

     def test_cascade(self):
-        self.assertEquals(self.plugin.get_option(('baz')), False)
+        self.assertEqual(self.plugin.get_option(('baz')), False)


 if __name__ == "__main__":
diff -Nru sosreport-4.5.6/tests/unittests/plugin_tests.py sosreport-4.7.0/tests/unittests/plugin_tests.py
--- sosreport-4.5.6/tests/unittests/plugin_tests.py 2023-07-20 22:38:49.000000000 +0100
+++ sosreport-4.7.0/tests/unittests/plugin_tests.py 2024-02-20 00:39:46.000000000 +0000
@@ -133,36 +133,36 @@
             ['this is only a test', 'there are only two lines'])
         test_fo = StringIO(test_s)
         matches = regex_findall(r".*lines$", test_fo)
-        self.assertEquals(matches, ['there are only two lines'])
+        self.assertEqual(matches, ['there are only two lines'])

     def test_regex_findall_miss(self):
         test_s = u"\n".join(
             ['this is only a test', 'there are only two lines'])
         test_fo = StringIO(test_s)
         matches = regex_findall(r".*not_there$", test_fo)
-        self.assertEquals(matches, [])
+        self.assertEqual(matches, [])

     def test_regex_findall_bad_input(self):
         matches = regex_findall(r".*", None)
-        self.assertEquals(matches, [])
+        self.assertEqual(matches, [])
         matches = regex_findall(r".*", [])
-        self.assertEquals(matches, [])
+        self.assertEqual(matches, [])
         matches = regex_findall(r".*", 1)
-        self.assertEquals(matches, [])
+        self.assertEqual(matches, [])

     def test_mangle_command(self):
         name_max = 255
-        self.assertEquals("foo", _mangle_command("/usr/bin/foo", name_max))
-        self.assertEquals(
+        self.assertEqual("foo", _mangle_command("/usr/bin/foo", name_max))
+        self.assertEqual(
             "foo_-x", _mangle_command("/usr/bin/foo -x", name_max))
-        self.assertEquals(
+        self.assertEqual(
             "foo_--verbose",
             _mangle_command("/usr/bin/foo --verbose", name_max))
-        self.assertEquals("foo_.path.to.stuff", _mangle_command(
+        self.assertEqual("foo_.path.to.stuff", _mangle_command(
             "/usr/bin/foo /path/to/stuff", name_max))
         longcmd = "foo is " + "a" * 256 + " long_command"
         expected = longcmd[0:name_max].replace(' ', '_')
-        self.assertEquals(expected, _mangle_command(longcmd, name_max))
+        self.assertEqual(expected, _mangle_command(longcmd, name_max))


 class PluginTests(unittest.TestCase):
@@ -185,7 +185,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.name(), "mockplugin")
+        self.assertEqual(p.name(), "mockplugin")

     def test_plugin_set_name(self):
         p = NamedMockPlugin({
@@ -194,7 +194,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.name(), "testing")
+        self.assertEqual(p.name(), "testing")

     def test_plugin_no_descrip(self):
         p = MockPlugin({
@@ -203,7 +203,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.get_description(), "")
+        self.assertEqual(p.get_description(), "")

     def test_plugin_has_descrip(self):
         p = NamedMockPlugin({
@@ -212,7 +212,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.get_description(),
+        self.assertEqual(p.get_description(),
                           "This plugin has a description.")

     def test_set_plugin_option(self):
@@ -223,7 +223,7 @@
             'devices': {}
         })
         p.set_option("opt", "testing")
-        self.assertEquals(p.get_option("opt"), "testing")
+        self.assertEqual(p.get_option("opt"), "testing")

     def test_set_nonexistant_plugin_option(self):
         p = MockPlugin({
@@ -241,7 +241,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.get_option("badopt"), 0)
+        self.assertEqual(p.get_option("badopt"), 0)

     def test_get_unset_plugin_option(self):
         p = MockPlugin({
@@ -250,7 +250,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.get_option("opt"), None)
+        self.assertEqual(p.get_option("opt"), None)

     def test_get_unset_plugin_option_with_default(self):
         # this shows that even when we pass in a default to get,
@@ -262,7 +262,7 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.get_option("opt", True), True)
+        self.assertEqual(p.get_option("opt", True), True)

     def test_get_unset_plugin_option_with_default_not_none(self):
         # this shows that even when we pass in a default to get,
@@ -275,17 +275,17 @@
             'cmdlineopts': MockOptions(),
             'devices': {}
         })
-        self.assertEquals(p.get_option("opt2", True), False)
+        self.assertEqual(p.get_option("opt2", True), False)

     def test_copy_dir(self):
         self.mp._do_copy_path("tests")
-        self.assertEquals(
+        self.assertEqual(
             self.mp.archive.m["tests/unittests/plugin_tests.py"],
             'tests/unittests/plugin_tests.py')

     def test_copy_dir_bad_path(self):
         self.mp._do_copy_path("not_here_tests")
-        self.assertEquals(self.mp.archive.m, {})
+        self.assertEqual(self.mp.archive.m, {})

     def test_copy_dir_forbidden_path(self):
         p = ForbiddenMockPlugin({
@@ -297,7 +297,7 @@
         p.archive = MockArchive()
         p.setup()
         p.collect_plugin()
-        self.assertEquals(p.archive.m, {})
+        self.assertEqual(p.archive.m, {})

     def test_postproc_default_on(self):
         p = PostprocMockPlugin({
@@ -318,11 +318,11 @@
         })
         e = {'TORVALDS': 'Linus'}
         p.set_default_cmd_environment(e)
-        self.assertEquals(p.default_environment, e)
+        self.assertEqual(p.default_environment, e)
         add_e = {'GREATESTSPORT': 'hockey'}
         p.add_default_cmd_environment(add_e)
-        self.assertEquals(p.default_environment['GREATESTSPORT'], 'hockey')
-        self.assertEquals(p.default_environment['TORVALDS'], 'Linus')
+        self.assertEqual(p.default_environment['GREATESTSPORT'], 'hockey')
+        self.assertEqual(p.default_environment['TORVALDS'], 'Linus')


 class AddCopySpecTests(unittest.TestCase):
@@ -344,7 +344,7 @@
                 path = path[1:]
             return os.path.join(self.mp.sysroot, path)
         expected_paths = set(map(pathmunge, self.expect_paths))
-        self.assertEquals(self.mp.copy_paths, expected_paths)
+        self.assertEqual(self.mp.copy_paths, expected_paths)

     def test_single_file_no_limit(self):
         self.mp.add_copy_spec("tests/unittests/tail_test.txt")
@@ -361,7 +361,7 @@
         fname, _size = self.mp._tail_files_list[0]
         self.assertTrue(fname == fn)
         self.assertTrue("tmp" in fname)
-        self.assertEquals(1024 * 1024, _size)
+        self.assertEqual(1024 * 1024, _size)
         os.unlink(fn)

     def test_bad_filename(self):
@@ -379,7 +379,7 @@
         create_file(2, dir=tmpdir)
         create_file(2, dir=tmpdir)
         self.mp.add_copy_spec(tmpdir + "/*")
-        self.assertEquals(len(self.mp.copy_paths), 2)
+        self.assertEqual(len(self.mp.copy_paths), 2)
         shutil.rmtree(tmpdir)

     def test_glob_file_over_limit(self):
@@ -388,18 +388,18 @@
         create_file(2, dir=tmpdir)
         create_file(2, dir=tmpdir)
         self.mp.add_copy_spec(tmpdir + "/*", 1)
-        self.assertEquals(len(self.mp._tail_files_list), 1)
+        self.assertEqual(len(self.mp._tail_files_list), 1)
         fname, _size = self.mp._tail_files_list[0]
-        self.assertEquals(1024 * 1024, _size)
+        self.assertEqual(1024 * 1024, _size)
         shutil.rmtree(tmpdir)

     def test_multiple_files_no_limit(self):
         self.mp.add_copy_spec(['tests/unittests/tail_test.txt',
                                'tests/unittests/test.txt'])
-        self.assertEquals(len(self.mp.copy_paths), 2)
+        self.assertEqual(len(self.mp.copy_paths), 2)

     def test_multiple_files_under_limit(self):
         self.mp.add_copy_spec(['tests/unittests/tail_test.txt',
                                'tests/unittests/test.txt'], 1)
-        self.assertEquals(len(self.mp.copy_paths), 2)
+        self.assertEqual(len(self.mp.copy_paths), 2)


 class CheckEnabledTests(unittest.TestCase):
@@ -443,7 +443,7 @@
         self.mp.archive = MockArchive()

     def test_file_never_copied(self):
-        self.assertEquals(0, self.mp.do_file_sub(
+        self.assertEqual(0, self.mp.do_file_sub(
             "never_copied", r"^(.*)$", "foobar"))

     def test_no_replacements(self):
@@ -452,7 +452,7 @@
         self.mp.collect_plugin()
         replacements = self.mp.do_file_sub(
             j("tail_test.txt"), r"wont_match", "foobar")
-        self.assertEquals(0, replacements)
+        self.assertEqual(0, replacements)

     def test_replacements(self):
         # test uses absolute paths
@@ -461,7 +461,7 @@
         self.mp.collect_plugin()
         replacements = self.mp.do_file_sub(
             j("tail_test.txt"), r"(tail)", "foobar")
-        self.assertEquals(1, replacements)
+        self.assertEqual(1, replacements)
         self.assertTrue("foobar" in self.mp.archive.m.get(j('tail_test.txt')))
diff -Nru sosreport-4.5.6/tests/unittests/policy_tests.py sosreport-4.7.0/tests/unittests/policy_tests.py
--- sosreport-4.5.6/tests/unittests/policy_tests.py 2023-07-20 22:38:49.000000000 +0100
+++ sosreport-4.7.0/tests/unittests/policy_tests.py 2024-02-20 00:39:46.000000000 +0000
@@ -88,17 +88,17 @@
         self.pm = PackageManager()

     def test_default_all_pkgs(self):
-        self.assertEquals(self.pm.packages, {})
+        self.assertEqual(self.pm.packages, {})

     def test_default_all_pkgs_by_name(self):
-        self.assertEquals(self.pm.all_pkgs_by_name('doesntmatter'), [])
+        self.assertEqual(self.pm.all_pkgs_by_name('doesntmatter'), [])

     def test_default_all_pkgs_by_name_regex(self):
-        self.assertEquals(
+        self.assertEqual(
             self.pm.all_pkgs_by_name_regex('.*doesntmatter$'), [])

     def test_default_pkg_by_name(self):
-        self.assertEquals(self.pm.pkg_by_name('foo'), None)
+        self.assertEqual(self.pm.pkg_by_name('foo'), None)


 class RpmPackageManagerTests(unittest.TestCase):
@@ -115,7 +115,7 @@
         kpkg = self.pm.pkg_by_name('coreutils')
         self.assertIsInstance(kpkg, dict)
         self.assertIsInstance(kpkg['version'], list)
-        self.assertEquals(kpkg['pkg_manager'], 'rpm')
+        self.assertEqual(kpkg['pkg_manager'], 'rpm')


 class DpkgPackageManagerTests(unittest.TestCase):
@@ -132,7 +132,7 @@
         kpkg = self.pm.pkg_by_name('coreutils')
         self.assertIsInstance(kpkg, dict)
         self.assertIsInstance(kpkg['version'], list)
-        self.assertEquals(kpkg['pkg_manager'], 'dpkg')
+        self.assertEqual(kpkg['pkg_manager'], 'dpkg')


 class MultiPackageManagerTests(unittest.TestCase):
@@ -150,9 +150,9 @@
         self.assertIsInstance(kpkg['version'], list)
         _local = distro.detect().name
         if _local in ['Ubuntu', 'debian']:
-            self.assertEquals(kpkg['pkg_manager'], 'dpkg')
+            self.assertEqual(kpkg['pkg_manager'], 'dpkg')
         else:
-            self.assertEquals(kpkg['pkg_manager'], 'rpm')
+            self.assertEqual(kpkg['pkg_manager'], 'rpm')


 if __name__ == "__main__":
diff -Nru sosreport-4.5.6/tests/unittests/report_tests.py sosreport-4.7.0/tests/unittests/report_tests.py
--- sosreport-4.5.6/tests/unittests/report_tests.py 2023-07-20 22:38:49.000000000 +0100
+++ sosreport-4.7.0/tests/unittests/report_tests.py 2024-02-20 00:39:46.000000000 +0000
@@ -23,7 +23,7 @@

         expected = json.dumps({})

-        self.assertEquals(expected, str(report))
+        self.assertEqual(expected, str(report))

     def test_nested_section(self):
         report = Report()
@@ -32,7 +32,7 @@

         expected = json.dumps({"section": {}})

-        self.assertEquals(expected, str(report))
+        self.assertEqual(expected, str(report))

     def test_multiple_sections(self):
         report = Report()
@@ -45,7 +45,7 @@

         expected = json.dumps({"section": {}, "section2": {}, })

-        self.assertEquals(expected, str(report))
+        self.assertEqual(expected, str(report))

     def test_deeply_nested(self):
         report = Report()
@@ -61,7 +61,7 @@
                                     "return_code": 0,
                                     "href": "does/not/matter"}]}})

-        self.assertEquals(expected, str(report))
+        self.assertEqual(expected, str(report))


 class TestPlainReport(unittest.TestCase):
@@ -78,13 +78,13 @@
         ])

     def test_basic(self):
-        self.assertEquals(self.pluglist.format(pluglist=""),
+        self.assertEqual(self.pluglist.format(pluglist=""),
                           PlainTextReport(self.report).unicode())

     def test_one_section(self):
         self.report.add(self.section)

-        self.assertEquals(self.defaultheader,
+        self.assertEqual(self.defaultheader,
                           PlainTextReport(self.report).unicode() + '\n')

     def test_two_sections(self):
@@ -92,7 +92,7 @@
         section2 = Section(name="second")
         self.report.add(section1, section2)

-        self.assertEquals(u''.join([
+        self.assertEqual(u''.join([
             self.pluglist.format(pluglist=" first second"),
             self.div,
             "\nfirst",
@@ -108,7 +108,7 @@
         self.section.add(cmd)
         self.report.add(self.section)

-        self.assertEquals(u''.join([
+        self.assertEqual(u''.join([
             self.defaultheader,
             "- commands executed:\n * ls -al /foo/bar/baz"
         ]),
@@ -119,7 +119,7 @@
         self.section.add(cf)
         self.report.add(self.section)

-        self.assertEquals(u''.join([
+        self.assertEqual(u''.join([
             self.defaultheader,
             "- files copied:\n * /etc/hosts"
         ]),
@@ -131,7 +131,7 @@
         self.section.add(crf)
         self.report.add(self.section)

-        self.assertEquals(u''.join([
+        self.assertEqual(u''.join([
             self.defaultheader,
             "- files created:\n * sample.txt"
         ]),
@@ -142,7 +142,7 @@
         self.section.add(alrt)
         self.report.add(self.section)

-        self.assertEquals(u''.join([
+        self.assertEqual(u''.join([
             self.defaultheader,
             "- alerts:\n ! this is an alert"
         ]),
diff -Nru sosreport-4.5.6/tests/unittests/utilities_tests.py sosreport-4.7.0/tests/unittests/utilities_tests.py
--- sosreport-4.5.6/tests/unittests/utilities_tests.py 2023-07-20 22:38:49.000000000 +0100
+++ sosreport-4.7.0/tests/unittests/utilities_tests.py 2024-02-20 00:39:46.000000000 +0000
@@ -24,32 +24,32 @@
             ['this is only a test', 'there are only two lines'])
         test_fo = StringIO(test_s)
         matches = grep(".*test$", test_fo)
-        self.assertEquals(matches, ['this is only a test\n'])
+        self.assertEqual(matches, ['this is only a test\n'])

     def test_real_file(self):
         matches = grep(".*unittest$", __file__.replace(".pyc", ".py"))
-        self.assertEquals(matches, ['import unittest\n'])
+        self.assertEqual(matches, ['import unittest\n'])

     def test_open_file(self):
         matches = grep(".*unittest$", open(__file__.replace(".pyc", ".py")))
-        self.assertEquals(matches, ['import unittest\n'])
+        self.assertEqual(matches, ['import unittest\n'])

     def test_grep_multiple_files(self):
         matches = grep(".*unittest$", __file__.replace(".pyc", ".py"),
                        "does_not_exist.txt")
-        self.assertEquals(matches, ['import unittest\n'])
+        self.assertEqual(matches, ['import unittest\n'])


 class TailTest(unittest.TestCase):

     def test_tail(self):
         t = tail("tests/unittests/tail_test.txt", 10)
-        self.assertEquals(t, b"last line\n")
+        self.assertEqual(t, b"last line\n")

     def test_tail_too_many(self):
         t = tail("tests/unittests/tail_test.txt", 200)
         expected = open("tests/unittests/tail_test.txt", "r").read()
-        self.assertEquals(t, str.encode(expected))
+        self.assertEqual(t, str.encode(expected))


 class ExecutableTest(unittest.TestCase):
@@ -66,23 +66,23 @@

     def test_output(self):
         result = sos_get_command_output("echo executed")
-        self.assertEquals(result['status'], 0)
-        self.assertEquals(result['output'], "executed\n")
+        self.assertEqual(result['status'], 0)
+        self.assertEqual(result['output'], "executed\n")

     def test_output_non_exe(self):
         path = os.path.join(TEST_DIR, 'utility_tests.py')
         result = sos_get_command_output(path)
-        self.assertEquals(result['status'], 127)
-        self.assertEquals(result['output'], b"")
+        self.assertEqual(result['status'], 127)
+        self.assertEqual(result['output'], b"")

     def test_output_chdir(self):
         cmd = "/bin/bash -c 'echo $PWD'"
         result = sos_get_command_output(cmd, chdir=TEST_DIR)
-        self.assertEquals(result['status'], 0)
+        self.assertEqual(result['status'], 0)
         self.assertTrue(result['output'].strip().endswith(TEST_DIR))

     def test_shell_out(self):
-        self.assertEquals("executed\n", shell_out('echo executed'))
+        self.assertEqual("executed\n", shell_out('echo executed'))


 class FindTest(unittest.TestCase):