diff -Nru octavia-10.1.0/AUTHORS octavia-10.1.1/AUTHORS --- octavia-10.1.0/AUTHORS 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/AUTHORS 2024-02-08 14:49:00.000000000 +0000 @@ -56,6 +56,7 @@ Dao Cong Tien Dawson Coleman Deepak +Dmitriy Rabotyagov Dong Jun Doug Fish Doug Hellmann @@ -138,6 +139,7 @@ Martin Chlumsky Masayuki Igawa Matt Alline +Maximilian Stinsky Michael Johnson Michael Johnson Michal Arbet @@ -189,12 +191,14 @@ Stephen Balukoff Susanne Balle Sven Wegener +Takashi Kajinami Takashi Kajinami Tatsuma Matsuki Thobias Salazar Trevisan Thomas Bechtold Thomas Goirand Tin Lam +Tobias Urdin Tom Weininger Tony Breeds Trevor Vardeman diff -Nru octavia-10.1.0/ChangeLog octavia-10.1.1/ChangeLog --- octavia-10.1.0/ChangeLog 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/ChangeLog 2024-02-08 14:49:00.000000000 +0000 @@ -1,15 +1,46 @@ CHANGES ======= +10.1.1 +------ + +* Don't update the management port when calculating delta [v1] +* Don't update the management port when calculating delta +* Fix issue with certificates with no subject or CN +* Stable-only: Cap hacking to < 6.1.0 +* Fix TLS-HELLO healthmonitors in the amphora-driver +* Fix health-monitors with ALPN members +* Add Noop Certificate Manager +* Fix text/plain mime type with healthcheck endpoint +* Fix amphorav1 member deletion bug +* Fix create\_server\_group in compute noop +* Fix amphorae in ERROR during the failover +* Reduce duration of failovers with amphora in ERROR +* Fix timeout duration in start\_vrrp\_service during failovers +* Retry to set loadbalancer prov status on failures +* Fix race condition in members batch update API call +* Fix incorrect removal of IP rules in the amphora +* Drop lower-constraints.txt and its testing +* Fix persistence\_granularity default value +* Fix octavia-status with amphorav2 +* Fix upgrade check not working + 10.1.0 ------ +* Fix typo in lvs-masquerade.sh +* Fix job book deletion issue in taskflow +* fix haproxy\_count to display the number of running processes * Fix TCP HMs on UDP pools with SELinux * Fix hm operating status to ONLINE in single lb call * Avoid interface name collisions in the amphora * Fix pool creation with single LB create call * Send IP advertisements when plugging a new member subnet * Fix pep8 error +* Fix failover when the last listener is deleted +* Fix SQLAlchemy warning about conflict relationship with Tags +* Rename Context to RequestContext +* Pass config to castellan * Prevent incorrect reschedule of resumed tasks with jobboard * Fix serialization of lists in data\_models.to\_dict * Remove incorrect info message diff -Nru octavia-10.1.0/PKG-INFO octavia-10.1.1/PKG-INFO --- octavia-10.1.0/PKG-INFO 2023-07-19 12:06:09.059678800 +0000 +++ octavia-10.1.1/PKG-INFO 2024-02-08 14:49:01.176324100 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: octavia -Version: 10.1.0 +Version: 10.1.1 Summary: OpenStack Octavia Scalable Load Balancer as a Service Home-page: https://docs.openstack.org/octavia/latest/ Author: OpenStack diff -Nru octavia-10.1.0/debian/changelog octavia-10.1.1/debian/changelog --- octavia-10.1.0/debian/changelog 2023-08-07 16:30:20.000000000 +0000 +++ octavia-10.1.1/debian/changelog 2024-03-19 15:58:34.000000000 +0000 @@ -1,3 +1,9 @@ +octavia (1:10.1.1-0ubuntu1) jammy; urgency=medium + + * New upstream release for OpenStack Yoga. + + -- Nicolas Bock Tue, 19 Mar 2024 15:58:34 +0000 + octavia (1:10.1.0-0ubuntu1) jammy; urgency=medium * d/gbp.conf: Create stable/yoga branch. 
diff -Nru octavia-10.1.0/debian/patches/disable-sphinxcontrib.rsvgconverter.patch octavia-10.1.1/debian/patches/disable-sphinxcontrib.rsvgconverter.patch --- octavia-10.1.0/debian/patches/disable-sphinxcontrib.rsvgconverter.patch 2023-08-07 16:30:20.000000000 +0000 +++ octavia-10.1.1/debian/patches/disable-sphinxcontrib.rsvgconverter.patch 2024-03-19 15:57:44.000000000 +0000 @@ -2,9 +2,11 @@ Author: James Page Forwarded: not-needed ---- a/doc/source/conf.py -+++ b/doc/source/conf.py -@@ -67,7 +67,6 @@ extensions = [ +Index: octavia-10.1.1/doc/source/conf.py +=================================================================== +--- octavia-10.1.1.orig/doc/source/conf.py ++++ octavia-10.1.1/doc/source/conf.py +@@ -69,7 +69,6 @@ extensions = [ 'oslo_config.sphinxext', 'oslo_policy.sphinxpolicygen', 'sphinxcontrib.apidoc', diff -Nru octavia-10.1.0/doc/source/admin/guides/operator-maintenance.rst octavia-10.1.1/doc/source/admin/guides/operator-maintenance.rst --- octavia-10.1.0/doc/source/admin/guides/operator-maintenance.rst 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/doc/source/admin/guides/operator-maintenance.rst 2024-02-08 14:48:31.000000000 +0000 @@ -73,6 +73,14 @@ continuing to process traffic through the load balancer, but might not have applied the latest configuration updates yet. +A load balancer in a PENDING provisioning status is immutable: it cannot be +updated or deleted by another process, because the PENDING status acts as a +lock on the resource. +If a database outage occurs while a load balancer is being created, updated or +deleted, the Octavia control plane retries for a long period of time (around +2h45min with the default settings) to clear the PENDING status and set the +load balancer to ERROR, so that the resource does not remain immutable. 
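The "around 2h45min" figure follows from the new db_commit_retry_* defaults added later in this diff (octavia/common/config.py: 2000 attempts, a 1 second initial delay that grows by 1 second per retry, capped at 5 seconds) and the tenacity.wait_incrementing schedule that octavia/controller/worker/task_utils.py builds from them. A minimal sketch of the arithmetic, assuming that wait schedule:

    # Rough duration of the status-update retry loop with the new
    # db_commit_retry_* defaults (a sketch, not Octavia code).
    attempts = 2000       # db_commit_retry_attempts
    initial_delay = 1     # db_commit_retry_initial_delay (seconds)
    backoff = 1           # db_commit_retry_backoff (seconds added per retry)
    maximum = 5           # db_commit_retry_max (seconds)

    # tenacity sleeps between attempts, so there are attempts - 1 waits,
    # each of initial_delay + backoff * n seconds, capped at maximum.
    total = sum(min(initial_delay + backoff * n, maximum)
                for n in range(attempts - 1))
    print(total)          # 9985 seconds, i.e. the "around 2h45min" above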
+ Monitoring load balancer functionality -------------------------------------- diff -Nru octavia-10.1.0/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh octavia-10.1.1/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh --- octavia-10.1.0/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh 2024-02-08 14:48:31.000000000 +0000 @@ -88,10 +88,10 @@ nft flush chain ip octavia-ipv4 output nft delete chain ip octavia-ipv4 output elif [ "$2" == "ipv6" ]; then - nft flush chain ip6 octavia-ipv6 ip-udp-masq - nft delete chain ip6 octavia-ipv6 ip-udp-masq - nft flush chain ip6 octavia-ipv6 ip-sctp-masq - nft delete chain ip6 octavia-ipv6 ip-sctp-masq + nft flush chain ip6 octavia-ipv6 ip6-udp-masq + nft delete chain ip6 octavia-ipv6 ip6-udp-masq + nft flush chain ip6 octavia-ipv6 ip6-sctp-masq + nft delete chain ip6 octavia-ipv6 ip6-sctp-masq nft flush chain ip6 octavia-ipv6 prerouting nft delete chain ip6 octavia-ipv6 prerouting nft flush chain ip6 octavia-ipv6 output diff -Nru octavia-10.1.0/lower-constraints.txt octavia-10.1.1/lower-constraints.txt --- octavia-10.1.0/lower-constraints.txt 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/lower-constraints.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,178 +0,0 @@ -alabaster==0.7.10 -alembic==0.9.6 -amqp==5.0.2 -appdirs==1.4.3 -asn1crypto==0.24.0 -astroid==2.4.0 -automaton==1.14.0 -bandit==1.4.0 -bashate==0.5.1 -bcrypt==3.1.4 -beautifulsoup4==4.6.0 -cachetools==2.0.1 -castellan==0.16.0 -certifi==2018.1.18 -cffi==1.14.0 -chardet==3.0.4 -click==6.7 -cliff==2.11.0 -cmd2==0.8.1 -contextlib2==0.5.5 -cotyledon==1.3.0 -coverage==4.0 -cryptography==3.0 -debtcollector==1.19.0 -decorator==4.2.1 -deprecation==2.0 -diskimage-builder==2.24.0 -distro===1.2.0 -doc8==0.6.0 -docutils==0.14 -dogpile.cache==0.6.5 -enum-compat==0.0.2 -eventlet==0.20.0 -extras==1.0.0 -fasteners==0.14.1 -fixtures==3.0.0 -flake8==3.7.9 -flake8-import-order==0.12 -Flask==0.10 -future==0.16.0 -futurist==1.2.0 -futures==3.0.0 -gitdb2==2.0.3 -GitPython==2.1.8 -greenlet==0.4.13 -gunicorn==19.9.0 -hacking==3.0 -idna==2.6 -imagesize==1.0.0 -ipaddress==1.0.17 -iso8601==0.1.12 -itsdangerous==0.24 -Jinja2==2.10 -jmespath==0.9.3 -jsonpatch==1.21 -jsonpointer==2.0 -jsonschema==3.2.0 -kazoo==2.6.0 -keystoneauth1==3.4.0 -keystonemiddleware==4.17.0 -kombu==5.0.2 -linecache2==1.0.0 -logilab-common==1.4.1 -logutils==0.3.5 -Mako==1.0.7 -MarkupSafe==1.1.0 -mccabe==0.6.1 -monotonic==1.4 -mox3==0.25.0 -msgpack==0.5.6 -munch==2.2.0 -netaddr==0.7.19 -netifaces==0.10.4 -networkx==2.1.0 -octavia-lib==2.5.0 -openstacksdk==0.12.0 -os-client-config==1.29.0 -os-service-types==1.2.0 -osc-lib==1.10.0 -oslo.cache==1.29.0 -oslo.concurrency==3.26.0 -oslo.config==6.8.0 -oslo.context==2.22.0 -oslo.db==8.4.0 -oslo.i18n==3.20.0 -oslo.log==4.3.0 -oslo.messaging==12.4.0 -oslo.middleware==4.0.1 -oslo.policy==3.7.0 -oslo.reports==1.18.0 -oslo.serialization==2.28.1 -oslo.service==1.30.0 -oslo.upgradecheck==1.3.0 -oslo.utils==4.7.0 -oslotest==3.2.0 -packaging==20.4 -paramiko==2.4.1 -Paste==2.0.3 -PasteDeploy==1.5.2 -pbr==3.1.1 -pecan==1.3.2 -pep8==1.7.1 -pika==0.10.0 -pika-pool==0.1.3 -prettytable==0.7.2 -psutil==5.7.1 -pyasn1==0.1.8 -pyasn1-modules==0.0.6 -pycadf==2.7.0 -pycodestyle==2.5.0 -pycparser==2.18 -pydot==1.2.4 -pyflakes==2.1.0 -Pygments==2.2.0 -pyinotify==0.9.6 -pylint==2.5.3 -PyMySQL==0.8.0 -PyNaCl==1.2.1 -pyOpenSSL==19.1.0 -pyparsing==2.2.0 -pyperclip==1.6.0 
-pyroute2==0.5.14 -python-barbicanclient==4.5.2 -python-dateutil==2.7.0 -python-editor==1.0.3 -python-glanceclient==2.8.0 -python-keystoneclient==3.15.0 -python-mimeparse==1.6.0 -python-neutronclient==6.7.0 -python-novaclient==9.1.0 -python-subunit==1.0.0 -pytz==2018.3 -PyYAML==5.1 -redis==2.10.0 -repoze.lru==0.7 -requests==2.23.0 -requests-mock==1.2.0 -requestsexceptions==1.4.0 -restructuredtext-lint==1.1.3 -rfc3986==1.2.0 -Routes==2.4.1 -setproctitle==1.1.10 -simplegeneric==0.8.1 -simplejson==3.13.2 -smmap2==2.0.3 -snowballstemmer==1.2.1 -Sphinx==2.0.0 -sphinxcontrib-svg2pdfconverter==0.1.0 -sphinxcontrib-websupport==1.0.1 -SQLAlchemy==1.2.19 -SQLAlchemy-Utils==0.30.11 -sqlalchemy-migrate==0.11.0 -sqlparse==0.2.4 -statsd==3.2.2 -stestr==2.0.0 -stevedore==1.20.0 -taskflow==4.4.0 -tempest==23.0.0 -Tempita==0.5.2 -tenacity==5.0.4 -testrepository==0.0.18 -testresources==2.0.0 -testscenarios==0.4 -testtools==2.2.0 -traceback2==1.4.0 -unittest2==1.1.0 -urllib3==1.21.1 -vine==5.0.0 -voluptuous==0.11.1 -waitress==1.1.0 -warlock==1.3.1 -WebOb==1.8.2 -WebTest==2.0.29 -Werkzeug==0.14.1 -wrapt==1.12.0 -WSME==0.8.0 -zake==0.1.6 -python-cinderclient==3.3.0 diff -Nru octavia-10.1.0/octavia/amphorae/backends/agent/api_server/amphora_info.py octavia-10.1.1/octavia/amphorae/backends/agent/api_server/amphora_info.py --- octavia-10.1.0/octavia/amphorae/backends/agent/api_server/amphora_info.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/backends/agent/api_server/amphora_info.py 2024-02-08 14:48:31.000000000 +0000 @@ -45,6 +45,7 @@ return webob.Response(json=body) def compile_amphora_details(self, extend_lvs_driver=None): + haproxy_loadbalancer_list = sorted(util.get_loadbalancers()) haproxy_listener_list = sorted(util.get_listeners()) extend_body = {} lvs_listener_list = [] @@ -67,7 +68,7 @@ 'networks': self._get_networks(), 'active': True, 'haproxy_count': - self._count_haproxy_processes(haproxy_listener_list), + self._count_haproxy_processes(haproxy_loadbalancer_list), 'cpu': { 'total': cpu['total'], 'user': cpu['user'], diff -Nru octavia-10.1.0/octavia/amphorae/backends/agent/api_server/osutils.py octavia-10.1.1/octavia/amphorae/backends/agent/api_server/osutils.py --- octavia-10.1.0/octavia/amphorae/backends/agent/api_server/osutils.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/backends/agent/api_server/osutils.py 2024-02-08 14:48:31.000000000 +0000 @@ -56,6 +56,7 @@ def write_interface_file(self, interface, ip_address, prefixlen): interface = interface_file.InterfaceFile( name=interface, + if_type=consts.LO, addresses=[{ "address": ip_address, "prefixlen": prefixlen diff -Nru octavia-10.1.0/octavia/amphorae/backends/health_daemon/health_daemon.py octavia-10.1.1/octavia/amphorae/backends/health_daemon/health_daemon.py --- octavia-10.1.0/octavia/amphorae/backends/health_daemon/health_daemon.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/backends/health_daemon/health_daemon.py 2024-02-08 14:48:31.000000000 +0000 @@ -157,9 +157,15 @@ def get_stats(stat_sock_file): - stats_query = haproxy_query.HAProxyQuery(stat_sock_file) - stats = stats_query.show_stat() - pool_status = stats_query.get_pool_status() + try: + stats_query = haproxy_query.HAProxyQuery(stat_sock_file) + stats = stats_query.show_stat() + pool_status = stats_query.get_pool_status() + except Exception as e: + LOG.warning('Unable to query the HAProxy stats (%s) due to: %s', + stat_sock_file, str(e)) + # Return empty lists so that the heartbeat will still be sent 
+ return [], {} return stats, pool_status diff -Nru octavia-10.1.0/octavia/amphorae/backends/utils/interface.py octavia-10.1.1/octavia/amphorae/backends/utils/interface.py --- octavia-10.1.0/octavia/amphorae/backends/utils/interface.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/backends/utils/interface.py 2024-02-08 14:48:31.000000000 +0000 @@ -195,7 +195,9 @@ self._addresses_up(interface, ipr, idx) self._routes_up(interface, ipr, idx) - self._rules_up(interface, ipr, idx) + # only the vip port updates the rules + if interface.if_type == consts.VIP: + self._rules_up(interface, ipr, idx) self._scripts_up(interface, current_state) @@ -374,11 +376,13 @@ current_state = link.get(consts.STATE) if current_state == consts.IFACE_UP: - for rule in interface.rules: - rule[consts.FAMILY] = self._family(rule[consts.SRC]) - LOG.debug("%s: Deleting rule %s", interface.name, rule) - self._ipr_command(ipr.rule, self.DELETE, - raise_on_error=False, **rule) + # only the vip port updates the rules + if interface.if_type == consts.VIP: + for rule in interface.rules: + rule[consts.FAMILY] = self._family(rule[consts.SRC]) + LOG.debug("%s: Deleting rule %s", interface.name, rule) + self._ipr_command(ipr.rule, self.DELETE, + raise_on_error=False, **rule) for route in interface.routes: route[consts.FAMILY] = self._family(route[consts.DST]) diff -Nru octavia-10.1.0/octavia/amphorae/backends/utils/interface_file.py octavia-10.1.1/octavia/amphorae/backends/utils/interface_file.py --- octavia-10.1.0/octavia/amphorae/backends/utils/interface_file.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/backends/utils/interface_file.py 2024-02-08 14:48:31.000000000 +0000 @@ -25,9 +25,11 @@ class InterfaceFile(object): - def __init__(self, name, mtu=None, addresses=None, + def __init__(self, name, if_type, + mtu=None, addresses=None, routes=None, rules=None, scripts=None): self.name = name + self.if_type = if_type self.mtu = mtu self.addresses = addresses or [] self.routes = routes or [] @@ -92,6 +94,7 @@ flags, mode), 'w') as fp: interface = { consts.NAME: self.name, + consts.IF_TYPE: self.if_type, consts.ADDRESSES: self.addresses, consts.ROUTES: self.routes, consts.RULES: self.rules, @@ -108,7 +111,7 @@ gateway, vrrp_ip, host_routes, topology, fixed_ips=None): - super().__init__(name, mtu=mtu) + super().__init__(name, if_type=consts.VIP, mtu=mtu) if vrrp_ip: self.addresses.append({ @@ -224,7 +227,7 @@ class PortInterfaceFile(InterfaceFile): def __init__(self, name, mtu, fixed_ips): - super().__init__(name, mtu=mtu) + super().__init__(name, if_type=consts.BACKEND, mtu=mtu) if fixed_ips: ip_versions = set() diff -Nru octavia-10.1.0/octavia/amphorae/drivers/driver_base.py octavia-10.1.1/octavia/amphorae/drivers/driver_base.py --- octavia-10.1.0/octavia/amphorae/drivers/driver_base.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/drivers/driver_base.py 2024-02-08 14:48:31.000000000 +0000 @@ -14,6 +14,9 @@ # under the License. import abc +from typing import Optional + +from octavia.db import models as db_models class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta): @@ -231,6 +234,19 @@ :type timeout_dict: dict """ + @abc.abstractmethod + def check(self, amphora: db_models.Amphora, + timeout_dict: Optional[dict] = None): + """Check connectivity to the amphora. + + :param amphora: The amphora to query. + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. 
May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + :raises TimeOutException: The amphora didn't reply + """ + class VRRPDriverMixin(object, metaclass=abc.ABCMeta): """Abstract mixin class for VRRP support in loadbalancer amphorae diff -Nru octavia-10.1.0/octavia/amphorae/drivers/haproxy/rest_api_driver.py octavia-10.1.1/octavia/amphorae/drivers/haproxy/rest_api_driver.py --- octavia-10.1.0/octavia/amphorae/drivers/haproxy/rest_api_driver.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/drivers/haproxy/rest_api_driver.py 2024-02-08 14:48:31.000000000 +0000 @@ -17,6 +17,7 @@ import os import ssl import time +from typing import Optional import warnings from oslo_context import context as oslo_context @@ -38,6 +39,7 @@ from octavia.common.tls_utils import cert_parser from octavia.common import utils from octavia.db import api as db_apis +from octavia.db import models as db_models from octavia.db import repositories as repo from octavia.network import data_models as network_models @@ -115,6 +117,11 @@ amphora.id, amphora.api_version) return list(map(int, amphora.api_version.split('.'))) + def check(self, amphora: db_models.Amphora, + timeout_dict: Optional[dict] = None): + """Check connectivity to the amphora.""" + self._populate_amphora_api_version(amphora, timeout_dict) + def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict=None): """Update the amphora with a new configuration. @@ -635,15 +642,15 @@ req_read_timeout, conn_max_retries, conn_retry_interval :type timeout_dict: dict - :returns: None if not found, the interface name string if found. + :returns: the interface name string if found. + :raises octavia.amphorae.drivers.haproxy.exceptions.NotFound: + No interface found on the amphora + :raises TimeOutException: The amphora didn't reply """ - try: - self._populate_amphora_api_version(amphora, timeout_dict) - response_json = self.clients[amphora.api_version].get_interface( - amphora, ip_address, timeout_dict, log_error=False) - return response_json.get('interface', None) - except (exc.NotFound, driver_except.TimeOutException): - return None + self._populate_amphora_api_version(amphora, timeout_dict) + response_json = self.clients[amphora.api_version].get_interface( + amphora, ip_address, timeout_dict, log_error=False) + return response_json.get('interface', None) # Check a custom hostname diff -Nru octavia-10.1.0/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py octavia-10.1.1/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py --- octavia-10.1.0/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py 2024-02-08 14:48:31.000000000 +0000 @@ -96,7 +96,8 @@ LOG.info("Start amphora %s VRRP Service.", amphora.id) - self._populate_amphora_api_version(amphora) + self._populate_amphora_api_version(amphora, + timeout_dict=timeout_dict) self.clients[amphora.api_version].start_vrrp(amphora, timeout_dict=timeout_dict) diff -Nru octavia-10.1.0/octavia/amphorae/drivers/noop_driver/driver.py octavia-10.1.1/octavia/amphorae/drivers/noop_driver/driver.py --- octavia-10.1.0/octavia/amphorae/drivers/noop_driver/driver.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/amphorae/drivers/noop_driver/driver.py 2024-02-08 14:48:31.000000000 +0000 @@ -195,3 +195,6 @@ def reload_vrrp_service(self, loadbalancer): pass + + def check(self, amphora, timeout_dict=None): + pass diff -Nru 
octavia-10.1.0/octavia/api/common/hooks.py octavia-10.1.1/octavia/api/common/hooks.py --- octavia-10.1.0/octavia/api/common/hooks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/api/common/hooks.py 2024-02-08 14:48:31.000000000 +0000 @@ -23,7 +23,8 @@ """Configures a request context and attaches it to the request.""" def on_route(self, state): - context_obj = context.Context.from_environ(state.request.environ) + context_obj = context.RequestContext.from_environ( + state.request.environ) state.request.context['octavia_context'] = context_obj diff -Nru octavia-10.1.0/octavia/api/root_controller.py octavia-10.1.1/octavia/api/root_controller.py --- octavia-10.1.0/octavia/api/root_controller.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/api/root_controller.py 2024-02-08 14:48:31.000000000 +0000 @@ -39,7 +39,7 @@ # Run the oslo middleware healthcheck for /healthcheck @pecan_expose('json') - @pecan_expose(content_type='plain/text') + @pecan_expose(content_type='text/plain') @pecan_expose(content_type='text/html') def healthcheck(self): # pylint: disable=inconsistent-return-statements if CONF.api_settings.healthcheck_enabled: diff -Nru octavia-10.1.0/octavia/api/v2/controllers/member.py octavia-10.1.1/octavia/api/v2/controllers/member.py --- octavia-10.1.0/octavia/api/v2/controllers/member.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/api/v2/controllers/member.py 2024-02-08 14:48:31.000000000 +0000 @@ -325,7 +325,6 @@ context = pecan_request.context.get('octavia_context') db_pool = self._get_db_pool(context.session, self.pool_id) - old_members = db_pool.members project_id, provider = self._get_lb_project_id_provider( context.session, db_pool.load_balancer_id) @@ -343,6 +342,11 @@ with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session) + # Reload the pool, the members may have been updated between the + # first query in this function and the lock of the loadbalancer + db_pool = self._get_db_pool(context.session, self.pool_id) + old_members = db_pool.members + old_member_uniques = { (m.ip_address, m.protocol_port): m.id for m in old_members} new_member_uniques = [ diff -Nru octavia-10.1.0/octavia/certificates/manager/castellan_mgr.py octavia-10.1.1/octavia/certificates/manager/castellan_mgr.py --- octavia-10.1.0/octavia/certificates/manager/castellan_mgr.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/certificates/manager/castellan_mgr.py 2024-02-08 14:48:31.000000000 +0000 @@ -19,12 +19,15 @@ from castellan.common.objects import opaque_data from castellan import key_manager from OpenSSL import crypto +from oslo_config import cfg from oslo_log import log as logging from octavia.certificates.common import pkcs12 from octavia.certificates.manager import cert_mgr from octavia.common import exceptions +CONF = cfg.CONF + LOG = logging.getLogger(__name__) @@ -33,7 +36,7 @@ def __init__(self): super().__init__() - self.manager = key_manager.API() + self.manager = key_manager.API(CONF) def store_cert(self, context, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, diff -Nru octavia-10.1.0/octavia/certificates/manager/noop.py octavia-10.1.1/octavia/certificates/manager/noop.py --- octavia-10.1.0/octavia/certificates/manager/noop.py 1970-01-01 00:00:00.000000000 +0000 +++ octavia-10.1.1/octavia/certificates/manager/noop.py 2024-02-08 14:48:31.000000000 +0000 @@ -0,0 +1,106 @@ +# Copyright (c) 2023 Red Hat +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import uuid + +from oslo_log import log as logging + +from octavia.certificates.common import cert +from octavia.certificates.common import local +from octavia.certificates.manager import cert_mgr +from octavia.common.tls_utils import cert_parser +from octavia.tests.common import sample_certs + +LOG = logging.getLogger(__name__) + + +class NoopCertManager(cert_mgr.CertManager): + """Cert manager implementation for no-op operations + + """ + def __init__(self): + super().__init__() + self._local_cert = None + + @property + def local_cert(self): + if self._local_cert is None: + self._local_cert = self.store_cert( + None, + sample_certs.X509_CERT, + sample_certs.X509_CERT_KEY_ENCRYPTED, + sample_certs.X509_IMDS, + private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) + return self._local_cert + + def store_cert(self, context, certificate, private_key, intermediates=None, + private_key_passphrase=None, **kwargs) -> cert.Cert: + """Stores (i.e., registers) a cert with the cert manager. + + This method stores the specified cert to the filesystem and returns + a UUID that can be used to retrieve it. + + :param context: Ignored in this implementation + :param certificate: PEM encoded TLS certificate + :param private_key: private key for the supplied certificate + :param intermediates: ordered and concatenated intermediate certs + :param private_key_passphrase: optional passphrase for the supplied key + + :returns: the UUID of the stored cert + :raises CertificateStorageException: if certificate storage fails + """ + cert_ref = str(uuid.uuid4()) + if isinstance(certificate, bytes): + certificate = certificate.decode('utf-8') + if isinstance(private_key, bytes): + private_key = private_key.decode('utf-8') + + LOG.debug('Driver %s no-op, store_cert certificate %s, cert_ref %s', + self.__class__.__name__, certificate, cert_ref) + + cert_data = {'certificate': certificate, 'private_key': private_key} + if intermediates: + if isinstance(intermediates, bytes): + intermediates = intermediates.decode('utf-8') + cert_data['intermediates'] = list( + cert_parser.get_intermediates_pems(intermediates)) + if private_key_passphrase: + if isinstance(private_key_passphrase, bytes): + private_key_passphrase = private_key_passphrase.decode('utf-8') + cert_data['private_key_passphrase'] = private_key_passphrase + + return local.LocalCert(**cert_data) + + def get_cert(self, context, cert_ref, check_only=True, **kwargs) -> ( + cert.Cert): + LOG.debug('Driver %s no-op, get_cert with cert_ref %s', + self.__class__.__name__, cert_ref) + return self.local_cert + + def delete_cert(self, context, cert_ref, resource_ref, service_name=None): + LOG.debug('Driver %s no-op, delete_cert with cert_ref %s', + self.__class__.__name__, cert_ref) + + def set_acls(self, context, cert_ref): + LOG.debug('Driver %s no-op, set_acls with cert_ref %s', + self.__class__.__name__, cert_ref) + + def unset_acls(self, context, cert_ref): + LOG.debug('Driver %s no-op, unset_acls with cert_ref %s', 
+ self.__class__.__name__, cert_ref) + + def get_secret(self, context, secret_ref) -> cert.Cert: + LOG.debug('Driver %s no-op, get_secret with secret_ref %s', + self.__class__.__name__, secret_ref) + return self.local_cert diff -Nru octavia-10.1.0/octavia/cmd/status.py octavia-10.1.1/octavia/cmd/status.py --- octavia-10.1.0/octavia/cmd/status.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/cmd/status.py 2024-02-08 14:48:31.000000000 +0000 @@ -22,7 +22,6 @@ # Need to import to load config from octavia.common import config # noqa: F401 pylint: disable=unused-import from octavia.common import constants -from octavia.common import policy from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver from octavia.i18n import _ @@ -77,9 +76,8 @@ 'section.')) def _check_amphorav2(self): - default_provider_driver = CONF.api_settings.default_provider_driver enabled_provider_drivers = CONF.api_settings.enabled_provider_drivers - if (default_provider_driver == constants.AMPHORAV2 or + if (constants.AMPHORA in enabled_provider_drivers or constants.AMPHORAV2 in enabled_provider_drivers): persistence = self._check_persistence() if isinstance(persistence, upgradecheck.Result): @@ -120,7 +118,6 @@ def main(): - policy.Policy() return upgradecheck.main( CONF, project='octavia', upgrade_command=Checks()) diff -Nru octavia-10.1.0/octavia/common/base_taskflow.py octavia-10.1.1/octavia/common/base_taskflow.py --- octavia-10.1.0/octavia/common/base_taskflow.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/base_taskflow.py 2024-02-08 14:48:31.000000000 +0000 @@ -147,12 +147,13 @@ if (not CONF.task_flow.jobboard_save_logbook and job.state == states.COMPLETE): LOG.debug("Job %s is complete. Cleaning up job logbook.", job.name) - try: - self._persistence.get_connection().destroy_logbook( - job.book.uuid) - except taskflow_exc.NotFound: - LOG.debug("Logbook for job %s has been already cleaned up", - job.name) + if job.book: + try: + self._persistence.get_connection().destroy_logbook( + job.book.uuid) + except taskflow_exc.NotFound: + LOG.debug("Logbook for job %s has been already cleaned up", + job.name) class RedisDynamicLoggingConductor(DynamicLoggingConductor): diff -Nru octavia-10.1.0/octavia/common/config.py octavia-10.1.1/octavia/common/config.py --- octavia-10.1.0/octavia/common/config.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/config.py 2024-02-08 14:48:31.000000000 +0000 @@ -525,6 +525,17 @@ help=_('Number of times an amphora delete should be retried.')), cfg.IntOpt('amphora_delete_retry_interval', default=5, help=_('Time, in seconds, between amphora delete retries.')), + # 2000 attempts is around 2h45 with the default settings + cfg.IntOpt('db_commit_retry_attempts', default=2000, + help=_('The number of times the database action will be ' + 'attempted.')), + cfg.IntOpt('db_commit_retry_initial_delay', default=1, + help=_('The initial delay before a retry attempt.')), + cfg.IntOpt('db_commit_retry_backoff', default=1, + help=_('The time to backoff retry attempts.')), + cfg.IntOpt('db_commit_retry_max', default=5, + help=_('The maximum amount of time to wait between retry ' + 'attempts.')), ] task_flow_opts = [ diff -Nru octavia-10.1.0/octavia/common/constants.py octavia-10.1.1/octavia/common/constants.py --- octavia-10.1.0/octavia/common/constants.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/constants.py 2024-02-08 14:48:31.000000000 +0000 @@ -313,6 +313,7 @@ AMPHORA_NETWORK_CONFIG = 
'amphora_network_config' AMPHORAE = 'amphorae' AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config' +AMPHORAE_STATUS = 'amphorae_status' AMPS_DATA = 'amps_data' ANTI_AFFINITY = 'anti-affinity' ATTEMPT_NUMBER = 'attempt_number' @@ -384,6 +385,7 @@ NAME = 'name' NETWORK = 'network' NETWORK_ID = 'network_id' +NEW_AMPHORA_ID = 'new_amphora_id' NEXTHOP = 'nexthop' NICS = 'nics' OBJECT = 'object' @@ -430,6 +432,7 @@ TLS_CONTAINER_ID = 'tls_container_id' TOPOLOGY = 'topology' TOTAL_CONNECTIONS = 'total_connections' +UNREACHABLE = 'unreachable' UPDATED_AT = 'updated_at' UPDATE_DICT = 'update_dict' UPDATED_PORTS = 'updated_ports' @@ -557,6 +560,7 @@ AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug' AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener' AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert' +AMPHORAE_GET_CONNECTIVITY_STATUS = 'amphorae-get-connectivity-status' AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug' ATTACH_PORT = 'attach-port' CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta' @@ -909,6 +913,9 @@ lib_consts.ALPN_PROTOCOL_HTTP_1_0] # Amphora interface fields +IF_TYPE = 'if_type' +BACKEND = 'backend' +LO = 'lo' MTU = 'mtu' ADDRESSES = 'addresses' ROUTES = 'routes' diff -Nru octavia-10.1.0/octavia/common/context.py octavia-10.1.1/octavia/common/context.py --- octavia-10.1.0/octavia/common/context.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/context.py 2024-02-08 14:48:31.000000000 +0000 @@ -22,7 +22,7 @@ CONF = cfg.CONF -class Context(common_context.RequestContext): +class RequestContext(common_context.RequestContext): _session = None diff -Nru octavia-10.1.0/octavia/common/exceptions.py octavia-10.1.1/octavia/common/exceptions.py --- octavia-10.1.0/octavia/common/exceptions.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/exceptions.py 2024-02-08 14:48:31.000000000 +0000 @@ -133,6 +133,12 @@ code = 400 +class MissingCertSubject(APIException): + msg = _('No CN or DNSName(s) found in certificate. 
The certificate is ' + 'invalid.') + code = 400 + + class MisMatchedKey(OctaviaException): message = _("Key and x509 certificate do not match") diff -Nru octavia-10.1.0/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 octavia-10.1.1/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 --- octavia-10.1.0/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 2024-02-08 14:48:31.000000000 +0000 @@ -202,13 +202,18 @@ {% else %} {% set monitor_port_opt = "" %} {% endif %} + {% if pool.alpn_protocols is defined %} + {% set alpn_opt = " check-alpn %s"|format(pool.alpn_protocols) %} + {% else %} + {% set alpn_opt = "" %} + {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %} {% set monitor_ssl_opt = " check-ssl verify none" %} {% else %} {% set monitor_ssl_opt = "" %} {% endif %} - {% set hm_opt = " check%s inter %ds fall %d rise %d%s%s"|format( - monitor_ssl_opt, pool.health_monitor.delay, + {% set hm_opt = " check%s%s inter %ds fall %d rise %d%s%s"|format( + monitor_ssl_opt, alpn_opt, pool.health_monitor.delay, pool.health_monitor.fall_threshold, pool.health_monitor.rise_threshold, monitor_addr_opt, monitor_port_opt) %} @@ -365,9 +370,6 @@ {% endif %} http-check expect rstatus {{ pool.health_monitor.expected_codes }} {% endif %} - {% if pool.health_monitor.type == constants.HEALTH_MONITOR_TLS_HELLO %} - option ssl-hello-chk - {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_PING %} option external-check external-check command /var/lib/octavia/ping-wrapper.sh diff -Nru octavia-10.1.0/octavia/common/jinja/lvs/templates/macros.j2 octavia-10.1.1/octavia/common/jinja/lvs/templates/macros.j2 --- octavia-10.1.0/octavia/common/jinja/lvs/templates/macros.j2 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/jinja/lvs/templates/macros.j2 2024-02-08 14:48:31.000000000 +0000 @@ -117,8 +117,10 @@ {% endif %} {% if default_pool.session_persistence.persistence_granularity %} persistence_granularity {{ default_pool.session_persistence.persistence_granularity }} - {% else %} + {% elif ip_version == 4 %} persistence_granularity 255.255.255.255 + {% else %} + persistence_granularity 128 {% endif %} {% endif %} {{ health_monitor_vs_macro(default_pool) }} diff -Nru octavia-10.1.0/octavia/common/tls_utils/cert_parser.py octavia-10.1.1/octavia/common/tls_utils/cert_parser.py --- octavia-10.1.0/octavia/common/tls_utils/cert_parser.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/common/tls_utils/cert_parser.py 2024-02-08 14:48:31.000000000 +0000 @@ -256,14 +256,16 @@ """ if isinstance(certificate, str): certificate = certificate.encode('utf-8') + host_names = {'cn': None, 'dns_names': []} try: cert = x509.load_pem_x509_certificate(certificate, backends.default_backend()) - cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0] - host_names = { - 'cn': cn.value.lower(), - 'dns_names': [] - } + try: + cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0] + host_names['cn'] = cn.value.lower() + except Exception as e: + LOG.debug(f'Unable to get CN from certificate due to: {e}. 
' + f'Assuming subject alternative names are present.') try: ext = cert.extensions.get_extension_for_oid( x509.OID_SUBJECT_ALTERNATIVE_NAME @@ -274,7 +276,17 @@ LOG.debug("%s extension not found", x509.OID_SUBJECT_ALTERNATIVE_NAME) + # Certs with no subject are valid as long as a subject alternative + # name is present. If both are missing, it is an invalid cert per + # the x.509 standard. + if not host_names['cn'] and not host_names['dns_names']: + LOG.warning('No CN or DNSName(s) found in certificate. The ' + 'certificate is invalid.') + raise exceptions.MissingCertSubject() + return host_names + except exceptions.MissingCertSubject: + raise except Exception as e: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert from e @@ -359,6 +371,10 @@ cert_mngr.get_cert(context, obj.tls_certificate_id, check_only=True)) + except exceptions.MissingCertSubject: + # This was logged below, so raise as is to provide a clear + # user error + raise except Exception as e: LOG.warning('Unable to retrieve certificate: %s due to %s.', obj.tls_certificate_id, str(e)) diff -Nru octavia-10.1.0/octavia/compute/drivers/noop_driver/driver.py octavia-10.1.1/octavia/compute/drivers/noop_driver/driver.py --- octavia-10.1.0/octavia/compute/drivers/noop_driver/driver.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/compute/drivers/noop_driver/driver.py 2024-02-08 14:48:31.000000000 +0000 @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +from collections import namedtuple + from oslo_log import log as logging from oslo_utils import uuidutils @@ -23,6 +25,9 @@ LOG = logging.getLogger(__name__) +NoopServerGroup = namedtuple('ServerGroup', ['id']) + + class NoopManager(object): def __init__(self): super().__init__() @@ -76,6 +81,7 @@ LOG.debug("Create Server Group %s no-op, name %s, policy %s ", self.__class__.__name__, name, policy) self.computeconfig[(name, policy)] = (name, policy, 'create') + return NoopServerGroup(id=uuidutils.generate_uuid()) def delete_server_group(self, server_group_id): LOG.debug("Delete Server Group %s no-op, id %s ", diff -Nru octavia-10.1.0/octavia/controller/worker/task_utils.py octavia-10.1.1/octavia/controller/worker/task_utils.py --- octavia-10.1.0/octavia/controller/worker/task_utils.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/task_utils.py 2024-02-08 14:48:31.000000000 +0000 @@ -14,18 +14,32 @@ """ Methods common to the controller work tasks.""" +from oslo_config import cfg from oslo_log import log as logging +from oslo_utils import excutils +import tenacity from octavia.common import constants from octavia.db import api as db_apis from octavia.db import repositories as repo +CONF = cfg.CONF LOG = logging.getLogger(__name__) class TaskUtils(object): """Class of helper/utility methods used by tasks.""" + status_update_retry = tenacity.retry( + retry=tenacity.retry_if_exception_type(Exception), + wait=tenacity.wait_incrementing( + CONF.controller_worker.db_commit_retry_initial_delay, + CONF.controller_worker.db_commit_retry_backoff, + CONF.controller_worker.db_commit_retry_max), + stop=tenacity.stop_after_attempt( + CONF.controller_worker.db_commit_retry_attempts), + after=tenacity.after_log(LOG, logging.DEBUG)) + def __init__(self, **kwargs): self.amphora_repo = repo.AmphoraRepository() self.health_mon_repo = repo.HealthMonitorRepository() @@ -153,6 +167,7 @@ "provisioning status to ERROR due to: " "%(except)s", {'list': listener_id, 'except': 
str(e)}) + @status_update_retry def mark_loadbalancer_prov_status_error(self, loadbalancer_id): """Sets a load balancer provisioning status to ERROR. @@ -166,9 +181,12 @@ id=loadbalancer_id, provisioning_status=constants.ERROR) except Exception as e: - LOG.error("Failed to update load balancer %(lb)s " - "provisioning status to ERROR due to: " - "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) + # Reraise for tenacity + with excutils.save_and_reraise_exception(): + LOG.error("Failed to update load balancer %(lb)s " + "provisioning status to ERROR due to: " + "%(except)s", {'lb': loadbalancer_id, + 'except': str(e)}) def mark_listener_prov_status_active(self, listener_id): """Sets a listener provisioning status to ACTIVE. @@ -203,6 +221,7 @@ "to ACTIVE due to: %(except)s", {'pool': pool_id, 'except': str(e)}) + @status_update_retry def mark_loadbalancer_prov_status_active(self, loadbalancer_id): """Sets a load balancer provisioning status to ACTIVE. @@ -216,9 +235,12 @@ id=loadbalancer_id, provisioning_status=constants.ACTIVE) except Exception as e: - LOG.error("Failed to update load balancer %(lb)s " - "provisioning status to ACTIVE due to: " - "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) + # Reraise for tenacity + with excutils.save_and_reraise_exception(): + LOG.error("Failed to update load balancer %(lb)s " + "provisioning status to ACTIVE due to: " + "%(except)s", {'lb': loadbalancer_id, + 'except': str(e)}) def mark_member_prov_status_error(self, member_id): """Sets a member provisioning status to ERROR. diff -Nru octavia-10.1.0/octavia/controller/worker/v1/flows/amphora_flows.py octavia-10.1.1/octavia/controller/worker/v1/flows/amphora_flows.py --- octavia-10.1.0/octavia/controller/worker/v1/flows/amphora_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v1/flows/amphora_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -240,7 +240,8 @@ return delete_amphora_flow def get_vrrp_subflow(self, prefix, timeout_dict=None, - create_vrrp_group=True): + create_vrrp_group=True, + get_amphorae_status=True): sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW vrrp_subflow = linear_flow.Flow(sf_name) @@ -256,6 +257,17 @@ requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE_NETWORK_CONFIG)) + if get_amphorae_status: + # Get the amphorae_status dict in case the caller hasn't fetched + # it yet. + vrrp_subflow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS, + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMPHORAE_STATUS)) + # VRRP update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. 
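The requires/provides/rebind/inject wiring in the flow changes above is standard taskflow plumbing: provides publishes a task result under a flow-level name, requires consumes such a name, rebind maps a task argument onto a value stored under a different name, and inject feeds a constant to a single task. A minimal runnable sketch of the same pattern (the task and value names here are hypothetical, only the taskflow calls themselves are real):

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class GetConnectivityStatus(task.Task):
        def execute(self, amphorae, new_amphora_id, timeout_dict=None):
            # Pretend every amphora except the new one is unreachable.
            return {amp: {'unreachable': amp != new_amphora_id}
                    for amp in amphorae}

    class UpdateListeners(task.Task):
        def execute(self, amphorae, amphorae_status):
            for amp in amphorae:
                if amphorae_status[amp]['unreachable']:
                    print('skipping unreachable amphora', amp)

    flow = linear_flow.Flow('failover-sketch')
    # rebind: the 'new_amphora_id' argument reads the flow value stored
    # as 'amphora_id'; inject: a constant visible to this task only.
    flow.add(GetConnectivityStatus(provides='amphorae_status',
                                   rebind={'new_amphora_id': 'amphora_id'},
                                   inject={'timeout_dict': None}))
    flow.add(UpdateListeners())  # consumes 'amphorae_status' from above
    engines.run(flow, store={'amphorae': ['amp-a', 'amp-b'],
                             'amphora_id': 'amp-b'})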
@@ -266,7 +278,8 @@ amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) @@ -275,13 +288,15 @@ name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, - constants.AMP_VRRP_INT), + constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-0-' + constants.AMP_VRRP_START, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) @@ -289,7 +304,8 @@ amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) @@ -298,12 +314,14 @@ name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, - constants.AMP_VRRP_INT), + constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-1-' + constants.AMP_VRRP_START, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) @@ -551,6 +569,14 @@ constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.active_connection_retry_interval} + failover_amp_flow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS, + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMPHORAE_STATUS)) + # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. @@ -561,7 +587,9 @@ update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) @@ -571,7 +599,8 @@ if lb_amp_count == 2: failover_amp_flow.add( self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, - timeout_dict, create_vrrp_group=False)) + timeout_dict, create_vrrp_group=False, + get_amphorae_status=False)) # Reload the listener. 
This needs to be done here because # it will create the required haproxy check scripts for @@ -587,7 +616,9 @@ amphora_driver_tasks.AmphoraIndexListenersReload( name=(str(amp_index) + '-' + constants.AMPHORA_RELOAD_LISTENER), - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) diff -Nru octavia-10.1.0/octavia/controller/worker/v1/flows/load_balancer_flows.py octavia-10.1.1/octavia/controller/worker/v1/flows/load_balancer_flows.py --- octavia-10.1.0/octavia/controller/worker/v1/flows/load_balancer_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v1/flows/load_balancer_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -621,6 +621,14 @@ requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE)) + failover_LB_flow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=(new_amp_role + '-' + + constants.AMPHORAE_GET_CONNECTIVITY_STATUS), + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + provides=constants.AMPHORAE_STATUS)) + # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. @@ -635,14 +643,18 @@ amphora_driver_tasks.AmphoraIndexListenerUpdate( name=(constants.AMPHORA + '-0-' + constants.AMP_LISTENER_UPDATE), - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=(constants.AMPHORA + '-1-' + constants.AMP_LISTENER_UPDATE), - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) @@ -651,7 +663,8 @@ # Configure and enable keepalived in the amphora failover_LB_flow.add(self.amp_flows.get_vrrp_subflow( new_amp_role + '-' + constants.GET_VRRP_SUBFLOW, - timeout_dict, create_vrrp_group=False)) + timeout_dict, create_vrrp_group=False, + get_amphorae_status=False)) # #### End of standby #### @@ -666,6 +679,7 @@ name=(new_amp_role + '-' + constants.AMPHORA_RELOAD_LISTENER), requires=(constants.LOADBALANCER, constants.AMPHORAE), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) diff -Nru octavia-10.1.0/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py octavia-10.1.1/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py --- octavia-10.1.0/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
# +from typing import List +from typing import Optional from cryptography import fernet from oslo_config import cfg @@ -73,10 +75,18 @@ """Task to update the listeners on one amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, - timeout_dict=None): + amphorae_status: dict, new_amphora_id: str, timeout_dict=None): # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. + + amphora_id = amphorae[amphora_index].id + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping listener update because amphora %s " + "is not reachable.", amphora_id) + return + try: # Make sure we have a fresh load balancer object loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), @@ -84,12 +94,14 @@ self.amphora_driver.update_amphora_listeners( loadbalancer, amphorae[amphora_index], timeout_dict) except Exception as e: - amphora_id = amphorae[amphora_index].id LOG.error('Failed to update listeners on amphora %s. Skipping ' 'this amphora as it is failing to update due to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) class ListenersUpdate(BaseAmphoraTask): @@ -129,19 +141,31 @@ """Task to reload all listeners on an amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, - timeout_dict=None): + amphorae_status: dict, new_amphora_id: str, timeout_dict=None): """Execute listener reload routines for listeners on an amphora.""" + if amphorae is None: + return + + amphora_id = amphorae[amphora_index].id + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping listener reload because amphora %s " + "is not reachable.", amphora_id) + return + if loadbalancer.listeners: try: self.amphora_driver.reload( loadbalancer, amphorae[amphora_index], timeout_dict) except Exception as e: - amphora_id = amphorae[amphora_index].id LOG.warning('Failed to reload listeners on amphora %s. ' 'Skipping this amphora as it is failing to ' 'reload due to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during + # the failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) class ListenerDelete(BaseAmphoraTask): @@ -305,8 +329,15 @@ class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): """Task to get and update the VRRP interface device name from amphora.""" - def execute(self, amphora_index, amphorae, timeout_dict=None): + def execute(self, amphora_index, amphorae, amphorae_status: dict, + new_amphora_id: str, timeout_dict=None): amphora_id = amphorae[amphora_index].id + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP interface update because amphora %s " + "is not reachable.", amphora_id) + return None + try: interface = self.amphora_driver.get_interface_from_ip( amphorae[amphora_index], amphorae[amphora_index].vrrp_ip, @@ -316,8 +347,11 @@ LOG.error('Failed to get amphora VRRP interface on amphora ' '%s. 
Skipping this amphora as it is failing due to: ' '%s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) return None self.amphora_repo.update(db_apis.get_session(), amphora_id, @@ -354,14 +388,21 @@ """Task to update the VRRP configuration of an amphora.""" def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, - amphorae, amp_vrrp_int, timeout_dict=None): + amphorae, amphorae_status: dict, amp_vrrp_int: Optional[str], + new_amphora_id: str, timeout_dict=None): """Execute update_vrrp_conf.""" - loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), - id=loadbalancer_id) # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. amphora_id = amphorae[amphora_index].id + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP configuration because amphora %s " + "is not reachable.", amphora_id) + return + + loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), + id=loadbalancer_id) amphorae[amphora_index].vrrp_interface = amp_vrrp_int try: self.amphora_driver.update_vrrp_conf( @@ -371,8 +412,11 @@ LOG.error('Failed to update VRRP configuration amphora %s. ' 'Skipping this amphora as it is failing to update due ' 'to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) return LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) @@ -394,8 +438,15 @@ This will reload keepalived if it is already running. """ - def execute(self, amphora_index, amphorae, timeout_dict=None): + def execute(self, amphora_index, amphorae, amphorae_status: dict, + new_amphora_id: str, timeout_dict=None): amphora_id = amphorae[amphora_index].id + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP start because amphora %s " + "is not reachable.", amphora_id) + return + try: self.amphora_driver.start_vrrp_service(amphorae[amphora_index], timeout_dict) @@ -403,8 +454,11 @@ LOG.error('Failed to start VRRP on amphora %s. ' 'Skipping this amphora as it is failing to start due ' 'to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) return LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index].id) @@ -451,3 +505,40 @@ LOG.error('Amphora %s does not support agent configuration ' 'update. Please update the amphora image for this ' 'amphora. Skipping.', amphora.id) + + +class AmphoraeGetConnectivityStatus(BaseAmphoraTask): + """Task that checks amphorae connectivity status. 
+ + Check and return the connectivity status of both amphorae in ACTIVE STANDBY + load balancers + """ + + def execute(self, amphorae: List[dict], new_amphora_id: str, + timeout_dict=None): + amphorae_status = {} + + for amphora in amphorae: + amphora_id = amphora.id + amphorae_status[amphora_id] = {} + + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, id=amphora_id) + + try: + # Verify if the amphora is reachable + self.amphora_driver.check(db_amp, timeout_dict=timeout_dict) + except Exception as e: + LOG.exception("Cannot get status for amphora %s", + amphora_id) + # In case it fails and the tested amphora is the newly created + # amphora, it's not a normal error handling, re-raise the + # exception + if amphora_id == new_amphora_id: + raise e + amphorae_status[amphora_id][constants.UNREACHABLE] = True + else: + amphorae_status[amphora_id][constants.UNREACHABLE] = False + + return amphorae_status diff -Nru octavia-10.1.0/octavia/controller/worker/v1/tasks/lifecycle_tasks.py octavia-10.1.1/octavia/controller/worker/v1/tasks/lifecycle_tasks.py --- octavia-10.1.0/octavia/controller/worker/v1/tasks/lifecycle_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v1/tasks/lifecycle_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -52,11 +52,18 @@ pass def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_health_mon_prov_status_error(health_mon.id) - self.task_utils.mark_pool_prov_status_active(health_mon.pool_id) + try: + self.task_utils.mark_health_mon_prov_status_error(health_mon.id) + self.task_utils.mark_pool_prov_status_active(health_mon.pool_id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) class L7PolicyToErrorOnRevertTask(BaseLifecycleTask): @@ -66,10 +73,17 @@ pass def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_l7policy_prov_status_error(l7policy.id) + try: + self.task_utils.mark_l7policy_prov_status_error(l7policy.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) class L7RuleToErrorOnRevertTask(BaseLifecycleTask): @@ -79,11 +93,19 @@ pass def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_l7rule_prov_status_error(l7rule.id) - self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id) + try: + self.task_utils.mark_l7rule_prov_status_error(l7rule.id) + self.task_utils.mark_l7policy_prov_status_active( + l7rule.l7policy_id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + except Exception: + # Catching and 
skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) class ListenerToErrorOnRevertTask(BaseLifecycleTask): @@ -93,7 +115,14 @@ pass def revert(self, listener, *args, **kwargs): - self.task_utils.mark_listener_prov_status_error(listener.id) + try: + self.task_utils.mark_listener_prov_status_error(listener.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( listener.load_balancer.id) @@ -105,10 +134,17 @@ pass def revert(self, listeners, loadbalancer, *args, **kwargs): + try: + for listener in listeners: + self.task_utils.mark_listener_prov_status_error(listener.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_error(listener.id) class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask): @@ -138,10 +174,17 @@ pass def revert(self, member, listeners, loadbalancer, pool, *args, **kwargs): - self.task_utils.mark_member_prov_status_error(member.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) - self.task_utils.mark_pool_prov_status_active(pool.id) + try: + self.task_utils.mark_member_prov_status_error(member.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + self.task_utils.mark_pool_prov_status_active(pool.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) @@ -152,11 +195,18 @@ pass def revert(self, members, listeners, loadbalancer, pool, *args, **kwargs): - for m in members: - self.task_utils.mark_member_prov_status_error(m.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) - self.task_utils.mark_pool_prov_status_active(pool.id) + try: + for m in members: + self.task_utils.mark_member_prov_status_error(m.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + self.task_utils.mark_pool_prov_status_active(pool.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) @@ -167,7 +217,14 @@ pass def revert(self, pool, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_pool_prov_status_error(pool.id) + try: + 
self.task_utils.mark_pool_prov_status_error(pool.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) diff -Nru octavia-10.1.0/octavia/controller/worker/v1/tasks/network_tasks.py octavia-10.1.1/octavia/controller/worker/v1/tasks/network_tasks.py --- octavia-10.1.0/octavia/controller/worker/v1/tasks/network_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v1/tasks/network_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -56,11 +56,6 @@ def execute(self, loadbalancer, amphora, availability_zone): LOG.debug("Calculating network delta for amphora id: %s", amphora.id) - vip_subnet_to_net_map = { - loadbalancer.vip.subnet_id: - loadbalancer.vip.network_id, - } - # Figure out what networks we want # seed with lb network(s) if (availability_zone and @@ -70,12 +65,15 @@ else: management_nets = CONF.controller_worker.amp_boot_network_list - desired_subnet_to_net_map = {} - for mgmt_net_id in management_nets: - for subnet_id in self.network_driver.get_network( - mgmt_net_id).subnets: - desired_subnet_to_net_map[subnet_id] = mgmt_net_id - desired_subnet_to_net_map.update(vip_subnet_to_net_map) + # Reload the load balancer, the provisioning status of the members may + # have been updated by a previous task + loadbalancer = self.lb_repo.get( + db_apis.get_session(), id=loadbalancer.id) + + desired_subnet_to_net_map = { + loadbalancer.vip.subnet_id: + loadbalancer.vip.network_id, + } for pool in loadbalancer.pools: for member in pool.members: @@ -94,7 +92,12 @@ nics = self.network_driver.get_plugged_networks( amphora.compute_id) # we don't have two nics in the same network - network_to_nic_map = {nic.network_id: nic for nic in nics} + # Don't include the nics connected to the management network, we don't + # want to update these interfaces. + network_to_nic_map = { + nic.network_id: nic + for nic in nics + if nic.network_id not in management_nets} plugged_network_ids = set(network_to_nic_map) diff -Nru octavia-10.1.0/octavia/controller/worker/v2/flows/amphora_flows.py octavia-10.1.1/octavia/controller/worker/v2/flows/amphora_flows.py --- octavia-10.1.0/octavia/controller/worker/v2/flows/amphora_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v2/flows/amphora_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -226,7 +226,8 @@ return delete_amphora_flow def get_vrrp_subflow(self, prefix, timeout_dict=None, - create_vrrp_group=True): + create_vrrp_group=True, + get_amphorae_status=True): sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW vrrp_subflow = linear_flow.Flow(sf_name) @@ -242,6 +243,17 @@ requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE_NETWORK_CONFIG)) + if get_amphorae_status: + # Get the amphorae_status dict in case the caller hasn't fetched + # it yet. 
+ vrrp_subflow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS, + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMPHORAE_STATUS)) + # VRRP update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. @@ -252,7 +264,8 @@ amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) @@ -261,13 +274,15 @@ name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, - constants.AMP_VRRP_INT), + constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-0-' + constants.AMP_VRRP_START, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) @@ -275,7 +290,8 @@ amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) @@ -284,12 +300,14 @@ name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, - constants.AMP_VRRP_INT), + constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-1-' + constants.AMP_VRRP_START, - requires=constants.AMPHORAE, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) @@ -537,6 +555,14 @@ constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.active_connection_retry_interval} + failover_amp_flow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS, + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMPHORAE_STATUS)) + # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. 
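Editor's note: the hunks above wire the new AmphoraeGetConnectivityStatus task into the VRRP subflow and the amphora failover flow. The task probes every amphora through the amphora driver and publishes a per-amphora status dict; the per-amphora tasks that follow consult that dict and skip an unreachable peer instead of marking it ERROR, while a probe failure on the newly created amphora aborts the failover. A minimal sketch of that contract, with simplified names and the driver call stubbed out (not the actual Octavia implementation):

    from typing import Callable, Dict, List

    UNREACHABLE = 'unreachable'  # stands in for constants.UNREACHABLE

    def get_connectivity_status(amphora_ids: List[str], new_amphora_id: str,
                                check: Callable[[str], None]) -> Dict[str, dict]:
        """Probe each amphora; only a failure on the new amphora is fatal."""
        amphorae_status: Dict[str, dict] = {}
        for amphora_id in amphora_ids:
            try:
                check(amphora_id)  # stand-in for amphora_driver.check()
            except Exception:
                if amphora_id == new_amphora_id:
                    raise  # a dead replacement amphora aborts the failover
                amphorae_status[amphora_id] = {UNREACHABLE: True}
            else:
                amphorae_status[amphora_id] = {UNREACHABLE: False}
        return amphorae_status

Each consumer task then guards itself with amphorae_status.get(amphora_id, {}).get(UNREACHABLE) and returns early rather than reverting the whole flow.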
@@ -547,7 +573,9 @@ update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) @@ -557,7 +585,8 @@ if lb_amp_count == 2: failover_amp_flow.add( self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, - timeout_dict, create_vrrp_group=False)) + timeout_dict, create_vrrp_group=False, + get_amphorae_status=False)) # Reload the listener. This needs to be done here because # it will create the required haproxy check scripts for @@ -573,7 +602,9 @@ amphora_driver_tasks.AmphoraIndexListenersReload( name=(str(amp_index) + '-' + constants.AMPHORA_RELOAD_LISTENER), - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) diff -Nru octavia-10.1.0/octavia/controller/worker/v2/flows/load_balancer_flows.py octavia-10.1.1/octavia/controller/worker/v2/flows/load_balancer_flows.py --- octavia-10.1.0/octavia/controller/worker/v2/flows/load_balancer_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v2/flows/load_balancer_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -611,6 +611,14 @@ requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE)) + failover_LB_flow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=(new_amp_role + '-' + + constants.AMPHORAE_GET_CONNECTIVITY_STATUS), + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + provides=constants.AMPHORAE_STATUS)) + # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. 
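Editor's note: these flow changes repeatedly pass rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}. In taskflow, rebind remaps an execute() argument onto a value stored under a different name, so the new new_amphora_id parameter added to each task is fed from the AMPHORA_ID the failover flow already records. A self-contained illustration with hypothetical task and store names:

    import taskflow.engines
    from taskflow.patterns import linear_flow
    from taskflow import task

    class Probe(task.Task):
        def execute(self, new_amphora_id):
            # Receives the value stored under 'amphora_id' via rebind.
            print('probing', new_amphora_id)

    flow = linear_flow.Flow('demo')
    flow.add(Probe(rebind={'new_amphora_id': 'amphora_id'}))
    taskflow.engines.run(flow, store={'amphora_id': 'amp-0001'})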
@@ -625,14 +633,18 @@ amphora_driver_tasks.AmphoraIndexListenerUpdate( name=(constants.AMPHORA + '-0-' + constants.AMP_LISTENER_UPDATE), - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=(constants.AMPHORA + '-1-' + constants.AMP_LISTENER_UPDATE), - requires=(constants.LOADBALANCER, constants.AMPHORAE), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) @@ -641,7 +653,8 @@ # Configure and enable keepalived in the amphora failover_LB_flow.add(self.amp_flows.get_vrrp_subflow( new_amp_role + '-' + constants.GET_VRRP_SUBFLOW, - timeout_dict, create_vrrp_group=False)) + timeout_dict, create_vrrp_group=False, + get_amphorae_status=False)) # #### End of standby #### @@ -656,6 +669,7 @@ name=(new_amp_role + '-' + constants.AMPHORA_RELOAD_LISTENER), requires=(constants.LOADBALANCER, constants.AMPHORAE), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) diff -Nru octavia-10.1.0/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py octavia-10.1.1/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py --- octavia-10.1.0/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -14,6 +14,9 @@ # import copy +from typing import List +from typing import Optional + from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging @@ -99,10 +102,19 @@ class AmphoraIndexListenerUpdate(BaseAmphoraTask): """Task to update the listeners on one amphora.""" - def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()): + def execute(self, loadbalancer, amphora_index, amphorae, + amphorae_status: dict, new_amphora_id: str, timeout_dict=()): # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. + + amphora_id = amphorae[amphora_index].get(constants.ID) + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping listener update because amphora %s " + "is not reachable.", amphora_id) + return + try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups @@ -115,12 +127,14 @@ self.amphora_driver.update_amphora_listeners( db_lb, db_amp, timeout_dict) except Exception as e: - amphora_id = amphorae[amphora_index].get(constants.ID) LOG.error('Failed to update listeners on amphora %s. 
Skipping ' 'this amphora as it is failing to update due to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) class ListenersUpdate(BaseAmphoraTask): @@ -177,10 +191,18 @@ """Task to reload all listeners on an amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, - timeout_dict=None): + amphorae_status: dict, new_amphora_id: str, timeout_dict=None): """Execute listener reload routines for listeners on an amphora.""" if amphorae is None: return + + amphora_id = amphorae[amphora_index].get(constants.ID) + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping listener reload because amphora %s " + "is not reachable.", amphora_id) + return + # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get( @@ -192,12 +214,14 @@ try: self.amphora_driver.reload(db_lb, db_amp, timeout_dict) except Exception as e: - amphora_id = amphorae[amphora_index][constants.ID] LOG.warning('Failed to reload listeners on amphora %s. ' 'Skipping this amphora as it is failing to ' 'reload due to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during + # the failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) class ListenerDelete(BaseAmphoraTask): @@ -421,8 +445,15 @@ class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): """Task to get and update the VRRP interface device name from amphora.""" - def execute(self, amphora_index, amphorae, timeout_dict=None): + def execute(self, amphora_index, amphorae, amphorae_status: dict, + new_amphora_id: str, timeout_dict=None): amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP interface update because amphora %s " + "is not reachable.", amphora_id) + return None + try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups @@ -435,8 +466,11 @@ LOG.error('Failed to get amphora VRRP interface on amphora ' '%s. Skipping this amphora as it is failing due to: ' '%s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) return None self.amphora_repo.update(db_apis.get_session(), amphora_id, @@ -478,12 +512,19 @@ """Task to update the VRRP configuration of an amphora.""" def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, - amphorae, amp_vrrp_int, timeout_dict=None): + amphorae, amphorae_status: dict, amp_vrrp_int: Optional[str], + new_amphora_id: str, timeout_dict=None): """Execute update_vrrp_conf.""" # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. 
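# Editor's note: the guard repeated in these tasks,
#     amphora_status = amphorae_status.get(amphora_id, {})
#     if amphora_status.get(constants.UNREACHABLE): return
# fails open: an amphora missing from the status map is treated as
# reachable. Flows that call get_vrrp_subflow() with
# get_amphorae_status=False rely on this, since the AMPHORAE_STATUS dict
# they pass in was produced earlier in the same failover flow.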
amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP configuration because amphora %s " + "is not reachable.", amphora_id) + return + try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups @@ -498,8 +539,11 @@ LOG.error('Failed to update VRRP configuration amphora %s. ' 'Skipping this amphora as it is failing to update due ' 'to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) return LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) @@ -525,10 +569,17 @@ This will reload keepalived if it is already running. """ - def execute(self, amphora_index, amphorae, timeout_dict=None): + def execute(self, amphora_index, amphorae, amphorae_status: dict, + new_amphora_id: str, timeout_dict=None): # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP start because amphora %s " + "is not reachable.", amphora_id) + return + db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora_id) try: self.amphora_driver.start_vrrp_service(db_amp, timeout_dict) @@ -536,8 +587,11 @@ LOG.error('Failed to start VRRP on amphora %s. ' 'Skipping this amphora as it is failing to start due ' 'to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) return LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index][constants.ID]) @@ -592,3 +646,40 @@ 'update. Please update the amphora image for this ' 'amphora. Skipping.'. format(amphora.get(constants.ID))) + + +class AmphoraeGetConnectivityStatus(BaseAmphoraTask): + """Task that checks amphorae connectivity status. 
+ + Check and return the connectivity status of both amphorae in ACTIVE STANDBY + load balancers + """ + + def execute(self, amphorae: List[dict], new_amphora_id: str, + timeout_dict=None): + amphorae_status = {} + + for amphora in amphorae: + amphora_id = amphora[constants.ID] + amphorae_status[amphora_id] = {} + + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, id=amphora_id) + + try: + # Verify if the amphora is reachable + self.amphora_driver.check(db_amp, timeout_dict=timeout_dict) + except Exception as e: + LOG.exception("Cannot get status for amphora %s", + amphora_id) + # In case it fails and the tested amphora is the newly created + # amphora, it's not a normal error handling, re-raise the + # exception + if amphora_id == new_amphora_id: + raise e + amphorae_status[amphora_id][constants.UNREACHABLE] = True + else: + amphorae_status[amphora_id][constants.UNREACHABLE] = False + + return amphorae_status diff -Nru octavia-10.1.0/octavia/controller/worker/v2/tasks/lifecycle_tasks.py octavia-10.1.1/octavia/controller/worker/v2/tasks/lifecycle_tasks.py --- octavia-10.1.0/octavia/controller/worker/v2/tasks/lifecycle_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v2/tasks/lifecycle_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -54,15 +54,22 @@ pass def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_health_mon_prov_status_error( - health_mon[constants.HEALTHMONITOR_ID]) - self.task_utils.mark_pool_prov_status_active( - health_mon[constants.POOL_ID]) + try: + self.task_utils.mark_health_mon_prov_status_error( + health_mon[constants.HEALTHMONITOR_ID]) + self.task_utils.mark_pool_prov_status_active( + health_mon[constants.POOL_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer[constants.LOADBALANCER_ID]) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active( - listener[constants.LISTENER_ID]) class L7PolicyToErrorOnRevertTask(BaseLifecycleTask): @@ -72,12 +79,19 @@ pass def revert(self, l7policy, listeners, loadbalancer_id, *args, **kwargs): - self.task_utils.mark_l7policy_prov_status_error( - l7policy[constants.L7POLICY_ID]) + try: + self.task_utils.mark_l7policy_prov_status_error( + l7policy[constants.L7POLICY_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active( - listener[constants.LISTENER_ID]) class L7RuleToErrorOnRevertTask(BaseLifecycleTask): @@ -88,14 +102,21 @@ def revert(self, l7rule, l7policy_id, listeners, loadbalancer_id, *args, **kwargs): - self.task_utils.mark_l7rule_prov_status_error( - l7rule[constants.L7RULE_ID]) - 
self.task_utils.mark_l7policy_prov_status_active(l7policy_id) + try: + self.task_utils.mark_l7rule_prov_status_error( + l7rule[constants.L7RULE_ID]) + self.task_utils.mark_l7policy_prov_status_active(l7policy_id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer_id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active( - listener[constants.LISTENER_ID]) class ListenerToErrorOnRevertTask(BaseLifecycleTask): @@ -105,8 +126,15 @@ pass def revert(self, listener, *args, **kwargs): - self.task_utils.mark_listener_prov_status_error( - listener[constants.LISTENER_ID]) + try: + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( listener[constants.LOADBALANCER_ID]) @@ -118,9 +146,16 @@ pass def revert(self, listeners, *args, **kwargs): - for listener in listeners: - self.task_utils.mark_listener_prov_status_error( - listener[constants.LISTENER_ID]) + try: + for listener in listeners: + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( listeners[0][constants.LOADBALANCER_ID]) @@ -154,12 +189,19 @@ def revert(self, member, listeners, loadbalancer, pool_id, *args, **kwargs): - self.task_utils.mark_member_prov_status_error( - member[constants.MEMBER_ID]) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active( - listener[constants.LISTENER_ID]) - self.task_utils.mark_pool_prov_status_active(pool_id) + try: + self.task_utils.mark_member_prov_status_error( + member[constants.MEMBER_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + self.task_utils.mark_pool_prov_status_active(pool_id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer[constants.LOADBALANCER_ID]) @@ -172,13 +214,20 @@ def revert(self, members, listeners, loadbalancer, pool_id, *args, **kwargs): - for m in members: - self.task_utils.mark_member_prov_status_error( - m[constants.MEMBER_ID]) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active( - listener[constants.LISTENER_ID]) - self.task_utils.mark_pool_prov_status_active(pool_id) + try: + for m in members: + self.task_utils.mark_member_prov_status_error( + m[constants.MEMBER_ID]) + for listener in listeners: + 
self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + self.task_utils.mark_pool_prov_status_active(pool_id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer[constants.LOADBALANCER_ID]) @@ -190,9 +239,16 @@ pass def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_pool_prov_status_error(pool_id) + try: + self.task_utils.mark_pool_prov_status_error(pool_id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer[constants.LOADBALANCER_ID]) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active( - listener[constants.LISTENER_ID]) diff -Nru octavia-10.1.0/octavia/controller/worker/v2/tasks/network_tasks.py octavia-10.1.1/octavia/controller/worker/v2/tasks/network_tasks.py --- octavia-10.1.0/octavia/controller/worker/v2/tasks/network_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/controller/worker/v2/tasks/network_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -55,16 +55,10 @@ default_provides = constants.DELTA - # TODO(gthiemonge) ensure we no longer need vrrp_port def execute(self, loadbalancer, amphora, availability_zone): LOG.debug("Calculating network delta for amphora id: %s", amphora.get(constants.ID)) - vip_subnet_to_net_map = { - loadbalancer[constants.VIP_SUBNET_ID]: - loadbalancer[constants.VIP_NETWORK_ID] - } - # Figure out what networks we want # seed with lb network(s) if (availability_zone and @@ -77,12 +71,10 @@ db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) - desired_subnet_to_net_map = {} - for mgmt_net_id in management_nets: - for subnet_id in self.network_driver.get_network( - mgmt_net_id).subnets: - desired_subnet_to_net_map[subnet_id] = mgmt_net_id - desired_subnet_to_net_map.update(vip_subnet_to_net_map) + desired_subnet_to_net_map = { + loadbalancer[constants.VIP_SUBNET_ID]: + loadbalancer[constants.VIP_NETWORK_ID] + } for pool in db_lb.pools: for member in pool.members: @@ -101,7 +93,12 @@ nics = self.network_driver.get_plugged_networks( amphora[constants.COMPUTE_ID]) # we don't have two nics in the same network - network_to_nic_map = {nic.network_id: nic for nic in nics} + # Don't include the nics connected to the management network, we don't + # want to update these interfaces. 
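# Editor's note: with the management networks no longer seeded into the
# desired map (now built only from the VIP subnet plus member subnets) and
# filtered out of the plugged-NIC map below, the computed delta can only
# add or remove VIP/member interfaces; the amphora's management port is
# left untouched when member subnets change. Illustrative shape:
#     desired = {vip_subnet_id: vip_network_id, <member subnets...>}
#     actual  = {nic.network_id: nic for nic in nics
#                if nic.network_id not in management_nets}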
+ network_to_nic_map = { + nic.network_id: nic + for nic in nics + if nic.network_id not in management_nets} plugged_network_ids = set(network_to_nic_map) diff -Nru octavia-10.1.0/octavia/db/models.py octavia-10.1.1/octavia/db/models.py --- octavia-10.1.0/octavia/db/models.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/db/models.py 2024-02-08 14:48:31.000000000 +0000 @@ -234,7 +234,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==Member.id)' + primaryjoin='and_(foreign(Tags.resource_id)==Member.id)', + overlaps='_tags' ) def __str__(self): @@ -297,7 +298,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==HealthMonitor.id)' + primaryjoin='and_(foreign(Tags.resource_id)==HealthMonitor.id)', + overlaps='_tags' ) http_version = sa.Column(sa.Float, nullable=True) domain_name = sa.Column(sa.String(255), nullable=True) @@ -359,7 +361,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==Pool.id)' + primaryjoin='and_(foreign(Tags.resource_id)==Pool.id)', + overlaps='_tags' ) tls_certificate_id = sa.Column(sa.String(255), nullable=True) ca_tls_certificate_id = sa.Column(sa.String(255), nullable=True) @@ -436,7 +439,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==LoadBalancer.id)' + primaryjoin='and_(foreign(Tags.resource_id)==LoadBalancer.id)', + overlaps='_tags' ) flavor_id = sa.Column( sa.String(36), @@ -576,7 +580,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==Listener.id)' + primaryjoin='and_(foreign(Tags.resource_id)==Listener.id)', + overlaps='_tags' ) # This property should be a unique list of the default_pool and anything @@ -731,7 +736,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==L7Rule.id)' + primaryjoin='and_(foreign(Tags.resource_id)==L7Rule.id)', + overlaps='_tags' ) def __str__(self): @@ -796,7 +802,8 @@ single_parent=True, lazy='subquery', cascade='all,delete-orphan', - primaryjoin='and_(foreign(Tags.resource_id)==L7Policy.id)' + primaryjoin='and_(foreign(Tags.resource_id)==L7Policy.id)', + overlaps='_tags' ) def __str__(self): diff -Nru octavia-10.1.0/octavia/tests/common/sample_certs.py octavia-10.1.1/octavia/tests/common/sample_certs.py --- octavia-10.1.0/octavia/tests/common/sample_certs.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/common/sample_certs.py 2024-02-08 14:48:31.000000000 +0000 @@ -872,3 +872,118 @@ PftOKlYtE7T7Kw4CI9+O2H38IUOYjDt/c2twy954K4pKe4x9Ud8mImpS/oEzOsoz /Mn++bjO55LdaAUKQ3wa8LZ5WFB+Gs6b2kmBfzGarWEiX64= -----END X509 CRL-----""" + +# An invalid certificate due to no subject and no subjectAltName +NOCN_NOSUBALT_CRT = b"""-----BEGIN CERTIFICATE----- +MIIE4zCCAsugAwIBAgIUTo7POpWDLecy0B7fY2OAbLztmswwDQYJKoZIhvcNAQEL +BQAwADAgFw0yMzExMjIyMjE4MzBaGA8yMTIzMTAyOTIyMTgzMFowADCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAPClqkTqRyjlp+LXE4oElYGvg7y710yZ +pR96TNqgugXxNLmIgzx2A3wWJ77z6qn3XoTFEXNnT6f4WrVr1Eh5/Zd1ioyj1r0G +hIuEWMkm42UsTv+bId6BkXrr4wTgXgU+ss82dmRsYArV1b+c+89oYlEjQorhQ6eT +2aWnt1XJbtpgRYCy5DsBKg1Iq63QRXp5svEr4iX+jAiDCQnBBLhrkfMUf8zuMCev +Ij5119OGY5ihLuopIZi6OurA0fyN9e2MFlnYmWcxSZu49+6yBnXGmhmev3qzWj1+ +9DA50Pqu+NS9rVpYBNhhKuBTBxaTeZPDAl67DC2Mc8TFI1OfpiOwb+w/ewRYznry 
+ZceASFovPFsAlUddwu/94sxgUSCmSE81Op+VlXS0LRgg8o/OZHp/eFsG2NM0OGAH +v2uJly4OTPTd/kT50zViX3wJlRYIH+4szSjpbNXE0aF+cqQ56PBrGEe6j+SaGZEV +6k4N9WMHNipffkq10N2d6fkRQjAD9B7gHOB6AAQ1mxoZtgchCKL7E8FuA803Yx8B +a7h9J65SJq9nbr0z4eTscFZPulW8wMZT/ZeooQJJWqvA+g2FZf0dExk46gqU3F2F +IRMvfGzSbIQF7bp/Yj4fLMUwLVaYv6NNdzhI+/eC0wVDWwbQ2rZkkvcvysSteGT4 +IDuFKuIWt4UnAgMBAAGjUzBRMB0GA1UdDgQWBBSEDhho9+R5JhsAZlQ0wU4Rjbqn +OjAfBgNVHSMEGDAWgBSEDhho9+R5JhsAZlQ0wU4RjbqnOjAPBgNVHRMBAf8EBTAD +AQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAZ8E7l2H56z08yJiAa5DFmT8jmBHUCoJlM +HiZSn04mtzZEfho/21Zdnb2Pa2SDrRkVXmrO+DebO5sK1Kn/EFC9P3SAOeZ3LB+m +bJUX4WGEJ+7fv9uVRwSRfF21Lxo9QFgSVfQlQAhmXcKCE/8VtKB34oOZRhR8tAxH +I4VvHUPyCT8ZwNhofP2TYHEjRi/4fsXueBH4kBHDy0/pyHMy1b5crWQAjlOhFXhW ++qauSXkbIXNXd+wX23UF2uQ8YH819V7cHAidx9ikwn6HC5hxXjzMjViDwI451V6Q +eAgrVuKTgx6cdnd2mgra8k7Bd2S+uTxwcrzVVzNfF+D2Al43xgeFF02M8Wp6ZDsh +3/mJ7NOJGTJbXLRP+u73PEh1mGGU8H2QoGvaRO7R599sbmU4LedWX/VJc2GXojzF +ibPWaMkKtX31QiOeNiLTMSkUWiyDTvzFW2ErqyzARv/yYFcEixEFl1GV8Bqb+ujj +cxO5/y9cK6aM+qPb/FrXivXQsNArrpE3T1C54RvhUWOi+kyCiV/mDIG+oOp7sfZ5 +tBPenwWB2/LGS4rS67jZdwyIC5UbVySaVxtqJrdQXTRNjGfj2m963CHbiaQLSoSF +2Zh2e8W4ixo6k6mhih2YjZVtpHrXyzNEtHT9HpPHDeElVcWteIceZMI2Ah0C6Ggj +uTbEBYW85Q== +-----END CERTIFICATE-----""" + +# A certificate with no subject but with Subject Alternative Name +NOCN_SUBALT_CRT = b"""-----BEGIN CERTIFICATE----- +MIIFAjCCAuqgAwIBAgIUNjJqSdaJ9FsivfRHbXpdmcZgJR4wDQYJKoZIhvcNAQEL +BQAwADAgFw0yMzExMzAyMTQyNTVaGA8yMTIzMTEwNjIxNDI1NVowADCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAKA8+0iJzx51kTufmIpxGCM/KUFWdJ0U +MmOPN1NmySNaj6nGI/Ix6m13A5SaezhbRlJvEwN7Hqg+tl+fqu0RgtQOXfBDMiJm ++kAl0CQiOH7XU41P6fyk/QL8WF3VVGBtawTWn3x9Jw7Itd/zFr+aepQOj5LIwcx1 +ncHXreWdMLqDa7PpW1Ru6BW0FKVxX6WYQr2PI08nEIxu6DzLcaLHktRyNYg7r9X9 +a0tLZcp5MCBG3h3EtVgUkL9qw8q6acJpDGBF7ssRTNDf3QUSg0jrfzkD9WJCi631 +tefdAkDNIZXGZggbWsDGPseX4JG9p7WGzPx5QY2DkMqDJqi6FoS35tT+WNcY0n9V +oBQXtXFV/AqOC070NwrhxsNA3cBbpRqEQYJsIDaXq0cmFR4aoDWk4OXqs7I+dpyi +MFeRHEU7h4DpwzaOmOyaSmzsZqEMG2lsdJZmC+fIFkyKtP0BQv/movWY25oJSpF5 +4Q/PdwKn6PFO2bRVSLStlrhpuqXw2+CzlQT6YCAz+ajqDnn/w8NIrT6y+DiFd+kt +WCed/o4ZBzsxOexRph+t0bdkTmR8PNpnHwcxzVN33gCSc6Q5DW1/M2V8VGYqnPd/ +taEaMlHm/wQ3y2/aH/tkyq85PM5tqCbUscD4TUZ7R6kb0k83Ak2iZOM5RHb4zc4p +mreNKLPfgrQ7AgMBAAGjcjBwMB0GA1UdDgQWBBT6/yXwr+5BhORB3cUkrrSgnreq +NTAfBgNVHSMEGDAWgBT6/yXwr+5BhORB3cUkrrSgnreqNTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdEQEB/wQTMBGCD3d3dy5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsF +AAOCAgEAjxrBZ3v6wK7oZWvrzFV+aCs+KkoUkK0Y61TM4SCbWIT8oinN68nweha5 +p48Jp+hSBHEsj9h0opHezihduKh5IVM7KtbcXn1GSeN2hmyAAPm/MbxyD+l+UEfB +G/behQcsYdVXXog7nwD2NXINvra8KGPqA7n/BnQ7RsxBXHVa9+IHF2L4LpbcvG7G +Ci/jmLSBk7Gi/75TsFphHAhfomovfnnNykfJ0u99ew14MxVmRWbZ+rbpMsUL/AhV +h8VujkfUs1hFbdxePTVyHwplqH65yjzzQ18q8CX7kMGi9sz2k8xJS04Nz0x1l7xQ +JDuhFMDDrcyb7vAqG7BHQ9zXWJ3IkTg9WrbfkOyTqQsJeInToWQybmr/7lY3PmC2 +e/X0zNABF+ypX29RrKzWL+KfpbslysZIEPLEW28qAh3KOyml1du+lbDSNtcHxQcT +bnvz2rQlAYE70Ds3znLLuMXbq8GtS+h8EYH1jxcjZD9DAPhxi37v8QSY/ABIBGE2 +lfbhbzZ5OWQLMA0L1tbTg7bG5JGoi/GmPl4oA+Dbz3+8Yd/v8XJUzQgI221tx+T+ +isog5o96m62pW6hd1R+eZjVAOVMT/OxecJ9eIVva8EiZwu1Ja9arBkuhIBVK2htm +PVi6J1iFUrPZG+QrK/ZePo4xE06Lm31dr8pxdZ7Y860owwIuHfA= +-----END CERTIFICATE-----""" + +NOCN_SUBALT_KEY = b"""-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCgPPtIic8edZE7 +n5iKcRgjPylBVnSdFDJjjzdTZskjWo+pxiPyMeptdwOUmns4W0ZSbxMDex6oPrZf +n6rtEYLUDl3wQzIiZvpAJdAkIjh+11ONT+n8pP0C/Fhd1VRgbWsE1p98fScOyLXf +8xa/mnqUDo+SyMHMdZ3B163lnTC6g2uz6VtUbugVtBSlcV+lmEK9jyNPJxCMbug8 +y3Gix5LUcjWIO6/V/WtLS2XKeTAgRt4dxLVYFJC/asPKumnCaQxgRe7LEUzQ390F 
+EoNI6385A/ViQout9bXn3QJAzSGVxmYIG1rAxj7Hl+CRvae1hsz8eUGNg5DKgyao +uhaEt+bU/ljXGNJ/VaAUF7VxVfwKjgtO9DcK4cbDQN3AW6UahEGCbCA2l6tHJhUe +GqA1pODl6rOyPnacojBXkRxFO4eA6cM2jpjsmkps7GahDBtpbHSWZgvnyBZMirT9 +AUL/5qL1mNuaCUqReeEPz3cCp+jxTtm0VUi0rZa4abql8Nvgs5UE+mAgM/mo6g55 +/8PDSK0+svg4hXfpLVgnnf6OGQc7MTnsUaYfrdG3ZE5kfDzaZx8HMc1Td94AknOk +OQ1tfzNlfFRmKpz3f7WhGjJR5v8EN8tv2h/7ZMqvOTzObagm1LHA+E1Ge0epG9JP +NwJNomTjOUR2+M3OKZq3jSiz34K0OwIDAQABAoICABC+7r/g7w1O2hOyFR36vbwJ +QMV8RImZ774p3G1R45lXQIZMl7sa7lXsRyqDjncQSuQYiZMmjcilbSfHJvTJjLOe +oMCYNSgVPPfxO7RbAy52UFwHSvvFPk/OkWmU/tFo/fMuftJive80mJVD8U+q1D6e +2vBLHL3CWO9GG/1QFSSY0Wum6o2DXavO+w1jMMy8gdUPnXALNBaJDKo11LVfR//9 +w4xuOG0To9/ljEjBq37kCRhxU0ZWN95ZSQbpvl273rg89rywHSgDDTUXfzLisZQC +zuUq8TAH6q/FkBO3nFfruQQF39EfprXzMFvqxxkYclm8TlZ8tmgDlsmxUOMj2PKl +H9kWDC5YkynfkxltKgiEJ9Kc3pZnfaScABnz0GySsZN71bUbr7fBqwH0LhbZiQqa +b9pWcbyKuGFJ56gVsokVHcpKnKmKHedtmL33oJzI3iWYZls/mPejmkwIWt1i3F7c +ZnhDJJp3gWgzZzSyV5OjZ05SIrM9er9r+WqS75ns7vKEzhgzpHdZuUR2jNNVu/EA +rCnsebUtemr0tDYxhI5BcPgj3fzq02u7plJUFIwlPrpMxZ8VBJgoSwT7Di5qpHnt +LmiGoqRM+vVXiWshops1I7q7zLCgvP+Difi4KNjap/lBsj7hiB7alZTrMVVAXiBr +Ia++3L38ga5DJ+SHDzjBAoIBAQDNUG4URQD/j0E3pS4zn4wezSp0wOTKKIw2Z6oU +02reZq9uFLIt+/74DVy3NZm3tBgeSakYUZeDB8zpog3mGpkPAHpwObB/fPbMYmst +cCnXYDf9Uvb7k287a0GIbCOXwkHSrgRwznAZ4EQp6E0nZSoLbyZiC+uhYEVZgQQo +JswsjKCSaL7o/4XXQOi6Mdsd4BX7aVVKjYrQZ8TkkCsMYFdQMSL1fB8DW4Q+Ixco +6BGXPoaav/2XOb0HGBmrXX/yqllA8rw0U7RNLgsE7gZIlltGeTsQMeo/+w5+LJKt +HOhhEUHITJkRZ7P/S8OdXXoVCNiUzCxGy/LrHW/AWu0t1WWbAoIBAQDHy9Allaod +WDxdbe5G5ke03WFcPoVAxOWu0mloaFdbd7Ec39y4vr1hxRZz+SEUdouCie1nVB3P +sj2lPJ44qKS8triqNCuEalpMHaTBdIyjItqh1l66fLA1/FYxAM7cxcz5rBVK2zvf +KrT3LNmzVpbltl3nPQhvAKEV8zEdSVze6Z0K6QbZP8WfPtCiQYMAjeNu48AIp/+t +pxJbkcmWLIYixfiJbHfe0LUu/P3rk0WDCHnheVzOTSE8XzGqnIxyv6w4rYOl9IeT +SnYublICJHOTp6gKuiIieGD7TC14DB8vYbSc0+opIvYYItcS//laLOD+eLUgZx5K +Wb4ubbosnyXhAoIBAFGzQsqgFuCbQemBupviTmDnZZCmPaTQc9Mmd0DoTGuJ0x9r +7udrkq9kqdNh6fR3Hu3WhApgVXlXvkvuJ7e8N9IHb7F+02Q39wGn3FxteMjyyfTt +ccj0h1vOt3oxBgzayVSr2KqHC4bQfm9quGEH2a5JIa38blx+MbqHI39SyQalQzRf +qDCRldHtS27kbfw6cqTj6oPLRUTfNjN5xxeassP/eZjUNocggMQ1NH8bsfxMbkXg +RmpKGJVdGsHdaA/Jh9DXhtsPv/zCaLIiga+a3WFy1nUAV+Xz4nWFCS0IBtSxiErL +aFHLwY3CuWnCi9UY+w5jHO9jMxwqT5Ds3drSQycCggEBALoewFEy4d0iRGGYtb6w +aJ4xGLBwwXt7sKcx9eXARZi8oG5QkHI9pXg9vFPfAZTpdb7uNAzszDSeS1TxakdH +uubdpJtRrDRXSrTbbI6Wvyh9oIPgijBZVWGFJtnRceMyFGeFifRI1LZpN1mHG2o4 +QKvPPhzau0+Em4syGE+69tvlblkqiSm6gaN+RabRNnM+ul6jpVGrBsBDAhPxdIQE +CBS+rW9/bw9PB2m1XemlML0HGVsUzoKUUWDHISJZYXDH42yNHzVq3R014XARby31 +vQEQzrbnfEL2NwoChdzuFeLytujddKZLnksPsaFOeYAqjJIh6kE8Lnh+r27a4vMM +cqECggEAAx1DVI43AMBfSbAs5C41vjRdjMrZtxfKIpFjj1whGj/JzLKdMdqqH+Ai ++R6NI7IB88pGHlCOmdEpfbr4Cq1ZnizA3yLV9sluMz1bpHlIDsCIp+1VkQYKfsEv +upZy82MtfGtG3BSLn+GCTzLJcTN6KINg98Xivp/WsRAEvwT/w1o4iJMgzKmTET2I +UGJfZcF0WeSVo34FNArfXyfXPvPV7mi08Z6fQuUnFvH9tGZs5Y9mUUSgXXEDSjKY +ZHliqmDNGub7rMy6/0wDOWiS4pi/w8FeCyBvbx23rj6i+FLO6GK+5B7TaCxjOVbk +SYVTfCHpvJIgjRkRMP2yZCk3g6T4XA== +-----END PRIVATE KEY-----""" diff -Nru octavia-10.1.0/octavia/tests/functional/api/test_healthcheck.py octavia-10.1.1/octavia/tests/functional/api/test_healthcheck.py --- octavia-10.1.0/octavia/tests/functional/api/test_healthcheck.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/test_healthcheck.py 2024-02-08 14:48:31.000000000 +0000 @@ -118,6 +118,20 @@ self.assertEqual(200, response.status_code) self.assertEqual('OK', response.text) + def test_healthcheck_get_text_plain(self): + self.conf.config(group='healthcheck', detailed=False) + response = 
self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/plain'}) + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + + def test_healthcheck_get_text_plain_detailed(self): + self.conf.config(group='healthcheck', detailed=True) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/plain'}) + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + def test_healthcheck_get_json(self): self.conf.config(group='healthcheck', detailed=False) response = self._get(self._get_enabled_app(), '/healthcheck', diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_amphora.py octavia-10.1.1/octavia/tests/functional/api/v2/test_amphora.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_amphora.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_amphora.py 2024-02-08 14:48:31.000000000 +0000 @@ -173,7 +173,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -214,7 +215,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): self.delete(self.AMPHORA_PATH.format(amphora_id=amp.id), status=403) @@ -247,7 +249,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -276,7 +279,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.AMPHORA_PATH.format( amphora_id=self.amp_id), status=403) @@ -289,7 +293,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -320,7 +325,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, 
+ 'project_id', uuidutils.generate_uuid()): response = self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=self.amp_id), body={}, status=403) @@ -349,7 +355,8 @@ def test_get_all_authorized(self): auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): override_credentials = { 'service_user_id': None, @@ -378,7 +385,8 @@ def test_get_all_not_authorized(self): auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): amps = self.get(self.AMPHORAE_PATH, status=403).json @@ -511,7 +519,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -539,7 +548,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.AMPHORA_STATS_PATH.format( amphora_id=self.amp_id), status=403) @@ -615,7 +625,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -647,7 +658,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): self.put(self.AMPHORA_CONFIG_PATH.format( amphora_id=self.amp_id), body={}, status=403) diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_availability_zone_profiles.py octavia-10.1.1/octavia/tests/functional/api/v2/test_availability_zone_profiles.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_availability_zone_profiles.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_availability_zone_profiles.py 2024-02-08 14:48:31.000000000 +0000 @@ -110,7 +110,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with 
mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -203,7 +204,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -287,7 +289,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -405,7 +408,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -532,7 +536,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_availability_zones.py octavia-10.1.1/octavia/tests/functional/api/v2/test_availability_zones.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_availability_zones.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_availability_zones.py 2024-02-08 14:48:31.000000000 +0000 @@ -118,7 +118,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -204,7 +205,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -288,7 +290,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -412,7 +415,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) 
project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -511,7 +515,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_flavor_profiles.py octavia-10.1.1/octavia/tests/functional/api/v2/test_flavor_profiles.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_flavor_profiles.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_flavor_profiles.py 2024-02-08 14:48:31.000000000 +0000 @@ -109,7 +109,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -201,7 +202,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -285,7 +287,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -402,7 +405,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -525,7 +529,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_flavors.py octavia-10.1.1/octavia/tests/functional/api/v2/test_flavors.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_flavors.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_flavors.py 2024-02-08 14:48:31.000000000 +0000 @@ -117,7 +117,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', 
auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -204,7 +205,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -297,7 +299,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -426,7 +429,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -529,7 +533,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_health_monitor.py octavia-10.1.1/octavia/tests/functional/api/v2/test_health_monitor.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_health_monitor.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_health_monitor.py 2024-02-08 14:48:31.000000000 +0000 @@ -128,7 +128,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -167,7 +168,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id')), status=403) @@ -207,7 +209,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): hms 
= self.get(self.HMS_PATH, status=403).json @@ -281,7 +284,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', hm3['project_id']): override_credentials = { 'service_user_id': None, @@ -336,7 +340,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -392,7 +397,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', hm3['project_id']): override_credentials = { 'service_user_id': None, @@ -1243,7 +1249,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1291,7 +1298,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, @@ -1697,7 +1705,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1738,7 +1747,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), @@ -2028,7 +2038,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -2073,7 +2084,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with 
mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): self.delete( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_l7policy.py octavia-10.1.1/octavia/tests/functional/api/v2/test_l7policy.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_l7policy.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_l7policy.py 2024-02-08 14:48:31.000000000 +0000 @@ -67,7 +67,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -99,7 +100,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.L7POLICY_PATH.format( l7policy_id=api_l7policy.get('id')), status=403) @@ -198,7 +200,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', api_l7p_c.get('project_id')): override_credentials = { 'service_user_id': None, @@ -255,7 +258,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -307,7 +311,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', api_l7p_c.get('project_id')): override_credentials = { 'service_user_id': None, @@ -346,7 +351,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): policies = self.get(self.L7POLICIES_PATH, status=403).json @@ -670,7 +676,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -712,7 +719,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', 
auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): api_l7policy = self.create_l7policy( self.listener_id, @@ -902,7 +910,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -946,7 +955,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): response = self.put(self.L7POLICY_PATH.format( l7policy_id=api_l7policy.get('id')), @@ -1146,7 +1156,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1194,7 +1205,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): self.delete(self.L7POLICY_PATH.format( l7policy_id=api_l7policy.get('id')), status=403) diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_l7rule.py octavia-10.1.1/octavia/tests/functional/api/v2/test_l7rule.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_l7rule.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_l7rule.py 2024-02-08 14:48:31.000000000 +0000 @@ -67,7 +67,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -98,7 +99,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): response = self.get(self.l7rule_path.format( l7rule_id=l7rule.get('id')), status=403).json @@ -164,7 +166,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with 
mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -208,7 +211,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -244,7 +248,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): rules = self.get(self.l7rules_path, status=403) @@ -528,7 +533,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -572,7 +578,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, @@ -906,7 +913,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -948,7 +956,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): response = self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), @@ -1108,7 +1117,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1158,7 +1168,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 
'project_id', self.project_id): self.delete( self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_listener.py octavia-10.1.1/octavia/tests/functional/api/v2/test_listener.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_listener.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_listener.py 2024-02-08 14:48:31.000000000 +0000 @@ -103,7 +103,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', listener3['project_id']): override_credentials = { 'service_user_id': None, @@ -150,7 +151,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -191,7 +193,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -242,7 +245,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): listeners = self.get(self.LISTENERS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ -545,7 +549,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -578,7 +583,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.listener_path.format( listener_id=listener['id']), status=403) @@ -962,7 +968,8 @@ self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1017,7 +1024,8 @@ self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.post(self.LISTENERS_PATH, body, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ 
-1036,6 +1044,51 @@ self.assertEqual(constants.CLIENT_AUTH_NONE, listener_api.get('client_authentication')) + def test_create_tls_with_no_subject_no_alt_names(self): + tls_cert_mock = mock.MagicMock() + tls_cert_mock.get_certificate.return_value = ( + sample_certs.NOCN_NOSUBALT_CRT) + self.cert_manager_mock().get_cert.return_value = tls_cert_mock + + lb_listener = {'name': 'listener1-no-subject-no-alt-names', + 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'tags': ['test_tag']} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400) + self.assertIn("No CN or DNSName", response) + + def test_create_tls_with_no_subject_with_alt_names(self): + tls_cert_mock = mock.MagicMock() + tls_cert_mock.get_certificate.return_value = ( + sample_certs.NOCN_SUBALT_CRT) + tls_cert_mock.get_private_key.return_value = ( + sample_certs.NOCN_SUBALT_KEY) + tls_cert_mock.get_private_key_passphrase.return_value = None + self.cert_manager_mock().get_cert.return_value = tls_cert_mock + + lb_listener = {'name': 'listener1-no-subject', + 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'tags': ['test_tag']} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=201) + self.assertIn("PENDING_CREATE", response) + def test_create_with_ca_cert_and_option(self): self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) @@ -2016,7 +2069,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -2068,7 +2122,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): api_listener = self.put(listener_path, body, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ -2175,7 +2230,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -2224,7 +2280,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 
'project_id', uuidutils.generate_uuid()): self.delete(listener_path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ -2830,7 +2887,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -2877,7 +2935,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): res = self.get(self.LISTENER_PATH.format( listener_id=li['id'] + "/stats"), status=403) diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_load_balancer.py octavia-10.1.1/octavia/tests/functional/api/v2/test_load_balancer.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_load_balancer.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_load_balancer.py 2024-02-08 14:48:31.000000000 +0000 @@ -866,7 +866,8 @@ } lb_json.update(optionals) body = self._build_body(lb_json) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) @@ -883,7 +884,8 @@ } lb_json.update(optionals) body = self._build_body(lb_json) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -916,7 +918,8 @@ } lb_json.update(optionals) body = self._build_body(lb_json) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.post(self.LBS_PATH, body, status=403) api_lb = response.json @@ -1193,7 +1196,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1232,7 +1236,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -1269,7 +1274,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1309,7 +1315,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') 
self.conf.config(group='api_settings', auth_strategy=constants.TESTING) LB_PROJECT_PATH = '{}?project_id={}'.format(self.LBS_PATH, project_id) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): response = self.get(LB_PROJECT_PATH, status=403) api_lb = response.json @@ -1769,7 +1776,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -1826,7 +1834,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=403) @@ -1965,7 +1974,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -2011,7 +2021,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=403) @@ -2149,7 +2160,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -2192,7 +2204,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=403) @@ -2317,7 +2330,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.app.put(path, status=403) 
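All of the hunks above apply one mechanical change: the functional API tests now patch project_id on octavia.common.context.RequestContext, the renamed successor of the old Context class. Forcing a known project_id onto every context created inside the with block is what lets the RBAC assertions (status=403 versus a success code) distinguish same-project from foreign-project callers. A minimal self-contained sketch of the same technique, using a hypothetical stand-in class rather than Octavia's real context:

    from unittest import mock

    from oslo_utils import uuidutils


    class FakeRequestContext:
        # Stand-in for octavia.common.context.RequestContext; the real class
        # normally derives project_id from the authenticated request.
        project_id = None


    project_id = uuidutils.generate_uuid()

    # Patching the attribute on the class (not on one instance) means every
    # context object created while the patch is active reports this value.
    with mock.patch.object(FakeRequestContext, 'project_id', project_id):
        assert FakeRequestContext().project_id == project_id

Because the patch targets the class attribute, contexts constructed deep inside the API layer pick up the value too, so the tests never need to thread the project ID through the WSGI plumbing by hand.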
self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ -2338,7 +2352,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): override_credentials = { 'service_user_id': None, @@ -2376,7 +2391,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id_2): override_credentials = { 'service_user_id': None, @@ -2413,7 +2429,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id_2): override_credentials = { 'service_user_id': None, @@ -3788,7 +3805,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -3822,7 +3840,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): res = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/status"), @@ -3889,7 +3908,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -3935,7 +3955,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): res = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/stats"), status=403) diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_member.py octavia-10.1.1/octavia/tests/functional/api/v2/test_member.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_member.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_member.py 2024-02-08 14:48:31.000000000 +0000 @@ -80,7 +80,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') 
self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -110,7 +111,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.member_path.format( member_id=api_member.get('id')), status=403).json @@ -183,7 +185,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -229,7 +232,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -268,7 +272,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.members_path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ -515,7 +520,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -563,7 +569,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): api_member = self.create_member( self.pool_id, '192.0.2.1', 80, status=403) @@ -1130,7 +1137,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1183,7 +1191,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', 
auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): member_path = self.member_path_listener.format( member_id=api_member.get('id')) @@ -1310,7 +1319,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1359,7 +1369,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): self.delete(self.member_path_listener.format( member_id=api_member.get('id')), status=403) diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_pool.py octavia-10.1.1/octavia/tests/functional/api/v2/test_pool.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_pool.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_pool.py 2024-02-08 14:48:31.000000000 +0000 @@ -96,7 +96,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -135,7 +136,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id')), status=403) @@ -236,7 +238,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', pool3['project_id']): override_credentials = { 'service_user_id': None, @@ -284,7 +287,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', None): override_credentials = { 'service_user_id': None, @@ -327,7 +331,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 
'service_user_id': None, @@ -378,7 +383,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): pools = self.get(self.POOLS_PATH, status=403).json @@ -727,7 +733,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -776,7 +783,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): api_pool = self.create_pool( self.lb_id, @@ -1257,7 +1265,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -1309,7 +1318,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): api_pool = self.put( self.POOL_PATH.format(pool_id=api_pool.get('id')), @@ -2083,7 +2093,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -2131,7 +2142,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=403) diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_provider.py octavia-10.1.1/octavia/tests/functional/api/v2/test_provider.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_provider.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_provider.py 2024-02-08 14:48:31.000000000 +0000 @@ -102,7 +102,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with 
mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, @@ -226,7 +227,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id): override_credentials = { 'service_user_id': None, diff -Nru octavia-10.1.0/octavia/tests/functional/api/v2/test_quotas.py octavia-10.1.1/octavia/tests/functional/api/v2/test_quotas.py --- octavia-10.1.0/octavia/tests/functional/api/v2/test_quotas.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/functional/api/v2/test_quotas.py 2024-02-08 14:48:31.000000000 +0000 @@ -168,7 +168,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.QUOTAS_PATH, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) @@ -185,7 +186,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project_id1): override_credentials = { 'service_user_id': None, @@ -225,7 +227,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): override_credentials = { 'service_user_id': None, @@ -286,7 +289,8 @@ ).get(self.root_tag) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -332,7 +336,8 @@ ).get(self.root_tag) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -380,7 +385,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project3_id): override_credentials = { 'service_user_id': None, @@ -425,7 +431,8 @@ auth_strategy = 
self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project3_id): override_credentials = { 'service_user_id': None, @@ -493,7 +500,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project1_id): override_credentials = { 'service_user_id': None, @@ -526,7 +534,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): quotas = self.get(self.QUOTA_PATH.format(project_id=project1_id), status=403) @@ -542,7 +551,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project1_id): override_credentials = { 'service_user_id': None, @@ -575,7 +585,8 @@ auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', project1_id): override_credentials = { 'service_user_id': None, @@ -684,7 +695,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -712,7 +724,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', uuidutils.generate_uuid()): response = self.get(self.QUOTA_DEFAULT_PATH.format( project_id=self.project_id), status=403) @@ -737,7 +750,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -769,7 +783,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with 
mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -847,7 +862,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, @@ -883,7 +899,8 @@ self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) - with mock.patch.object(octavia.common.context.Context, 'project_id', + with mock.patch.object(octavia.common.context.RequestContext, + 'project_id', self.project_id): override_credentials = { 'service_user_id': None, diff -Nru octavia-10.1.0/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py octavia-10.1.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py --- octavia-10.1.0/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py 2024-02-08 14:48:31.000000000 +0000 @@ -38,6 +38,7 @@ FAKE_LISTENER_ID_3 = uuidutils.generate_uuid() FAKE_LISTENER_ID_4 = uuidutils.generate_uuid() LB_ID_1 = uuidutils.generate_uuid() + LB_ID_2 = uuidutils.generate_uuid() def setUp(self): super().setUp() @@ -112,6 +113,8 @@ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_listeners', return_value=[FAKE_LISTENER_ID_1, FAKE_LISTENER_ID_2]) + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_loadbalancers', return_value=[LB_ID_1, LB_ID_2]) @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_meminfo') @mock.patch('octavia.amphorae.backends.agent.api_server.' @@ -128,7 +131,8 @@ @mock.patch('socket.gethostname', return_value='FAKE_HOST') def test_compile_amphora_details(self, mhostname, m_count, m_pkg_version, m_load, m_get_nets, m_os, m_cpu, - mget_mem, mget_listener): + mget_mem, mget_loadbalancers, + mget_listeners): mget_mem.return_value = {'SwapCached': 0, 'Buffers': 344792, 'MemTotal': 21692784, 'Cached': 4271856, 'Slab': 534384, 'MemFree': 12685624, @@ -180,6 +184,7 @@ u'topology_status': u'OK'} actual = self.amp_info.compile_amphora_details() self.assertEqual(expected_dict, actual.json) + m_count.assert_called_once_with(sorted(mget_loadbalancers())) api_server.VERSION = original_version @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
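The test_amphora_info.py hunk above ties the haproxy count to the load balancers actually configured on the amphora: m_count is now asserted to be called with the sorted result of get_loadbalancers, so haproxy_count reflects one entry per running haproxy process rather than per listener (with combined listeners, several listeners share a single process). As an illustration only, counting live haproxy instances from per-load-balancer PID files could look like the sketch below; the directory layout and helper name are assumptions, not Octavia's actual code:

    import os

    PID_DIR = '/var/lib/octavia'  # assumed layout: <PID_DIR>/<lb_id>/haproxy.pid


    def count_running_haproxy(lb_ids):
        """Return how many of the given load balancers have a live haproxy."""
        running = 0
        for lb_id in sorted(lb_ids):
            pid_path = os.path.join(PID_DIR, str(lb_id), 'haproxy.pid')
            try:
                with open(pid_path) as f:
                    pid = int(f.read().strip())
                os.kill(pid, 0)  # signal 0 checks existence without killing
            except (OSError, ValueError):
                continue  # no PID file, unparsable content, or dead process
            running += 1
        return running

Keying the count on load-balancer IDs instead of listener IDs is what makes the reported number match the process table.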
diff -Nru octavia-10.1.0/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py octavia-10.1.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py --- octavia-10.1.0/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py 2024-02-08 14:48:31.000000000 +0000 @@ -106,7 +106,7 @@ '192.0.2.2', 16) mock_interface_file.assert_called_once_with( - name='eth1', + name='eth1', if_type="lo", addresses=[{"address": "192.0.2.2", "prefixlen": 16}]) mock_interface.write.assert_called_once() diff -Nru octavia-10.1.0/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py octavia-10.1.1/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py --- octavia-10.1.0/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py 2024-02-08 14:48:31.000000000 +0000 @@ -319,6 +319,14 @@ stats_query_mock.show_stat.assert_called_once_with() stats_query_mock.get_pool_status.assert_called_once_with() + @mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery') + def test_get_stats_exception(self, mock_query): + mock_query.side_effect = Exception('Boom') + + stats, pool_status = health_daemon.get_stats('TEST') + self.assertEqual([], stats) + self.assertEqual({}, pool_status) + @mock.patch('octavia.amphorae.backends.agent.api_server.' 'util.is_lb_running') @mock.patch('octavia.amphorae.backends.health_daemon.' diff -Nru octavia-10.1.0/octavia/tests/unit/amphorae/backends/utils/test_interface.py octavia-10.1.1/octavia/tests/unit/amphorae/backends/utils/test_interface.py --- octavia-10.1.0/octavia/tests/unit/amphorae/backends/utils/test_interface.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/backends/utils/test_interface.py 2024-02-08 14:48:31.000000000 +0000 @@ -74,6 +74,7 @@ '],\n' '"mtu": 1450,\n' '"name": "eth1",\n' + '"if_type": "mytype",\n' '"routes": [\n' '{"dst": "0.0.0.0/0",\n' '"gateway": "10.0.0.1"},\n' @@ -107,6 +108,7 @@ expected_dict = { consts.NAME: "eth1", + consts.IF_TYPE: "mytype", consts.MTU: 1450, consts.ADDRESSES: [{ consts.ADDRESS: "10.0.0.2", @@ -331,6 +333,7 @@ mock_link, mock_addr, mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.ADDRESS: '1.2.3.4', @@ -450,6 +453,78 @@ @mock.patch('pyroute2.IPRoute.addr') @mock.patch('pyroute2.IPRoute.link') @mock.patch('pyroute2.IPRoute.get_links') + @mock.patch('pyroute2.IPRoute.link_lookup') + @mock.patch('pyroute2.IPRoute.get_rules') + @mock.patch('subprocess.check_output') + def test_up_backend(self, mock_check_output, mock_get_rules, + mock_link_lookup, mock_get_links, mock_link, mock_addr, + mock_route, mock_rule): + iface = interface_file.InterfaceFile( + name="eth1", + if_type="backend", + mtu=1450, + addresses=[{ + consts.ADDRESS: '1.2.3.4', + consts.PREFIXLEN: 24 + }], + routes=[], + rules=[], + scripts={ + consts.IFACE_UP: [{ + consts.COMMAND: "post-up eth1" + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: "post-down eth1" + }], + }) + + idx = mock.MagicMock() + mock_link_lookup.return_value = [idx] + + mock_get_links.return_value = [{ + consts.STATE: consts.IFACE_DOWN + }] + mock_get_rules.return_value = [{ + 'src_len': 32, + 'attrs': { + 'FRA_SRC': '1.1.1.1', + 'FRA_TABLE': 20, + 
'FRA_PROTOCOL': 0 + } + }] + + controller = interface.InterfaceController() + controller.up(iface) + + mock_link.assert_called_once_with( + controller.SET, + index=idx, + state=consts.IFACE_UP, + mtu=1450) + + mock_addr.assert_has_calls([ + mock.call(controller.ADD, + index=idx, + address='1.2.3.4', + prefixlen=24, + family=socket.AF_INET), + ]) + + mock_route.assert_called_once_with( + 'dump', family=mock.ANY, match=mock.ANY) + + # for 'backend' iface, we don't update the rules + mock_rule.assert_not_called() + + mock_check_output.assert_has_calls([ + mock.call(["post-up", "eth1"]) + ]) + + @mock.patch('pyroute2.IPRoute.rule') + @mock.patch('pyroute2.IPRoute.route') + @mock.patch('pyroute2.IPRoute.addr') + @mock.patch('pyroute2.IPRoute.link') + @mock.patch('pyroute2.IPRoute.get_links') @mock.patch('pyroute2.IPRoute.get_rules') @mock.patch('pyroute2.IPRoute.get_routes') @mock.patch('pyroute2.IPRoute.get_addr') @@ -463,6 +538,7 @@ mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.ADDRESS: '1.2.3.4', @@ -658,6 +734,7 @@ mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.DHCP: True, @@ -722,6 +799,7 @@ mock_link, mock_addr, mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.ADDRESS: '1.2.3.4', @@ -848,6 +926,7 @@ mock_addr, mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.ADDRESS: '1.2.3.4', @@ -997,6 +1076,7 @@ mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.ADDRESS: '1.2.3.4', @@ -1069,6 +1149,7 @@ mock_addr, mock_route, mock_rule): iface = interface_file.InterfaceFile( name="eth1", + if_type="vip", mtu=1450, addresses=[{ consts.DHCP: True, diff -Nru octavia-10.1.0/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py octavia-10.1.1/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py --- octavia-10.1.0/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py 2024-02-08 14:48:31.000000000 +0000 @@ -662,6 +662,7 @@ '"prefixlen": 26}\n' '],\n' '"mtu": 1450,\n' + '"if_type": "mytype",\n' '"name": "eth1",\n' '"routes": [\n' '{"dst": "0.0.0.0/0",\n' @@ -688,6 +689,7 @@ expected_dict = { consts.NAME: "eth1", + consts.IF_TYPE: "mytype", consts.MTU: 1450, consts.ADDRESSES: [{ consts.ADDRESS: "10.0.0.181", diff -Nru octavia-10.1.0/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py octavia-10.1.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py --- octavia-10.1.0/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py 2024-02-08 14:48:31.000000000 +0000 @@ -75,9 +75,9 @@ mock_api_version.reset_mock() client_mock.reset_mock() - result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) - - self.assertIsNone(result) + self.assertRaises( + exc.NotFound, + self.driver.get_interface_from_ip, amphora_mock, IP_ADDRESS) mock_api_version.assert_called_once_with(amphora_mock, None) client_mock.get_interface.assert_called_once_with( amphora_mock, IP_ADDRESS, None, log_error=False) diff -Nru 
octavia-10.1.0/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py octavia-10.1.1/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py --- octavia-10.1.0/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py 2024-02-08 14:48:31.000000000 +0000 @@ -100,6 +100,9 @@ self.keepalived_mixin.start_vrrp_service(self.amphora_mock) + populate_mock = self.keepalived_mixin._populate_amphora_api_version + populate_mock.assert_called_once_with(self.amphora_mock, + timeout_dict=None) self.clients[API_VERSION].start_vrrp.assert_called_once_with( self.amphora_mock, timeout_dict=None) @@ -114,6 +117,20 @@ self.clients[API_VERSION].start_vrrp.assert_not_called() + # With timeout_dict + self.clients[API_VERSION].start_vrrp.reset_mock() + populate_mock.reset_mock() + + timeout_dict = mock.Mock() + self.keepalived_mixin.start_vrrp_service(self.amphora_mock, + timeout_dict=timeout_dict) + + populate_mock = self.keepalived_mixin._populate_amphora_api_version + populate_mock.assert_called_once_with(self.amphora_mock, + timeout_dict=timeout_dict) + self.clients[API_VERSION].start_vrrp.assert_called_once_with( + self.amphora_mock, timeout_dict=timeout_dict) + def test_reload_vrrp_service(self): self.keepalived_mixin.reload_vrrp_service(self.lb_mock) diff -Nru octavia-10.1.0/octavia/tests/unit/certificates/manager/test_noop.py octavia-10.1.1/octavia/tests/unit/certificates/manager/test_noop.py --- octavia-10.1.0/octavia/tests/unit/certificates/manager/test_noop.py 1970-01-01 00:00:00.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/certificates/manager/test_noop.py 2024-02-08 14:48:31.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright 2023 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
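[Editor's note: the brand-new test_noop.py continues below; it covers the Noop Certificate Manager added in this release ("Add Noop Certificate Manager" in the ChangeLog). As a rough sketch of the pattern under test, with illustrative names only (the real octavia.certificates.manager.noop.NoopCertManager returns octavia cert.Cert objects, here a plain dict stands in):

    # Minimal sketch of a no-op certificate manager, for illustration only.
    class NoopCertManagerSketch:
        def __init__(self):
            self._certs = {}

        def get_cert(self, context, cert_ref, **kwargs):
            # Fabricate (and cache) a fake certificate instead of calling
            # a real backend such as Barbican.
            if cert_ref not in self._certs:
                self._certs[cert_ref] = {'certificate': 'fake',
                                         'ref': cert_ref}
            return self._certs[cert_ref]

        def get_secret(self, context, secret_ref):
            # The tests below show secrets are served the same way: any
            # reference yields a usable in-memory object.
            return self.get_cert(context, secret_ref)

This lets functional and development deployments exercise TLS code paths without a certificate store.]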
+ +from oslo_utils import uuidutils + +from octavia.certificates.common import cert +from octavia.certificates.manager import noop as noop_cert_mgr +from octavia.tests.common import sample_certs +import octavia.tests.unit.base as base + + +class TestNoopManager(base.TestCase): + + def setUp(self): + super().setUp() + self.manager = noop_cert_mgr.NoopCertManager() + + def test_store_cert(self): + certificate = self.manager.store_cert( + None, + sample_certs.X509_CERT, + sample_certs.X509_CERT_KEY_ENCRYPTED, + sample_certs.X509_IMDS, + private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) + self.assertIsNotNone(certificate) + self.assertIsInstance(certificate, cert.Cert) + + def test_get_cert(self): + cert_ref = uuidutils.generate_uuid() + certificate = self.manager.get_cert( + context=None, + cert_ref=cert_ref) + self.assertIsNotNone(certificate) + self.assertIsInstance(certificate, cert.Cert) + + def test_get_secret(self): + secret_ref = uuidutils.generate_uuid() + secret = self.manager.get_secret( + context=None, + secret_ref=secret_ref) + self.assertIsNotNone(secret) + self.assertIsInstance(secret, cert.Cert) diff -Nru octavia-10.1.0/octavia/tests/unit/cmd/test_interface.py octavia-10.1.1/octavia/tests/unit/cmd/test_interface.py --- octavia-10.1.0/octavia/tests/unit/cmd/test_interface.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/cmd/test_interface.py 2024-02-08 14:48:31.000000000 +0000 @@ -23,8 +23,8 @@ def setUp(self): super().setUp() - self.interface1 = interface_file.InterfaceFile("eth1") - self.interface2 = interface_file.InterfaceFile("eth2") + self.interface1 = interface_file.InterfaceFile("eth1", if_type="type1") + self.interface2 = interface_file.InterfaceFile("eth2", if_type="type2") def test_interfaces_find(self): controller = mock.Mock() diff -Nru octavia-10.1.0/octavia/tests/unit/cmd/test_status.py octavia-10.1.1/octavia/tests/unit/cmd/test_status.py --- octavia-10.1.0/octavia/tests/unit/cmd/test_status.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/cmd/test_status.py 2024-02-08 14:48:31.000000000 +0000 @@ -32,8 +32,8 @@ def test__check_amphorav2_not_enabled(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='api_settings', - default_provider_driver=constants.AMPHORA, - enabled_provider_drivers={constants.AMPHORA: "Test"}) + default_provider_driver='other_provider', + enabled_provider_drivers={'other_provider': "Test"}) check_result = self.cmd._check_amphorav2() self.assertEqual( Code.SUCCESS, check_result.code) diff -Nru octavia-10.1.0/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py octavia-10.1.1/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py --- octavia-10.1.0/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py 2024-02-08 14:48:31.000000000 +0000 @@ -668,7 +668,6 @@ " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" - " option ssl-hello-chk\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" @@ -1181,11 +1180,11 @@ " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " - "{opts} alpn {alpn}\n" + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_1 {opts} alpn 
{alpn}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " - "{opts} alpn {alpn}\n\n").format( + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_2 {opts} alpn {alpn}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="ssl crt %s verify none sni ssl_fc_sni" % cert_file_path + " ciphers " + constants.CIPHERS_OWASP_SUITE_B + @@ -1260,11 +1259,11 @@ " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " - "{opts} alpn {alpn}\n" + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_1 {opts} alpn {alpn}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " - "{opts} alpn {alpn}\n\n").format( + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_2 {opts} alpn {alpn}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="ssl crt %s verify none sni ssl_fc_sni" % cert_file_path + " ciphers " + constants.CIPHERS_OWASP_SUITE_B, @@ -1300,11 +1299,11 @@ " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " - "{opts} alpn {alpn}\n" + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_1 {opts} alpn {alpn}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " - "{opts} alpn {alpn}\n\n").format( + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_2 {opts} alpn {alpn}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="ssl crt %s verify none sni ssl_fc_sni" % cert_file_path + " no-sslv3 no-tlsv10 no-tlsv11", @@ -1400,11 +1399,11 @@ " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " - "{opts} alpn {alpn}\n" + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_1 {opts} alpn {alpn}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " - "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " - "{opts} alpn {alpn}\n\n").format( + "check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + "sample_member_id_2 {opts} alpn {alpn}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="%s %s %s %s %s %s" % ( "ssl", "crt", pool_client_cert, diff -Nru octavia-10.1.0/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py octavia-10.1.1/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py --- octavia-10.1.0/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py 2024-02-08 14:48:31.000000000 +0000 @@ -83,6 +83,58 @@ connection_limit=98)) self.assertEqual(exp, rendered_obj) + def test_render_template_udp_ipv6_session_persistence_default_values(self): + # The session persistence default values refer to + # persistence_timeout and persistence_granularity + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server 2001:db8::2 80 {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " persistence_timeout 360\n" + " persistence_granularity 128\n" + " delay_loop 30\n" 
+ " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.99 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.98 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + udp_sample = sample_configs_combined.sample_lb_with_udp_listener_tuple( + listeners=[sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=98)] + ) + udp_listener = udp_sample.listeners[0] + ipv6_lb = sample_configs_combined.sample_listener_loadbalancer_tuple( + vip=sample_configs_combined.sample_vip_tuple('2001:db8::2')) + udp_listener = udp_listener._replace(load_balancer=ipv6_lb) + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(udp_listener) + self.assertEqual(exp, rendered_obj) + def test_render_template_udp_one_packet(self): exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" "# Configuration for Listener sample_listener_id_1\n\n" diff -Nru octavia-10.1.0/octavia/tests/unit/common/sample_configs/sample_configs_combined.py octavia-10.1.1/octavia/tests/unit/common/sample_configs/sample_configs_combined.py --- octavia-10.1.0/octavia/tests/unit/common/sample_configs/sample_configs_combined.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/common/sample_configs/sample_configs_combined.py 2024-02-08 14:48:31.000000000 +0000 @@ -576,7 +576,7 @@ def sample_listener_loadbalancer_tuple( - topology=None, enabled=True, pools=None): + topology=None, enabled=True, vip=None, pools=None): if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: @@ -588,7 +588,7 @@ return in_lb( id='sample_loadbalancer_id_1', name='test-lb', - vip=sample_vip_tuple(), + vip=vip or sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', @@ -605,13 +605,13 @@ def sample_lb_with_udp_listener_tuple( - topology=None, enabled=True, pools=None): + topology=None, enabled=True, listeners=None, pools=None): if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE - listeners = [sample_listener_tuple( + listeners = listeners or [sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, diff -Nru octavia-10.1.0/octavia/tests/unit/common/test_policy.py octavia-10.1.1/octavia/tests/unit/common/test_policy.py --- octavia-10.1.0/octavia/tests/unit/common/test_policy.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/common/test_policy.py 2024-02-08 14:48:31.000000000 +0000 @@ -46,7 +46,7 @@ tmp.write('{"example:test": ""}') tmp.flush() - self.context = context.Context('fake', project_id='fake') + self.context = context.RequestContext('fake', project_id='fake') rule = oslo_policy.RuleDefault('example:test', "") 
policy.get_enforcer().register_defaults([rule]) @@ -73,8 +73,8 @@ # https://bugs.launchpad.net/oslo.config/+bug/1645868 self.conf.conf.__call__(args=[]) policy.reset() - self.context = context.Context('fake', project_id='fake', - roles=['member']) + self.context = context.RequestContext('fake', project_id='fake', + roles=['member']) self.rules = [ oslo_policy.RuleDefault("true", "@"), @@ -153,8 +153,8 @@ # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored - self.context = context.Context('admin', project_id='fake', - roles=['AdMiN']) + self.context = context.RequestContext('admin', project_id='fake', + roles=['AdMiN']) policy.get_enforcer().authorize(lowercase_action, self.target, self.context) @@ -169,16 +169,16 @@ # This test and the conditional in common/policy.py can then # be removed in favor of test_check_is_admin_new_defaults(). def test_check_is_admin(self): - self.context = context.Context('admin', project_id='fake', - roles=['AdMiN']) + self.context = context.RequestContext('admin', project_id='fake', + roles=['AdMiN']) self.assertTrue(policy.get_enforcer().check_is_admin(self.context)) def test_check_is_admin_new_defaults(self): conf = oslo_fixture.Config(config.cfg.CONF) conf.config(group="oslo_policy", enforce_new_defaults=True) - self.context = context.Context('admin', roles=['AdMiN'], - system_scope='all') + self.context = context.RequestContext('admin', roles=['AdMiN'], + system_scope='all') self.assertTrue(policy.get_enforcer().check_is_admin(self.context)) @@ -197,7 +197,7 @@ # https://bugs.launchpad.net/oslo.config/+bug/1645868 self.conf.conf.__call__(args=[]) - self.context = context.Context('fake', project_id='fake') + self.context = context.RequestContext('fake', project_id='fake') def test_init_true(self): check = policy.IsAdminCheck('is_admin', 'True') @@ -240,8 +240,8 @@ # https://bugs.launchpad.net/oslo.config/+bug/1645868 self.conf.conf.__call__(args=[]) - self.context = context.Context('fake', project_id='fake', - roles=['member']) + self.context = context.RequestContext('fake', project_id='fake', + roles=['member']) self.actions = policy.get_enforcer().get_rules().keys() self.target = {} diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/test_task_utils.py octavia-10.1.1/octavia/tests/unit/controller/worker/test_task_utils.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/test_task_utils.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/test_task_utils.py 2024-02-08 14:48:31.000000000 +0000 @@ -14,6 +14,7 @@ from unittest import mock from oslo_utils import uuidutils +import tenacity from octavia.common import constants from octavia.controller.worker import task_utils as task_utilities @@ -173,7 +174,13 @@ @mock.patch('octavia.db.api.get_session', return_value=TEST_SESSION) @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('tenacity.nap.time') + # mock LOG so we don't fill the console with log messages from + # tenacity.retry + @mock.patch('octavia.controller.worker.task_utils.LOG') def test_mark_loadbalancer_prov_status_active(self, + mock_LOG, + mock_time, mock_lb_repo_update, mock_get_session): @@ -190,14 +197,41 @@ mock_lb_repo_update.reset_mock() mock_get_session.side_effect = Exception('fail') - self.task_utils.mark_loadbalancer_prov_status_active( + self.assertRaises( + tenacity.RetryError, + self.task_utils.mark_loadbalancer_prov_status_active, self.LOADBALANCER_ID) self.assertFalse(mock_lb_repo_update.called) + # Exceptions then 
happy path + mock_get_session.reset_mock(side_effect=True) + mock_lb_repo_update.reset_mock() + + mock_session = mock_get_session() + mock_get_session.side_effect = [ + Exception('fail'), + Exception('fail'), + Exception('fail'), + mock_session] + + self.task_utils.mark_loadbalancer_prov_status_active( + self.LOADBALANCER_ID) + + mock_lb_repo_update.assert_called_once_with( + mock_session, + id=self.LOADBALANCER_ID, + provisioning_status=constants.ACTIVE) + @mock.patch('octavia.db.api.get_session', return_value=TEST_SESSION) @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('tenacity.nap.time') + # mock LOG so we don't fill the console with log messages from + # tenacity.retry + @mock.patch('octavia.controller.worker.task_utils.LOG') def test_mark_loadbalancer_prov_status_error(self, + mock_LOG, + mock_time, mock_lb_repo_update, mock_get_session): @@ -214,10 +248,30 @@ mock_lb_repo_update.reset_mock() mock_get_session.side_effect = Exception('fail') + self.assertRaises(tenacity.RetryError, + self.task_utils.mark_loadbalancer_prov_status_error, + self.LOADBALANCER_ID) + + self.assertFalse(mock_lb_repo_update.called) + + # Exceptions then happy path + mock_get_session.reset_mock(side_effect=True) + mock_lb_repo_update.reset_mock() + + mock_session = mock_get_session() + mock_get_session.side_effect = [ + Exception('fail'), + Exception('fail'), + Exception('fail'), + mock_session] + self.task_utils.mark_loadbalancer_prov_status_error( self.LOADBALANCER_ID) - self.assertFalse(mock_lb_repo_update.called) + mock_lb_repo_update.assert_called_once_with( + mock_session, + id=self.LOADBALANCER_ID, + provisioning_status=constants.ERROR) @mock.patch('octavia.db.api.get_session', return_value=TEST_SESSION) @mock.patch('octavia.db.repositories.MemberRepository.update') diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py octavia-10.1.1/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -243,6 +243,7 @@ self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.AMPHORA_ID, amp_flow.provides) self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.BASE_PORT, amp_flow.provides) self.assertIn(constants.COMPUTE_ID, amp_flow.provides) @@ -253,7 +254,7 @@ self.assertIn(constants.VIP_SG_ID, amp_flow.provides) self.assertEqual(7, len(amp_flow.requires)) - self.assertEqual(13, len(amp_flow.provides)) + self.assertEqual(14, len(amp_flow.provides)) def test_get_failover_flow_standalone(self, mock_get_net_driver): failed_amphora = data_models.Amphora( @@ -276,6 +277,7 @@ self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.AMPHORA_ID, amp_flow.provides) self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.BASE_PORT, amp_flow.provides) self.assertIn(constants.COMPUTE_ID, amp_flow.provides) @@ -286,7 +288,7 @@ self.assertIn(constants.VIP_SG_ID, amp_flow.provides) self.assertEqual(7, len(amp_flow.requires)) - self.assertEqual(12, 
len(amp_flow.provides)) + self.assertEqual(13, len(amp_flow.provides)) def test_get_failover_flow_bogus_role(self, mock_get_net_driver): failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(), @@ -324,12 +326,31 @@ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.provides) + + self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + + self.assertEqual(3, len(vrrp_subflow.provides)) + self.assertEqual(3, len(vrrp_subflow.requires)) + + def test_get_vrrp_subflow_dont_get_status(self, mock_get_net_driver): + vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123', + get_amphorae_status=False) + + self.assertIsInstance(vrrp_subflow, flow.Flow) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) + self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.requires) self.assertEqual(2, len(vrrp_subflow.provides)) - self.assertEqual(2, len(vrrp_subflow.requires)) + self.assertEqual(4, len(vrrp_subflow.requires)) def test_get_vrrp_subflow_dont_create_vrrp_group( self, mock_get_net_driver): @@ -340,12 +361,14 @@ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.provides) self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) - self.assertEqual(2, len(vrrp_subflow.provides)) - self.assertEqual(2, len(vrrp_subflow.requires)) + self.assertEqual(3, len(vrrp_subflow.provides)) + self.assertEqual(3, len(vrrp_subflow.requires)) def test_get_post_map_lb_subflow(self, mock_get_net_driver): diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py octavia-10.1.1/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -155,10 +155,16 @@ self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) + self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) amp_flow = self.LBFlow.get_post_lb_amp_association_flow( '123', constants.TOPOLOGY_ACTIVE_STANDBY) @@ -167,10 +173,16 @@ self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.UPDATE_DICT, 
amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) def test_get_create_load_balancer_flows_single_listeners( self, mock_get_net_driver): @@ -219,6 +231,7 @@ self.assertIn(constants.LISTENERS, create_flow.provides) self.assertIn(constants.AMPHORA, create_flow.provides) self.assertIn(constants.AMPHORA_ID, create_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, create_flow.provides) self.assertIn(constants.COMPUTE_ID, create_flow.provides) self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) self.assertIn(constants.LOADBALANCER, create_flow.provides) @@ -230,7 +243,7 @@ create_flow.provides) self.assertEqual(6, len(create_flow.requires)) - self.assertEqual(16, len(create_flow.provides), + self.assertEqual(17, len(create_flow.provides), create_flow.provides) def _test_get_failover_LB_flow_single(self, amphorae): @@ -322,6 +335,7 @@ self.assertIn(constants.AMPHORA, failover_flow.provides) self.assertIn(constants.AMPHORA_ID, failover_flow.provides) self.assertIn(constants.AMPHORAE, failover_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, failover_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, failover_flow.provides) self.assertIn(constants.BASE_PORT, failover_flow.provides) @@ -339,7 +353,7 @@ self.assertEqual(6, len(failover_flow.requires), failover_flow.requires) - self.assertEqual(16, len(failover_flow.provides), + self.assertEqual(17, len(failover_flow.provides), failover_flow.provides) def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver): diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py octavia-10.1.1/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -125,21 +125,56 @@ mock_amphora_repo_update): mock_lb_repo_get.return_value = _LB_mock + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: False + } + } + amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate() amp_list_update_obj.execute(_load_balancer_mock, 0, - [_amphora_mock], self.timeout_dict) + [_amphora_mock], amphorae_status, + _amphora_mock.id, + self.timeout_dict) mock_driver.update_amphora_listeners.assert_called_once_with( _LB_mock, _amphora_mock, self.timeout_dict) + # Unreachable amp + mock_driver.reset_mock() + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: True + } + } + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], + amphorae_status, + _amphora_mock.id, + self.timeout_dict) + mock_driver.update_amphora_listeners.assert_not_called() + + # Test exception mock_driver.update_amphora_listeners.side_effect = Exception('boom') amp_list_update_obj.execute(_load_balancer_mock, 0, - [_amphora_mock], self.timeout_dict) + [_amphora_mock], {}, + _amphora_mock.id, + 
self.timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, AMP_ID, status=constants.ERROR) + # Test exception, secondary amp + mock_amphora_repo_update.reset_mock() + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute(_load_balancer_mock, 0, + [_amphora_mock], {}, + '1234', + self.timeout_dict) + + mock_amphora_repo_update.assert_not_called() + def test_listener_update(self, mock_driver, mock_generate_uuid, @@ -217,26 +252,57 @@ # Test no listeners mock_lb.listeners = None - listeners_reload_obj.execute(mock_lb, 0, None) + listeners_reload_obj.execute(mock_lb, 0, None, {}, amphora_mock.id) mock_driver.reload.assert_not_called() # Test with listeners + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: False + } + } mock_driver.start.reset_mock() mock_lb.listeners = [mock_listener] listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], + amphorae_status, + amphora_mock.id, timeout_dict=self.timeout_dict) mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, self.timeout_dict) + # Unreachable amp + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: True + } + } + mock_driver.reload.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], + amphorae_status, + _amphora_mock.id, + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_not_called() + # Test with reload exception mock_driver.reload.reset_mock() - listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], + listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], {}, + amphora_mock.id, timeout_dict=self.timeout_dict) mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, self.timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, amphora_mock.id, status=constants.ERROR) + # Test with reload exception, secondary amp + mock_driver.reload.reset_mock() + mock_amphora_repo_update.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], {}, + '1234', + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, + self.timeout_dict) + mock_amphora_repo_update.assert_not_called() + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
'mark_listener_prov_status_error') def test_listeners_start(self, @@ -604,6 +670,11 @@ _LB_mock.amphorae = _amphorae_mock mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, Exception('boom')] + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: False + } + } timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} @@ -611,19 +682,38 @@ amphora_update_vrrp_interface_obj = ( amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface()) amphora_update_vrrp_interface_obj.execute( - 0, [_amphora_mock], timeout_dict) + 0, [_amphora_mock], amphorae_status, _amphora_mock.id, + timeout_dict) mock_driver.get_interface_from_ip.assert_called_once_with( _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE) + # Unreachable amp + mock_driver.reset_mock() + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: True + } + } + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], amphorae_status, _amphora_mock.id, + timeout_dict) + mock_driver.get_interface_from_ip.assert_not_called() + # Test with an exception mock_amphora_repo_update.reset_mock() amphora_update_vrrp_interface_obj.execute( - 0, [_amphora_mock], timeout_dict) + 0, [_amphora_mock], {}, _amphora_mock.id, timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, _amphora_mock.id, status=constants.ERROR) + # Test with an exception, secondary amp + mock_amphora_repo_update.reset_mock() + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], {}, '1234', timeout_dict) + mock_amphora_repo_update.assert_not_called() + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') def test_amphora_vrrp_update(self, mock_lb_get, @@ -666,23 +756,52 @@ mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, Exception('boom')] mock_lb_get.return_value = _LB_mock + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: False + } + } + amphora_vrrp_update_obj = ( amphora_driver_tasks.AmphoraIndexVRRPUpdate()) amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, - 0, [_amphora_mock], 'fakeint0', + 0, [_amphora_mock], amphorae_status, + 'fakeint0', + _amphora_mock.id, timeout_dict=self.timeout_dict) mock_driver.update_vrrp_conf.assert_called_once_with( _LB_mock, amphorae_network_config, _amphora_mock, self.timeout_dict) + # Unreachable amp + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: True + } + } + mock_amphora_repo_update.reset_mock() + mock_driver.update_vrrp_conf.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], amphorae_status, + None, _amphora_mock.id) + mock_driver.update_vrrp_conf.assert_not_called() + # Test with an exception mock_amphora_repo_update.reset_mock() amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, - 0, [_amphora_mock], 'fakeint0') + 0, [_amphora_mock], {}, 'fakeint0', + _amphora_mock.id) mock_amphora_repo_update.assert_called_once_with( _session_mock, _amphora_mock.id, status=constants.ERROR) + # Test with an exception, secondary amp + mock_amphora_repo_update.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], {}, 'fakeint0', + '1234') + mock_amphora_repo_update.assert_not_called() + def test_amphora_vrrp_start(self, mock_driver, mock_generate_uuid, @@ -706,25 +825,54 @@ mock_listener_repo_get, 
mock_listener_repo_update, mock_amphora_repo_update): + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: False + } + } + amphora_vrrp_start_obj = ( amphora_driver_tasks.AmphoraIndexVRRPStart()) mock_driver.start_vrrp_service.side_effect = [mock.DEFAULT, Exception('boom')] - amphora_vrrp_start_obj.execute(0, [_amphora_mock], + amphora_vrrp_start_obj.execute(0, [_amphora_mock], amphorae_status, + _amphora_mock.id, timeout_dict=self.timeout_dict) mock_driver.start_vrrp_service.assert_called_once_with( _amphora_mock, self.timeout_dict) + # Unreachable amp + mock_driver.start_vrrp_service.reset_mock() + amphorae_status = { + _amphora_mock.id: { + constants.UNREACHABLE: True + } + } + amphora_vrrp_start_obj.execute(0, [_amphora_mock], amphorae_status, + _amphora_mock.id, + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_not_called() + # Test with a start exception mock_driver.start_vrrp_service.reset_mock() - amphora_vrrp_start_obj.execute(0, [_amphora_mock], + amphora_vrrp_start_obj.execute(0, [_amphora_mock], {}, + _amphora_mock.id, timeout_dict=self.timeout_dict) mock_driver.start_vrrp_service.assert_called_once_with( _amphora_mock, self.timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, _amphora_mock.id, status=constants.ERROR) + # Test with a start exception, secondary amp + mock_driver.start_vrrp_service.reset_mock() + mock_amphora_repo_update.reset_mock() + amphora_vrrp_start_obj.execute(0, [_amphora_mock], {}, '1234', + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_called_once_with( + _amphora_mock, self.timeout_dict) + mock_amphora_repo_update.assert_not_called() + def test_amphora_compute_connectivity_wait(self, mock_driver, mock_generate_uuid, @@ -790,3 +938,75 @@ self.assertRaises(driver_except.TimeOutException, amp_config_update_obj.execute, _amphora_mock, flavor) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + def test_amphorae_get_connectivity_status(self, + mock_amphora_repo_get, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + amphora1_mock = mock.MagicMock() + amphora1_mock.id = 'id1' + amphora2_mock = mock.MagicMock() + amphora2_mock.id = 'id2' + db_amphora1_mock = mock.Mock() + db_amphora2_mock = mock.Mock() + + amp_get_connectivity_status = ( + amphora_driver_tasks.AmphoraeGetConnectivityStatus()) + + # All amphorae reachable + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.return_value = None + + ret = amp_get_connectivity_status.execute( + [amphora1_mock, amphora2_mock], + amphora1_mock.id, + timeout_dict=self.timeout_dict) + mock_driver.check.assert_has_calls( + [mock.call(db_amphora1_mock, timeout_dict=self.timeout_dict), + mock.call(db_amphora2_mock, timeout_dict=self.timeout_dict)]) + self.assertFalse( + ret[amphora1_mock.id][constants.UNREACHABLE]) + self.assertFalse( + ret[amphora2_mock.id][constants.UNREACHABLE]) + + # amphora1 unreachable + mock_driver.check.reset_mock() + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.side_effect = [ + driver_except.TimeOutException, None] + self.assertRaises(driver_except.TimeOutException, + amp_get_connectivity_status.execute, + [amphora1_mock, amphora2_mock], + amphora1_mock.id, + timeout_dict=self.timeout_dict) + mock_driver.check.assert_called_with( + db_amphora1_mock, timeout_dict=self.timeout_dict) + + # 
amphora2 unreachable + mock_driver.check.reset_mock() + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.side_effect = [ + None, driver_except.TimeOutException] + ret = amp_get_connectivity_status.execute( + [amphora1_mock, amphora2_mock], + amphora1_mock.id, + timeout_dict=self.timeout_dict) + mock_driver.check.assert_has_calls( + [mock.call(db_amphora1_mock, timeout_dict=self.timeout_dict), + mock.call(db_amphora2_mock, timeout_dict=self.timeout_dict)]) + self.assertFalse( + ret[amphora1_mock.id][constants.UNREACHABLE]) + self.assertTrue( + ret[amphora2_mock.id][constants.UNREACHABLE]) diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py octavia-10.1.1/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -65,6 +65,7 @@ vrrp_ip=t_constants.MOCK_VRRP_IP3) ] UPDATE_DICT = {constants.TOPOLOGY: None} +_session_mock = mock.MagicMock() class TestException(Exception): @@ -98,7 +99,10 @@ conf.config(group="networking", max_retries=1) super().setUp() - def test_calculate_amphora_delta(self, mock_get_net_driver): + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_calculate_amphora_delta(self, mock_get_session, mock_lb_repo_get, + mock_get_net_driver): VRRP_PORT_ID = uuidutils.generate_uuid() VIP_NETWORK_ID = uuidutils.generate_uuid() VIP_SUBNET_ID = uuidutils.generate_uuid() @@ -155,9 +159,9 @@ id=mock.Mock(), network_id=DELETE_NETWORK_ID) + mock_lb_repo_get.return_value = lb_mock mock_driver.get_port.return_value = vrrp_port mock_driver.get_subnet.return_value = member_subnet - mock_driver.get_network.return_value = mgmt_net mock_driver.get_plugged_networks.return_value = [ mgmt_interface, vrrp_interface, @@ -177,8 +181,12 @@ mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID) mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) - def test_calculate_delta(self, mock_get_net_driver): + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_calculate_delta(self, mock_get_session, mock_get_lb, + mock_get_net_driver): mock_driver = mock.MagicMock() + mock_get_lb.return_value = self.load_balancer_mock mock_get_net_driver.return_value = mock_driver empty_deltas = {self.amphora_mock.id: data_models.Delta( amphora_id=self.amphora_mock.id, @@ -191,9 +199,6 @@ mgmt_subnet = data_models.Subnet( id=self.mgmt_subnet_id, network_id=self.mgmt_net_id) - mgmt_net = data_models.Network( - id=self.mgmt_net_id, - subnets=[mgmt_subnet.id]) mgmt_ip_address = mock.MagicMock() mgmt_interface = data_models.Interface( network_id=self.mgmt_net_id, @@ -264,7 +269,6 @@ fixed_ips=vrrp_port.fixed_ips)] mock_driver.get_port.return_value = vrrp_port mock_driver.get_subnet.return_value = vrrp_subnet - mock_driver.get_network.return_value = mgmt_net calc_delta = network_tasks.CalculateDelta() @@ -367,10 +371,6 @@ mgmt2_subnet = data_models.Subnet( id=mgmt2_subnet_id, network_id=mgmt2_net_id) - mgmt2_net = data_models.Network( - id=mgmt2_net_id, - subnets=[mgmt2_subnet.id] - ) mgmt2_interface = data_models.Interface( network_id=mgmt2_net_id, fixed_ips=[ @@ -379,7 
+379,6 @@ subnet_id=mgmt2_subnet_id, ) ]) - mock_driver.get_network.return_value = mgmt2_net az = { constants.MANAGEMENT_NETWORK: mgmt2_net_id, } @@ -399,7 +398,6 @@ # Test with one amp and one pool and one member, wrong network plugged # Delta should be one network/subnet to add and one to remove mock_driver.reset_mock() - mock_driver.get_network.return_value = mgmt_net member_mock = mock.MagicMock() member_mock.subnet_id = member_private_subnet.id pool_mock.members = [member_mock] @@ -572,7 +570,11 @@ self.assertEqual({self.amphora_mock.id: ndm}, calc_delta.execute(self.load_balancer_mock, {})) - def test_calculate_delta_ipv6_ipv4_subnets(self, mock_get_net_driver): + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_calculate_delta_ipv6_ipv4_subnets(self, mock_get_session, + mock_get_lb, + mock_get_net_driver): mock_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_driver diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py octavia-10.1.1/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -286,6 +286,7 @@ self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.AMPHORA_ID, amp_flow.provides) self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.BASE_PORT, amp_flow.provides) self.assertIn(constants.COMPUTE_ID, amp_flow.provides) @@ -296,7 +297,7 @@ self.assertIn(constants.VIP_SG_ID, amp_flow.provides) self.assertEqual(7, len(amp_flow.requires)) - self.assertEqual(13, len(amp_flow.provides)) + self.assertEqual(14, len(amp_flow.provides)) def test_get_failover_flow_standalone(self, mock_get_net_driver): failed_amphora = data_models.Amphora( @@ -320,6 +321,7 @@ self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.AMPHORA_ID, amp_flow.provides) self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.BASE_PORT, amp_flow.provides) self.assertIn(constants.COMPUTE_ID, amp_flow.provides) @@ -330,7 +332,7 @@ self.assertIn(constants.VIP_SG_ID, amp_flow.provides) self.assertEqual(7, len(amp_flow.requires)) - self.assertEqual(12, len(amp_flow.provides)) + self.assertEqual(13, len(amp_flow.provides)) def test_get_failover_flow_bogus_role(self, mock_get_net_driver): failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(), @@ -368,12 +370,31 @@ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.provides) + + self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + + self.assertEqual(3, len(vrrp_subflow.provides)) + self.assertEqual(3, len(vrrp_subflow.requires)) + + def test_get_vrrp_subflow_dont_get_status(self, mock_get_net_driver): + vrrp_subflow = 
self.AmpFlow.get_vrrp_subflow('123', + get_amphorae_status=False) + + self.assertIsInstance(vrrp_subflow, flow.Flow) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) + self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.requires) self.assertEqual(2, len(vrrp_subflow.provides)) - self.assertEqual(2, len(vrrp_subflow.requires)) + self.assertEqual(4, len(vrrp_subflow.requires)) def test_get_vrrp_subflow_dont_create_vrrp_group( self, mock_get_net_driver): @@ -384,12 +405,14 @@ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.provides) self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) - self.assertEqual(2, len(vrrp_subflow.provides)) - self.assertEqual(2, len(vrrp_subflow.requires)) + self.assertEqual(3, len(vrrp_subflow.provides)) + self.assertEqual(3, len(vrrp_subflow.requires)) def test_update_amphora_config_flow(self, mock_get_net_driver): diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py octavia-10.1.1/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py 2024-02-08 14:48:31.000000000 +0000 @@ -174,14 +174,16 @@ self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(2, len(amp_flow.requires), amp_flow.requires) - self.assertEqual(4, len(amp_flow.provides), amp_flow.provides) + self.assertEqual(3, len(amp_flow.requires), amp_flow.requires) + self.assertEqual(5, len(amp_flow.provides), amp_flow.provides) amp_flow = self.LBFlow.get_post_lb_amp_association_flow( '123', constants.TOPOLOGY_ACTIVE_STANDBY) @@ -190,14 +192,16 @@ self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(2, len(amp_flow.requires), amp_flow.requires) - self.assertEqual(4, len(amp_flow.provides), amp_flow.provides) + self.assertEqual(3, len(amp_flow.requires), amp_flow.requires) + self.assertEqual(5, len(amp_flow.provides), amp_flow.provides) def test_get_create_load_balancer_flows_single_listeners( self, mock_get_net_driver): @@ -255,6 +259,7 @@ 
self.assertIn(constants.AMPHORA_ID, create_flow.provides) self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, create_flow.provides) self.assertIn(constants.COMPUTE_ID, create_flow.provides) self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) self.assertIn(constants.DELTAS, create_flow.provides) @@ -265,7 +270,7 @@ self.assertIn(constants.VIP, create_flow.provides) self.assertEqual(6, len(create_flow.requires), create_flow.requires) - self.assertEqual(16, len(create_flow.provides), + self.assertEqual(17, len(create_flow.provides), create_flow.provides) def _test_get_failover_LB_flow_single(self, amphorae): diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py octavia-10.1.1/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -132,21 +132,54 @@ mock_amphora_repo_get.return_value = _db_amphora_mock mock_lb_get.return_value = _db_load_balancer_mock + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate() amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], self.timeout_dict) mock_driver.update_amphora_listeners.assert_called_once_with( _db_load_balancer_mock, _db_amphora_mock, self.timeout_dict) + # Unreachable amp + mock_driver.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], + self.timeout_dict) + mock_driver.update_amphora_listeners.assert_not_called() + + # Test exception mock_driver.update_amphora_listeners.side_effect = Exception('boom') - amp_list_update_obj.execute(_LB_mock, 0, - [_amphora_mock], self.timeout_dict) + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], {}, + _amphora_mock[constants.ID], + self.timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, AMP_ID, status=constants.ERROR) + # Test exception, secondary amp + mock_amphora_repo_update.reset_mock() + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], {}, + '1234', + self.timeout_dict) + + mock_amphora_repo_update.assert_not_called() + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') def test_listeners_update(self, mock_lb_get, @@ -194,39 +227,70 @@ mock_driver, mock_generate_uuid, mock_log, mock_get_session, mock_listener_repo_get, mock_listener_repo_update, mock_amphora_repo_get, mock_amphora_repo_update): - amphora_mock = mock.MagicMock() listeners_reload_obj = ( amphora_driver_tasks.AmphoraIndexListenersReload()) mock_lb = mock.MagicMock() mock_listener = mock.MagicMock() mock_listener.id = '12345' - mock_amphora_repo_get.return_value = amphora_mock + mock_amphora_repo_get.return_value = _amphora_mock mock_lb_repo_get.return_value = mock_lb mock_driver.reload.side_effect = [mock.DEFAULT, Exception('boom')] # Test no listeners mock_lb.listeners = None - listeners_reload_obj.execute(mock_lb, 0, None) + 
listeners_reload_obj.execute(mock_lb, 0, None, {}, + _amphora_mock[constants.ID]) mock_driver.reload.assert_not_called() # Test with listeners - mock_driver.start.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + mock_driver.reload.reset_mock() mock_lb.listeners = [mock_listener] - listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], timeout_dict=self.timeout_dict) - mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, self.timeout_dict) + # Unreachable amp + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + mock_driver.reload.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_not_called() + # Test with reload exception mock_driver.reload.reset_mock() - listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], {}, + _amphora_mock[constants.ID], timeout_dict=self.timeout_dict) - mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, self.timeout_dict) mock_amphora_repo_update.assert_called_once_with( - _session_mock, amphora_mock[constants.ID], + _session_mock, _amphora_mock[constants.ID], status=constants.ERROR) + # Test with reload exception, secondary amp + mock_driver.reload.reset_mock() + mock_amphora_repo_update.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], {}, + '1234', + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, + self.timeout_dict) + mock_amphora_repo_update.assert_not_called() + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
'mark_listener_prov_status_error') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @@ -728,6 +792,11 @@ FAKE_INTERFACE = 'fake0' mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, Exception('boom')] + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} @@ -735,20 +804,39 @@ amphora_update_vrrp_interface_obj = ( amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface()) amphora_update_vrrp_interface_obj.execute( - 0, [_amphora_mock], timeout_dict) + 0, [_amphora_mock], amphorae_status, _amphora_mock[constants.ID], + timeout_dict) mock_driver.get_interface_from_ip.assert_called_once_with( _db_amphora_mock, _db_amphora_mock.vrrp_ip, timeout_dict=timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, _db_amphora_mock.id, vrrp_interface=FAKE_INTERFACE) + # Unreachable amp + mock_driver.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], amphorae_status, _amphora_mock[constants.ID], + timeout_dict) + mock_driver.get_interface_from_ip.assert_not_called() + # Test with an exception mock_amphora_repo_update.reset_mock() amphora_update_vrrp_interface_obj.execute( - 0, [_amphora_mock], timeout_dict) + 0, [_amphora_mock], {}, _amphora_mock[constants.ID], timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, _db_amphora_mock.id, status=constants.ERROR) + # Test with an exception, secondary amp + mock_amphora_repo_update.reset_mock() + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], {}, '1234', timeout_dict) + mock_amphora_repo_update.assert_not_called() + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') def test_amphora_vrrp_update(self, mock_lb_get, @@ -796,23 +884,52 @@ Exception('boom')] mock_lb_get.return_value = _db_load_balancer_mock mock_amphora_repo_get.return_value = _db_amphora_mock + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + amphora_vrrp_update_obj = ( amphora_driver_tasks.AmphoraIndexVRRPUpdate()) amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, - 0, [_amphora_mock], 'fakeint0', + 0, [_amphora_mock], amphorae_status, + 'fakeint0', + _amphora_mock[constants.ID], timeout_dict=self.timeout_dict) mock_driver.update_vrrp_conf.assert_called_once_with( _db_load_balancer_mock, amphorae_network_config, _db_amphora_mock, self.timeout_dict) + # Unreachable amp + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + mock_amphora_repo_update.reset_mock() + mock_driver.update_vrrp_conf.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], amphorae_status, + None, _amphora_mock[constants.ID]) + mock_driver.update_vrrp_conf.assert_not_called() + # Test with an exception mock_amphora_repo_update.reset_mock() amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, - 0, [_amphora_mock], 'fakeint0') + 0, [_amphora_mock], {}, 'fakeint0', + _amphora_mock[constants.ID]) mock_amphora_repo_update.assert_called_once_with( _session_mock, _db_amphora_mock.id, status=constants.ERROR) + # Test with an exception, secondary amp + mock_amphora_repo_update.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], {}, 'fakeint0', + '1234') + 
mock_amphora_repo_update.assert_not_called() + def test_amphora_vrrp_start(self, mock_driver, mock_generate_uuid, @@ -840,25 +957,54 @@ mock_amphora_repo_get, mock_amphora_repo_update): mock_amphora_repo_get.return_value = _db_amphora_mock + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + amphora_vrrp_start_obj = ( amphora_driver_tasks.AmphoraIndexVRRPStart()) mock_driver.start_vrrp_service.side_effect = [mock.DEFAULT, Exception('boom')] - amphora_vrrp_start_obj.execute(0, [_amphora_mock], + amphora_vrrp_start_obj.execute(0, [_amphora_mock], amphorae_status, + _amphora_mock[constants.ID], timeout_dict=self.timeout_dict) mock_driver.start_vrrp_service.assert_called_once_with( _db_amphora_mock, self.timeout_dict) + # Unreachable amp + mock_driver.start_vrrp_service.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + amphora_vrrp_start_obj.execute(0, [_amphora_mock], amphorae_status, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_not_called() + # Test with a start exception mock_driver.start_vrrp_service.reset_mock() - amphora_vrrp_start_obj.execute(0, [_amphora_mock], + amphora_vrrp_start_obj.execute(0, [_amphora_mock], {}, + _amphora_mock[constants.ID], timeout_dict=self.timeout_dict) mock_driver.start_vrrp_service.assert_called_once_with( _db_amphora_mock, self.timeout_dict) mock_amphora_repo_update.assert_called_once_with( _session_mock, _db_amphora_mock.id, status=constants.ERROR) + # Test with a start exception, secondary amp + mock_driver.start_vrrp_service.reset_mock() + mock_amphora_repo_update.reset_mock() + amphora_vrrp_start_obj.execute(0, [_amphora_mock], {}, '1234', + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_called_once_with( + _db_amphora_mock, self.timeout_dict) + mock_amphora_repo_update.assert_not_called() + def test_amphora_compute_connectivity_wait(self, mock_driver, mock_generate_uuid, @@ -930,3 +1076,74 @@ self.assertRaises(driver_except.TimeOutException, amp_config_update_obj.execute, _amphora_mock, flavor) + + def test_amphorae_get_connectivity_status(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + amphora1_mock = mock.MagicMock() + amphora1_mock[constants.ID] = 'id1' + amphora2_mock = mock.MagicMock() + amphora2_mock[constants.ID] = 'id2' + db_amphora1_mock = mock.Mock() + db_amphora2_mock = mock.Mock() + + amp_get_connectivity_status = ( + amphora_driver_tasks.AmphoraeGetConnectivityStatus()) + + # All amphorae reachable + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.return_value = None + + ret = amp_get_connectivity_status.execute( + [amphora1_mock, amphora2_mock], + amphora1_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.check.assert_has_calls( + [mock.call(db_amphora1_mock, timeout_dict=self.timeout_dict), + mock.call(db_amphora2_mock, timeout_dict=self.timeout_dict)]) + self.assertFalse( + ret[amphora1_mock[constants.ID]][constants.UNREACHABLE]) + self.assertFalse( + ret[amphora2_mock[constants.ID]][constants.UNREACHABLE]) + + # amphora1 unreachable + mock_driver.check.reset_mock() + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.side_effect = [ + driver_except.TimeOutException, None] + 
self.assertRaises(driver_except.TimeOutException, + amp_get_connectivity_status.execute, + [amphora1_mock, amphora2_mock], + amphora1_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.check.assert_called_with( + db_amphora1_mock, timeout_dict=self.timeout_dict) + + # amphora2 unreachable + mock_driver.check.reset_mock() + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.side_effect = [ + None, driver_except.TimeOutException] + ret = amp_get_connectivity_status.execute( + [amphora1_mock, amphora2_mock], + amphora1_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.check.assert_has_calls( + [mock.call(db_amphora1_mock, timeout_dict=self.timeout_dict), + mock.call(db_amphora2_mock, timeout_dict=self.timeout_dict)]) + self.assertFalse( + ret[amphora1_mock[constants.ID]][constants.UNREACHABLE]) + self.assertTrue( + ret[amphora2_mock[constants.ID]][constants.UNREACHABLE]) diff -Nru octavia-10.1.0/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py octavia-10.1.1/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py --- octavia-10.1.0/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py 2024-02-08 14:48:31.000000000 +0000 @@ -185,7 +185,6 @@ mock_lb_repo_get.return_value = lb_mock mock_driver.get_port.return_value = vrrp_port mock_driver.get_subnet.return_value = member_subnet - mock_driver.get_network.return_value = mgmt_net mock_driver.get_plugged_networks.return_value = [ mgmt_interface, vrrp_interface, @@ -231,9 +230,6 @@ mgmt_subnet = data_models.Subnet( id=self.mgmt_subnet_id, network_id=self.mgmt_net_id) - mgmt_net = data_models.Network( - id=self.mgmt_net_id, - subnets=[mgmt_subnet.id]) mgmt_ip_address = mock.MagicMock() mgmt_interface = data_models.Interface( network_id=self.mgmt_net_id, @@ -304,7 +300,6 @@ fixed_ips=vrrp_port.fixed_ips)] mock_driver.get_port.return_value = vrrp_port mock_driver.get_subnet.return_value = vrrp_subnet - mock_driver.get_network.return_value = mgmt_net calc_delta = network_tasks.CalculateDelta() @@ -407,10 +402,6 @@ mgmt2_subnet = data_models.Subnet( id=mgmt2_subnet_id, network_id=mgmt2_net_id) - mgmt2_net = data_models.Network( - id=mgmt2_net_id, - subnets=[mgmt2_subnet.id] - ) mgmt2_interface = data_models.Interface( network_id=mgmt2_net_id, fixed_ips=[ @@ -419,7 +410,6 @@ subnet_id=mgmt2_subnet_id, ) ]) - mock_driver.get_network.return_value = mgmt2_net az = { constants.MANAGEMENT_NETWORK: mgmt2_net_id, } @@ -439,7 +429,6 @@ # Test with one amp and one pool and one member, wrong network plugged # Delta should be one network/subnet to add and one to remove mock_driver.reset_mock() - mock_driver.get_network.return_value = mgmt_net member_mock = mock.MagicMock() member_mock.subnet_id = member_private_subnet.id pool_mock.members = [member_mock] diff -Nru octavia-10.1.0/octavia.egg-info/PKG-INFO octavia-10.1.1/octavia.egg-info/PKG-INFO --- octavia-10.1.0/octavia.egg-info/PKG-INFO 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/octavia.egg-info/PKG-INFO 2024-02-08 14:49:00.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: octavia -Version: 10.1.0 +Version: 10.1.1 Summary: OpenStack Octavia Scalable Load Balancer as a Service Home-page: https://docs.openstack.org/octavia/latest/ Author: OpenStack diff -Nru octavia-10.1.0/octavia.egg-info/SOURCES.txt octavia-10.1.1/octavia.egg-info/SOURCES.txt --- 
octavia-10.1.0/octavia.egg-info/SOURCES.txt 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/octavia.egg-info/SOURCES.txt 2024-02-08 14:49:00.000000000 +0000 @@ -10,7 +10,6 @@ README.rst TESTING.rst bindep.txt -lower-constraints.txt requirements.txt setup.cfg setup.py @@ -542,6 +541,7 @@ octavia/certificates/manager/castellan_mgr.py octavia/certificates/manager/cert_mgr.py octavia/certificates/manager/local.py +octavia/certificates/manager/noop.py octavia/cmd/__init__.py octavia/cmd/agent.py octavia/cmd/api.py @@ -938,6 +938,7 @@ octavia/tests/unit/certificates/manager/test_barbican_legacy.py octavia/tests/unit/certificates/manager/test_castellan_mgr.py octavia/tests/unit/certificates/manager/test_local.py +octavia/tests/unit/certificates/manager/test_noop.py octavia/tests/unit/cmd/__init__.py octavia/tests/unit/cmd/test_agent.py octavia/tests/unit/cmd/test_driver_agent.py @@ -1101,11 +1102,14 @@ releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml releasenotes/notes/Fix-failover-ip-addresses-exhausted-69110b2fa4683e1a.yaml +releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml +releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml releasenotes/notes/Fix-noop-batch-member-update-issue-09b76787553e7752.yaml releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml releasenotes/notes/Fix-pool-alpn-older-haproxy-50514c1df4f77bcd.yaml +releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml releasenotes/notes/Increase-TCP-buffer-memory-max-and-enable-mtu-black-hole-detection.-0640432a7202400f.yaml releasenotes/notes/Octavia-flavors-2a96424c3d65c224.yaml @@ -1138,6 +1142,7 @@ releasenotes/notes/add-listener-tls-alpn-support-3056fb01b418c88f.yaml releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml releasenotes/notes/add-nftables-support-c86a89c420f6a42a.yaml +releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml releasenotes/notes/add-pool-tls-alpn-support-68cb94b828c9ba37.yaml releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml @@ -1208,13 +1213,16 @@ releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml releasenotes/notes/fix-amp-failover-missing-vrrp-port-9b5f13b9951b7edb.yaml releasenotes/notes/fix-amphora-failover-amphorav2-b19a76ccfdc75245.yaml +releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml releasenotes/notes/fix-amphora-to-support-centos-stream-9-e4c8599ae152d396.yaml releasenotes/notes/fix-amphora-to-support-rhel-9-b10091e81b48533a.yaml +releasenotes/notes/fix-amphorav1-subnet-member-9921d1ba387ff975.yaml releasenotes/notes/fix-amphorav2-failover-secgroup-c793de5e00b32653.yaml releasenotes/notes/fix-api-listener-update-sni-containers-6595c52e2de1f621.yaml releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml releasenotes/notes/fix-application-credential-tokens-with-barbican-3b7d13283206c124.yaml releasenotes/notes/fix-audit-map-for-failover-e63390399da6841d.yaml +releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml releasenotes/notes/fix-barbican-client-verfiy-689be1b9389bd1d8.yaml 
releasenotes/notes/fix-batch-member-update-race-condition-09b82e2cc3121e03.yaml releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml @@ -1245,11 +1253,14 @@ releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yaml releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml +releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml releasenotes/notes/fix-invalid-attribute-for-filtering-d2ddb95a1acbded2.yaml +releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml releasenotes/notes/fix-ipv6-address-enclosed-in-brackets-c1cfc4717465ba09.yaml releasenotes/notes/fix-ipv6-interface-configuration-61b1bd7d2c962cea.yaml releasenotes/notes/fix-ipv6-udp-health-message-ed94b35bbea396ec.yaml releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml +releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml releasenotes/notes/fix-listener-MAX_TIMEOUT-4c4fdf804a96c34b.yaml releasenotes/notes/fix-listener-creation-allowing-pool-protocol-b9e9ef147f6eeaf4.yaml @@ -1269,8 +1280,10 @@ releasenotes/notes/fix-no-resolvconf-rhel-dhclient-hook-36a1c3b1a3b03a3d.yaml releasenotes/notes/fix-nova-service_name-6bde4970047817f4.yaml releasenotes/notes/fix-nr_open-limit-value-7f475c3e301a608d.yaml +releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml +releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml releasenotes/notes/fix-ping-hm-on-centos-stream-6624f19c8da86e22.yaml releasenotes/notes/fix-ping-hm-with-haproxy-2.2-9b83777172fb8835.yaml releasenotes/notes/fix-plugging-member-subnets-8560cd9403ff79a7.yaml @@ -1283,6 +1296,7 @@ releasenotes/notes/fix-proxyv2-44a7627294922a8e.yaml releasenotes/notes/fix-qos-apply-after-failover-561abbd153ab88ee.yaml releasenotes/notes/fix-race-condiction-on-update-b5330c8fcf1800cd.yaml +releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml releasenotes/notes/fix-reschedule-of-jobboard-tasks-929c066dea9267fd.yaml releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml @@ -1292,8 +1306,10 @@ releasenotes/notes/fix-subnet-host_routes-amphorav2-3c079c5a3bfa1b3d.yaml releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml releasenotes/notes/fix-timeout-dict-in-failover-tasks-537456e0fe1d7cb8.yaml +releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml releasenotes/notes/fix-tls-container-serialization-1cb83ad4c9eca3b8.yaml releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml +releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml releasenotes/notes/fix-udp-and-sctp-listener-wrr-50de9dc0774a8ea1.yaml releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml releasenotes/notes/fix-udp-only-lbs-c4ca42106fc1e2bb.yaml @@ -1338,6 +1354,7 @@ releasenotes/notes/new-default_connection_limit-config-option-3ed9f0ed6ec2b514.yaml releasenotes/notes/octavia-active-standby-cec5d2ad4fd214d8.yaml releasenotes/notes/octavia-v2-api-c32a62b37c2c8f6f.yaml +releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml 
releasenotes/notes/octavia_v2_RBAC-0eb2b51aa6278435.yaml releasenotes/notes/per-amphora-statistics-api-5479605c7f3adb12.yaml releasenotes/notes/pike-release-35a1d632ce854d4a.yaml @@ -1347,6 +1364,8 @@ releasenotes/notes/provisioning_neutron_db_sync-c019d96a7b64fe20.yaml releasenotes/notes/py3-hmac-digest-81696f6b176e7ae4.yaml releasenotes/notes/recommend-haproxy-2.x-for-http2-697416c486e36840.yaml +releasenotes/notes/reduce-duration-failover-636032433984d911.yaml +releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml releasenotes/notes/remove-amp-ssh-access-allowed-e11dc011637b21dd.yaml releasenotes/notes/remove-amp_image_id-12a88bae6518455b.yaml @@ -1355,6 +1374,7 @@ releasenotes/notes/remove-deprecated-v1-resources-6360da3de27b74d3.yaml releasenotes/notes/remove-duplicated-cert_generator-option-83d18647dc1d2954.yaml releasenotes/notes/remove-status_update_threads-85a8b0307a04c164.yaml +releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml releasenotes/notes/remove-tenant_id-c0352efbfb3a54f9.yaml releasenotes/notes/remove_user_group_option-56ba749d0064a394.yaml releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e8.yaml diff -Nru octavia-10.1.0/octavia.egg-info/entry_points.txt octavia-10.1.1/octavia.egg-info/entry_points.txt --- octavia-10.1.0/octavia.egg-info/entry_points.txt 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/octavia.egg-info/entry_points.txt 2024-02-08 14:49:00.000000000 +0000 @@ -37,6 +37,7 @@ barbican_cert_manager = octavia.certificates.manager.barbican:BarbicanCertManager castellan_cert_manager = octavia.certificates.manager.castellan_mgr:CastellanCertManager local_cert_manager = octavia.certificates.manager.local:LocalCertManager +noop_cert_manager = octavia.certificates.manager.noop:NoopCertManager [octavia.compute.drivers] compute_noop_driver = octavia.compute.drivers.noop_driver.driver:NoopComputeDriver diff -Nru octavia-10.1.0/octavia.egg-info/pbr.json octavia-10.1.1/octavia.egg-info/pbr.json --- octavia-10.1.0/octavia.egg-info/pbr.json 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/octavia.egg-info/pbr.json 2024-02-08 14:49:00.000000000 +0000 @@ -1 +1 @@ -{"git_version": "b81454a2", "is_release": true} \ No newline at end of file +{"git_version": "8f1b0b92", "is_release": true} \ No newline at end of file diff -Nru octavia-10.1.0/octavia.egg-info/requires.txt octavia-10.1.1/octavia.egg-info/requires.txt --- octavia-10.1.0/octavia.egg-info/requires.txt 2023-07-19 12:06:08.000000000 +0000 +++ octavia-10.1.1/octavia.egg-info/requires.txt 2024-02-08 14:49:00.000000000 +0000 @@ -62,7 +62,7 @@ doc8>=0.6.0 fixtures>=3.0.0 flake8-import-order==0.12 -hacking>=3.0 +hacking<6.1.0 oslotest>=3.2.0 pylint<=2.15.10,>=2.5.3 python-subunit>=1.0.0 diff -Nru octavia-10.1.0/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml octavia-10.1.1/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml --- octavia-10.1.0/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-10.1.1/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml 2024-02-08 14:48:31.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed the ability to use the 'text/plain' mime type with the healthcheck + endpoint. 
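The healthcheck fix above restores plain-text responses from the Octavia API healthcheck endpoint. As a minimal sketch of how a monitoring client might exercise it (the host, port and path below are illustrative assumptions, not values taken from this release):

    # Minimal sketch: request the healthcheck endpoint as plain text.
    # The URL is a placeholder; adjust it to the actual API endpoint.
    import requests

    resp = requests.get(
        "http://octavia-api.example.com:9876/healthcheck",
        headers={"Accept": "text/plain"},  # the mime type fixed here
        timeout=10,
    )
    print(resp.status_code, resp.headers.get("Content-Type"))
    print(resp.text)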
diff -Nru octavia-10.1.0/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml octavia-10.1.1/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml
--- octavia-10.1.0/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed an issue where deleting the last listener from a load balancer
+    could trigger a failover.
diff -Nru octavia-10.1.0/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml octavia-10.1.1/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml
--- octavia-10.1.0/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Fixed an issue when using certificates with a blank subject or missing CN.
diff -Nru octavia-10.1.0/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml octavia-10.1.1/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml
--- octavia-10.1.0/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,4 @@
+---
+other:
+  - |
+    A noop certificate manager was added. Octavia certificate operations using the noop drivers will now be faster, as certificates are not validated.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml octavia-10.1.1/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml
--- octavia-10.1.0/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed the amphora haproxy_count stat to return the number of
+    haproxy processes that are running.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-amphorav1-subnet-member-9921d1ba387ff975.yaml octavia-10.1.1/releasenotes/notes/fix-amphorav1-subnet-member-9921d1ba387ff975.yaml
--- octavia-10.1.0/releasenotes/notes/fix-amphorav1-subnet-member-9921d1ba387ff975.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-amphorav1-subnet-member-9921d1ba387ff975.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Fixed a bug in amphorav1: the subnet of a member that was being deleted
+    was not unplugged from the amphora immediately, but only during the next
+    update of the members.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml octavia-10.1.1/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml
--- octavia-10.1.0/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed an issue where adding or deleting a member might have caused
+    Octavia to reconfigure the management port of the amphora by adding or
+    removing additional subnets. Octavia no longer updates the management
+    port during those tasks.
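The fix-bad-management-port-update note above matches the CalculateDelta test changes earlier in this diff, where the management network lookups were removed. A minimal sketch of the underlying idea, assuming hypothetical names (calculate_network_delta and its parameters are illustrative, not Octavia's actual task API):

    # Minimal sketch: compute which networks to plug/unplug on an amphora
    # while leaving the management network alone. All names are hypothetical.
    def calculate_network_delta(plugged_network_ids, desired_network_ids,
                                mgmt_network_id):
        plugged = set(plugged_network_ids)
        # Never touch the management network, even though it is not part of
        # the desired member/VIP networks.
        plugged.discard(mgmt_network_id)
        to_add = set(desired_network_ids) - plugged
        to_remove = plugged - set(desired_network_ids)
        return to_add, to_remove

    # Example: the plugged management network 'mgmt' is not scheduled for
    # removal; only the stale member network is.
    print(calculate_network_delta({'mgmt', 'member1'}, {'member2'}, 'mgmt'))
    # -> ({'member2'}, {'member1'})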
diff -Nru octavia-10.1.0/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml octavia-10.1.1/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml
--- octavia-10.1.0/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed a bug with HTTP/HTTPS health-monitors on pools with ALPN protocols
+    in the amphora-driver. The healthchecks sent by haproxy were flagged as
+    bad requests by the backend servers. The haproxy configuration was
+    updated to use ALPN for the healthchecks too.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml octavia-10.1.1/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml
--- octavia-10.1.0/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,11 @@
+---
+fixes:
+  - |
+    Fixed a bug that could have made the VIP port unreachable because of the
+    removal of some IP rules in the Amphora. It could have been triggered only
+    when sending a request from a subnet that is not the VIP subnet but that is
+    plugged as a member subnet.
+upgrade:
+  - |
+    A patch that fixes an issue making the VIP port unreachable because of
+    missing IP rules requires an update of the Amphora image.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml octavia-10.1.1/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml
--- octavia-10.1.0/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed an issue with load balancers stuck in a ``PENDING_*`` state during
+    database outages. Now when a task fails in Octavia, it retries updating
+    the ``provisioning_status`` of the load balancer until the database is
+    back (or gives up after a very long timeout, around 2h45min).
diff -Nru octavia-10.1.0/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml octavia-10.1.1/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml
--- octavia-10.1.0/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed a bug in octavia-status which reported an incorrect status for the
+    *amphorav2* driver when using the default *amphora* alias.
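The fix-lb-in-PENDING-on-DB-failure note above describes retrying the provisioning_status update until the database recovers. A minimal sketch of such a retry loop with tenacity (the helper, exception type and retry parameters are assumptions for illustration; only the ~2h45min figure comes from the note):

    # Minimal sketch: retry a DB update until it succeeds or a very long
    # deadline expires. The helper below is a stand-in that always fails.
    import tenacity

    def _update_provisioning_status(lb_id, status):
        # Placeholder for the real repository call; raises while the
        # database is down.
        raise ConnectionError("database unavailable")

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(ConnectionError),
        wait=tenacity.wait_exponential(multiplier=1, max=60),
        stop=tenacity.stop_after_delay(165 * 60),  # ~2h45min, per the note
        reraise=True,
    )
    def mark_lb_as_error(lb_id):
        _update_provisioning_status(lb_id, "ERROR")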
diff -Nru octavia-10.1.0/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml octavia-10.1.1/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml
--- octavia-10.1.0/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Modified the default Keepalived LVS persistence granularity
+    configuration value so that it is compatible with IPv6.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml octavia-10.1.1/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml
--- octavia-10.1.0/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed a race condition in the members batch update API call: the data
+    passed to the Octavia worker service may have been incorrect when
+    successive API calls were sent quickly, leaving the load balancer stuck
+    in the PENDING_UPDATE provisioning_status.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml octavia-10.1.1/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml
--- octavia-10.1.0/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Fixed an overly long timeout when attempting to start the VRRP service
+    on an unreachable amphora during a failover. A shorter, failover-specific
+    timeout is now used.
diff -Nru octavia-10.1.0/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml octavia-10.1.1/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml
--- octavia-10.1.0/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Fixed TLS-HELLO health-monitors in the amphora-driver.
diff -Nru octavia-10.1.0/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml octavia-10.1.1/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml
--- octavia-10.1.0/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Usage of ``castellan_cert_manager`` as cert_manager has been significantly
+    improved. Now you can define configuration options for castellan in
+    octavia.conf and they will be passed properly to the castellan backend.
+    This allows any supported castellan backend to be used for certificate storage.
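The castellan note above and the new noop_cert_manager entry point registered below in setup.cfg and entry_points.txt are both resolved the same way: the configured driver name is looked up in an entry-point namespace. A minimal sketch with stevedore (the namespace string is an assumption, since the hunks in this diff do not show the section header; in a real deployment the name comes from the cert_manager option in octavia.conf):

    # Minimal sketch: resolve a cert manager entry point with stevedore.
    # The namespace below is assumed, not taken from this diff.
    from stevedore import driver

    mgr = driver.DriverManager(
        namespace="octavia.cert_manager",
        name="noop_cert_manager",  # entry point added in this release
        invoke_on_load=True,
    )
    print(mgr.driver)  # the loaded NoopCertManager instance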
diff -Nru octavia-10.1.0/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml octavia-10.1.1/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml
--- octavia-10.1.0/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Reduced the duration of failovers of ACTIVE_STANDBY load balancers. Many
+    updates of an unreachable amphora may have been attempted during a
+    failover; now, if an amphora is not reachable at the first update, the
+    subsequent updates are skipped.
diff -Nru octavia-10.1.0/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml octavia-10.1.1/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml
--- octavia-10.1.0/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Reduced the duration of failovers of ACTIVE_STANDBY load balancers when
+    both amphorae are unreachable.
diff -Nru octavia-10.1.0/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml octavia-10.1.1/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml
--- octavia-10.1.0/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml 1970-01-01 00:00:00.000000000 +0000
+++ octavia-10.1.1/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml 2024-02-08 14:48:31.000000000 +0000
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed SQLAlchemy warnings about the relationship between the Tags object
+    and the other Octavia resources.
diff -Nru octavia-10.1.0/requirements.txt octavia-10.1.1/requirements.txt
--- octavia-10.1.0/requirements.txt 2023-07-19 12:05:36.000000000 +0000
+++ octavia-10.1.1/requirements.txt 2024-02-08 14:48:31.000000000 +0000
@@ -1,3 +1,7 @@
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date, but we do not test them, so there is no guarantee that they are all
+# correct. If you find any incorrect lower bounds, let us know or propose a fix.
+
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
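The reduce-duration-failover note above corresponds to the amphorae_status plumbing visible in the test changes earlier in this diff: the failover tasks now receive a dict keyed by amphora ID carrying an UNREACHABLE flag, and skip amphorae already found unreachable. A minimal sketch of that pattern (the constant value and update function are simplified stand-ins, not Octavia's actual task code):

    # Minimal sketch of the skip-unreachable pattern exercised by the tests
    # above. 'update_amphora' is a hypothetical stand-in for the real
    # per-amphora driver call.
    UNREACHABLE = "unreachable"

    def update_amphorae(amphorae, amphorae_status):
        for amp in amphorae:
            if amphorae_status.get(amp["id"], {}).get(UNREACHABLE):
                # The first update already found this amphora unreachable;
                # skipping avoids stacking more timeouts onto the failover.
                continue
            update_amphora(amp)

    def update_amphora(amp):
        print(f"updating amphora {amp['id']}")

    # Example: only id2 is updated.
    update_amphorae(
        [{"id": "id1"}, {"id": "id2"}],
        {"id1": {UNREACHABLE: True}, "id2": {UNREACHABLE: False}},
    )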
diff -Nru octavia-10.1.0/setup.cfg octavia-10.1.1/setup.cfg --- octavia-10.1.0/setup.cfg 2023-07-19 12:06:09.059678800 +0000 +++ octavia-10.1.1/setup.cfg 2024-02-08 14:49:01.180324300 +0000 @@ -95,6 +95,7 @@ local_cert_manager = octavia.certificates.manager.local:LocalCertManager barbican_cert_manager = octavia.certificates.manager.barbican:BarbicanCertManager castellan_cert_manager = octavia.certificates.manager.castellan_mgr:CastellanCertManager + noop_cert_manager = octavia.certificates.manager.noop:NoopCertManager octavia.barbican_auth = barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth octavia.plugins = diff -Nru octavia-10.1.0/test-requirements.txt octavia-10.1.1/test-requirements.txt --- octavia-10.1.0/test-requirements.txt 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/test-requirements.txt 2024-02-08 14:48:31.000000000 +0000 @@ -1,7 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -hacking>=3.0 # Apache-2.0 +hacking<6.1.0 # Apache-2.0 requests-mock>=1.2.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD diff -Nru octavia-10.1.0/tox.ini octavia-10.1.1/tox.ini --- octavia-10.1.0/tox.ini 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/tox.ini 2024-02-08 14:48:31.000000000 +0000 @@ -205,16 +205,6 @@ rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html -[testenv:lower-constraints] -deps = - -c{toxinidir}/lower-constraints.txt - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/requirements.txt -allowlist_externals = sh -commands = - sh -c 'OS_TEST_PATH={toxinidir}/octavia/tests/unit stestr run {posargs}' - sh -c 'OS_TEST_PATH={toxinidir}/octavia/tests/functional stestr run {posargs}' - [testenv:requirements] deps = -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements diff -Nru octavia-10.1.0/zuul.d/projects.yaml octavia-10.1.1/zuul.d/projects.yaml --- octavia-10.1.0/zuul.d/projects.yaml 2023-07-19 12:05:36.000000000 +0000 +++ octavia-10.1.1/zuul.d/projects.yaml 2024-02-08 14:48:31.000000000 +0000 @@ -5,7 +5,6 @@ - check-requirements - periodic-stable-jobs-neutron - openstack-cover-jobs - - openstack-lower-constraints-jobs - openstack-python3-yoga-jobs - publish-openstack-docs-pti - release-notes-jobs-python3