diff -Nru octavia-5.0.1/debian/changelog octavia-5.0.1/debian/changelog --- octavia-5.0.1/debian/changelog 2020-07-08 06:25:17.000000000 +0100 +++ octavia-5.0.1/debian/changelog 2020-10-15 20:15:24.000000000 +0100 @@ -1,3 +1,16 @@ +octavia (5.0.1-0ubuntu1~cloud1+test.1) bionic; urgency=medium + + [EXPERIMENTAL BACKPORT - TESTING ONLY] + * Backport fixes for lb failover (LP: #1899964) + - d/p/0000-Use-retry-for-AmphoraComputeConnectivityWait.patch + - d/p/0001-Validate-resource-access-when-creating-loadbalancer-.patch + - d/p/0002-Workaround-peer-name-starting-with-hyphen.patch + - d/p/0003-Refactor-the-failover-flows.patch + - d/p/0004-Add-missing-reload-method-in-amphora-noop-driver.patch + - d/p/0005-Fix-missing-params-in-amphora-base-and-noop-driver.patch + + -- Edward Hope-Morley Thu, 15 Oct 2020 20:15:24 +0100 + octavia (5.0.1-0ubuntu1~cloud1) bionic-train; urgency=medium * Fix batch creation of new members failure due to timeouts (LP: #1882657) diff -Nru octavia-5.0.1/debian/patches/0000-Use-retry-for-AmphoraComputeConnectivityWait.patch octavia-5.0.1/debian/patches/0000-Use-retry-for-AmphoraComputeConnectivityWait.patch --- octavia-5.0.1/debian/patches/0000-Use-retry-for-AmphoraComputeConnectivityWait.patch 1970-01-01 01:00:00.000000000 +0100 +++ octavia-5.0.1/debian/patches/0000-Use-retry-for-AmphoraComputeConnectivityWait.patch 2020-10-15 20:15:24.000000000 +0100 @@ -0,0 +1,369 @@ +From dad1357ebfa5600844b15a22a3d10f82a2d1ddbf Mon Sep 17 00:00:00 2001 +From: Ann Taraday +Date: Mon, 3 Jun 2019 19:41:47 +0400 +Subject: [PATCH] Use retry for AmphoraComputeConnectivityWait + +Use taskflow retry for connectivity wait. [1] + +This is required for the redis jobboard implementation, as each retry +expands the worker's claim on the job. This means that the worker is +processing the job and it should not be released to other workers. + +Adopted for v2 flows.
+ +[1] - https://docs.openstack.org/taskflow/latest/user/atoms.html#retry + +Story: 2005072 +Task: 33477 + +Change-Id: I2cf241ea965ad56ed70ebde83632ab855f5d859e +(cherry picked from commit 314b43af9a57a698a66d46c11892380bff315166) +--- + .../amphorae/driver_exceptions/exceptions.py | 6 +++ + octavia/amphorae/drivers/driver_base.py | 4 +- + .../drivers/haproxy/rest_api_driver.py | 37 ++++++++++++------- + .../amphorae/drivers/noop_driver/driver.py | 7 ++-- + octavia/common/constants.py | 1 + + octavia/controller/queue/v2/endpoints.py | 8 +--- + .../worker/v2/flows/amphora_flows.py | 20 +++++++--- + .../worker/v2/tasks/amphora_driver_tasks.py | 26 ++++++++++++- + .../controller/queue/v2/test_endpoints.py | 8 ++-- + .../v2/tasks/test_amphora_driver_tasks.py | 6 ++- + 10 files changed, 86 insertions(+), 37 deletions(-) + +diff --git a/octavia/amphorae/driver_exceptions/exceptions.py b/octavia/amphorae/driver_exceptions/exceptions.py +index b93b4340..3380e5d2 100644 +--- a/octavia/amphorae/driver_exceptions/exceptions.py ++++ b/octavia/amphorae/driver_exceptions/exceptions.py +@@ -121,3 +121,9 @@ class NodeProvisioningError(ProvisioningErrors): + class AmpDriverNotImplementedError(AmphoraDriverError): + + message = _('Amphora does not implement this feature.') ++ ++ ++class AmpConnectionRetry(AmphoraDriverError): ++ ++ message = _('Could not connect to amphora, exception caught: ' ++ '%(exception)s') +diff --git a/octavia/amphorae/drivers/driver_base.py b/octavia/amphorae/drivers/driver_base.py +index f2c72ed8..215b95ef 100644 +--- a/octavia/amphorae/drivers/driver_base.py ++++ b/octavia/amphorae/drivers/driver_base.py +@@ -82,11 +82,13 @@ class AmphoraLoadBalancerDriver(object): + """ + + @abc.abstractmethod +- def get_info(self, amphora): ++ def get_info(self, amphora, raise_retry_exception=False): + """Returns information about the amphora. 
+ + :param amphora: amphora object, need to use its id property + :type amphora: octavia.db.models.Amphora ++ :param raise_retry_exception: Flag if outside task should be retried ++ :type boolean: False by default + :returns: return a value list (amphora.id, status flag--'info') + + At this moment, we just build the basic structure for testing, will +diff --git a/octavia/amphorae/drivers/haproxy/rest_api_driver.py b/octavia/amphorae/drivers/haproxy/rest_api_driver.py +index 47c720a3..aee2fb3e 100644 +--- a/octavia/amphorae/drivers/haproxy/rest_api_driver.py ++++ b/octavia/amphorae/drivers/haproxy/rest_api_driver.py +@@ -91,7 +91,8 @@ class HaproxyAmphoraLoadBalancerDriver( + + return haproxy_version_string.split('.')[:2] + +- def _populate_amphora_api_version(self, amphora): ++ def _populate_amphora_api_version(self, amphora, ++ raise_retry_exception=False): + """Populate the amphora object with the api_version + + This will query the amphora for version discovery and populate +@@ -102,7 +103,8 @@ class HaproxyAmphoraLoadBalancerDriver( + if not getattr(amphora, 'api_version', None): + try: + amphora.api_version = self.clients['base'].get_api_version( +- amphora)['api_version'] ++ amphora, ++ raise_retry_exception=raise_retry_exception)['api_version'] + except exc.NotFound: + # Amphora is too old for version discovery, default to 0.5 + amphora.api_version = '0.5' +@@ -364,9 +366,11 @@ class HaproxyAmphoraLoadBalancerDriver( + self.clients[amphora.api_version].delete_listener( + amphora, listener.load_balancer.id) + +- def get_info(self, amphora): +- self._populate_amphora_api_version(amphora) +- return self.clients[amphora.api_version].get_info(amphora) ++ def get_info(self, amphora, raise_retry_exception=False): ++ self._populate_amphora_api_version( ++ amphora, raise_retry_exception=raise_retry_exception) ++ return self.clients[amphora.api_version].get_info( ++ amphora, raise_retry_exception=raise_retry_exception) + + def get_diagnostics(self, amphora): + pass +@@ -624,7 +628,7 @@ class AmphoraAPIClientBase(object): + port=CONF.haproxy_amphora.bind_port) + + def request(self, method, amp, path='/', timeout_dict=None, +- retry_404=True, **kwargs): ++ retry_404=True, raise_retry_exception=False, **kwargs): + cfg_ha_amp = CONF.haproxy_amphora + if timeout_dict is None: + timeout_dict = {} +@@ -689,7 +693,13 @@ class AmphoraAPIClientBase(object): + exception = e + LOG.warning("Could not connect to instance. Retrying.") + time.sleep(conn_retry_interval) +- ++ if raise_retry_exception: ++ # For taskflow persistence cause attribute should ++ # be serializable to JSON. Pass None, as cause exception ++ # is described in the expection message. ++ six.raise_from( ++ driver_except.AmpConnectionRetry(exception=str(e)), ++ None) + LOG.error("Connection retries (currently set to %(max_retries)s) " + "exhausted. The amphora is unavailable. 
Reason: " + "%(exception)s", +@@ -697,9 +707,10 @@ class AmphoraAPIClientBase(object): + 'exception': exception}) + raise driver_except.TimeOutException() + +- def get_api_version(self, amp): ++ def get_api_version(self, amp, raise_retry_exception=False): + amp.api_version = None +- r = self.get(amp, retry_404=False) ++ r = self.get(amp, retry_404=False, ++ raise_retry_exception=raise_retry_exception) + # Handle 404 special as we don't want to log an ERROR on 404 + exc.check_exception(r, (404,)) + if r.status_code == 404: +@@ -766,8 +777,8 @@ class AmphoraAPIClient0_5(AmphoraAPIClientBase): + amp, 'listeners/{listener_id}'.format(listener_id=listener_id)) + return exc.check_exception(r, (404,)) + +- def get_info(self, amp): +- r = self.get(amp, "info") ++ def get_info(self, amp, raise_retry_exception=False): ++ r = self.get(amp, "info", raise_retry_exception=raise_retry_exception) + if exc.check_exception(r): + return r.json() + return None +@@ -896,8 +907,8 @@ class AmphoraAPIClient1_0(AmphoraAPIClientBase): + amp, 'listeners/{object_id}'.format(object_id=object_id)) + return exc.check_exception(r, (404,)) + +- def get_info(self, amp): +- r = self.get(amp, "info") ++ def get_info(self, amp, raise_retry_exception=False): ++ r = self.get(amp, "info", raise_retry_exception=raise_retry_exception) + if exc.check_exception(r): + return r.json() + return None +diff --git a/octavia/amphorae/drivers/noop_driver/driver.py b/octavia/amphorae/drivers/noop_driver/driver.py +index bd53ee5c..6ce3de70 100644 +--- a/octavia/amphorae/drivers/noop_driver/driver.py ++++ b/octavia/amphorae/drivers/noop_driver/driver.py +@@ -73,7 +73,7 @@ class NoopManager(object): + listener.load_balancer.vip.ip_address)] = ( + listener, listener.load_balancer.vip, 'delete') + +- def get_info(self, amphora): ++ def get_info(self, amphora, raise_retry_exception=False): + LOG.debug("Amphora %s no-op, info amphora %s", + self.__class__.__name__, amphora.id) + self.amphoraconfig[amphora.id] = (amphora.id, 'get_info') +@@ -138,9 +138,10 @@ class NoopAmphoraLoadBalancerDriver( + + self.driver.delete(listener) + +- def get_info(self, amphora): ++ def get_info(self, amphora, raise_retry_exception=False): + +- self.driver.get_info(amphora) ++ self.driver.get_info(amphora, ++ raise_retry_exception=raise_retry_exception) + + def get_diagnostics(self, amphora): + +diff --git a/octavia/common/constants.py b/octavia/common/constants.py +index 1c98116f..2c505c11 100644 +--- a/octavia/common/constants.py ++++ b/octavia/common/constants.py +@@ -383,6 +383,7 @@ VRRP_GROUP = 'vrrp_group' + # Taskflow flow and task names + CERT_ROTATE_AMPHORA_FLOW = 'octavia-cert-rotate-amphora-flow' + CREATE_AMPHORA_FLOW = 'octavia-create-amphora-flow' ++CREATE_AMPHORA_RETRY_SUBFLOW = 'octavia-create-amphora-retry-subflow' + CREATE_AMPHORA_FOR_LB_FLOW = 'octavia-create-amp-for-lb-flow' + CREATE_HEALTH_MONITOR_FLOW = 'octavia-create-health-monitor-flow' + CREATE_LISTENER_FLOW = 'octavia-create-listener_flow' +diff --git a/octavia/controller/queue/v2/endpoints.py b/octavia/controller/queue/v2/endpoints.py +index 00eaef74..fa4583f1 100644 +--- a/octavia/controller/queue/v2/endpoints.py ++++ b/octavia/controller/queue/v2/endpoints.py +@@ -15,9 +15,9 @@ + from oslo_config import cfg + from oslo_log import log as logging + import oslo_messaging as messaging +-from stevedore import driver as stevedore_driver + + from octavia.common import constants ++from octavia.controller.worker.v2 import controller_worker + + CONF = cfg.CONF + +@@ -34,11 +34,7 @@ class 
Endpoints(object): + version='2.0') + + def __init__(self): +- self.worker = stevedore_driver.DriverManager( +- namespace='octavia.plugins', +- name=CONF.octavia_plugins, +- invoke_on_load=True +- ).driver ++ self.worker = controller_worker.ControllerWorker() + + def create_load_balancer(self, context, load_balancer_id, + flavor=None): +diff --git a/octavia/controller/worker/v2/flows/amphora_flows.py b/octavia/controller/worker/v2/flows/amphora_flows.py +index a983b302..608ddc43 100644 +--- a/octavia/controller/worker/v2/flows/amphora_flows.py ++++ b/octavia/controller/worker/v2/flows/amphora_flows.py +@@ -70,9 +70,14 @@ class AmphoraFlows(object): + create_amphora_flow.add(database_tasks.UpdateAmphoraInfo( + requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), + provides=constants.AMPHORA)) +- create_amphora_flow.add( ++ retry_subflow = linear_flow.Flow( ++ constants.CREATE_AMPHORA_RETRY_SUBFLOW, ++ retry=amphora_driver_tasks.AmpRetry()) ++ retry_subflow.add( + amphora_driver_tasks.AmphoraComputeConnectivityWait( +- requires=constants.AMPHORA)) ++ requires=constants.AMPHORA, ++ inject={'raise_retry_exception': True})) ++ create_amphora_flow.add(retry_subflow) + create_amphora_flow.add(database_tasks.ReloadAmphora( + requires=constants.AMPHORA_ID, + provides=constants.AMPHORA)) +@@ -194,10 +199,15 @@ class AmphoraFlows(object): + name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO, + requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), + provides=constants.AMPHORA)) +- create_amp_for_lb_subflow.add( ++ retry_task = sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT ++ retry_subflow = linear_flow.Flow( ++ constants.CREATE_AMPHORA_RETRY_SUBFLOW, ++ retry=amphora_driver_tasks.AmpRetry()) ++ retry_subflow.add( + amphora_driver_tasks.AmphoraComputeConnectivityWait( +- name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT, +- requires=constants.AMPHORA)) ++ name=retry_task, requires=constants.AMPHORA, ++ inject={'raise_retry_exception': True})) ++ create_amp_for_lb_subflow.add(retry_subflow) + create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( + name=sf_name + '-' + constants.AMPHORA_FINALIZE, + requires=constants.AMPHORA)) +diff --git a/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py b/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +index 5a2f0768..8f9ed123 100644 +--- a/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py ++++ b/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +@@ -18,6 +18,7 @@ from oslo_config import cfg + from oslo_log import log as logging + import six + from stevedore import driver as stevedore_driver ++from taskflow import retry + from taskflow import task + from taskflow.types import failure + +@@ -49,6 +50,26 @@ class BaseAmphoraTask(task.Task): + self.task_utils = task_utilities.TaskUtils() + + ++class AmpRetry(retry.Times): ++ ++ def on_failure(self, history, *args, **kwargs): ++ last_errors = history[-1][1] ++ max_retry_attempt = CONF.haproxy_amphora.connection_max_retries ++ for task_name, ex_info in last_errors.items(): ++ if len(history) <= max_retry_attempt: ++ # When taskflow persistance is enabled and flow/task state is ++ # saved in the backend. If flow(task) is restored(restart of ++ # worker,etc) we are getting ex_info as None - we need to RETRY ++ # task to check its real state. 
++ if ex_info is None or ex_info._exc_info is None: ++ return retry.RETRY ++ excp = ex_info._exc_info[1] ++ if isinstance(excp, driver_except.AmpConnectionRetry): ++ return retry.RETRY ++ ++ return retry.REVERT_ALL ++ ++ + class AmpListenersUpdate(BaseAmphoraTask): + """Task to update the listeners on one amphora.""" + +@@ -323,10 +344,11 @@ class AmphoraVRRPStart(BaseAmphoraTask): + class AmphoraComputeConnectivityWait(BaseAmphoraTask): + """Task to wait for the compute instance to be up.""" + +- def execute(self, amphora): ++ def execute(self, amphora, raise_retry_exception=False): + """Execute get_info routine for an amphora until it responds.""" + try: +- amp_info = self.amphora_driver.get_info(amphora) ++ amp_info = self.amphora_driver.get_info( ++ amphora, raise_retry_exception=raise_retry_exception) + LOG.debug('Successfuly connected to amphora %s: %s', + amphora.id, amp_info) + except driver_except.TimeOutException: +diff --git a/octavia/tests/unit/controller/queue/v2/test_endpoints.py b/octavia/tests/unit/controller/queue/v2/test_endpoints.py +index 226f164e..a5207bc9 100644 +--- a/octavia/tests/unit/controller/queue/v2/test_endpoints.py ++++ b/octavia/tests/unit/controller/queue/v2/test_endpoints.py +@@ -18,7 +18,6 @@ from oslo_config import fixture as oslo_fixture + from oslo_utils import uuidutils + + from octavia.controller.queue.v2 import endpoints +-from octavia.controller.worker.v2 import controller_worker + from octavia.tests.unit import base + + +@@ -30,10 +29,9 @@ class TestEndpoints(base.TestCase): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(octavia_plugins='hot_plug_plugin') + +- mock_class = mock.create_autospec(controller_worker.ControllerWorker) +- self.worker_patcher = mock.patch('octavia.controller.queue.v2.' +- 'endpoints.stevedore_driver') +- self.worker_patcher.start().ControllerWorker = mock_class ++ self.worker_patcher = mock.patch('octavia.controller.worker.v2.' 
++ 'controller_worker.ControllerWorker') ++ self.worker_patcher.start() + + self.ep = endpoints.Endpoints() + self.context = {} +diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +index 960b8e45..170b60e6 100644 +--- a/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py ++++ b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +@@ -580,8 +580,10 @@ class TestAmphoraDriverTasks(base.TestCase): + mock_amphora_repo_update): + amp_compute_conn_wait_obj = ( + amphora_driver_tasks.AmphoraComputeConnectivityWait()) +- amp_compute_conn_wait_obj.execute(_amphora_mock) +- mock_driver.get_info.assert_called_once_with(_amphora_mock) ++ amp_compute_conn_wait_obj.execute(_amphora_mock, ++ raise_retry_exception=True) ++ mock_driver.get_info.assert_called_once_with( ++ _amphora_mock, raise_retry_exception=True) + + mock_driver.get_info.side_effect = driver_except.TimeOutException() + self.assertRaises(driver_except.TimeOutException, +-- +2.17.1 + diff -Nru octavia-5.0.1/debian/patches/0001-Validate-resource-access-when-creating-loadbalancer-.patch octavia-5.0.1/debian/patches/0001-Validate-resource-access-when-creating-loadbalancer-.patch --- octavia-5.0.1/debian/patches/0001-Validate-resource-access-when-creating-loadbalancer-.patch 1970-01-01 01:00:00.000000000 +0100 +++ octavia-5.0.1/debian/patches/0001-Validate-resource-access-when-creating-loadbalancer-.patch 2020-10-15 20:15:24.000000000 +0100 @@ -0,0 +1,515 @@ +From 1933ede06dafff0c3d6c3225d65b5aaac0ed0a3c Mon Sep 17 00:00:00 2001 +From: Lingxian Kong +Date: Thu, 9 Apr 2020 13:58:30 +1200 +Subject: [PATCH] Validate resource access when creating loadbalancer or member + +* Make sure the user has access to the subnet in the request for + creating or updating pool member. +* Make sure the user has access to port or subnet or network for + creating load balancer + +Story: 2007531 +Task: 39339 + +Change-Id: I479019a911b5a1acfc1951d1cbbc2a351089cb4d +(cherry picked from commit a33d42fa59ada38dd5a5d3114a9a83f03a30d0b2) +--- + etc/octavia.conf | 10 +++ + octavia/api/v2/controllers/load_balancer.py | 25 ++++--- + octavia/api/v2/controllers/member.py | 8 +-- + octavia/common/clients.py | 28 ++++++++ + octavia/common/config.py | 6 ++ + octavia/common/validate.py | 13 ++-- + octavia/network/base.py | 9 ++- + octavia/network/drivers/neutron/base.py | 21 +++--- + octavia/network/drivers/noop_driver/driver.py | 6 +- + .../unit/network/drivers/neutron/test_base.py | 67 +++++++++++++++++++ + ...ow-invisible-subnets-e30b0b5fbd216294.yaml | 16 +++++ + 11 files changed, 176 insertions(+), 33 deletions(-) + create mode 100644 releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml + +diff --git a/etc/octavia.conf b/etc/octavia.conf +index e58e9f9e..9f6d4b07 100644 +--- a/etc/octavia.conf ++++ b/etc/octavia.conf +@@ -141,21 +141,31 @@ + [networking] + # The maximum attempts to retry an action with the networking service. + # max_retries = 15 ++ + # Seconds to wait before retrying an action with the networking service. + # retry_interval = 1 ++ + # The maximum time to wait, in seconds, for a port to detach from an amphora + # port_detach_timeout = 300 ++ + # Allow/disallow specific network object types when creating VIPs. + # allow_vip_network_id = True + # allow_vip_subnet_id = True + # allow_vip_port_id = True ++ + # List of network_ids that are valid for VIP creation. 
+ # If this field empty, no validation is performed. + # valid_vip_networks = ++ + # List of reserved IP addresses that cannot be used for member addresses + # The default is the nova metadata service address + # reserved_ips = ['169.254.169.254'] + ++# When True, users can use network resources they cannot normally see as VIP ++# or member subnets. Making this True may allow users to access resources on ++# subnets they do not normally have access to via neutron RBAC policies. ++# allow_invisible_resource_usage = False ++ + [haproxy_amphora] + # base_path = /var/lib/octavia + # base_cert_dir = /var/lib/octavia/certs +diff --git a/octavia/api/v2/controllers/load_balancer.py b/octavia/api/v2/controllers/load_balancer.py +index 7520e8bf..f5f7ee49 100644 +--- a/octavia/api/v2/controllers/load_balancer.py ++++ b/octavia/api/v2/controllers/load_balancer.py +@@ -117,10 +117,12 @@ class LoadBalancersController(base.BaseController): + state=prov_status, id=id) + + @staticmethod +- def _validate_network_and_fill_or_validate_subnet(load_balancer): ++ def _validate_network_and_fill_or_validate_subnet(load_balancer, ++ context=None): + network = validate.network_exists_optionally_contains_subnet( + network_id=load_balancer.vip_network_id, +- subnet_id=load_balancer.vip_subnet_id) ++ subnet_id=load_balancer.vip_subnet_id, ++ context=context) + if not load_balancer.vip_subnet_id: + network_driver = utils.get_network_driver() + if load_balancer.vip_address: +@@ -168,8 +170,10 @@ class LoadBalancersController(base.BaseController): + break + + @staticmethod +- def _validate_port_and_fill_or_validate_subnet(load_balancer): +- port = validate.port_exists(port_id=load_balancer.vip_port_id) ++ def _validate_port_and_fill_or_validate_subnet(load_balancer, ++ context=None): ++ port = validate.port_exists(port_id=load_balancer.vip_port_id, ++ context=context) + validate.check_port_in_use(port) + load_balancer.vip_network_id = port.network_id + +@@ -184,7 +188,8 @@ class LoadBalancersController(base.BaseController): + + # Identify the subnet for this port + if load_balancer.vip_subnet_id: +- validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id) ++ validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id, ++ context=context) + else: + if load_balancer.vip_address: + for port_fixed_ip in port.fixed_ips: +@@ -202,7 +207,7 @@ class LoadBalancersController(base.BaseController): + "VIP port's subnet could not be determined. 
Please " + "specify either a VIP subnet or address.")) + +- def _validate_vip_request_object(self, load_balancer): ++ def _validate_vip_request_object(self, load_balancer, context=None): + allowed_network_objects = [] + if CONF.networking.allow_vip_port_id: + allowed_network_objects.append('vip_port_id') +@@ -234,10 +239,12 @@ class LoadBalancersController(base.BaseController): + + # Validate the port id + if load_balancer.vip_port_id: +- self._validate_port_and_fill_or_validate_subnet(load_balancer) ++ self._validate_port_and_fill_or_validate_subnet(load_balancer, ++ context=context) + # If no port id, validate the network id (and subnet if provided) + elif load_balancer.vip_network_id: +- self._validate_network_and_fill_or_validate_subnet(load_balancer) ++ self._validate_network_and_fill_or_validate_subnet(load_balancer, ++ context=context) + # Validate just the subnet id + elif load_balancer.vip_subnet_id: + subnet = validate.subnet_exists( +@@ -347,7 +354,7 @@ class LoadBalancersController(base.BaseController): + self._auth_validate_action(context, load_balancer.project_id, + constants.RBAC_POST) + +- self._validate_vip_request_object(load_balancer) ++ self._validate_vip_request_object(load_balancer, context=context) + + self._validate_flavor(context.session, load_balancer) + +diff --git a/octavia/api/v2/controllers/member.py b/octavia/api/v2/controllers/member.py +index 6f28b25d..c1f655c1 100644 +--- a/octavia/api/v2/controllers/member.py ++++ b/octavia/api/v2/controllers/member.py +@@ -149,9 +149,9 @@ class MemberController(base.BaseController): + validate.ip_not_reserved(member.address) + + # Validate member subnet +- if member.subnet_id and not validate.subnet_exists(member.subnet_id): +- raise exceptions.NotFound(resource='Subnet', +- id=member.subnet_id) ++ if (member.subnet_id and ++ not validate.subnet_exists(member.subnet_id, context=context)): ++ raise exceptions.NotFound(resource='Subnet', id=member.subnet_id) + pool = self.repositories.pool.get(context.session, id=self.pool_id) + member.project_id, provider = self._get_lb_project_id_provider( + context.session, pool.load_balancer_id) +@@ -345,7 +345,7 @@ class MembersController(MemberController): + # Validate member subnets + for member in members: + if member.subnet_id and not validate.subnet_exists( +- member.subnet_id): ++ member.subnet_id, context=context): + raise exceptions.NotFound(resource='Subnet', + id=member.subnet_id) + +diff --git a/octavia/common/clients.py b/octavia/common/clients.py +index 1b1277ca..58795b34 100644 +--- a/octavia/common/clients.py ++++ b/octavia/common/clients.py +@@ -12,6 +12,8 @@ + + from cinderclient import client as cinder_client + from glanceclient import client as glance_client ++from keystoneauth1.identity.generic import token ++from keystoneauth1 import session + from neutronclient.neutron import client as neutron_client + from novaclient import api_versions + from novaclient import client as nova_client +@@ -107,6 +109,32 @@ class NeutronAuth(object): + LOG.exception("Error creating Neutron client.") + return cls.neutron_client + ++ @classmethod ++ def get_user_neutron_client(cls, context): ++ # get a normal session ++ ksession = keystone.KeystoneSession() ++ service_auth = ksession.get_auth() ++ ++ # make user auth and swap it in session ++ user_auth = token.Token(auth_url=service_auth.auth_url, ++ token=context.auth_token, ++ project_id=context.project_id) ++ user_session = session.Session(auth=user_auth) ++ ++ kwargs = { ++ 'session': user_session, ++ 'region_name': 
CONF.neutron.region_name, ++ 'endpoint_type': CONF.neutron.endpoint_type, ++ 'service_name': CONF.neutron.service_name, ++ 'insecure': CONF.neutron.insecure, ++ 'ca_cert': CONF.neutron.ca_certificates_file ++ } ++ if CONF.neutron.endpoint: ++ kwargs['endpoint_override'] = CONF.neutron.endpoint ++ ++ # create neutron client using user's session ++ return neutron_client.Client(NEUTRON_VERSION, **kwargs) ++ + + class GlanceAuth(object): + glance_client = None +diff --git a/octavia/common/config.py b/octavia/common/config.py +index e60a335f..d7d6dc1c 100644 +--- a/octavia/common/config.py ++++ b/octavia/common/config.py +@@ -196,6 +196,12 @@ networking_opts = [ + help=_('List of IP addresses reserved from being used for ' + 'member addresses. IPv6 addresses should be in ' + 'expanded, uppercase form.')), ++ cfg.BoolOpt('allow_invisible_resource_usage', default=False, ++ help=_("When True, users can use network resources they " ++ "cannot normally see as VIP or member subnets. Making " ++ "this True may allow users to access resources on " ++ "subnets they do not normally have access to via " ++ "neutron RBAC policies.")), + ] + + healthmanager_opts = [ +diff --git a/octavia/common/validate.py b/octavia/common/validate.py +index 33a3b96b..25c4ce5b 100644 +--- a/octavia/common/validate.py ++++ b/octavia/common/validate.py +@@ -313,11 +313,11 @@ def sanitize_l7policy_api_args(l7policy, create=False): + return l7policy + + +-def port_exists(port_id): ++def port_exists(port_id, context=None): + """Raises an exception when a port does not exist.""" + network_driver = utils.get_network_driver() + try: +- port = network_driver.get_port(port_id) ++ port = network_driver.get_port(port_id, context=context) + except Exception: + raise exceptions.InvalidSubresource(resource='Port', id=port_id) + return port +@@ -332,11 +332,11 @@ def check_port_in_use(port): + return False + + +-def subnet_exists(subnet_id): ++def subnet_exists(subnet_id, context=None): + """Raises an exception when a subnet does not exist.""" + network_driver = utils.get_network_driver() + try: +- subnet = network_driver.get_subnet(subnet_id) ++ subnet = network_driver.get_subnet(subnet_id, context=context) + except Exception: + raise exceptions.InvalidSubresource(resource='Subnet', id=subnet_id) + return subnet +@@ -359,14 +359,15 @@ def qos_extension_enabled(network_driver): + "VIP QoS policy is not allowed in this deployment.")) + + +-def network_exists_optionally_contains_subnet(network_id, subnet_id=None): ++def network_exists_optionally_contains_subnet(network_id, subnet_id=None, ++ context=None): + """Raises an exception when a network does not exist. + + If a subnet is provided, also validate the network contains that subnet. + """ + network_driver = utils.get_network_driver() + try: +- network = network_driver.get_network(network_id) ++ network = network_driver.get_network(network_id, context=context) + except Exception: + raise exceptions.InvalidSubresource(resource='Network', id=network_id) + if subnet_id: +diff --git a/octavia/network/base.py b/octavia/network/base.py +index 2f1a3a39..f7c5bc2a 100644 +--- a/octavia/network/base.py ++++ b/octavia/network/base.py +@@ -186,28 +186,31 @@ class AbstractNetworkDriver(object): + """ + + @abc.abstractmethod +- def get_network(self, network_id): ++ def get_network(self, network_id, context=None): + """Retrieves network from network id. 
+ + :param network_id: id of an network to retrieve ++ :param context: A request context + :return: octavia.network.data_models.Network + :raises: NetworkException, NetworkNotFound + """ + + @abc.abstractmethod +- def get_subnet(self, subnet_id): ++ def get_subnet(self, subnet_id, context=None): + """Retrieves subnet from subnet id. + + :param subnet_id: id of a subnet to retrieve ++ :param context: A request context + :return: octavia.network.data_models.Subnet + :raises: NetworkException, SubnetNotFound + """ + + @abc.abstractmethod +- def get_port(self, port_id): ++ def get_port(self, port_id, context=None): + """Retrieves port from port id. + + :param port_id: id of a port to retrieve ++ :param context: A request context + :return: octavia.network.data_models.Port + :raises: NetworkException, PortNotFound + """ +diff --git a/octavia/network/drivers/neutron/base.py b/octavia/network/drivers/neutron/base.py +index 6dfe0dc5..def22f0b 100644 +--- a/octavia/network/drivers/neutron/base.py ++++ b/octavia/network/drivers/neutron/base.py +@@ -173,9 +173,14 @@ class BaseNeutronDriver(base.AbstractNetworkDriver): + return [self._port_to_octavia_interface( + compute_id, port) for port in ports['ports']] + +- def _get_resource(self, resource_type, resource_id): ++ def _get_resource(self, resource_type, resource_id, context=None): ++ neutron_client = self.neutron_client ++ if context and not CONF.networking.allow_invisible_resource_usage: ++ neutron_client = clients.NeutronAuth.get_user_neutron_client( ++ context) ++ + try: +- resource = getattr(self.neutron_client, 'show_%s' % ++ resource = getattr(neutron_client, 'show_%s' % + resource_type)(resource_id) + return getattr(utils, 'convert_%s_dict_to_model' % + resource_type)(resource) +@@ -225,14 +230,14 @@ class BaseNeutronDriver(base.AbstractNetworkDriver): + LOG.exception(message) + raise base.NetworkException(message) + +- def get_network(self, network_id): +- return self._get_resource('network', network_id) ++ def get_network(self, network_id, context=None): ++ return self._get_resource('network', network_id, context=context) + +- def get_subnet(self, subnet_id): +- return self._get_resource('subnet', subnet_id) ++ def get_subnet(self, subnet_id, context=None): ++ return self._get_resource('subnet', subnet_id, context=context) + +- def get_port(self, port_id): +- return self._get_resource('port', port_id) ++ def get_port(self, port_id, context=None): ++ return self._get_resource('port', port_id, context=context) + + def get_network_by_name(self, network_name): + return self._get_resources_by_filters( +diff --git a/octavia/network/drivers/noop_driver/driver.py b/octavia/network/drivers/noop_driver/driver.py +index 3d686aa0..bc32e94b 100644 +--- a/octavia/network/drivers/noop_driver/driver.py ++++ b/octavia/network/drivers/noop_driver/driver.py +@@ -310,13 +310,13 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver): + def update_vip(self, loadbalancer, for_delete=False): + self.driver.update_vip(loadbalancer, for_delete) + +- def get_network(self, network_id): ++ def get_network(self, network_id, context=None): + return self.driver.get_network(network_id) + +- def get_subnet(self, subnet_id): ++ def get_subnet(self, subnet_id, context=None): + return self.driver.get_subnet(subnet_id) + +- def get_port(self, port_id): ++ def get_port(self, port_id, context=None): + return self.driver.get_port(port_id) + + def get_qos_policy(self, qos_policy_id): +diff --git a/octavia/tests/unit/network/drivers/neutron/test_base.py 
b/octavia/tests/unit/network/drivers/neutron/test_base.py +index 5f59c715..90ce7829 100644 +--- a/octavia/tests/unit/network/drivers/neutron/test_base.py ++++ b/octavia/tests/unit/network/drivers/neutron/test_base.py +@@ -14,6 +14,8 @@ + + import mock + from neutronclient.common import exceptions as neutron_client_exceptions ++from oslo_config import cfg ++from oslo_config import fixture as oslo_fixture + + from octavia.common import clients + from octavia.common import data_models +@@ -200,6 +202,9 @@ class TestBaseNeutronNetworkDriver(base.TestCase): + port2['fixed_ips'][0]['ip_address']]) + + def test_get_network(self): ++ config = self.useFixture(oslo_fixture.Config(cfg.CONF)) ++ config.config(group="networking", allow_invisible_resource_usage=True) ++ + show_network = self.driver.neutron_client.show_network + show_network.return_value = {'network': { + 'id': t_constants.MOCK_NETWORK_ID, +@@ -210,7 +215,25 @@ class TestBaseNeutronNetworkDriver(base.TestCase): + self.assertEqual(1, len(network.subnets)) + self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) + ++ @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") ++ def test_get_user_network(self, neutron_client_mock): ++ show_network = neutron_client_mock.return_value.show_network ++ show_network.return_value = {'network': { ++ 'id': t_constants.MOCK_NETWORK_ID, ++ 'subnets': [t_constants.MOCK_SUBNET_ID]}} ++ ++ network = self.driver.get_network(t_constants.MOCK_NETWORK_ID, ++ context=mock.ANY) ++ ++ self.assertIsInstance(network, network_models.Network) ++ self.assertEqual(t_constants.MOCK_NETWORK_ID, network.id) ++ self.assertEqual(1, len(network.subnets)) ++ self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) ++ + def test_get_subnet(self): ++ config = self.useFixture(oslo_fixture.Config(cfg.CONF)) ++ config.config(group="networking", allow_invisible_resource_usage=True) ++ + show_subnet = self.driver.neutron_client.show_subnet + show_subnet.return_value = {'subnet': { + 'id': t_constants.MOCK_SUBNET_ID, +@@ -222,7 +245,26 @@ class TestBaseNeutronNetworkDriver(base.TestCase): + self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) + self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) + ++ @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") ++ def test_get_user_subnet(self, neutron_client_mock): ++ show_subnet = neutron_client_mock.return_value.show_subnet ++ show_subnet.return_value = {'subnet': { ++ 'id': t_constants.MOCK_SUBNET_ID, ++ 'gateway_ip': t_constants.MOCK_IP_ADDRESS, ++ 'cidr': t_constants.MOCK_CIDR}} ++ ++ subnet = self.driver.get_subnet(t_constants.MOCK_SUBNET_ID, ++ context=mock.ANY) ++ ++ self.assertIsInstance(subnet, network_models.Subnet) ++ self.assertEqual(t_constants.MOCK_SUBNET_ID, subnet.id) ++ self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) ++ self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) ++ + def test_get_port(self): ++ config = self.useFixture(oslo_fixture.Config(cfg.CONF)) ++ config.config(group="networking", allow_invisible_resource_usage=True) ++ + show_port = self.driver.neutron_client.show_port + show_port.return_value = {'port': { + 'id': t_constants.MOCK_PORT_ID, +@@ -244,6 +286,31 @@ class TestBaseNeutronNetworkDriver(base.TestCase): + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + ++ @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") ++ def test_get_user_port(self, neutron_client_mock): ++ show_port = 
neutron_client_mock.return_value.show_port ++ show_port.return_value = {'port': { ++ 'id': t_constants.MOCK_PORT_ID, ++ 'mac_address': t_constants.MOCK_MAC_ADDR, ++ 'network_id': t_constants.MOCK_NETWORK_ID, ++ 'fixed_ips': [{ ++ 'subnet_id': t_constants.MOCK_SUBNET_ID, ++ 'ip_address': t_constants.MOCK_IP_ADDRESS ++ }]}} ++ ++ port = self.driver.get_port(t_constants.MOCK_PORT_ID, context=mock.ANY) ++ ++ self.assertIsInstance(port, network_models.Port) ++ self.assertEqual(t_constants.MOCK_PORT_ID, port.id) ++ self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) ++ self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) ++ self.assertEqual(1, len(port.fixed_ips)) ++ self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) ++ self.assertEqual(t_constants.MOCK_SUBNET_ID, ++ port.fixed_ips[0].subnet_id) ++ self.assertEqual(t_constants.MOCK_IP_ADDRESS, ++ port.fixed_ips[0].ip_address) ++ + def test_get_network_by_name(self): + list_network = self.driver.neutron_client.list_networks + list_network.return_value = {'networks': [{'network': { +diff --git a/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml b/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml +new file mode 100644 +index 00000000..0663be6c +--- /dev/null ++++ b/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml +@@ -0,0 +1,16 @@ ++--- ++upgrade: ++ - | ++ After this upgrade, users will no longer be able to use network resources they ++ cannot see or "show" on load balancers. Operators can revert this behavior ++ by setting the "allow_invisible_resource_usage" configuration file setting ++ to ``True``. ++security: ++ - | ++ Previously, if a user knew or could guess the UUID for a network resource, ++ they could use that UUID to create load balancer resources. ++ Now the user must have permission to see or "show" the resource before it ++ can be used with a load balancer. This will be the new default, but ++ operators can disable this behavior by setting the configuration file ++ setting "allow_invisible_resource_usage" to ``True``. This issue falls ++ under the "Class C1" security classification as the user would require a valid UUID. +-- +2.17.1 + diff -Nru octavia-5.0.1/debian/patches/0002-Workaround-peer-name-starting-with-hyphen.patch octavia-5.0.1/debian/patches/0002-Workaround-peer-name-starting-with-hyphen.patch --- octavia-5.0.1/debian/patches/0002-Workaround-peer-name-starting-with-hyphen.patch 1970-01-01 01:00:00.000000000 +0100 +++ octavia-5.0.1/debian/patches/0002-Workaround-peer-name-starting-with-hyphen.patch 2020-10-15 20:15:24.000000000 +0100 @@ -0,0 +1,119 @@ +From b35f77d3b42997e882036e4dc9359b1afb78ffac Mon Sep 17 00:00:00 2001 +From: Carlos Goncalves +Date: Mon, 25 May 2020 20:47:34 +0200 +Subject: [PATCH] Workaround peer name starting with hyphen + +The base64_sha1_string method is used to set a base64-encoded peer name +in HAProxy. There are cases where the peer name can start with +a hyphen, which is troublesome when used in the HAProxy CLI. Specifically, +HAProxy fails to reload when the local peer name starts with '-x' [1]. When +this is the case, an amphora goes to provisioning status ERROR and later +is scheduled for failover by the Octavia Health Manager service. A new +amphora UUID is assigned and base64 encoded, hopefully not starting +with '-x' again. However, this is far from ideal -- we incur a +dataplane disruption (single topology) or reduced HA capabilities +(active-standby topology) for some time.
+ +Four possible options: + +a) add prefix to peer name +b) change b64encode altchars +c) quote peer name in haproxy CLI command +d) substitute first character if hyphen + +Options a) and b) are not backward compatible with running amphorae. Peer +names of existing amphorae that do not start with a hyphen but contain a +hyphen at any other position would get different peer names. + +Option c) would nonetheless still require an amphora image update to add +quotes in the HAProxy init service file. Generating peer names with a +hyphen at the beginning of the string is avoidable, and avoiding it is +recommended. + +Option d), while also requiring an amphora image update, would get rid +of hyphens at the beginning of the peer names. It is also backward +compatible with all running amphorae, except for those starting with a +hyphen, which are broken anyway. + +This patch takes option d). It substitutes the hyphen with an 'x' character. + +[1] https://github.com/haproxy/haproxy/issues/644 + +Task: 39850 +Story: 2007714 + +Change-Id: Ib0fc26877710dea423a5ebcf1f71077665404377 +(cherry picked from commit acc38391dea12a7f70142077250d15a4eb53cb87) +(cherry picked from commit df36c2c8ca54e9b8f63bb3c0231e85f1b87b35b3) +--- + octavia/common/utils.py | 5 ++++- + octavia/tests/unit/common/test_utils.py | 15 +++++++++++++++ + ...x-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml | 10 ++++++++++ + 3 files changed, 29 insertions(+), 1 deletion(-) + create mode 100644 releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml + +diff --git a/octavia/common/utils.py b/octavia/common/utils.py +index 2c2fbbf4..3b217167 100644 +--- a/octavia/common/utils.py ++++ b/octavia/common/utils.py +@@ -20,6 +20,7 @@ + + import base64 + import hashlib ++import re + import socket + + import netaddr +@@ -45,7 +46,9 @@ def base64_sha1_string(string_to_hash): + # break backwards compatibility with existing loadbalancers.
+ hash_str = hashlib.sha1(string_to_hash.encode('utf-8')).digest() # nosec + b64_str = base64.b64encode(hash_str, str.encode('_-', 'ascii')) +- return b64_str.decode('UTF-8') ++ b64_sha1 = b64_str.decode('UTF-8') ++ # https://github.com/haproxy/haproxy/issues/644 ++ return re.sub(r"^-", "x", b64_sha1) + + + def get_network_driver(): +diff --git a/octavia/tests/unit/common/test_utils.py b/octavia/tests/unit/common/test_utils.py +index 832f6ee6..b98e5a32 100644 +--- a/octavia/tests/unit/common/test_utils.py ++++ b/octavia/tests/unit/common/test_utils.py +@@ -60,3 +60,18 @@ class TestConfig(base.TestCase): + utils.ip_netmask_to_cidr('10.0.0.1', '255.255.240.0')) + self.assertEqual('10.0.0.0/30', utils.ip_netmask_to_cidr( + '10.0.0.1', '255.255.255.252')) ++ ++ def test_base64_sha1_string(self): ++ str_to_sha1 = [ ++ # no special cases str (no altchars) ++ ('77e7d60d-e137-4246-8a84-a25db33571cd', ++ 'iVZVQ5AKmk2Ae0uGLP0Ue4OseRM='), ++ # backward compat amphorae with - in str[1:] ++ ('9c6e5f27-a0da-4ceb-afe5-5a81230be42e', ++ 'NjrNgt3Egl-H5ScbYM5ChtUH3M8='), ++ # sha1 would start with -, now replaced with x ++ ('4db4a3cf-9fef-4057-b1fd-b2afbf7a8a0f', ++ 'xxqntK8jJ_gE3QEmh-D1-XgCW_E=') ++ ] ++ for str, sha1 in str_to_sha1: ++ self.assertEqual(sha1, utils.base64_sha1_string(str)) +diff --git a/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml b/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml +new file mode 100644 +index 00000000..0f883eab +--- /dev/null ++++ b/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml +@@ -0,0 +1,10 @@ ++--- ++upgrade: ++ - | ++ An amphora image update is recommended to pick up a workaround to an ++ HAProxy issue where it would fail to reload on configuration change should ++ the local peer name start with "-x". ++fixes: ++ - | ++ Workaround an HAProxy issue where it would fail to reload on configuration ++ change should the local peer name start with "-x". +-- +2.17.1 + diff -Nru octavia-5.0.1/debian/patches/0003-Refactor-the-failover-flows.patch octavia-5.0.1/debian/patches/0003-Refactor-the-failover-flows.patch --- octavia-5.0.1/debian/patches/0003-Refactor-the-failover-flows.patch 1970-01-01 01:00:00.000000000 +0100 +++ octavia-5.0.1/debian/patches/0003-Refactor-the-failover-flows.patch 2020-10-15 20:15:24.000000000 +0100 @@ -0,0 +1,10538 @@ +From 8eaa660c3a1439de72077b7e6b8a7f87a69b90aa Mon Sep 17 00:00:00 2001 +From: Michael Johnson +Date: Fri, 31 Jan 2020 17:15:50 -0800 +Subject: [PATCH] Refactor the failover flows + +This patch refactors the failover flows to improve the performance +and reliability of failovers in Octavia. + +Specific improvements are: +* More tasks and flows will retry when other OpenStack services are + failing. +* Failover can now succeed even when all of the amphora are missing + for a given load balancer. +* It will check and repair the load balancer VIP should the VIP + port(s) become corrupted in neutron. +* It will cleanup extra resources that may be associated with a + load balancer in the event of a cloud service failure. + +This patch also removes some dead code. 
+ +Conflicts: + octavia/amphorae/backends/agent/api_server/amphora_info.py + octavia/amphorae/drivers/haproxy/rest_api_driver.py + octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py + octavia/api/drivers/utils.py + octavia/api/v2/controllers/load_balancer.py + octavia/common/constants.py + octavia/common/utils.py + octavia/controller/worker/v1/controller_worker.py + octavia/controller/worker/v1/flows/amphora_flows.py + octavia/controller/worker/v1/tasks/amphora_driver_tasks.py + octavia/controller/worker/v1/tasks/compute_tasks.py + octavia/controller/worker/v1/tasks/network_tasks.py + octavia/network/base.py + octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py + octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py + octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py + octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py + octavia/tests/unit/controller/worker/v1/test_controller_worker.py + octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py + +Change-Id: I04cb2f1f10ec566298834f81df0cf8b100ca916c +Story: 2003084 +Task: 23166 +Story: 2004440 +Task: 28108 +(cherry picked from commit 955bb8840616d96ed74de3086f8959ad4190a472) +(cherry picked from commit 2f9dc3693e73fe8c235e0eaaf0f9595576940aae) +(cherry picked from commit edebde748d0283a9948c8b7f6386d5a8835c617c) +--- + etc/octavia.conf | 37 + + .../backends/agent/api_server/amphora_info.py | 75 +- + .../backends/agent/api_server/keepalived.py | 4 + + .../agent/api_server/keepalivedlvs.py | 6 +- + .../backends/agent/api_server/loadbalancer.py | 35 +- + .../backends/agent/api_server/util.py | 78 +- + .../backends/utils/ip_advertisement.py | 183 +++++ + .../backends/utils/network_namespace.py | 50 ++ + .../amphorae/backends/utils/network_utils.py | 86 ++ + octavia/amphorae/drivers/driver_base.py | 41 +- + .../amphorae/drivers/haproxy/exceptions.py | 7 +- + .../drivers/haproxy/rest_api_driver.py | 60 +- + .../drivers/keepalived/vrrp_rest_driver.py | 67 +- + .../amphorae/drivers/noop_driver/driver.py | 19 +- + .../api/drivers/amphora_driver/v1/driver.py | 7 +- + octavia/api/drivers/utils.py | 6 + + octavia/api/v2/controllers/load_balancer.py | 57 +- + octavia/common/config.py | 44 +- + octavia/common/constants.py | 60 +- + octavia/common/exceptions.py | 20 +- + octavia/common/utils.py | 23 + + octavia/compute/drivers/noop_driver/driver.py | 8 +- + octavia/compute/drivers/nova_driver.py | 40 +- + .../controller/worker/v1/controller_worker.py | 427 ++++++---- + .../worker/v1/flows/amphora_flows.py | 760 ++++++++++-------- + .../worker/v1/flows/load_balancer_flows.py | 450 +++++++++-- + .../worker/v1/tasks/amphora_driver_tasks.py | 212 +++-- + .../worker/v1/tasks/compute_tasks.py | 82 +- + .../worker/v1/tasks/database_tasks.py | 51 +- + .../worker/v1/tasks/network_tasks.py | 224 +++++- + .../controller/worker/v1/tasks/retry_tasks.py | 74 ++ + .../worker/v2/tasks/amphora_driver_tasks.py | 4 +- + octavia/network/base.py | 52 ++ + octavia/network/data_models.py | 16 +- + .../drivers/neutron/allowed_address_pairs.py | 281 +++++-- + octavia/network/drivers/neutron/base.py | 17 +- + octavia/network/drivers/neutron/utils.py | 45 +- + octavia/network/drivers/noop_driver/driver.py | 69 ++ + octavia/opts.py | 1 + + octavia/tests/common/constants.py | 46 ++ + octavia/tests/common/data_model_helpers.py | 4 +- + octavia/tests/common/sample_data_models.py | 6 +- + octavia/tests/common/sample_network_data.py | 198 +++++ + .../agent/api_server/test_keepalivedlvs.py 
| 12 +- + .../backend/agent/api_server/test_server.py | 22 +- + .../agent/api_server/test_loadbalancer.py | 48 +- + .../backends/agent/api_server/test_osutils.py | 5 +- + .../backends/agent/api_server/test_util.py | 128 +++ + .../backends/utils/test_ip_advertisement.py | 213 +++++ + .../backends/utils/test_network_namespace.py | 117 +++ + .../backends/utils/test_network_utils.py | 140 ++++ + .../drivers/haproxy/test_exceptions.py | 52 ++ + .../haproxy/test_rest_api_driver_0_5.py | 34 +- + .../haproxy/test_rest_api_driver_1_0.py | 34 +- + .../haproxy/test_rest_api_driver_common.py | 83 ++ + .../keepalived/test_vrrp_rest_driver.py | 33 +- + .../test_noop_amphoraloadbalancer_driver.py | 11 +- + octavia/tests/unit/common/test_utils.py | 31 + + .../unit/compute/drivers/test_nova_driver.py | 66 +- + .../worker/v1/flows/test_amphora_flows.py | 223 +++-- + .../v1/flows/test_load_balancer_flows.py | 234 +++++- + .../v1/tasks/test_amphora_driver_tasks.py | 255 ++++-- + .../worker/v1/tasks/test_compute_tasks.py | 70 +- + .../worker/v1/tasks/test_database_tasks.py | 34 +- + .../worker/v1/tasks/test_network_tasks.py | 415 +++++++++- + .../worker/v1/tasks/test_retry_tasks.py | 47 ++ + .../worker/v1/test_controller_worker.py | 693 ++++++++++++---- + .../v2/tasks/test_amphora_driver_tasks.py | 5 +- + .../neutron/test_allowed_address_pairs.py | 439 +++++++++- + .../network/drivers/neutron/test_utils.py | 1 + + .../drivers/test_network_noop_driver.py | 56 ++ + ...factor_failover_flow-9efcd854240f71ad.yaml | 11 + + tools/create_flow_docs.py | 14 +- + tools/flow-list.txt | 3 +- + 74 files changed, 6220 insertions(+), 1341 deletions(-) + create mode 100644 octavia/amphorae/backends/utils/ip_advertisement.py + create mode 100644 octavia/amphorae/backends/utils/network_namespace.py + create mode 100644 octavia/amphorae/backends/utils/network_utils.py + create mode 100644 octavia/controller/worker/v1/tasks/retry_tasks.py + create mode 100644 octavia/tests/common/sample_network_data.py + create mode 100644 octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py + create mode 100644 octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py + create mode 100644 octavia/tests/unit/amphorae/backends/utils/test_network_utils.py + create mode 100644 octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py + create mode 100644 octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_common.py + create mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py + create mode 100644 releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml + +Index: octavia-5.0.1/etc/octavia.conf +=================================================================== +--- octavia-5.0.1.orig/etc/octavia.conf ++++ octavia-5.0.1/etc/octavia.conf +@@ -138,6 +138,19 @@ + # Endpoint type to use for communication with the Barbican service. + # endpoint_type = publicURL + ++[compute] ++# The maximum attempts to retry an action with the compute service. ++# max_retries = 15 ++ ++# Seconds to wait before retrying an action with the compute service. ++# retry_interval = 1 ++ ++# The seconds to backoff retry attempts ++# retry_backoff = 1 ++ ++# The maximum interval in seconds between retry attempts ++# retry_max = 10 ++ + [networking] + # The maximum attempts to retry an action with the networking service. + # max_retries = 15 +@@ -145,6 +158,12 @@ + # Seconds to wait before retrying an action with the networking service. 
+ # retry_interval = 1 + ++# The seconds to backoff retry attempts ++# retry_backoff = 1 ++ ++# The maximum interval in seconds between retry attempts ++# retry_max = 10 ++ + # The maximum time to wait, in seconds, for a port to detach from an amphora + # port_detach_timeout = 300 + +@@ -209,11 +228,26 @@ + # active_connection_max_retries = 15 + # active_connection_rety_interval = 2 + ++# These "failover" timeouts are used during the failover process to probe ++# amphorae that are part of the load balancer being failed over. ++# These values are very low to facilitate "fail fast" should an amphora ++# not respond in a failure situation. ++# failover_connection_max_retries = 2 ++# failover_connection_retry_interval = 5 ++ + # The user flow log format for HAProxy. + # {{ project_id }} and {{ lb_id }} will be automatically substituted by the + # controller when configuring HAProxy if they are present in the string. + # user_log_format = '{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST %B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt %tsc' + ++# API messaging / database commit retries ++# This is many times the controller worker retries waiting for the API to ++# complete a database commit for a message received over the queue. ++# api_db_commit_retry_attempts = 15 ++# api_db_commit_retry_initial_delay = 1 ++# api_db_commit_retry_backoff = 1 ++# api_db_commit_retry_max = 5 ++ + [controller_worker] + # workers = 1 + # amp_active_retries = 30 +@@ -270,6 +304,9 @@ + # loadbalancer_topology = SINGLE + # user_data_config_drive = False + ++# amphora_delete_retries = 5 ++# amphora_delete_retry_interval = 5 ++ + [task_flow] + # TaskFlow engine options are: + # - serial: Runs all tasks on a single thread. +Index: octavia-5.0.1/octavia/amphorae/backends/agent/api_server/amphora_info.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/backends/agent/api_server/amphora_info.py ++++ octavia-5.0.1/octavia/amphorae/backends/agent/api_server/amphora_info.py +@@ -12,20 +12,19 @@ + # License for the specific language governing permissions and limitations + # under the License. 
+ +-import ipaddress + import os + import re + import socket + import subprocess + + import pyroute2 +-import six + import webob + +-import netifaces + from octavia.amphorae.backends.agent import api_server + from octavia.amphorae.backends.agent.api_server import util ++from octavia.amphorae.backends.utils import network_utils + from octavia.common import constants as consts ++from octavia.common import exceptions + + + class AmphoraInfo(object): +@@ -176,65 +175,15 @@ class AmphoraInfo(object): + return networks + + def get_interface(self, ip_addr): +- + try: +- ip_version = ipaddress.ip_address(six.text_type(ip_addr)).version +- except Exception: +- return webob.Response( +- json=dict(message="Invalid IP address"), status=400) +- +- if ip_version == 4: +- address_format = netifaces.AF_INET +- elif ip_version == 6: +- address_format = netifaces.AF_INET6 +- else: ++ interface = network_utils.get_interface_name( ++ ip_addr, net_ns=consts.AMPHORA_NAMESPACE) ++ except exceptions.InvalidIPAddress: ++ return webob.Response(json=dict(message="Invalid IP address"), ++ status=400) ++ except exceptions.NotFound: + return webob.Response( +- json=dict(message="Bad IP address version"), status=400) +- +- # We need to normalize the address as IPv6 has multiple representations +- # fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058 +- normalized_addr = socket.inet_ntop(address_format, +- socket.inet_pton(address_format, +- ip_addr)) +- +- with pyroute2.NetNS(consts.AMPHORA_NAMESPACE) as netns: +- for addr in netns.get_addr(): +- # Save the interface index as IPv6 records don't list a +- # textual interface +- interface_idx = addr['index'] +- # Save the address family (IPv4/IPv6) for use normalizing +- # the IP address for comparison +- interface_af = addr['family'] +- # Search through the attributes of each address record +- for attr in addr['attrs']: +- # Look for the attribute name/value pair for the address +- if attr[0] == 'IFA_ADDRESS': +- # Compare the normalized address with the address we +- # we are looking for. 
Since we have matched the name +- # above, attr[1] is the address value +- if normalized_addr == socket.inet_ntop( +- interface_af, +- socket.inet_pton(interface_af, attr[1])): +- +- # Lookup the matching interface name by +- # getting the interface with the index we found +- # in the above address search +- lookup_int = netns.get_links(interface_idx) +- # Search through the attributes of the matching +- # interface record +- for int_attr in lookup_int[0]['attrs']: +- # Look for the attribute name/value pair +- # that includes the interface name +- if int_attr[0] == 'IFLA_IFNAME': +- # Return the response with the matching +- # interface name that is in int_attr[1] +- # for the matching interface attribute +- # name +- return webob.Response( +- json=dict(message='OK', +- interface=int_attr[1]), +- status=200) +- +- return webob.Response( +- json=dict(message="Error interface not found for IP address"), +- status=404) ++ json=dict(message="Error interface not found for IP address"), ++ status=404) ++ return webob.Response(json=dict(message='OK', interface=interface), ++ status=200) +Index: octavia-5.0.1/octavia/amphorae/backends/agent/api_server/keepalived.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/backends/agent/api_server/keepalived.py ++++ octavia-5.0.1/octavia/amphorae/backends/agent/api_server/keepalived.py +@@ -47,6 +47,7 @@ class Keepalived(object): + + if not os.path.exists(util.keepalived_dir()): + os.makedirs(util.keepalived_dir()) ++ if not os.path.exists(util.keepalived_check_scripts_dir()): + os.makedirs(util.keepalived_check_scripts_dir()) + + conf_file = util.keepalived_cfg_path() +@@ -112,6 +113,9 @@ class Keepalived(object): + ) + text_file.write(text) + ++ # Configure the monitoring of haproxy ++ util.vrrp_check_script_update(None, consts.AMP_ACTION_START) ++ + # Make sure the new service is enabled on boot + if init_system != consts.INIT_UPSTART: + try: +Index: octavia-5.0.1/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py ++++ octavia-5.0.1/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +@@ -78,7 +78,8 @@ class KeepalivedLvs(udp_listener_base.Ud + # Active-Standby topology will create the directory below. So for + # Single topology, it should not create the directory and the check + # scripts for status change. 
+- if not os.path.exists(util.keepalived_check_scripts_dir()): ++ if (CONF.controller_worker.loadbalancer_topology != ++ consts.TOPOLOGY_ACTIVE_STANDBY): + NEED_CHECK = False + + conf_file = util.keepalived_lvs_cfg_path(listener_id) +@@ -157,6 +158,9 @@ class KeepalivedLvs(udp_listener_base.Ud + script_path = os.path.join(util.keepalived_check_scripts_dir(), + KEEPALIVED_CHECK_SCRIPT_NAME) + if not os.path.exists(script_path): ++ if not os.path.exists(util.keepalived_check_scripts_dir()): ++ os.makedirs(util.keepalived_check_scripts_dir()) ++ + with os.fdopen(os.open(script_path, flags, stat.S_IEXEC), + 'w') as script_file: + text = check_script_file_template.render( +Index: octavia-5.0.1/octavia/amphorae/backends/agent/api_server/loadbalancer.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/backends/agent/api_server/loadbalancer.py ++++ octavia-5.0.1/octavia/amphorae/backends/agent/api_server/loadbalancer.py +@@ -235,12 +235,11 @@ class Loadbalancer(object): + details="Unknown action: {0}".format(action)), status=400) + + self._check_lb_exists(lb_id) ++ is_vrrp = (CONF.controller_worker.loadbalancer_topology == ++ consts.TOPOLOGY_ACTIVE_STANDBY) + +- # Since this script should be created at LB create time +- # we can check for this path to see if VRRP is enabled +- # on this amphora and not write the file if VRRP is not in use +- if os.path.exists(util.keepalived_check_script_path()): +- self.vrrp_check_script_update(lb_id, action) ++ if is_vrrp: ++ util.vrrp_check_script_update(lb_id, action) + + # HAProxy does not start the process when given a reload + # so start it if haproxy is not already running +@@ -262,6 +261,14 @@ class Loadbalancer(object): + return webob.Response(json=dict( + message="Error {0}ing haproxy".format(action), + details=e.output), status=500) ++ ++ # If we are not in active/standby we need to send an IP ++ # advertisement (GARP or NA). Keepalived handles this for ++ # active/standby load balancers. 
++ if not is_vrrp and action in [consts.AMP_ACTION_START, ++ consts.AMP_ACTION_RELOAD]: ++ util.send_vip_advertisements(lb_id) ++ + if action in [consts.AMP_ACTION_STOP, + consts.AMP_ACTION_RELOAD]: + return webob.Response(json=dict( +@@ -307,7 +314,7 @@ class Loadbalancer(object): + # we can check for this path to see if VRRP is enabled + # on this amphora and not write the file if VRRP is not in use + if os.path.exists(util.keepalived_check_script_path()): +- self.vrrp_check_script_update( ++ util.vrrp_check_script_update( + lb_id, action=consts.AMP_ACTION_STOP) + + # delete the ssl files +@@ -455,22 +462,6 @@ class Loadbalancer(object): + def _cert_file_path(self, lb_id, filename): + return os.path.join(self._cert_dir(lb_id), filename) + +- def vrrp_check_script_update(self, lb_id, action): +- lb_ids = util.get_loadbalancers() +- if action == consts.AMP_ACTION_STOP: +- lb_ids.remove(lb_id) +- args = [] +- for lbid in lb_ids: +- args.append(util.haproxy_sock_path(lbid)) +- +- if not os.path.exists(util.keepalived_dir()): +- os.makedirs(util.keepalived_dir()) +- os.makedirs(util.keepalived_check_scripts_dir()) +- +- cmd = 'haproxy-vrrp-check {args}; exit $?'.format(args=' '.join(args)) +- with open(util.haproxy_check_script_path(), 'w') as text_file: +- text_file.write(cmd) +- + def _check_haproxy_status(self, lb_id): + if os.path.exists(util.pid_path(lb_id)): + if os.path.exists( +Index: octavia-5.0.1/octavia/amphorae/backends/agent/api_server/util.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/backends/agent/api_server/util.py ++++ octavia-5.0.1/octavia/amphorae/backends/agent/api_server/util.py +@@ -12,7 +12,7 @@ + # License for the specific language governing permissions and limitations + # under the License. + +- ++import errno + import os + import re + import stat +@@ -23,6 +23,8 @@ from oslo_config import cfg + from oslo_log import log as logging + + from octavia.amphorae.backends.agent.api_server import osutils ++from octavia.amphorae.backends.utils import ip_advertisement ++from octavia.amphorae.backends.utils import network_utils + from octavia.common import constants as consts + + CONF = cfg.CONF +@@ -188,7 +190,7 @@ def get_listeners(): + def get_loadbalancers(): + """Get Load balancers + +- :returns: An array with the ids of all load balancers, ++ :returns: An array with the uuids of all load balancers, + e.g. ['123', '456', ...] or [] if no loadbalancers exist + """ + if os.path.exists(CONF.haproxy_amphora.base_path): +@@ -332,3 +334,75 @@ def parse_haproxy_file(lb_id): + stats_socket = m.group(1) + + return stats_socket, listeners ++ ++ ++def vrrp_check_script_update(lb_id, action): ++ try: ++ os.makedirs(keepalived_dir()) ++ os.makedirs(keepalived_check_scripts_dir()) ++ except OSError as e: ++ if e.errno != errno.EEXIST: ++ raise ++ ++ lb_ids = get_loadbalancers() ++ udp_ids = get_udp_listeners() ++ # If no LBs are found, so make sure keepalived thinks haproxy is down. ++ if not lb_ids: ++ if not udp_ids: ++ with open(haproxy_check_script_path(), 'w') as text_file: ++ text_file.write('exit 1') ++ return ++ if action == consts.AMP_ACTION_STOP: ++ lb_ids.remove(lb_id) ++ args = [] ++ for lbid in lb_ids: ++ args.append(haproxy_sock_path(lbid)) ++ ++ cmd = 'haproxy-vrrp-check {args}; exit $?'.format(args=' '.join(args)) ++ with open(haproxy_check_script_path(), 'w') as text_file: ++ text_file.write(cmd) ++ ++ ++def get_haproxy_vip_addresses(lb_id): ++ """Get the VIP addresses for a load balancer. 
++ ++ :param lb_id: The load balancer ID to get VIP addresses from. ++ :returns: List of VIP addresses (IPv4 and IPv6) ++ """ ++ vips = [] ++ with open(config_path(lb_id), 'r') as file: ++ for line in file: ++ current_line = line.strip() ++ if current_line.startswith('bind'): ++ for section in current_line.split(' '): ++ # We will always have a port assigned per the template. ++ if ':' in section: ++ if ',' in section: ++ addr_port = section.rstrip(',') ++ vips.append(addr_port.rpartition(':')[0]) ++ else: ++ vips.append(section.rpartition(':')[0]) ++ break ++ return vips ++ ++ ++def send_vip_advertisements(lb_id): ++ """Sends address advertisements for each load balancer VIP. ++ ++ This method will send either GARP (IPv4) or neighbor advertisements (IPv6) ++ for the VIP addresses on a load balancer. ++ ++ :param lb_id: The load balancer ID to send advertisements for. ++ :returns: None ++ """ ++ try: ++ vips = get_haproxy_vip_addresses(lb_id) ++ ++ for vip in vips: ++ interface = network_utils.get_interface_name( ++ vip, net_ns=consts.AMPHORA_NAMESPACE) ++ ip_advertisement.send_ip_advertisement( ++ interface, vip, net_ns=consts.AMPHORA_NAMESPACE) ++ except Exception as e: ++ LOG.debug('Send VIP advertisement failed due to :%s. ' ++ 'This amphora may not be the MASTER. Ignoring.', str(e)) +Index: octavia-5.0.1/octavia/amphorae/backends/utils/ip_advertisement.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/amphorae/backends/utils/ip_advertisement.py +@@ -0,0 +1,183 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++import fcntl ++import socket ++from struct import pack ++from struct import unpack ++ ++from oslo_log import log as logging ++ ++from octavia.amphorae.backends.utils import network_namespace ++from octavia.common import constants ++from octavia.common import utils as common_utils ++ ++LOG = logging.getLogger(__name__) ++ ++ ++def garp(interface, ip_address, net_ns=None): ++ """Sends a gratuitous ARP for ip_address on the interface. ++ ++ :param interface: The interface name to send the GARP on. ++ :param ip_address: The IP address to advertise in the GARP. ++ :param net_ns: The network namespace to send the GARP from. 
++ :returns: None ++ """ ++ ARP_ETHERTYPE = 0x0806 ++ BROADCAST_MAC = b'\xff\xff\xff\xff\xff\xff' ++ ++ # Get a socket, optionally inside a network namespace ++ garp_socket = None ++ if net_ns: ++ with network_namespace.NetworkNamespace(net_ns): ++ garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) ++ else: ++ garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) ++ ++ # Bind the socket with the ARP ethertype protocol ++ garp_socket.bind((interface, ARP_ETHERTYPE)) ++ ++ # Get the MAC address of the interface ++ source_mac = garp_socket.getsockname()[4] ++ ++ garp_msg = [ ++ pack('!h', 1), # Hardware type ethernet ++ pack('!h', 0x0800), # Protocol type IPv4 ++ pack('!B', 6), # Hardware size ++ pack('!B', 4), # Protocol size ++ pack('!h', 1), # Opcode request ++ source_mac, # Sender MAC address ++ socket.inet_aton(ip_address), # Sender IP address ++ BROADCAST_MAC, # Target MAC address ++ socket.inet_aton(ip_address)] # Target IP address ++ ++ garp_ethernet = [ ++ BROADCAST_MAC, # Ethernet destination ++ source_mac, # Ethernet source ++ pack('!h', ARP_ETHERTYPE), # Ethernet type ++ b''.join(garp_msg)] # The GARP message ++ ++ garp_socket.send(b''.join(garp_ethernet)) ++ garp_socket.close() ++ ++ ++def calculate_icmpv6_checksum(packet): ++ """Calculate the ICMPv6 checksum for a packet. ++ ++ :param packet: The packet bytes to checksum. ++ :returns: The checksum integer. ++ """ ++ total = 0 ++ ++ # Add up 16-bit words ++ num_words = len(packet) // 2 ++ for chunk in unpack("!%sH" % num_words, packet[0:num_words * 2]): ++ total += chunk ++ ++ # Add any left over byte ++ if len(packet) % 2: ++ total += bytearray(packet)[-1] << 8 ++ ++ # Fold 32-bits into 16-bits ++ total = (total >> 16) + (total & 0xffff) ++ total += total >> 16 ++ return ~total + 0x10000 & 0xffff ++ ++ ++def neighbor_advertisement(interface, ip_address, net_ns=None): ++ """Sends a unsolicited neighbor advertisement for an ip on the interface. ++ ++ :param interface: The interface name to send the GARP on. ++ :param ip_address: The IP address to advertise in the GARP. ++ :param net_ns: The network namespace to send the GARP from. 
++ :returns: None ++ """ ++ ALL_NODES_ADDR = 'ff02::1' ++ SIOCGIFHWADDR = 0x8927 ++ ++ # Get a socket, optionally inside a network namespace ++ na_socket = None ++ if net_ns: ++ with network_namespace.NetworkNamespace(net_ns): ++ na_socket = socket.socket( ++ socket.AF_INET6, socket.SOCK_RAW, ++ socket.getprotobyname(constants.IPV6_ICMP)) ++ else: ++ na_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW, ++ socket.getprotobyname(constants.IPV6_ICMP)) ++ ++ # Per RFC 4861 section 4.4, the hop limit should be 255 ++ na_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) ++ ++ # Bind the socket with the source address ++ na_socket.bind((ip_address, 0)) ++ ++ # Get the byte representation of the MAC address of the interface ++ # Note: You can't use getsockname() to get the MAC on this type of socket ++ source_mac = fcntl.ioctl(na_socket.fileno(), SIOCGIFHWADDR, pack('256s', ++ bytes(interface.encode('utf-8'))))[18:24] ++ ++ # Get the byte representation of the source IP address ++ source_ip_bytes = socket.inet_pton(socket.AF_INET6, ip_address) ++ ++ icmpv6_na_msg_prefix = [ ++ pack('!B', 136), # ICMP Type Neighbor Advertisement ++ pack('!B', 0)] # ICMP Code ++ icmpv6_na_msg_postfix = [ ++ pack('!I', 0xa0000000), # Flags (Router, Override) ++ source_ip_bytes, # Target address ++ pack('!B', 2), # ICMPv6 option type target link-layer address ++ pack('!B', 1), # ICMPv6 option length ++ source_mac] # ICMPv6 option link-layer address ++ ++ # Calculate the ICMPv6 checksum ++ icmpv6_pseudo_header = [ ++ source_ip_bytes, # Source IP address ++ socket.inet_pton(socket.AF_INET6, ALL_NODES_ADDR), # Destination IP ++ pack('!I', 58), # IPv6 next header (ICMPv6) ++ pack('!h', 32)] # IPv6 payload length ++ icmpv6_tmp_chksum = pack('!H', 0) # Checksum are zeros for calculation ++ tmp_chksum_msg = b''.join(icmpv6_pseudo_header + icmpv6_na_msg_prefix + ++ [icmpv6_tmp_chksum] + icmpv6_pseudo_header) ++ checksum = pack('!H', calculate_icmpv6_checksum(tmp_chksum_msg)) ++ ++ # Build the ICMPv6 unsolicitated neighbor advertisement ++ icmpv6_msg = b''.join(icmpv6_na_msg_prefix + [checksum] + ++ icmpv6_na_msg_postfix) ++ ++ na_socket.sendto(icmpv6_msg, (ALL_NODES_ADDR, 0, 0, 0)) ++ na_socket.close() ++ ++ ++def send_ip_advertisement(interface, ip_address, net_ns=None): ++ """Send an address advertisement. ++ ++ This method will send either GARP (IPv4) or neighbor advertisements (IPv6) ++ for the ip address specified. ++ ++ :param interface: The interface name to send the advertisement on. ++ :param ip_address: The IP address to advertise. ++ :param net_ns: The network namespace to send the advertisement from. ++ :returns: None ++ """ ++ try: ++ if common_utils.is_ipv4(ip_address): ++ garp(interface, ip_address, net_ns) ++ elif common_utils.is_ipv6(ip_address): ++ neighbor_advertisement(interface, ip_address, net_ns) ++ else: ++ LOG.error('Unknown IP version for address: "%s". Skipping', ++ ip_address) ++ except Exception as e: ++ LOG.warning('Unable to send address advertisement for address: "%s", ' ++ 'error: %s. Skipping', ip_address, str(e)) +Index: octavia-5.0.1/octavia/amphorae/backends/utils/network_namespace.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/amphorae/backends/utils/network_namespace.py +@@ -0,0 +1,50 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. 
You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++import ctypes ++import os ++ ++ ++class NetworkNamespace(object): ++ """A network namespace context manager. ++ ++ Runs wrapped code inside the specified network namespace. ++ ++ :param netns: The network namespace name to enter. ++ """ ++ # from linux/sched.h - We want to enter a network namespace ++ CLONE_NEWNET = 0x40000000 ++ ++ @staticmethod ++ def _error_handler(result, func, arguments): ++ if result == -1: ++ errno = ctypes.get_errno() ++ raise OSError(errno, os.strerror(errno)) ++ ++ def __init__(self, netns): ++ self.current_netns = '/proc/{pid}/ns/net'.format(pid=os.getpid()) ++ self.target_netns = '/var/run/netns/{netns}'.format(netns=netns) ++ # reference: man setns(2) ++ self.set_netns = ctypes.CDLL('libc.so.6', use_errno=True).setns ++ self.set_netns.errcheck = self._error_handler ++ ++ def __enter__(self): ++ # Save the current network namespace ++ self.current_netns_fd = open(self.current_netns) ++ with open(self.target_netns) as fd: ++ self.set_netns(fd.fileno(), self.CLONE_NEWNET) ++ ++ def __exit__(self, *args): ++ # Return to the previous network namespace ++ self.set_netns(self.current_netns_fd.fileno(), self.CLONE_NEWNET) ++ self.current_netns_fd.close() +Index: octavia-5.0.1/octavia/amphorae/backends/utils/network_utils.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/amphorae/backends/utils/network_utils.py +@@ -0,0 +1,86 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++import ipaddress ++ ++import pyroute2 ++import six ++ ++from octavia.common import exceptions ++ ++ ++def _find_interface(ip_address, rtnl_api, normalized_addr): ++ """Find the interface using a routing netlink API. ++ ++ :param ip_address: The IP address to search with. ++ :param rtnl_api: A pyroute2 rtnl_api instance. (IPRoute, NetNS, etc.) ++ :returns: The interface name if found, None if not found. ++ :raises exceptions.InvalidIPAddress: Invalid IP address provided. ++ """ ++ for addr in rtnl_api.get_addr(address=ip_address): ++ # Save the interface index as IPv6 records don't list a textual ++ # interface ++ interface_idx = addr['index'] ++ # Search through the attributes of each address record ++ for attr in addr['attrs']: ++ # Look for the attribute name/value pair for the address ++ if attr[0] == 'IFA_ADDRESS': ++ # Compare the normalized address with the address we are ++ # looking for. 
Since we have matched the name above, attr[1] ++ # is the address value ++ if normalized_addr == ipaddress.ip_address( ++ six.text_type(attr[1])).compressed: ++ # Lookup the matching interface name by getting the ++ # interface with the index we found in the above address ++ # search ++ lookup_int = rtnl_api.get_links(interface_idx) ++ # Search through the attributes of the matching interface ++ # record ++ for int_attr in lookup_int[0]['attrs']: ++ # Look for the attribute name/value pair that includes ++ # the interface name ++ if int_attr[0] == 'IFLA_IFNAME': ++ # Return the matching interface name that is in ++ # int_attr[1] for the matching interface attribute ++ # name ++ return int_attr[1] ++ # We didn't find an interface with that IP address. ++ return None ++ ++ ++def get_interface_name(ip_address, net_ns=None): ++ """Gets the interface name from an IP address. ++ ++ :param ip_address: The IP address to lookup. ++ :param net_ns: The network namespace to find the interface in. ++ :returns: The interface name. ++ :raises exceptions.InvalidIPAddress: Invalid IP address provided. ++ :raises octavia.common.exceptions.NotFound: No interface was found. ++ """ ++ # We need to normalize the address as IPv6 has multiple representations ++ # fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058 ++ try: ++ normalized_addr = ipaddress.ip_address( ++ six.text_type(ip_address)).compressed ++ except ValueError: ++ raise exceptions.InvalidIPAddress(ip_addr=ip_address) ++ ++ if net_ns: ++ with pyroute2.NetNS(net_ns) as rtnl_api: ++ interface = _find_interface(ip_address, rtnl_api, normalized_addr) ++ else: ++ with pyroute2.IPRoute() as rtnl_api: ++ interface = _find_interface(ip_address, rtnl_api, normalized_addr) ++ if interface is not None: ++ return interface ++ raise exceptions.NotFound(resource='IP address', id=ip_address) +Index: octavia-5.0.1/octavia/amphorae/drivers/driver_base.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/drivers/driver_base.py ++++ octavia-5.0.1/octavia/amphorae/drivers/driver_base.py +@@ -199,6 +199,21 @@ class AmphoraLoadBalancerDriver(object): + :type agent_config: string + """ + ++ @abc.abstractmethod ++ def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): ++ """Get the interface name from an IP address. ++ ++ :param amphora: The amphora to query. ++ :type amphora: octavia.db.models.Amphora ++ :param ip_address: The IP address to lookup. (IPv4 or IPv6) ++ :type ip_address: string ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval ++ :type timeout_dict: dict ++ """ ++ + + @six.add_metaclass(abc.ABCMeta) + class HealthMixin(object): +@@ -252,10 +267,17 @@ class VRRPDriverMixin(object): + class XYZ: ... + """ + @abc.abstractmethod +- def update_vrrp_conf(self, loadbalancer): ++ def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, ++ timeout_dict=None): + """Update amphorae of the loadbalancer with a new VRRP configuration + + :param loadbalancer: loadbalancer object ++ :param amphorae_network_config: amphorae network configurations ++ :param amphora: The amphora object to update. ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. 
May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval + """ + + @abc.abstractmethod +@@ -266,10 +288,14 @@ class VRRPDriverMixin(object): + """ + + @abc.abstractmethod +- def start_vrrp_service(self, loadbalancer): +- """Start the VRRP services of all amphorae of the loadbalancer ++ def start_vrrp_service(self, amphora, timeout_dict=None): ++ """Start the VRRP services on the amphora + +- :param loadbalancer: loadbalancer object ++ :param amphora: The amphora object to start the service on. ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval + """ + + @abc.abstractmethod +@@ -278,10 +304,3 @@ class VRRPDriverMixin(object): + + :param loadbalancer: loadbalancer object + """ +- +- @abc.abstractmethod +- def get_vrrp_interface(self, amphora): +- """Get the VRRP interface object for a specific amphora +- +- :param amphora: amphora object +- """ +Index: octavia-5.0.1/octavia/amphorae/drivers/haproxy/exceptions.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/drivers/haproxy/exceptions.py ++++ octavia-5.0.1/octavia/amphorae/drivers/haproxy/exceptions.py +@@ -20,7 +20,7 @@ from oslo_log import log as logging + LOG = logging.getLogger(__name__) + + +-def check_exception(response, ignore=tuple()): ++def check_exception(response, ignore=tuple(), log_error=True): + status_code = response.status_code + responses = { + 400: InvalidRequest, +@@ -34,8 +34,9 @@ def check_exception(response, ignore=tup + } + if (status_code not in ignore) and (status_code in responses): + try: +- LOG.error('Amphora agent returned unexpected result code %s with ' +- 'response %s', status_code, response.json()) ++ if log_error: ++ LOG.error('Amphora agent returned unexpected result code %s ' ++ 'with response %s', status_code, response.json()) + except Exception: + # Handle the odd case where there is no response body + # like when using requests_mock which doesn't support has_body +Index: octavia-5.0.1/octavia/amphorae/drivers/haproxy/rest_api_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/drivers/haproxy/rest_api_driver.py ++++ octavia-5.0.1/octavia/amphorae/drivers/haproxy/rest_api_driver.py +@@ -88,7 +88,7 @@ class HaproxyAmphoraLoadBalancerDriver( + + return haproxy_version_string.split('.')[:2] + +- def _populate_amphora_api_version(self, amphora, ++ def _populate_amphora_api_version(self, amphora, timeout_dict=None, + raise_retry_exception=False): + """Populate the amphora object with the api_version + +@@ -100,7 +100,7 @@ class HaproxyAmphoraLoadBalancerDriver( + if not getattr(amphora, 'api_version', None): + try: + amphora.api_version = self.clients['base'].get_api_version( +- amphora, ++ amphora, timeout_dict=timeout_dict, + raise_retry_exception=raise_retry_exception)['api_version'] + except exc.NotFound: + # Amphora is too old for version discovery, default to 0.5 +@@ -262,8 +262,11 @@ class HaproxyAmphoraLoadBalancerDriver( + getattr(self.clients[amp.api_version], func_name)( + amp, loadbalancer.id, *args) + +- def start(self, loadbalancer, amphora=None): +- self._apply('start_listener', loadbalancer, amphora) ++ def reload(self, loadbalancer, amphora=None, timeout_dict=None): ++ self._apply('reload_listener', loadbalancer, amphora, timeout_dict) ++ ++ def start(self, loadbalancer, amphora=None, timeout_dict=None): ++ 
self._apply('start_listener', loadbalancer, amphora, timeout_dict) + + def delete(self, listener): + # Delete any UDP listeners the old way (we didn't update the way they +@@ -551,6 +554,28 @@ class HaproxyAmphoraLoadBalancerDriver( + 'API.'.format(amphora.id)) + raise driver_except.AmpDriverNotImplementedError() + ++ def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): ++ """Get the interface name for an IP address. ++ ++ :param amphora: The amphora to query. ++ :type amphora: octavia.db.models.Amphora ++ :param ip_address: The IP address to lookup. (IPv4 or IPv6) ++ :type ip_address: string ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval ++ :type timeout_dict: dict ++ :returns: None if not found, the interface name string if found. ++ """ ++ try: ++ self._populate_amphora_api_version(amphora, timeout_dict) ++ response_json = self.clients[amphora.api_version].get_interface( ++ amphora, ip_address, timeout_dict, log_error=False) ++ return response_json.get('interface', None) ++ except (exc.NotFound, driver_except.TimeOutException): ++ return None ++ + + # Check a custom hostname + class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter): +@@ -677,9 +702,10 @@ class AmphoraAPIClientBase(object): + 'exception': exception}) + raise driver_except.TimeOutException() + +- def get_api_version(self, amp, raise_retry_exception=False): ++ def get_api_version(self, amp, timeout_dict=None, ++ raise_retry_exception=False): + amp.api_version = None +- r = self.get(amp, retry_404=False, ++ r = self.get(amp, retry_404=False, timeout_dict=timeout_dict, + raise_retry_exception=raise_retry_exception) + # Handle 404 special as we don't want to log an ERROR on 404 + exc.check_exception(r, (404,)) +@@ -780,16 +806,15 @@ class AmphoraAPIClient0_5(AmphoraAPIClie + r = self.put(amp, 'vrrp/upload', data=config) + return exc.check_exception(r) + +- def _vrrp_action(self, action, amp): +- r = self.put(amp, 'vrrp/{action}'.format(action=action)) ++ def _vrrp_action(self, action, amp, timeout_dict=None): ++ r = self.put(amp, 'vrrp/{action}'.format(action=action), ++ timeout_dict=timeout_dict) + return exc.check_exception(r) + +- def get_interface(self, amp, ip_addr, timeout_dict=None): ++ def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True): + r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr), + timeout_dict=timeout_dict) +- if exc.check_exception(r): +- return r.json() +- return None ++ return exc.check_exception(r, log_error=log_error).json() + + def upload_udp_config(self, amp, listener_id, config, timeout_dict=None): + r = self.put( +@@ -910,16 +935,15 @@ class AmphoraAPIClient1_0(AmphoraAPIClie + r = self.put(amp, 'vrrp/upload', data=config) + return exc.check_exception(r) + +- def _vrrp_action(self, action, amp): +- r = self.put(amp, 'vrrp/{action}'.format(action=action)) ++ def _vrrp_action(self, action, amp, timeout_dict=None): ++ r = self.put(amp, 'vrrp/{action}'.format(action=action), ++ timeout_dict=timeout_dict) + return exc.check_exception(r) + +- def get_interface(self, amp, ip_addr, timeout_dict=None): ++ def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True): + r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr), + timeout_dict=timeout_dict) +- if exc.check_exception(r): +- return r.json() +- return None ++ return exc.check_exception(r, log_error=log_error).json() + + def 
upload_udp_config(self, amp, listener_id, config, timeout_dict=None): + r = self.put( +Index: octavia-5.0.1/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py ++++ octavia-5.0.1/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +@@ -30,29 +30,35 @@ class KeepalivedAmphoraDriverMixin(drive + # The Mixed class must define a self.client object for the + # AmphoraApiClient + +- def update_vrrp_conf(self, loadbalancer, amphorae_network_config): +- """Update amphorae of the loadbalancer with a new VRRP configuration ++ def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, ++ timeout_dict=None): ++ """Update amphora of the loadbalancer with a new VRRP configuration + + :param loadbalancer: loadbalancer object + :param amphorae_network_config: amphorae network configurations ++ :param amphora: The amphora object to update. ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval + """ +- templater = jinja_cfg.KeepalivedJinjaTemplater() ++ if amphora.status != constants.AMPHORA_ALLOCATED: ++ LOG.debug('update_vrrp_conf called for un-allocated amphora %s. ' ++ 'Ignoring.', amphora.id) ++ return + +- LOG.debug("Update loadbalancer %s amphora VRRP configuration.", +- loadbalancer.id) ++ templater = jinja_cfg.KeepalivedJinjaTemplater() + +- for amp in six.moves.filter( +- lambda amp: amp.status == constants.AMPHORA_ALLOCATED, +- loadbalancer.amphorae): ++ LOG.debug("Update amphora %s VRRP configuration.", amphora.id) + +- self._populate_amphora_api_version(amp) +- # Get the VIP subnet prefix for the amphora +- vip_cidr = amphorae_network_config[amp.id].vip_subnet.cidr ++ self._populate_amphora_api_version(amphora) ++ # Get the VIP subnet prefix for the amphora ++ vip_cidr = amphorae_network_config[amphora.id].vip_subnet.cidr + +- # Generate Keepalived configuration from loadbalancer object +- config = templater.build_keepalived_config( +- loadbalancer, amp, vip_cidr) +- self.clients[amp.api_version].upload_vrrp_config(amp, config) ++ # Generate Keepalived configuration from loadbalancer object ++ config = templater.build_keepalived_config( ++ loadbalancer, amphora, vip_cidr) ++ self.clients[amphora.api_version].upload_vrrp_config(amphora, config) + + def stop_vrrp_service(self, loadbalancer): + """Stop the vrrp services running on the loadbalancer's amphorae +@@ -69,21 +75,25 @@ class KeepalivedAmphoraDriverMixin(drive + self._populate_amphora_api_version(amp) + self.clients[amp.api_version].stop_vrrp(amp) + +- def start_vrrp_service(self, loadbalancer): +- """Start the VRRP services of all amphorae of the loadbalancer ++ def start_vrrp_service(self, amphora, timeout_dict=None): ++ """Start the VRRP services on an amphorae. + +- :param loadbalancer: loadbalancer object ++ :param amphora: amphora object ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval + """ +- LOG.info("Start loadbalancer %s amphora VRRP Service.", +- loadbalancer.id) ++ if amphora.status != constants.AMPHORA_ALLOCATED: ++ LOG.debug('start_vrrp_service called for un-allocated amphora %s. 
' ++ 'Ignoring.', amphora.id) ++ return + +- for amp in six.moves.filter( +- lambda amp: amp.status == constants.AMPHORA_ALLOCATED, +- loadbalancer.amphorae): ++ LOG.info("Start amphora %s VRRP Service.", amphora.id) + +- LOG.debug("Start VRRP Service on amphora %s .", amp.lb_network_ip) +- self._populate_amphora_api_version(amp) +- self.clients[amp.api_version].start_vrrp(amp) ++ self._populate_amphora_api_version(amphora) ++ self.clients[amphora.api_version].start_vrrp(amphora, ++ timeout_dict=timeout_dict) + + def reload_vrrp_service(self, loadbalancer): + """Reload the VRRP services of all amphorae of the loadbalancer +@@ -99,8 +109,3 @@ class KeepalivedAmphoraDriverMixin(drive + + self._populate_amphora_api_version(amp) + self.clients[amp.api_version].reload_vrrp(amp) +- +- def get_vrrp_interface(self, amphora, timeout_dict=None): +- self._populate_amphora_api_version(amphora) +- return self.clients[amphora.api_version].get_interface( +- amphora, amphora.vrrp_ip, timeout_dict=timeout_dict)['interface'] +Index: octavia-5.0.1/octavia/amphorae/drivers/noop_driver/driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/amphorae/drivers/noop_driver/driver.py ++++ octavia-5.0.1/octavia/amphorae/drivers/noop_driver/driver.py +@@ -113,6 +113,13 @@ class NoopManager(object): + self.amphoraconfig[amphora.id, agent_config] = ( + amphora.id, agent_config, 'update_amphora_agent_config') + ++ def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): ++ LOG.debug("Amphora %s no-op, get interface from amphora %s for IP %s", ++ self.__class__.__name__, amphora.id, ip_address) ++ if ip_address == '198.51.100.99': ++ return "noop0" ++ return None ++ + + class NoopAmphoraLoadBalancerDriver( + driver_base.AmphoraLoadBalancerDriver, +@@ -167,17 +174,19 @@ class NoopAmphoraLoadBalancerDriver( + def update_amphora_agent_config(self, amphora, agent_config): + self.driver.update_amphora_agent_config(amphora, agent_config) + +- def update_vrrp_conf(self, loadbalancer): ++ def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): ++ return self.driver.get_interface_from_ip(amphora, ip_address, ++ timeout_dict) ++ ++ def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, ++ timeout_dict=None): + pass + + def stop_vrrp_service(self, loadbalancer): + pass + +- def start_vrrp_service(self, loadbalancer): ++ def start_vrrp_service(self, amphora, timeout_dict=None): + pass + + def reload_vrrp_service(self, loadbalancer): + pass +- +- def get_vrrp_interface(self, amphora): +- pass +Index: octavia-5.0.1/octavia/api/drivers/amphora_driver/v1/driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/api/drivers/amphora_driver/v1/driver.py ++++ octavia-5.0.1/octavia/api/drivers/amphora_driver/v1/driver.py +@@ -71,8 +71,11 @@ class AmphoraProviderDriver(driver_base. 
+ try: + vip = network_driver.allocate_vip(lb_obj) + except network_base.AllocateVIPException as e: +- raise exceptions.DriverError(user_fault_string=e.orig_msg, +- operator_fault_string=e.orig_msg) ++ message = str(e) ++ if getattr(e, 'orig_msg', None) is not None: ++ message = e.orig_msg ++ raise exceptions.DriverError(user_fault_string=message, ++ operator_fault_string=message) + + LOG.info('Amphora provider created VIP port %s for load balancer %s.', + vip.port_id, loadbalancer_id) +Index: octavia-5.0.1/octavia/api/drivers/utils.py +=================================================================== +--- octavia-5.0.1.orig/octavia/api/drivers/utils.py ++++ octavia-5.0.1/octavia/api/drivers/utils.py +@@ -25,6 +25,7 @@ from oslo_utils import excutils + from stevedore import driver as stevedore_driver + + from octavia.api.drivers import exceptions as driver_exceptions ++from octavia.common import constants + from octavia.common import data_models + from octavia.common import exceptions + from octavia.common.tls_utils import cert_parser +@@ -543,6 +544,9 @@ def vip_dict_to_provider_dict(vip_dict): + new_vip_dict['vip_subnet_id'] = vip_dict['subnet_id'] + if 'qos_policy_id' in vip_dict: + new_vip_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id'] ++ if constants.OCTAVIA_OWNED in vip_dict: ++ new_vip_dict[constants.OCTAVIA_OWNED] = vip_dict[ ++ constants.OCTAVIA_OWNED] + return new_vip_dict + + +@@ -558,4 +562,6 @@ def provider_vip_dict_to_vip_obj(vip_dic + vip_obj.subnet_id = vip_dictionary['vip_subnet_id'] + if 'vip_qos_policy_id' in vip_dictionary: + vip_obj.qos_policy_id = vip_dictionary['vip_qos_policy_id'] ++ if constants.OCTAVIA_OWNED in vip_dictionary: ++ vip_obj.octavia_owned = vip_dictionary[constants.OCTAVIA_OWNED] + return vip_obj +Index: octavia-5.0.1/octavia/api/v2/controllers/load_balancer.py +=================================================================== +--- octavia-5.0.1.orig/octavia/api/v2/controllers/load_balancer.py ++++ octavia-5.0.1/octavia/api/v2/controllers/load_balancer.py +@@ -12,6 +12,7 @@ + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. 
++import ipaddress + + from oslo_config import cfg + from oslo_db import exception as odb_exceptions +@@ -19,6 +20,7 @@ from oslo_log import log as logging + from oslo_utils import excutils + from oslo_utils import strutils + import pecan ++import six + from sqlalchemy.orm import exc as sa_exception + from wsme import types as wtypes + from wsmeext import pecan as wsme_pecan +@@ -186,26 +188,40 @@ class LoadBalancersController(base.BaseC + isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType)): + load_balancer.vip_qos_policy_id = port_qos_policy_id + +- # Identify the subnet for this port + if load_balancer.vip_subnet_id: ++ # If we were provided a subnet_id, validate it exists and that ++ # there is a fixed_ip on the port that matches the provided subnet + validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id, + context=context) +- else: +- if load_balancer.vip_address: +- for port_fixed_ip in port.fixed_ips: +- if port_fixed_ip.ip_address == load_balancer.vip_address: +- load_balancer.vip_subnet_id = port_fixed_ip.subnet_id +- break +- if not load_balancer.vip_subnet_id: +- raise exceptions.ValidationException(detail=_( +- "Specified VIP address not found on the " +- "specified VIP port.")) +- elif len(port.fixed_ips) == 1: +- load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id +- else: ++ for port_fixed_ip in port.fixed_ips: ++ if port_fixed_ip.subnet_id == load_balancer.vip_subnet_id: ++ load_balancer.vip_address = port_fixed_ip.ip_address ++ break # Just pick the first address found in the subnet ++ if not load_balancer.vip_address: ++ raise exceptions.ValidationException(detail=_( ++ "No VIP address found on the specified VIP port within " ++ "the specified subnet.")) ++ elif load_balancer.vip_address: ++ normalized_lb_ip = ipaddress.ip_address( ++ six.text_type(load_balancer.vip_address)).compressed ++ for port_fixed_ip in port.fixed_ips: ++ normalized_port_ip = ipaddress.ip_address( ++ six.text_type(port_fixed_ip.ip_address)).compressed ++ if normalized_port_ip == normalized_lb_ip: ++ load_balancer.vip_subnet_id = port_fixed_ip.subnet_id ++ break ++ if not load_balancer.vip_subnet_id: + raise exceptions.ValidationException(detail=_( +- "VIP port's subnet could not be determined. Please " +- "specify either a VIP subnet or address.")) ++ "Specified VIP address not found on the " ++ "specified VIP port.")) ++ elif len(port.fixed_ips) == 1: ++ # User provided only a port, get the subnet and address from it ++ load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id ++ load_balancer.vip_address = port.fixed_ips[0].ip_address ++ else: ++ raise exceptions.ValidationException(detail=_( ++ "VIP port's subnet could not be determined. Please " ++ "specify either a VIP subnet or address.")) + + def _validate_vip_request_object(self, load_balancer, context=None): + allowed_network_objects = [] +@@ -398,7 +414,10 @@ class LoadBalancersController(base.BaseC + # flavor dict instead of just the flavor_id we store in the DB. + lb_dict['flavor'] = flavor_dict + +- # See if the provider driver wants to create the VIP port ++ # See if the provider driver wants to manage the VIP port ++ # This will still be called if the user provided a port to ++ # allow drivers to collect any required information about the ++ # VIP port. 
+ octavia_owned = False + try: + provider_vip_dict = driver_utils.vip_dict_to_provider_dict( +@@ -418,6 +437,10 @@ class LoadBalancersController(base.BaseC + if 'port_id' not in vip_dict or not vip_dict['port_id']: + octavia_owned = True + ++ # Check if the driver claims octavia owns the VIP port. ++ if vip.octavia_owned: ++ octavia_owned = True ++ + self.repositories.vip.update( + lock_session, db_lb.id, ip_address=vip.ip_address, + port_id=vip.port_id, network_id=vip.network_id, +Index: octavia-5.0.1/octavia/common/config.py +=================================================================== +--- octavia-5.0.1.orig/octavia/common/config.py ++++ octavia-5.0.1/octavia/common/config.py +@@ -170,6 +170,20 @@ amphora_agent_opts = [ + help='The UDP API backend for amphora agent.'), + ] + ++compute_opts = [ ++ cfg.IntOpt('max_retries', default=15, ++ help=_('The maximum attempts to retry an action with the ' ++ 'compute service.')), ++ cfg.IntOpt('retry_interval', default=1, ++ help=_('Seconds to wait before retrying an action with the ' ++ 'compute service.')), ++ cfg.IntOpt('retry_backoff', default=1, ++ help=_('The seconds to backoff retry attempts.')), ++ cfg.IntOpt('retry_max', default=10, ++ help=_('The maximum interval in seconds between retry ' ++ 'attempts.')), ++] ++ + networking_opts = [ + cfg.IntOpt('max_retries', default=15, + help=_('The maximum attempts to retry an action with the ' +@@ -177,6 +191,11 @@ networking_opts = [ + cfg.IntOpt('retry_interval', default=1, + help=_('Seconds to wait before retrying an action with the ' + 'networking service.')), ++ cfg.IntOpt('retry_backoff', default=1, ++ help=_('The seconds to backoff retry attempts.')), ++ cfg.IntOpt('retry_max', default=10, ++ help=_('The maximum interval in seconds between retry ' ++ 'attempts.')), + cfg.IntOpt('port_detach_timeout', default=300, + help=_('Seconds to wait for a port to detach from an ' + 'amphora.')), +@@ -289,6 +308,14 @@ haproxy_amphora_opts = [ + default=2, + help=_('Retry timeout between connection attempts in ' + 'seconds for active amphora.')), ++ cfg.IntOpt('failover_connection_max_retries', ++ default=2, ++ help=_('Retry threshold for connecting to an amphora in ' ++ 'failover.')), ++ cfg.IntOpt('failover_connection_retry_interval', ++ default=5, ++ help=_('Retry timeout between connection attempts in ' ++ 'seconds for amphora in failover.')), + cfg.IntOpt('build_rate_limit', + default=-1, + help=_('Number of amphorae that could be built per controller ' +@@ -352,6 +379,16 @@ haproxy_amphora_opts = [ + deprecated_reason='This is now automatically discovered ' + ' and configured.', + help=_("If False, use sysvinit.")), ++ cfg.IntOpt('api_db_commit_retry_attempts', default=15, ++ help=_('The number of times the database action will be ' ++ 'attempted.')), ++ cfg.IntOpt('api_db_commit_retry_initial_delay', default=1, ++ help=_('The initial delay before a retry attempt.')), ++ cfg.IntOpt('api_db_commit_retry_backoff', default=1, ++ help=_('The time to backoff retry attempts.')), ++ cfg.IntOpt('api_db_commit_retry_max', default=5, ++ help=_('The maximum amount of time to wait between retry ' ++ 'attempts.')), + ] + + controller_worker_opts = [ +@@ -434,7 +471,11 @@ controller_worker_opts = [ + help=_('If True, build cloud-init user-data that is passed ' + 'to the config drive on Amphora boot instead of ' + 'personality files. 
If False, utilize personality ' +- 'files.')) ++ 'files.')), ++ cfg.IntOpt('amphora_delete_retries', default=5, ++ help=_('Number of times an amphora delete should be retried.')), ++ cfg.IntOpt('amphora_delete_retry_interval', default=5, ++ help=_('Time, in seconds, between amphora delete retries.')), + ] + + task_flow_opts = [ +@@ -723,6 +764,7 @@ driver_agent_opts = [ + cfg.CONF.register_opts(core_opts) + cfg.CONF.register_opts(api_opts, group='api_settings') + cfg.CONF.register_opts(amphora_agent_opts, group='amphora_agent') ++cfg.CONF.register_opts(compute_opts, group='compute') + cfg.CONF.register_opts(networking_opts, group='networking') + cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging') + cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora') +Index: octavia-5.0.1/octavia/common/constants.py +=================================================================== +--- octavia-5.0.1.orig/octavia/common/constants.py ++++ octavia-5.0.1/octavia/common/constants.py +@@ -286,7 +286,10 @@ SUPPORTED_TASKFLOW_ENGINE_TYPES = ['seri + # Task/Flow constants + ACTIVE_CONNECTIONS = 'active_connections' + ADDED_PORTS = 'added_ports' ++ADMIN_STATE_UP = 'admin_state_up' ++ALLOWED_ADDRESS_PAIRS = 'allowed_address_pairs' + AMP_DATA = 'amp_data' ++AMP_VRRP_INT = 'amp_vrrp_int' + AMPHORA = 'amphora' + AMPHORA_ID = 'amphora_id' + AMPHORA_INDEX = 'amphora_index' +@@ -295,9 +298,12 @@ AMPHORAE = 'amphorae' + AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config' + AMPS_DATA = 'amps_data' + ANTI_AFFINITY = 'anti-affinity' ++ATTEMPT_NUMBER = 'attempt_number' ++BASE_PORT = 'base_port' + BYTES_IN = 'bytes_in' + BYTES_OUT = 'bytes_out' + CA_TLS_CERTIFICATE_ID = 'ca_tls_certificate_id' ++CIDR = 'cidr' + CLIENT_CA_TLS_CERTIFICATE_ID = 'client_ca_tls_certificate_id' + CLIENT_CRL_CONTAINER_ID = 'client_crl_container_id' + COMPUTE_ID = 'compute_id' +@@ -309,17 +315,22 @@ CRL_CONTAINER_ID = 'crl_container_id' + DELTA = 'delta' + DELTAS = 'deltas' + DESCRIPTION = 'description' ++DEVICE_OWNER = 'device_owner' + ENABLED = 'enabled' ++FAILED_AMP_VRRP_PORT_ID = 'failed_amp_vrrp_port_id' + FAILED_AMPHORA = 'failed_amphora' + FAILOVER_AMPHORA = 'failover_amphora' + FAILOVER_AMPHORA_ID = 'failover_amphora_id' + FIELDS = 'fields' ++FIXED_IPS = 'fixed_ips' + FLAVOR_ID = 'flavor_id' + HEALTH_MON = 'health_mon' + HEALTH_MONITOR = 'health_monitor' + HEALTH_MONITOR_ID = 'health_monitor_id' + HEALTH_MONITOR_UPDATES = 'health_monitor_updates' ++ID = 'id' + IP_ADDRESS = 'ip_address' ++IPV6_ICMP = 'ipv6-icmp' + L7POLICY = 'l7policy' + L7POLICY_ID = 'l7policy_id' + L7POLICY_UPDATES = 'l7policy_updates' +@@ -337,17 +348,21 @@ MEMBER = 'member' + MEMBER_ID = 'member_id' + MEMBER_PORTS = 'member_ports' + MEMBER_UPDATES = 'member_updates' ++MESSAGE = 'message' + NAME = 'name' + NETWORK_ID = 'network_id' + NICS = 'nics' + OBJECT = 'object' ++PASSIVE_FAILURE = 'passive_failure' + PEER_PORT = 'peer_port' + POOL = 'pool' + POOL_CHILD_COUNT = 'pool_child_count' + POOL_ID = 'pool_id' + POOL_UPDATES = 'pool_updates' ++PORT = 'port' + PORT_ID = 'port_id' + PORTS = 'ports' ++PROJECT_ID = 'project_id' + PROVIDER = 'provider' + PROVIDER_NAME = 'provider_name' + QOS_POLICY_ID = 'qos_policy_id' +@@ -355,12 +370,18 @@ REDIRECT_POOL = 'redirect_pool' + REQ_CONN_TIMEOUT = 'req_conn_timeout' + REQ_READ_TIMEOUT = 'req_read_timeout' + REQUEST_ERRORS = 'request_errors' ++SECURITY_GROUPS = 'security_groups' ++SECURITY_GROUP_RULES = 'security_group_rules' + SERVER_GROUP_ID = 'server_group_id' + SERVER_PEM = 'server_pem' + 
SNI_CONTAINERS = 'sni_containers' + SOFT_ANTI_AFFINITY = 'soft-anti-affinity' ++STATUS_CODE = 'status_code' ++SUBNET = 'subnet' ++SUBNET_ID = 'subnet_id' + SUBNET = 'subnet' + TAGS = 'tags' ++TENANT_ID = 'tenant_id' + TIMEOUT_DICT = 'timeout_dict' + TLS_CERTIFICATE_ID = 'tls_certificate_id' + TLS_CONTAINER_ID = 'tls_container_id' +@@ -370,7 +391,10 @@ UPDATED_AT = 'updated_at' + UPDATE_DICT = 'update_dict' + VIP = 'vip' + VIP_NETWORK = 'vip_network' ++VIP_SG_ID = 'vip_sg_id' ++VIP_SUBNET = 'vip_subnet' + VRRP_GROUP = 'vrrp_group' ++VRRP_PORT = 'vrrp_port' + + # Taskflow flow and task names + CERT_ROTATE_AMPHORA_FLOW = 'octavia-cert-rotate-amphora-flow' +@@ -390,6 +414,7 @@ CREATE_POOL_FLOW = 'octavia-create-pool- + CREATE_L7POLICY_FLOW = 'octavia-create-l7policy-flow' + CREATE_L7RULE_FLOW = 'octavia-create-l7rule-flow' + DELETE_AMPHORA_FLOW = 'octavia-delete-amphora-flow' ++DELETE_EXTRA_AMPHORAE_FLOW = 'octavia-delete-extra-amphorae-flow' + DELETE_HEALTH_MONITOR_FLOW = 'octavia-delete-health-monitor-flow' + DELETE_LISTENER_FLOW = 'octavia-delete-listener_flow' + DELETE_LOADBALANCER_FLOW = 'octavia-delete-loadbalancer-flow' +@@ -398,6 +423,7 @@ DELETE_POOL_FLOW = 'octavia-delete-pool- + DELETE_L7POLICY_FLOW = 'octavia-delete-l7policy-flow' + DELETE_L7RULE_FLOW = 'octavia-delete-l7policy-flow' + FAILOVER_AMPHORA_FLOW = 'octavia-failover-amphora-flow' ++FAILOVER_LOADBALANCER_FLOW = 'octavia-failover-loadbalancer-flow' + LOADBALANCER_NETWORKING_SUBFLOW = 'octavia-new-loadbalancer-net-subflow' + UPDATE_HEALTH_MONITOR_FLOW = 'octavia-update-health-monitor-flow' + UPDATE_LISTENER_FLOW = 'octavia-update-listener-flow' +@@ -411,10 +437,13 @@ UPDATE_AMPHORA_CONFIG_FLOW = 'octavia-up + + POST_MAP_AMP_TO_LB_SUBFLOW = 'octavia-post-map-amp-to-lb-subflow' + CREATE_AMP_FOR_LB_SUBFLOW = 'octavia-create-amp-for-lb-subflow' ++CREATE_AMP_FOR_FAILOVER_SUBFLOW = 'octavia-create-amp-for-failover-subflow' + AMP_PLUG_NET_SUBFLOW = 'octavia-plug-net-subflow' + GET_AMPHORA_FOR_LB_SUBFLOW = 'octavia-get-amphora-for-lb-subflow' + POST_LB_AMP_ASSOCIATION_SUBFLOW = ( + 'octavia-post-loadbalancer-amp_association-subflow') ++AMPHORA_LISTENER_START_SUBFLOW = 'amphora-listener-start-subflow' ++AMPHORA_LISTENER_RELOAD_SUBFLOW = 'amphora-listener-start-subflow' + + MAP_LOADBALANCER_TO_AMPHORA = 'octavia-mapload-balancer-to-amphora' + RELOAD_AMPHORA = 'octavia-reload-amphora' +@@ -430,7 +459,7 @@ COMPUTE_WAIT = 'octavia-compute-wait' + UPDATE_AMPHORA_INFO = 'octavia-update-amphora-info' + AMPHORA_FINALIZE = 'octavia-amphora-finalize' + MARK_AMPHORA_ALLOCATED_INDB = 'octavia-mark-amphora-allocated-indb' +-RELOADLOAD_BALANCER = 'octavia-reloadload-balancer' ++MARK_AMPHORA_READY_INDB = 'octavia-mark-amphora-ready-indb' + MARK_LB_ACTIVE_INDB = 'octavia-mark-lb-active-indb' + MARK_AMP_MASTER_INDB = 'octavia-mark-amp-master-indb' + MARK_AMP_BACKUP_INDB = 'octavia-mark-amp-backup-indb' +@@ -444,6 +473,7 @@ CREATE_VRRP_GROUP_FOR_LB = 'octavia-crea + CREATE_VRRP_SECURITY_RULES = 'octavia-create-vrrp-security-rules' + AMP_COMPUTE_CONNECTIVITY_WAIT = 'octavia-amp-compute-connectivity-wait' + AMP_LISTENER_UPDATE = 'octavia-amp-listeners-update' ++AMP_LISTENER_START = 'octavia-amp-listeners-start' + PLUG_VIP_AMPHORA = 'octavia-amp-plug-vip' + APPLY_QOS_AMP = 'octavia-amp-apply-qos' + UPDATE_AMPHORA_VIP_DATA = 'ocatvia-amp-update-vip-data' +@@ -451,6 +481,8 @@ GET_AMP_NETWORK_CONFIG = 'octavia-amp-ge + AMP_POST_VIP_PLUG = 'octavia-amp-post-vip-plug' + GENERATE_SERVER_PEM_TASK = 'GenerateServerPEMTask' + AMPHORA_CONFIG_UPDATE_TASK = 
'AmphoraConfigUpdateTask' ++FIRST_AMP_NETWORK_CONFIGS = 'first-amp-network-configs' ++FIRST_AMP_VRRP_INTERFACE = 'first-amp-vrrp_interface' + + # Batch Member Update constants + UNORDERED_MEMBER_UPDATES_FLOW = 'octavia-unordered-member-updates-flow' +@@ -465,11 +497,30 @@ UPDATE_MEMBER_INDB = 'octavia-update-mem + DELETE_MEMBER_INDB = 'octavia-delete-member-indb' + + # Task Names ++ADMIN_DOWN_PORT = 'admin-down-port' ++AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug' ++AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener' ++AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert' ++AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug' ++ATTACH_PORT = 'attach-port' ++CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta' ++CREATE_VIP_BASE_PORT = 'create-vip-base-port' ++DELETE_AMPHORA = 'delete-amphora' ++DELETE_PORT = 'delete-port' ++DISABLE_AMP_HEALTH_MONITORING = 'disable-amphora-health-monitoring' ++GET_AMPHORA_NETWORK_CONFIGS_BY_ID = 'get-amphora-network-configs-by-id' ++GET_AMPHORAE_FROM_LB = 'get-amphorae-from-lb' ++HANDLE_NETWORK_DELTA = 'handle-network-delta' ++MARK_AMPHORA_DELETED = 'mark-amphora-deleted' ++MARK_AMPHORA_PENDING_DELETE = 'mark-amphora-pending-delete' ++MARK_AMPHORA_HEALTH_BUSY = 'mark-amphora-health-busy' + RELOAD_AMP_AFTER_PLUG_VIP = 'reload-amp-after-plug-vip' + RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc' + RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph' + RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip' +-RELOAD_LB_BEFOR_ALLOCATE_VIP = "reload-lb-before-allocate-vip" ++RELOAD_LB_BEFOR_ALLOCATE_VIP = 'reload-lb-before-allocate-vip' ++UPDATE_AMP_FAILOVER_DETAILS = 'update-amp-failover-details' ++ + + NOVA_1 = '1.1' + NOVA_21 = '2.1' +@@ -717,3 +768,8 @@ SUPPORTED_VOLUME_DRIVERS = [VOLUME_NOOP_ + CINDER_STATUS_AVAILABLE = 'available' + CINDER_STATUS_ERROR = 'error' + CINDER_ACTION_CREATE_VOLUME = 'create volume' ++ ++VIP_SECURITY_GROUP_PREFIX = 'lb-' ++ ++AMP_BASE_PORT_PREFIX = 'octavia-lb-vrrp-' ++OCTAVIA_OWNED = 'octavia_owned' +Index: octavia-5.0.1/octavia/common/exceptions.py +=================================================================== +--- octavia-5.0.1.orig/octavia/common/exceptions.py ++++ octavia-5.0.1/octavia/common/exceptions.py +@@ -204,7 +204,8 @@ class ComputeBuildQueueTimeoutException( + + + class ComputeDeleteException(OctaviaException): +- message = _('Failed to delete compute instance.') ++ message = _('Failed to delete compute instance. 
The compute service ' ++ 'reports: %(compute_msg)s') + + + class ComputeGetException(OctaviaException): +@@ -245,6 +246,14 @@ class ComputeWaitTimeoutException(Octavi + message = _('Waiting for compute id %(id)s to go active timeout.') + + ++class ComputePortInUseException(OctaviaException): ++ message = _('Compute driver reports port %(port)s is already in use.') ++ ++ ++class ComputeUnknownException(OctaviaException): ++ message = _('Unknown exception from the compute driver: %(exc)s.') ++ ++ + class InvalidTopology(OctaviaException): + message = _('Invalid topology specified: %(topology)s') + +@@ -398,3 +407,12 @@ class VolumeDeleteException(OctaviaExcep + + class VolumeGetException(OctaviaException): + message = _('Failed to retrieve volume instance.') ++ ++ ++class NetworkServiceError(OctaviaException): ++ message = _('The networking service had a failure: %(net_error)s') ++ ++ ++class InvalidIPAddress(APIException): ++ msg = _('The IP Address %(ip_addr)s is invalid.') ++ code = 400 +Index: octavia-5.0.1/octavia/common/utils.py +=================================================================== +--- octavia-5.0.1.orig/octavia/common/utils.py ++++ octavia-5.0.1/octavia/common/utils.py +@@ -30,6 +30,8 @@ from oslo_utils import excutils + import six + from stevedore import driver as stevedore_driver + ++from octavia.common import constants ++ + CONF = cfg.CONF + + LOG = logging.getLogger(__name__) +@@ -51,6 +53,15 @@ def base64_sha1_string(string_to_hash): + return re.sub(r"^-", "x", b64_sha1) + + ++def get_amphora_driver(): ++ amphora_driver = stevedore_driver.DriverManager( ++ namespace='octavia.amphora.drivers', ++ name=CONF.controller_worker.amphora_driver, ++ invoke_on_load=True ++ ).driver ++ return amphora_driver ++ ++ + def get_network_driver(): + CONF.import_group('controller_worker', 'octavia.common.config') + network_driver = stevedore_driver.DriverManager( +@@ -61,6 +72,12 @@ def get_network_driver(): + return network_driver + + ++def is_ipv4(ip_address): ++ """Check if ip address is IPv4 address.""" ++ ip = netaddr.IPAddress(ip_address) ++ return ip.version == 4 ++ ++ + def is_ipv6(ip_address): + """Check if ip address is IPv6 address.""" + ip = netaddr.IPAddress(ip_address) +@@ -100,6 +117,12 @@ def ip_netmask_to_cidr(ip, netmask): + return "{ip}/{netmask}".format(ip=net.network, netmask=net.prefixlen) + + ++def get_vip_security_group_name(loadbalancer_id): ++ if loadbalancer_id: ++ return constants.VIP_SECURITY_GROUP_PREFIX + loadbalancer_id ++ return None ++ ++ + def get_six_compatible_value(value, six_type=six.string_types): + if six.PY3 and isinstance(value, six_type): + value = value.encode('utf-8') +Index: octavia-5.0.1/octavia/compute/drivers/noop_driver/driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/compute/drivers/noop_driver/driver.py ++++ octavia-5.0.1/octavia/compute/drivers/noop_driver/driver.py +@@ -82,8 +82,8 @@ class NoopManager(object): + self.__class__.__name__, server_group_id) + self.computeconfig[server_group_id] = (server_group_id, 'delete') + +- def attach_network_or_port(self, compute_id, network_id, ip_address=None, +- port_id=None): ++ def attach_network_or_port(self, compute_id, network_id=None, ++ ip_address=None, port_id=None): + LOG.debug("Compute %s no-op, attach_network_or_port compute_id %s," + "network_id %s, ip_address %s, port_id %s", + self.__class__.__name__, compute_id, +@@ -145,8 +145,8 @@ class NoopComputeDriver(driver_base.Comp + def delete_server_group(self, 
server_group_id): + self.driver.delete_server_group(server_group_id) + +- def attach_network_or_port(self, compute_id, network_id, ip_address=None, +- port_id=None): ++ def attach_network_or_port(self, compute_id, network_id=None, ++ ip_address=None, port_id=None): + self.driver.attach_network_or_port(compute_id, network_id, ip_address, + port_id) + +Index: octavia-5.0.1/octavia/compute/drivers/nova_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/compute/drivers/nova_driver.py ++++ octavia-5.0.1/octavia/compute/drivers/nova_driver.py +@@ -196,9 +196,9 @@ class VirtualMachineManager(compute_base + except nova_exceptions.NotFound: + LOG.warning("Nova instance with id: %s not found. " + "Assuming already deleted.", compute_id) +- except Exception: ++ except Exception as e: + LOG.exception("Error deleting nova virtual machine.") +- raise exceptions.ComputeDeleteException() ++ raise exceptions.ComputeDeleteException(compute_msg=str(e)) + + def status(self, compute_id): + '''Retrieve the status of a virtual machine. +@@ -334,8 +334,8 @@ class VirtualMachineManager(compute_base + LOG.exception("Error delete server group instance.") + raise exceptions.ServerGroupObjectDeleteException() + +- def attach_network_or_port(self, compute_id, network_id, ip_address=None, +- port_id=None): ++ def attach_network_or_port(self, compute_id, network_id=None, ++ ip_address=None, port_id=None): + """Attaching a port or a network to an existing amphora + + :param compute_id: id of an amphora in the compute service +@@ -343,13 +343,39 @@ class VirtualMachineManager(compute_base + :param ip_address: ip address to attempt to be assigned to interface + :param port_id: id of the neutron port + :return: nova interface instance +- :raises: Exception ++ :raises ComputePortInUseException: The port is in use somewhere else ++ :raises ComputeUnknownException: Unknown nova error + """ + try: + interface = self.manager.interface_attach( + server=compute_id, net_id=network_id, fixed_ip=ip_address, + port_id=port_id) +- except Exception: ++ except nova_exceptions.Conflict as e: ++ # The port is already in use. ++ if port_id: ++ # Check if the port we want is already attached ++ try: ++ interfaces = self.manager.interface_list(compute_id) ++ for interface in interfaces: ++ if interface.id == port_id: ++ return interface ++ except Exception as e: ++ raise exceptions.ComputeUnknownException(exc=str(e)) ++ ++ raise exceptions.ComputePortInUseException(port=port_id) ++ ++ # Nova should have created the port, so something is really ++ # wrong in nova if we get here. 
++ raise exceptions.ComputeUnknownException(exc=str(e)) ++ except nova_exceptions.NotFound as e: ++ if 'Instance' in str(e): ++ raise exceptions.NotFound(resource='Instance', id=compute_id) ++ if 'Network' in str(e): ++ raise exceptions.NotFound(resource='Network', id=network_id) ++ if 'Port' in str(e): ++ raise exceptions.NotFound(resource='Port', id=port_id) ++ raise exceptions.NotFound(resource=str(e), id=compute_id) ++ except Exception as e: + LOG.error('Error attaching network %(network_id)s with ip ' + '%(ip_address)s and port %(port)s to amphora ' + '(compute_id: %(compute_id)s) ', +@@ -359,7 +385,7 @@ class VirtualMachineManager(compute_base + 'ip_address': ip_address, + 'port': port_id + }) +- raise ++ raise exceptions.ComputeUnknownException(exc=str(e)) + return interface + + def detach_port(self, compute_id, port_id): +Index: octavia-5.0.1/octavia/controller/worker/v1/controller_worker.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/controller_worker.py ++++ octavia-5.0.1/octavia/controller/worker/v1/controller_worker.py +@@ -23,6 +23,8 @@ import tenacity + + from octavia.common import base_taskflow + from octavia.common import constants ++from octavia.common import exceptions ++from octavia.common import utils + from octavia.controller.worker.v1.flows import amphora_flows + from octavia.controller.worker.v1.flows import health_monitor_flows + from octavia.controller.worker.v1.flows import l7policy_flows +@@ -37,11 +39,6 @@ from octavia.db import repositories as r + CONF = cfg.CONF + LOG = logging.getLogger(__name__) + +-RETRY_ATTEMPTS = 15 +-RETRY_INITIAL_DELAY = 1 +-RETRY_BACKOFF = 1 +-RETRY_MAX = 5 +- + + def _is_provisioning_status_pending_update(lb_obj): + return not lb_obj.provisioning_status == constants.PENDING_UPDATE +@@ -78,8 +75,11 @@ class ControllerWorker(base_taskflow.Bas + tenacity.retry_if_result(_is_provisioning_status_pending_update) | + tenacity.retry_if_exception_type()), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def _get_db_obj_until_pending_update(self, repo, id): + + return repo.get(db_apis.get_session(), id=id) +@@ -92,12 +92,13 @@ class ControllerWorker(base_taskflow.Bas + :returns: amphora_id + """ + try: ++ store = {constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_SPARES_POOL_PRIORITY, ++ constants.FLAVOR: None, ++ constants.SERVER_GROUP_ID: None} + create_amp_tf = self._taskflow_load( + self._amphora_flows.get_create_amphora_flow(), +- store={constants.BUILD_TYPE_PRIORITY: +- constants.LB_CREATE_SPARES_POOL_PRIORITY, +- constants.FLAVOR: None} +- ) ++ store=store) + with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG): + create_amp_tf.run() + +@@ -105,27 +106,14 @@ class ControllerWorker(base_taskflow.Bas + except Exception as e: + LOG.error('Failed to create an amphora due to: {}'.format(str(e))) + +- def delete_amphora(self, amphora_id): +- """Deletes an existing Amphora. 
+- +- :param amphora_id: ID of the amphora to delete +- :returns: None +- :raises AmphoraNotFound: The referenced Amphora was not found +- """ +- amphora = self._amphora_repo.get(db_apis.get_session(), +- id=amphora_id) +- delete_amp_tf = self._taskflow_load(self._amphora_flows. +- get_delete_amphora_flow(), +- store={constants.AMPHORA: amphora}) +- with tf_logging.DynamicLoggingListener(delete_amp_tf, +- log=LOG): +- delete_amp_tf.run() +- + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_health_monitor(self, health_monitor_id): + """Creates a health monitor. + +@@ -218,8 +206,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_listener(self, listener_id): + """Creates a listener. + +@@ -304,8 +295,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_load_balancer(self, load_balancer_id, flavor=None): + """Creates a load balancer by allocating Amphorae. + +@@ -330,6 +324,9 @@ class ControllerWorker(base_taskflow.Bas + constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: flavor} + ++ if not CONF.nova.enable_anti_affinity: ++ store[constants.SERVER_GROUP_ID] = None ++ + topology = lb.topology + + store[constants.UPDATE_DICT] = { +@@ -403,8 +400,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_member(self, member_id): + """Creates a pool member. 
+ +@@ -460,8 +460,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def batch_update_members(self, old_member_ids, new_member_ids, + updated_members): + new_members = [self._member_repo.get(db_apis.get_session(), id=mid) +@@ -538,8 +541,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_pool(self, pool_id): + """Creates a node pool. + +@@ -628,8 +634,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_l7policy(self, l7policy_id): + """Creates an L7 Policy. + +@@ -714,8 +723,11 @@ class ControllerWorker(base_taskflow.Bas + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( +- RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), +- stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) ++ CONF.haproxy_amphora.api_db_commit_retry_initial_delay, ++ CONF.haproxy_amphora.api_db_commit_retry_backoff, ++ CONF.haproxy_amphora.api_db_commit_retry_max), ++ stop=tenacity.stop_after_attempt( ++ CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_l7rule(self, l7rule_id): + """Creates an L7 Rule. + +@@ -802,149 +814,234 @@ class ControllerWorker(base_taskflow.Bas + log=LOG): + update_l7rule_tf.run() + +- def _perform_amphora_failover(self, amp, priority): +- """Internal method to perform failover operations for an amphora. 
+- +- :param amp: The amphora to failover +- :param priority: The create priority +- :returns: None +- """ +- +- stored_params = {constants.FAILED_AMPHORA: amp, +- constants.LOADBALANCER_ID: amp.load_balancer_id, +- constants.BUILD_TYPE_PRIORITY: priority, } +- +- if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP): +- amp_role = 'master_or_backup' +- elif amp.role == constants.ROLE_STANDALONE: +- amp_role = 'standalone' +- elif amp.role is None: +- amp_role = 'spare' +- else: +- amp_role = 'undefined' +- +- LOG.info("Perform failover for an amphora: %s", +- {"id": amp.id, +- "load_balancer_id": amp.load_balancer_id, +- "lb_network_ip": amp.lb_network_ip, +- "compute_id": amp.compute_id, +- "role": amp_role}) +- +- if amp.status == constants.DELETED: +- LOG.warning('Amphora %s is marked DELETED in the database but ' +- 'was submitted for failover. Deleting it from the ' +- 'amphora health table to exclude it from health ' +- 'checks and skipping the failover.', amp.id) +- self._amphora_health_repo.delete(db_apis.get_session(), +- amphora_id=amp.id) +- return +- +- if (CONF.house_keeping.spare_amphora_pool_size == 0) and ( +- CONF.nova.enable_anti_affinity is False): +- LOG.warning("Failing over amphora with no spares pool may " +- "cause delays in failover times while a new " +- "amphora instance boots.") +- +- # if we run with anti-affinity we need to set the server group +- # as well +- lb = self._amphora_repo.get_lb_for_amphora( +- db_apis.get_session(), amp.id) +- if CONF.nova.enable_anti_affinity and lb: +- stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id +- if lb and lb.flavor_id: +- stored_params[constants.FLAVOR] = ( +- self._flavor_repo.get_flavor_metadata_dict( +- db_apis.get_session(), lb.flavor_id)) +- else: +- stored_params[constants.FLAVOR] = {} +- +- failover_amphora_tf = self._taskflow_load( +- self._amphora_flows.get_failover_flow( +- role=amp.role, load_balancer=lb), +- store=stored_params) +- +- with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG): +- failover_amphora_tf.run() +- +- LOG.info("Successfully completed the failover for an amphora: %s", +- {"id": amp.id, +- "load_balancer_id": amp.load_balancer_id, +- "lb_network_ip": amp.lb_network_ip, +- "compute_id": amp.compute_id, +- "role": amp_role}) +- + def failover_amphora(self, amphora_id): + """Perform failover operations for an amphora. + ++ Note: This expects the load balancer to already be in ++ provisioning_status=PENDING_UPDATE state. ++ + :param amphora_id: ID for amphora to failover + :returns: None +- :raises AmphoraNotFound: The referenced amphora was not found ++ :raises octavia.common.exceptions.NotFound: The referenced amphora was ++ not found + """ ++ amphora = None + try: +- amp = self._amphora_repo.get(db_apis.get_session(), +- id=amphora_id) +- if not amp: +- LOG.warning("Could not fetch Amphora %s from DB, ignoring " +- "failover request.", amphora_id) ++ amphora = self._amphora_repo.get(db_apis.get_session(), ++ id=amphora_id) ++ if amphora is None: ++ LOG.error('Amphora failover for amphora %s failed because ' ++ 'there is no record of this amphora in the ' ++ 'database. Check that the [house_keeping] ' ++ 'amphora_expiry_age configuration setting is not ' ++ 'too short. Skipping failover.', amphora_id) ++ raise exceptions.NotFound(resource=constants.AMPHORA, ++ id=amphora_id) ++ ++ if amphora.status == constants.DELETED: ++ LOG.warning('Amphora %s is marked DELETED in the database but ' ++ 'was submitted for failover. 
Deleting it from the ' ++ 'amphora health table to exclude it from health ' ++ 'checks and skipping the failover.', amphora.id) ++ self._amphora_health_repo.delete(db_apis.get_session(), ++ amphora_id=amphora.id) + return +- self._perform_amphora_failover( +- amp, constants.LB_CREATE_FAILOVER_PRIORITY) +- if amp.load_balancer_id: +- LOG.info("Mark ACTIVE in DB for load balancer id: %s", +- amp.load_balancer_id) +- self._lb_repo.update( +- db_apis.get_session(), amp.load_balancer_id, +- provisioning_status=constants.ACTIVE) ++ ++ loadbalancer = None ++ if amphora.load_balancer_id: ++ loadbalancer = self._lb_repo.get(db_apis.get_session(), ++ id=amphora.load_balancer_id) ++ lb_amp_count = None ++ if loadbalancer: ++ if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ lb_amp_count = 2 ++ elif loadbalancer.topology == constants.TOPOLOGY_SINGLE: ++ lb_amp_count = 1 ++ ++ amp_failover_flow = self._amphora_flows.get_failover_amphora_flow( ++ amphora, lb_amp_count) ++ ++ flavor = {} ++ lb_id = None ++ vip = None ++ server_group_id = None ++ if loadbalancer: ++ lb_id = loadbalancer.id ++ if loadbalancer.flavor_id: ++ flavor = self._flavor_repo.get_flavor_metadata_dict( ++ db_apis.get_session(), loadbalancer.flavor_id) ++ flavor[constants.LOADBALANCER_TOPOLOGY] = ( ++ loadbalancer.topology) ++ else: ++ flavor = {constants.LOADBALANCER_TOPOLOGY: ++ loadbalancer.topology} ++ vip = loadbalancer.vip ++ server_group_id = loadbalancer.server_group_id ++ ++ stored_params = {constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.FLAVOR: flavor, ++ constants.LOADBALANCER: loadbalancer, ++ constants.SERVER_GROUP_ID: server_group_id, ++ constants.LOADBALANCER_ID: lb_id, ++ constants.VIP: vip} ++ ++ failover_amphora_tf = self._taskflow_load(amp_failover_flow, ++ store=stored_params) ++ ++ with tf_logging.DynamicLoggingListener(failover_amphora_tf, ++ log=LOG): ++ failover_amphora_tf.run() ++ ++ LOG.info("Successfully completed the failover for an amphora: %s", ++ {"id": amphora_id, ++ "load_balancer_id": lb_id, ++ "lb_network_ip": amphora.lb_network_ip, ++ "compute_id": amphora.compute_id, ++ "role": amphora.role}) ++ + except Exception as e: +- try: +- self._lb_repo.update( +- db_apis.get_session(), amp.load_balancer_id, +- provisioning_status=constants.ERROR) +- except Exception: +- LOG.error("Unable to revert LB status to ERROR.") +- with excutils.save_and_reraise_exception(): +- LOG.error("Amphora %(id)s failover exception: %(exc)s", +- {'id': amphora_id, 'exc': e}) ++ with excutils.save_and_reraise_exception(reraise=False): ++ LOG.exception("Amphora %s failover exception: %s", ++ amphora_id, str(e)) ++ self._amphora_repo.update(db_apis.get_session(), ++ amphora_id, status=constants.ERROR) ++ if amphora and amphora.load_balancer_id: ++ self._lb_repo.update( ++ db_apis.get_session(), amphora.load_balancer_id, ++ provisioning_status=constants.ERROR) ++ ++ @staticmethod ++ def _get_amphorae_for_failover(load_balancer): ++ """Returns an ordered list of amphora to failover. ++ ++ :param load_balancer: The load balancer being failed over. ++ :returns: An ordered list of amphora to failover, ++ first amp to failover is last in the list ++ :raises octavia.common.exceptions.InvalidTopology: LB has an unknown ++ topology. 
++ """ ++ if load_balancer.topology == constants.TOPOLOGY_SINGLE: ++ # In SINGLE topology, amp failover order does not matter ++ return [a for a in load_balancer.amphorae ++ if a.status != constants.DELETED] ++ ++ if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ # In Active/Standby we should preference the standby amp ++ # for failover first in case the Active is still able to pass ++ # traffic. ++ # Note: The active amp can switch at any time and in less than a ++ # second, so this is "best effort". ++ amphora_driver = utils.get_amphora_driver() ++ timeout_dict = { ++ constants.CONN_MAX_RETRIES: ++ CONF.haproxy_amphora.failover_connection_max_retries, ++ constants.CONN_RETRY_INTERVAL: ++ CONF.haproxy_amphora.failover_connection_retry_interval} ++ amps = [] ++ selected_amp = None ++ for amp in load_balancer.amphorae: ++ if amp.status == constants.DELETED: ++ continue ++ if selected_amp is None: ++ try: ++ if amphora_driver.get_interface_from_ip( ++ amp, load_balancer.vip.ip_address, ++ timeout_dict): ++ # This is a potential ACTIVE, add it to the list ++ amps.append(amp) ++ else: ++ # This one doesn't have the VIP IP, so start ++ # failovers here. ++ selected_amp = amp ++ LOG.debug("Selected amphora %s as the initial " ++ "failover amphora.", amp.id) ++ except Exception: ++ # This amphora is broken, so start failovers here. ++ selected_amp = amp ++ else: ++ # We have already found a STANDBY, so add the rest to the ++ # list without querying them. ++ amps.append(amp) ++ # Put the selected amphora at the end of the list so it is ++ # first to failover. ++ if selected_amp: ++ amps.append(selected_amp) ++ return amps ++ ++ LOG.error('Unknown load balancer topology found: %s, aborting ' ++ 'failover.', load_balancer.topology) ++ raise exceptions.InvalidTopology(topology=load_balancer.topology) + + def failover_loadbalancer(self, load_balancer_id): + """Perform failover operations for a load balancer. + ++ Note: This expects the load balancer to already be in ++ provisioning_status=PENDING_UPDATE state. ++ + :param load_balancer_id: ID for load balancer to failover + :returns: None +- :raises LBNotFound: The referenced load balancer was not found ++ :raises octavia.commom.exceptions.NotFound: The load balancer was not ++ found. + """ +- +- # Note: This expects that the load balancer is already in +- # provisioning_status=PENDING_UPDATE state + try: + lb = self._lb_repo.get(db_apis.get_session(), + id=load_balancer_id) +- +- # Exclude amphora already deleted +- amps = [a for a in lb.amphorae if a.status != constants.DELETED] +- for amp in amps: +- # failover amphora in backup role +- # Note: this amp may not currently be the backup +- # TODO(johnsom) Change this to query the amp state +- # once the amp API supports it. +- if amp.role == constants.ROLE_BACKUP: +- self._perform_amphora_failover( +- amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) +- +- for amp in amps: +- # failover everyhting else +- if amp.role != constants.ROLE_BACKUP: +- self._perform_amphora_failover( +- amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) +- +- self._lb_repo.update( +- db_apis.get_session(), load_balancer_id, +- provisioning_status=constants.ACTIVE) ++ if lb is None: ++ raise exceptions.NotFound(resource=constants.LOADBALANCER, ++ id=load_balancer_id) ++ ++ # Get the ordered list of amphorae to failover for this LB. 
++ amps = self._get_amphorae_for_failover(lb) ++ ++ if lb.topology == constants.TOPOLOGY_SINGLE: ++ if len(amps) != 1: ++ LOG.warning('%d amphorae found on load balancer %s where ' ++ 'one should exist. Repairing.', len(amps), ++ load_balancer_id) ++ elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ ++ if len(amps) != 2: ++ LOG.warning('%d amphorae found on load balancer %s where ' ++ 'two should exist. Repairing.', len(amps), ++ load_balancer_id) ++ else: ++ LOG.error('Unknown load balancer topology found: %s, aborting ' ++ 'failover!', lb.topology) ++ raise exceptions.InvalidTopology(topology=lb.topology) ++ ++ # Build our failover flow. ++ lb_failover_flow = self._lb_flows.get_failover_LB_flow(amps, lb) ++ ++ # We must provide a topology in the flavor definition ++ # here for the amphora to be created with the correct ++ # configuration. ++ if lb.flavor_id: ++ flavor = self._flavor_repo.get_flavor_metadata_dict( ++ db_apis.get_session(), lb.flavor_id) ++ flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology ++ else: ++ flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology} ++ ++ stored_params = {constants.LOADBALANCER: lb, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.SERVER_GROUP_ID: lb.server_group_id, ++ constants.LOADBALANCER_ID: lb.id, ++ constants.FLAVOR: flavor} ++ ++ failover_lb_tf = self._taskflow_load(lb_failover_flow, ++ store=stored_params) ++ ++ with tf_logging.DynamicLoggingListener(failover_lb_tf, log=LOG): ++ failover_lb_tf.run() ++ LOG.info('Failover of load balancer %s completed successfully.', ++ lb.id) + + except Exception as e: +- with excutils.save_and_reraise_exception(): +- LOG.error("LB %(lbid)s failover exception: %(exc)s", +- {'lbid': load_balancer_id, 'exc': e}) ++ with excutils.save_and_reraise_exception(reraise=False): ++ LOG.exception("LB %(lbid)s failover exception: %(exc)s", ++ {'lbid': load_balancer_id, 'exc': e}) + self._lb_repo.update( + db_apis.get_session(), load_balancer_id, + provisioning_status=constants.ERROR) +Index: octavia-5.0.1/octavia/controller/worker/v1/flows/amphora_flows.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/flows/amphora_flows.py ++++ octavia-5.0.1/octavia/controller/worker/v1/flows/amphora_flows.py +@@ -1,4 +1,5 @@ + # Copyright 2015 Hewlett-Packard Development Company, L.P. ++# Copyright 2020 Red Hat, Inc. All rights reserved. + # + # Licensed under the Apache License, Version 2.0 (the "License"); you may + # not use this file except in compliance with the License. 
You may obtain +@@ -14,26 +15,26 @@ + # + + from oslo_config import cfg ++from oslo_log import log as logging + from taskflow.patterns import graph_flow + from taskflow.patterns import linear_flow + from taskflow.patterns import unordered_flow + + from octavia.common import constants ++from octavia.common import utils + from octavia.controller.worker.v1.tasks import amphora_driver_tasks + from octavia.controller.worker.v1.tasks import cert_task + from octavia.controller.worker.v1.tasks import compute_tasks + from octavia.controller.worker.v1.tasks import database_tasks + from octavia.controller.worker.v1.tasks import lifecycle_tasks + from octavia.controller.worker.v1.tasks import network_tasks ++from octavia.controller.worker.v1.tasks import retry_tasks + + CONF = cfg.CONF ++LOG = logging.getLogger(__name__) + + + class AmphoraFlows(object): +- def __init__(self): +- # for some reason only this has the values from the config file +- self.REST_AMPHORA_DRIVER = (CONF.controller_worker.amphora_driver == +- 'amphora_haproxy_rest_driver') + + def get_create_amphora_flow(self): + """Creates a flow to create an amphora. +@@ -45,23 +46,16 @@ class AmphoraFlows(object): + provides=constants.AMPHORA_ID)) + create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask( + requires=constants.AMPHORA_ID)) +- if self.REST_AMPHORA_DRIVER: +- create_amphora_flow.add(cert_task.GenerateServerPEMTask( +- provides=constants.SERVER_PEM)) +- +- create_amphora_flow.add( +- database_tasks.UpdateAmphoraDBCertExpiration( +- requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) +- +- create_amphora_flow.add(compute_tasks.CertComputeCreate( +- requires=(constants.AMPHORA_ID, constants.SERVER_PEM, +- constants.BUILD_TYPE_PRIORITY, constants.FLAVOR), +- provides=constants.COMPUTE_ID)) +- else: +- create_amphora_flow.add(compute_tasks.ComputeCreate( +- requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, +- constants.FLAVOR), +- provides=constants.COMPUTE_ID)) ++ create_amphora_flow.add(cert_task.GenerateServerPEMTask( ++ provides=constants.SERVER_PEM)) ++ create_amphora_flow.add( ++ database_tasks.UpdateAmphoraDBCertExpiration( ++ requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) ++ create_amphora_flow.add(compute_tasks.CertComputeCreate( ++ requires=(constants.AMPHORA_ID, constants.SERVER_PEM, ++ constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY, ++ constants.FLAVOR), ++ provides=constants.COMPUTE_ID)) + create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB( + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) + create_amphora_flow.add(compute_tasks.ComputeActiveWait( +@@ -114,7 +108,7 @@ class AmphoraFlows(object): + + return post_map_amp_to_lb + +- def _get_create_amp_for_lb_subflow(self, prefix, role): ++ def _get_create_amp_for_lb_subflow(self, prefix, role, is_spare=False): + """Create a new amphora for lb.""" + + sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW +@@ -123,62 +117,22 @@ class AmphoraFlows(object): + name=sf_name + '-' + constants.CREATE_AMPHORA_INDB, + provides=constants.AMPHORA_ID)) + +- require_server_group_id_condition = ( +- role in (constants.ROLE_BACKUP, constants.ROLE_MASTER) and +- CONF.nova.enable_anti_affinity) +- +- if self.REST_AMPHORA_DRIVER: +- create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( +- name=sf_name + '-' + constants.GENERATE_SERVER_PEM, +- provides=constants.SERVER_PEM)) +- +- create_amp_for_lb_subflow.add( +- database_tasks.UpdateAmphoraDBCertExpiration( +- name=sf_name + '-' + 
constants.UPDATE_CERT_EXPIRATION, +- requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) +- +- if require_server_group_id_condition: +- create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( +- name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, +- requires=( +- constants.AMPHORA_ID, +- constants.SERVER_PEM, +- constants.BUILD_TYPE_PRIORITY, +- constants.SERVER_GROUP_ID, +- constants.FLAVOR +- ), +- provides=constants.COMPUTE_ID)) +- else: +- create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( +- name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, +- requires=( +- constants.AMPHORA_ID, +- constants.SERVER_PEM, +- constants.BUILD_TYPE_PRIORITY, +- constants.FLAVOR +- ), +- provides=constants.COMPUTE_ID)) +- else: +- if require_server_group_id_condition: +- create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate( +- name=sf_name + '-' + constants.COMPUTE_CREATE, +- requires=( +- constants.AMPHORA_ID, +- constants.BUILD_TYPE_PRIORITY, +- constants.SERVER_GROUP_ID, +- constants.FLAVOR +- ), +- provides=constants.COMPUTE_ID)) +- else: +- create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate( +- name=sf_name + '-' + constants.COMPUTE_CREATE, +- requires=( +- constants.AMPHORA_ID, +- constants.BUILD_TYPE_PRIORITY, +- constants.FLAVOR +- ), +- provides=constants.COMPUTE_ID)) ++ create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( ++ name=sf_name + '-' + constants.GENERATE_SERVER_PEM, ++ provides=constants.SERVER_PEM)) + ++ create_amp_for_lb_subflow.add( ++ database_tasks.UpdateAmphoraDBCertExpiration( ++ name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION, ++ requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) ++ ++ create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( ++ name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, ++ requires=(constants.AMPHORA_ID, constants.SERVER_PEM, ++ constants.BUILD_TYPE_PRIORITY, ++ constants.SERVER_GROUP_ID, ++ constants.FLAVOR), ++ provides=constants.COMPUTE_ID)) + create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId( + name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID, + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) +@@ -200,10 +154,16 @@ class AmphoraFlows(object): + create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( + name=sf_name + '-' + constants.AMPHORA_FINALIZE, + requires=constants.AMPHORA)) +- create_amp_for_lb_subflow.add( +- database_tasks.MarkAmphoraAllocatedInDB( +- name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, +- requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) ++ if is_spare: ++ create_amp_for_lb_subflow.add( ++ database_tasks.MarkAmphoraReadyInDB( ++ name=sf_name + '-' + constants.MARK_AMPHORA_READY_INDB, ++ requires=constants.AMPHORA)) ++ else: ++ create_amp_for_lb_subflow.add( ++ database_tasks.MarkAmphoraAllocatedInDB( ++ name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, ++ requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) + create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora( + name=sf_name + '-' + constants.RELOAD_AMPHORA, + requires=constants.AMPHORA_ID, +@@ -242,7 +202,7 @@ class AmphoraFlows(object): + return list(history.values())[0] is None + + def get_amphora_for_lb_subflow( +- self, prefix, role=constants.ROLE_STANDALONE): ++ self, prefix, role=constants.ROLE_STANDALONE, is_spare=False): + """Tries to allocate a spare amphora to a loadbalancer if none + + exists, create a new amphora. 
+@@ -250,6 +210,14 @@ class AmphoraFlows(object): + + sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW + ++ # Don't replace a spare with another spare, just build a fresh one. ++ if is_spare: ++ get_spare_amp_flow = linear_flow.Flow(sf_name) ++ ++ get_spare_amp_flow.add(self._get_create_amp_for_lb_subflow( ++ prefix, role, is_spare=is_spare)) ++ return get_spare_amp_flow ++ + # We need a graph flow here for a conditional flow + amp_for_lb_flow = graph_flow.Flow(sf_name) + +@@ -277,286 +245,145 @@ class AmphoraFlows(object): + decider=self._create_new_amp_for_lb_decider, + decider_depth='flow') + +- # Plug the network +- # todo(xgerman): Rework failover flow +- if prefix != constants.FAILOVER_AMPHORA_FLOW: +- sf_name = prefix + '-' + constants.AMP_PLUG_NET_SUBFLOW +- amp_for_lb_net_flow = linear_flow.Flow(sf_name) +- amp_for_lb_net_flow.add(amp_for_lb_flow) +- amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name)) +- return amp_for_lb_net_flow +- + return amp_for_lb_flow + +- def _get_amp_net_subflow(self, sf_name): +- flows = [] +- flows.append(network_tasks.PlugVIPAmpphora( +- name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, +- requires=(constants.LOADBALANCER, constants.AMPHORA, +- constants.SUBNET), +- provides=constants.AMP_DATA)) +- +- flows.append(network_tasks.ApplyQosAmphora( +- name=sf_name + '-' + constants.APPLY_QOS_AMP, +- requires=(constants.LOADBALANCER, constants.AMP_DATA, +- constants.UPDATE_DICT))) +- flows.append(database_tasks.UpdateAmphoraVIPData( +- name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, +- requires=constants.AMP_DATA)) +- flows.append(database_tasks.ReloadAmphora( +- name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP, +- requires=constants.AMPHORA_ID, +- provides=constants.AMPHORA)) +- flows.append(database_tasks.ReloadLoadBalancer( +- name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP, +- requires=constants.LOADBALANCER_ID, +- provides=constants.LOADBALANCER)) +- flows.append(network_tasks.GetAmphoraNetworkConfigs( +- name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, +- requires=(constants.LOADBALANCER, constants.AMPHORA), +- provides=constants.AMPHORA_NETWORK_CONFIG)) +- flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( +- name=sf_name + '-' + constants.AMP_POST_VIP_PLUG, +- rebind={constants.AMPHORAE_NETWORK_CONFIG: +- constants.AMPHORA_NETWORK_CONFIG}, +- requires=(constants.LOADBALANCER, +- constants.AMPHORAE_NETWORK_CONFIG))) +- return flows +- +- def get_delete_amphora_flow(self): +- """Creates a flow to delete an amphora. +- +- This should be configurable in the config file +- :returns: The flow for deleting the amphora +- :raises AmphoraNotFound: The referenced Amphora was not found ++ def get_delete_amphora_flow( ++ self, amphora, ++ retry_attempts=CONF.controller_worker.amphora_delete_retries, ++ retry_interval=( ++ CONF.controller_worker.amphora_delete_retry_interval)): ++ """Creates a subflow to delete an amphora and it's port. ++ ++ This flow is idempotent and safe to retry. ++ ++ :param amphora: An amphora object. ++ :param retry_attempts: The number of times the flow is retried. ++ :param retry_interval: The time to wait, in seconds, between retries. ++ :returns: The subflow for deleting the amphora. ++ :raises AmphoraNotFound: The referenced Amphora was not found. 
+ """ + +- delete_amphora_flow = linear_flow.Flow(constants.DELETE_AMPHORA_FLOW) ++ delete_amphora_flow = linear_flow.Flow( ++ name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id, ++ retry=retry_tasks.SleepingRetryTimesController( ++ name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' + ++ amphora.id, ++ attempts=retry_attempts, interval=retry_interval)) + delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( +- requires=constants.AMPHORA)) +- delete_amphora_flow.add(database_tasks. +- MarkAmphoraPendingDeleteInDB( +- requires=constants.AMPHORA)) +- delete_amphora_flow.add(database_tasks. +- MarkAmphoraHealthBusy( +- requires=constants.AMPHORA)) +- delete_amphora_flow.add(compute_tasks.ComputeDelete( +- requires=constants.AMPHORA)) +- delete_amphora_flow.add(database_tasks. +- DisableAmphoraHealthMonitoring( +- requires=constants.AMPHORA)) +- delete_amphora_flow.add(database_tasks. +- MarkAmphoraDeletedInDB( +- requires=constants.AMPHORA)) +- return delete_amphora_flow +- +- def get_failover_flow(self, role=constants.ROLE_STANDALONE, +- load_balancer=None): +- """Creates a flow to failover a stale amphora +- +- :returns: The flow for amphora failover +- """ +- +- failover_amphora_flow = linear_flow.Flow( +- constants.FAILOVER_AMPHORA_FLOW) +- +- failover_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- +- failover_amphora_flow.add(network_tasks.FailoverPreparationForAmphora( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- +- # Note: It seems intuitive to boot an amphora prior to deleting +- # the old amphora, however this is a complicated issue. +- # If the target host (due to anit-affinity) is resource +- # constrained, this will fail where a post-delete will +- # succeed. Since this is async with the API it would result +- # in the LB ending in ERROR though the amps are still alive. +- # Consider in the future making this a complicated +- # try-on-failure-retry flow, or move upgrade failovers to be +- # synchronous with the API. For now spares pool and act/stdby +- # will mitigate most of this delay. 
+- +- # Delete the old amphora +- failover_amphora_flow.add( ++ name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id, ++ inject={constants.AMPHORA: amphora})) ++ delete_amphora_flow.add( + database_tasks.MarkAmphoraPendingDeleteInDB( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- failover_amphora_flow.add( +- database_tasks.MarkAmphoraHealthBusy( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- failover_amphora_flow.add(compute_tasks.ComputeDelete( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- failover_amphora_flow.add(network_tasks.WaitForPortDetach( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- failover_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) ++ name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id, ++ inject={constants.AMPHORA: amphora})) ++ delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy( ++ name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id, ++ inject={constants.AMPHORA: amphora})) ++ delete_amphora_flow.add(compute_tasks.ComputeDelete( ++ name=constants.DELETE_AMPHORA + '-' + amphora.id, ++ inject={constants.AMPHORA: amphora, ++ constants.PASSIVE_FAILURE: True})) ++ delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring( ++ name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id, ++ inject={constants.AMPHORA: amphora})) ++ delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB( ++ name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id, ++ inject={constants.AMPHORA: amphora})) ++ if amphora.vrrp_port_id: ++ delete_amphora_flow.add(network_tasks.DeletePort( ++ name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' + ++ str(amphora.vrrp_port_id)), ++ inject={constants.PORT_ID: amphora.vrrp_port_id, ++ constants.PASSIVE_FAILURE: True})) ++ # TODO(johnsom) What about cleaning up any member ports? ++ # maybe we should get the list of attached ports prior to delete ++ # and call delete on them here. Fix this as part of ++ # https://storyboard.openstack.org/#!/story/2007077 + +- # If this is an unallocated amp (spares pool), we're done +- if not load_balancer: +- failover_amphora_flow.add( +- database_tasks.DisableAmphoraHealthMonitoring( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- return failover_amphora_flow ++ return delete_amphora_flow + +- # Save failed amphora details for later +- failover_amphora_flow.add( +- database_tasks.GetAmphoraDetails( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA, +- provides=constants.AMP_DATA)) ++ def get_vrrp_subflow(self, prefix, timeout_dict=None, ++ create_vrrp_group=True): ++ sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW ++ vrrp_subflow = linear_flow.Flow(sf_name) + +- # Get a new amphora +- # Note: Role doesn't matter here. We will update it later. +- get_amp_subflow = self.get_amphora_for_lb_subflow( +- prefix=constants.FAILOVER_AMPHORA_FLOW) +- failover_amphora_flow.add(get_amp_subflow) +- +- # Update the new amphora with the failed amphora details +- failover_amphora_flow.add(database_tasks.UpdateAmpFailoverDetails( +- requires=(constants.AMPHORA, constants.AMP_DATA))) ++ # Optimization for failover flow. No reason to call this ++ # when configuring the secondary amphora. 
++ if create_vrrp_group: ++ vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( ++ name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, ++ requires=constants.LOADBALANCER_ID)) + +- # Update the data stored in the flow from the database +- failover_amphora_flow.add(database_tasks.ReloadLoadBalancer( ++ vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( ++ name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, + requires=constants.LOADBALANCER_ID, +- provides=constants.LOADBALANCER)) +- +- failover_amphora_flow.add(database_tasks.ReloadAmphora( +- requires=constants.AMPHORA_ID, +- provides=constants.AMPHORA)) +- +- # Prepare to reconnect the network interface(s) +- failover_amphora_flow.add(network_tasks.GetAmphoraeNetworkConfigs( +- requires=constants.LOADBALANCER, + provides=constants.AMPHORAE_NETWORK_CONFIG)) +- failover_amphora_flow.add(database_tasks.GetListenersFromLoadbalancer( +- requires=constants.LOADBALANCER, provides=constants.LISTENERS)) +- failover_amphora_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( +- requires=constants.LOADBALANCER, provides=constants.AMPHORAE)) +- +- # Plug the VIP ports into the new amphora +- # The reason for moving these steps here is the udp listeners want to +- # do some kernel configuration before Listener update for forbidding +- # failure during rebuild amphora. +- failover_amphora_flow.add(network_tasks.PlugVIPPort( +- requires=(constants.AMPHORA, constants.AMPHORAE_NETWORK_CONFIG))) +- failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( +- requires=(constants.AMPHORA, constants.LOADBALANCER, +- constants.AMPHORAE_NETWORK_CONFIG))) + +- # Listeners update needs to be run on all amphora to update ++ # VRRP update needs to be run on all amphora to update + # their peer configurations. So parallelize this with an + # unordered subflow. +- update_amps_subflow = unordered_flow.Flow( +- constants.UPDATE_AMPS_SUBFLOW) +- +- timeout_dict = { +- constants.CONN_MAX_RETRIES: +- CONF.haproxy_amphora.active_connection_max_retries, +- constants.CONN_RETRY_INTERVAL: +- CONF.haproxy_amphora.active_connection_rety_interval} +- +- # Setup parallel flows for each amp. We don't know the new amp +- # details at flow creation time, so setup a subflow for each +- # amp on the LB, they let the task index into a list of amps +- # to find the amphora it should work on. 
+- amp_index = 0 +- for amp in load_balancer.amphorae: +- if amp.status == constants.DELETED: +- continue +- update_amps_subflow.add( +- amphora_driver_tasks.AmpListenersUpdate( +- name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index), +- requires=(constants.LOADBALANCER, constants.AMPHORAE), +- inject={constants.AMPHORA_INDEX: amp_index, +- constants.TIMEOUT_DICT: timeout_dict})) +- amp_index += 1 ++ update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow') + +- failover_amphora_flow.add(update_amps_subflow) ++ # We have three tasks to run in order, per amphora ++ amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow') + +- # Plug the member networks into the new amphora +- failover_amphora_flow.add(network_tasks.CalculateAmphoraDelta( +- requires=(constants.LOADBALANCER, constants.AMPHORA), +- provides=constants.DELTA)) +- +- failover_amphora_flow.add(network_tasks.HandleNetworkDelta( +- requires=(constants.AMPHORA, constants.DELTA), +- provides=constants.ADDED_PORTS)) ++ amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( ++ name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, ++ requires=constants.AMPHORAE, ++ inject={constants.AMPHORA_INDEX: 0, ++ constants.TIMEOUT_DICT: timeout_dict}, ++ provides=constants.AMP_VRRP_INT)) ++ ++ amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( ++ name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, ++ requires=(constants.LOADBALANCER_ID, ++ constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, ++ constants.AMP_VRRP_INT), ++ inject={constants.AMPHORA_INDEX: 0, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ ++ amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( ++ name=sf_name + '-0-' + constants.AMP_VRRP_START, ++ requires=constants.AMPHORAE, ++ inject={constants.AMPHORA_INDEX: 0, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ ++ amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow') ++ ++ amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( ++ name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, ++ requires=constants.AMPHORAE, ++ inject={constants.AMPHORA_INDEX: 1, ++ constants.TIMEOUT_DICT: timeout_dict}, ++ provides=constants.AMP_VRRP_INT)) ++ ++ amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( ++ name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, ++ requires=(constants.LOADBALANCER_ID, ++ constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, ++ constants.AMP_VRRP_INT), ++ inject={constants.AMPHORA_INDEX: 1, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( ++ name=sf_name + '-1-' + constants.AMP_VRRP_START, ++ requires=constants.AMPHORAE, ++ inject={constants.AMPHORA_INDEX: 1, ++ constants.TIMEOUT_DICT: timeout_dict})) + +- failover_amphora_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( +- requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) ++ update_amps_subflow.add(amp_0_subflow) ++ update_amps_subflow.add(amp_1_subflow) + +- failover_amphora_flow.add(database_tasks.ReloadLoadBalancer( +- name='octavia-failover-LB-reload-2', +- requires=constants.LOADBALANCER_ID, +- provides=constants.LOADBALANCER)) ++ vrrp_subflow.add(update_amps_subflow) + +- # Handle the amphora role and VRRP if necessary +- if role == constants.ROLE_MASTER: +- failover_amphora_flow.add(database_tasks.MarkAmphoraMasterInDB( +- name=constants.MARK_AMP_MASTER_INDB, +- requires=constants.AMPHORA)) +- vrrp_subflow = self.get_vrrp_subflow(role) +- failover_amphora_flow.add(vrrp_subflow) +- elif role == 
constants.ROLE_BACKUP: +- failover_amphora_flow.add(database_tasks.MarkAmphoraBackupInDB( +- name=constants.MARK_AMP_BACKUP_INDB, +- requires=constants.AMPHORA)) +- vrrp_subflow = self.get_vrrp_subflow(role) +- failover_amphora_flow.add(vrrp_subflow) +- elif role == constants.ROLE_STANDALONE: +- failover_amphora_flow.add( +- database_tasks.MarkAmphoraStandAloneInDB( +- name=constants.MARK_AMP_STANDALONE_INDB, +- requires=constants.AMPHORA)) +- +- failover_amphora_flow.add(amphora_driver_tasks.ListenersStart( +- requires=(constants.LOADBALANCER, constants.AMPHORA))) +- failover_amphora_flow.add( +- database_tasks.DisableAmphoraHealthMonitoring( +- rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, +- requires=constants.AMPHORA)) +- +- return failover_amphora_flow +- +- def get_vrrp_subflow(self, prefix): +- sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW +- vrrp_subflow = linear_flow.Flow(sf_name) +- vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( +- name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, +- requires=constants.LOADBALANCER, +- provides=constants.AMPHORAE_NETWORK_CONFIG)) +- vrrp_subflow.add(amphora_driver_tasks.AmphoraUpdateVRRPInterface( +- name=sf_name + '-' + constants.AMP_UPDATE_VRRP_INTF, +- requires=constants.LOADBALANCER, +- provides=constants.LOADBALANCER)) +- vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( +- name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, +- requires=constants.LOADBALANCER, +- provides=constants.LOADBALANCER)) +- vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPUpdate( +- name=sf_name + '-' + constants.AMP_VRRP_UPDATE, +- requires=(constants.LOADBALANCER, +- constants.AMPHORAE_NETWORK_CONFIG))) +- vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPStart( +- name=sf_name + '-' + constants.AMP_VRRP_START, +- requires=constants.LOADBALANCER)) + return vrrp_subflow + + def cert_rotate_amphora_flow(self): + """Implement rotation for amphora's cert. + +- 1. Create a new certificate +- 2. Upload the cert to amphora +- 3. update the newly created certificate info to amphora +- 4. update the cert_busy flag to be false after rotation ++ 1. Create a new certificate ++ 2. Upload the cert to amphora ++ 3. update the newly created certificate info to amphora ++ 4. update the cert_busy flag to be false after rotation + + :returns: The flow for updating an amphora + """ +@@ -600,3 +427,258 @@ class AmphoraFlows(object): + requires=(constants.AMPHORA, constants.FLAVOR))) + + return update_amphora_flow ++ ++ def get_amphora_for_lb_failover_subflow( ++ self, prefix, role=constants.ROLE_STANDALONE, ++ failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False, is_spare=False): ++ """Creates a new amphora that will be used in a failover flow. ++ ++ :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer ++ :provides: amphora_id, amphora ++ :param prefix: The flow name prefix to use on the flow and tasks. ++ :param role: The role this amphora will have in the topology. ++ :param failed_amp_vrrp_port_id: The base port ID of the failed amp. ++ :param is_vrrp_ipv6: True if the base port IP is IPv6. ++ :param is_spare: True if we are getting a spare amphroa. ++ :return: A Taskflow sub-flow that will create the amphora. 
++ """ ++ ++ sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW ++ ++ amp_for_failover_flow = linear_flow.Flow(sf_name) ++ ++ # Try to allocate or boot an amphora instance (unconfigured) ++ amp_for_failover_flow.add(self.get_amphora_for_lb_subflow( ++ prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW, ++ role=role, is_spare=is_spare)) ++ ++ # If we are getting a spare amphora, this is all we need to do. ++ if is_spare: ++ return amp_for_failover_flow ++ ++ # Create the VIP base (aka VRRP) port for the amphora. ++ amp_for_failover_flow.add(network_tasks.CreateVIPBasePort( ++ name=prefix + '-' + constants.CREATE_VIP_BASE_PORT, ++ requires=(constants.VIP, constants.VIP_SG_ID, ++ constants.AMPHORA_ID), ++ provides=constants.BASE_PORT)) ++ ++ # Attach the VIP base (aka VRRP) port to the amphora. ++ amp_for_failover_flow.add(compute_tasks.AttachPort( ++ name=prefix + '-' + constants.ATTACH_PORT, ++ requires=(constants.AMPHORA, constants.PORT), ++ rebind={constants.PORT: constants.BASE_PORT})) ++ ++ # Update the amphora database record with the VIP base port info. ++ amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails( ++ name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS, ++ requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT))) ++ ++ # Make sure the amphora in the flow storage is up to date ++ # or the vrrp_ip will be empty ++ amp_for_failover_flow.add(database_tasks.ReloadAmphora( ++ name=prefix + '-' + constants.RELOAD_AMPHORA, ++ requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) ++ ++ # Update the amphora networking for the plugged VIP port ++ amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( ++ name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID, ++ requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), ++ provides=constants.AMPHORAE_NETWORK_CONFIG)) ++ ++ # Disable the base (vrrp) port on the failed amphora ++ # This prevents a DAD failure when bringing up the new amphora. ++ # Keepalived will handle this for act/stdby. ++ if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and ++ is_vrrp_ipv6): ++ amp_for_failover_flow.add(network_tasks.AdminDownPort( ++ name=prefix + '-' + constants.ADMIN_DOWN_PORT, ++ inject={constants.PORT_ID: failed_amp_vrrp_port_id})) ++ ++ amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( ++ name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG, ++ requires=(constants.AMPHORA, constants.LOADBALANCER, ++ constants.AMPHORAE_NETWORK_CONFIG))) ++ ++ # Plug member ports ++ amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta( ++ name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA, ++ requires=(constants.LOADBALANCER, constants.AMPHORA, ++ constants.VRRP_PORT), ++ rebind={constants.VRRP_PORT: constants.BASE_PORT}, ++ provides=constants.DELTA)) ++ ++ amp_for_failover_flow.add(network_tasks.HandleNetworkDelta( ++ name=prefix + '-' + constants.HANDLE_NETWORK_DELTA, ++ requires=(constants.AMPHORA, constants.DELTA), ++ provides=constants.ADDED_PORTS)) ++ ++ amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( ++ name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG, ++ requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) ++ ++ return amp_for_failover_flow ++ ++ def get_failover_amphora_flow(self, failed_amphora, lb_amp_count): ++ """Get a Taskflow flow to failover an amphora. ++ ++ 1. Build a replacement amphora. ++ 2. Delete the old amphora. ++ 3. Update the amphorae listener configurations. ++ 4. 
Update the VRRP configurations if needed. ++ ++ :param failed_amphora: The amphora object to failover. ++ :param lb_amp_count: The number of amphora on this load balancer. ++ :returns: The flow that will provide the failover. ++ """ ++ failover_amp_flow = linear_flow.Flow( ++ constants.FAILOVER_AMPHORA_FLOW) ++ ++ # Revert amphora to status ERROR if this flow goes wrong ++ failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amphora})) ++ ++ if failed_amphora.role in (constants.ROLE_MASTER, ++ constants.ROLE_BACKUP): ++ amp_role = 'master_or_backup' ++ elif failed_amphora.role == constants.ROLE_STANDALONE: ++ amp_role = 'standalone' ++ elif failed_amphora.role is None: ++ amp_role = 'spare' ++ else: ++ amp_role = 'undefined' ++ LOG.info("Performing failover for amphora: %s", ++ {"id": failed_amphora.id, ++ "load_balancer_id": failed_amphora.load_balancer_id, ++ "lb_network_ip": failed_amphora.lb_network_ip, ++ "compute_id": failed_amphora.compute_id, ++ "role": amp_role}) ++ ++ failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amphora})) ++ ++ failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy( ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amphora})) ++ ++ failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID( ++ requires=constants.LOADBALANCER_ID, ++ provides=constants.VIP_SG_ID)) ++ ++ is_spare = True ++ is_vrrp_ipv6 = False ++ if failed_amphora.load_balancer_id: ++ is_spare = False ++ if failed_amphora.vrrp_ip: ++ is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip) ++ ++ # Get a replacement amphora and plug all of the networking. ++ # ++ # Do this early as the compute services have been observed to be ++ # unreliable. The community decided the chance that deleting first ++ # would open resources for an instance is less likely than the ++ # compute service failing to boot an instance for other reasons. ++ ++ # TODO(johnsom) Move this back out to run for spares after ++ # delete amphora API is available. ++ failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow( ++ prefix=constants.FAILOVER_LOADBALANCER_FLOW, ++ role=failed_amphora.role, ++ failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id, ++ is_vrrp_ipv6=is_vrrp_ipv6, ++ is_spare=is_spare)) ++ ++ failover_amp_flow.add( ++ self.get_delete_amphora_flow( ++ failed_amphora, ++ retry_attempts=CONF.controller_worker.amphora_delete_retries, ++ retry_interval=( ++ CONF.controller_worker.amphora_delete_retry_interval))) ++ failover_amp_flow.add( ++ database_tasks.DisableAmphoraHealthMonitoring( ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amphora})) ++ ++ if not failed_amphora.load_balancer_id: ++ # This is an unallocated amphora (spares pool), we are done. 
++ return failover_amp_flow ++ ++ failover_amp_flow.add(database_tasks.GetLoadBalancer( ++ requires=constants.LOADBALANCER_ID, ++ inject={constants.LOADBALANCER_ID: ++ failed_amphora.load_balancer_id}, ++ provides=constants.LOADBALANCER)) ++ ++ failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( ++ name=constants.GET_AMPHORAE_FROM_LB, ++ requires=constants.LOADBALANCER_ID, ++ inject={constants.LOADBALANCER_ID: ++ failed_amphora.load_balancer_id}, ++ provides=constants.AMPHORAE)) ++ ++ # Setup timeouts for our requests to the amphorae ++ timeout_dict = { ++ constants.CONN_MAX_RETRIES: ++ CONF.haproxy_amphora.active_connection_max_retries, ++ constants.CONN_RETRY_INTERVAL: ++ CONF.haproxy_amphora.active_connection_rety_interval} ++ ++ # Listeners update needs to be run on all amphora to update ++ # their peer configurations. So parallelize this with an ++ # unordered subflow. ++ update_amps_subflow = unordered_flow.Flow( ++ constants.UPDATE_AMPS_SUBFLOW) ++ ++ for amp_index in range(0, lb_amp_count): ++ update_amps_subflow.add( ++ amphora_driver_tasks.AmphoraIndexListenerUpdate( ++ name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, ++ requires=(constants.LOADBALANCER, constants.AMPHORAE), ++ inject={constants.AMPHORA_INDEX: amp_index, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ ++ failover_amp_flow.add(update_amps_subflow) ++ ++ # Configure and enable keepalived in the amphora ++ if lb_amp_count == 2: ++ failover_amp_flow.add( ++ self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, ++ timeout_dict, create_vrrp_group=False)) ++ ++ # Reload the listener. This needs to be done here because ++ # it will create the required haproxy check scripts for ++ # the VRRP deployed above. ++ # A "U" or newer amphora-agent will remove the need for this ++ # task here. ++ # TODO(johnsom) Remove this in the "W" cycle ++ reload_listener_subflow = unordered_flow.Flow( ++ constants.AMPHORA_LISTENER_RELOAD_SUBFLOW) ++ ++ for amp_index in range(0, lb_amp_count): ++ reload_listener_subflow.add( ++ amphora_driver_tasks.AmphoraIndexListenersReload( ++ name=(str(amp_index) + '-' + ++ constants.AMPHORA_RELOAD_LISTENER), ++ requires=(constants.LOADBALANCER, constants.AMPHORAE), ++ inject={constants.AMPHORA_INDEX: amp_index, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ ++ failover_amp_flow.add(reload_listener_subflow) ++ ++ # Remove any extraneous ports ++ # Note: Nova sometimes fails to delete ports attached to an instance. ++ # For example, if you create an LB with a listener, then ++ # 'openstack server delete' the amphora, you will see the vrrp ++ # port attached to that instance will remain after the instance ++ # is deleted. ++ # TODO(johnsom) Fix this as part of ++ # https://storyboard.openstack.org/#!/story/2007077 ++ ++ # Mark LB ACTIVE ++ failover_amp_flow.add( ++ database_tasks.MarkLBActiveInDB(mark_subobjects=True, ++ requires=constants.LOADBALANCER)) ++ ++ return failover_amp_flow +Index: octavia-5.0.1/octavia/controller/worker/v1/flows/load_balancer_flows.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/flows/load_balancer_flows.py ++++ octavia-5.0.1/octavia/controller/worker/v1/flows/load_balancer_flows.py +@@ -1,4 +1,5 @@ + # Copyright 2015 Hewlett-Packard Development Company, L.P. ++# Copyright 2020 Red Hat, Inc. All rights reserved. + # + # Licensed under the Apache License, Version 2.0 (the "License"); you may + # not use this file except in compliance with the License. 
You may obtain +@@ -20,6 +21,7 @@ from taskflow.patterns import unordered_ + + from octavia.common import constants + from octavia.common import exceptions ++from octavia.common import utils + from octavia.controller.worker.v1.flows import amphora_flows + from octavia.controller.worker.v1.flows import listener_flows + from octavia.controller.worker.v1.flows import member_flows +@@ -68,7 +70,7 @@ class LoadBalancerFlows(object): + requires=(constants.LOADBALANCER_ID, constants.VIP), + provides=constants.LOADBALANCER)) + lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup( +- requires=constants.LOADBALANCER)) ++ requires=constants.LOADBALANCER_ID)) + lb_create_flow.add(network_tasks.GetSubnetFromVIP( + requires=constants.LOADBALANCER, + provides=constants.SUBNET)) +@@ -93,9 +95,15 @@ class LoadBalancerFlows(object): + return lb_create_flow + + def _create_single_topology(self): +- return (self.amp_flows.get_amphora_for_lb_subflow( ++ sf_name = (constants.ROLE_STANDALONE + '-' + ++ constants.AMP_PLUG_NET_SUBFLOW) ++ amp_for_lb_net_flow = linear_flow.Flow(sf_name) ++ amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_STANDALONE, +- role=constants.ROLE_STANDALONE), ) ++ role=constants.ROLE_STANDALONE) ++ amp_for_lb_net_flow.add(amp_for_lb_flow) ++ amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name)) ++ return amp_for_lb_net_flow + + def _create_active_standby_topology( + self, lf_name=constants.CREATE_LOADBALANCER_FLOW): +@@ -124,16 +132,60 @@ class LoadBalancerFlows(object): + + f_name = constants.CREATE_LOADBALANCER_FLOW + amps_flow = unordered_flow.Flow(f_name) +- master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow( +- prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER +- ) + +- backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow( +- prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP) ++ master_sf_name = (constants.ROLE_MASTER + '-' + ++ constants.AMP_PLUG_NET_SUBFLOW) ++ master_amp_sf = linear_flow.Flow(master_sf_name) ++ master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( ++ prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)) ++ master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name)) ++ ++ backup_sf_name = (constants.ROLE_BACKUP + '-' + ++ constants.AMP_PLUG_NET_SUBFLOW) ++ backup_amp_sf = linear_flow.Flow(backup_sf_name) ++ backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( ++ prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)) ++ backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name)) ++ + amps_flow.add(master_amp_sf, backup_amp_sf) + + return flows + [amps_flow] + ++ def _get_amp_net_subflow(self, sf_name): ++ flows = [] ++ flows.append(network_tasks.PlugVIPAmpphora( ++ name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, ++ requires=(constants.LOADBALANCER, constants.AMPHORA, ++ constants.SUBNET), ++ provides=constants.AMP_DATA)) ++ ++ flows.append(network_tasks.ApplyQosAmphora( ++ name=sf_name + '-' + constants.APPLY_QOS_AMP, ++ requires=(constants.LOADBALANCER, constants.AMP_DATA, ++ constants.UPDATE_DICT))) ++ flows.append(database_tasks.UpdateAmphoraVIPData( ++ name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, ++ requires=constants.AMP_DATA)) ++ flows.append(database_tasks.ReloadAmphora( ++ name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP, ++ requires=constants.AMPHORA_ID, ++ provides=constants.AMPHORA)) ++ flows.append(database_tasks.ReloadLoadBalancer( ++ name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP, ++ requires=constants.LOADBALANCER_ID, ++ 
provides=constants.LOADBALANCER)) ++ flows.append(network_tasks.GetAmphoraNetworkConfigs( ++ name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, ++ requires=(constants.LOADBALANCER, constants.AMPHORA), ++ provides=constants.AMPHORA_NETWORK_CONFIG)) ++ flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( ++ name=sf_name + '-' + constants.AMP_POST_VIP_PLUG, ++ rebind={constants.AMPHORAE_NETWORK_CONFIG: ++ constants.AMPHORA_NETWORK_CONFIG}, ++ requires=(constants.LOADBALANCER, ++ constants.AMPHORAE_NETWORK_CONFIG))) ++ return flows ++ + def _create_listeners_flow(self): + flows = [] + flows.append( +@@ -176,13 +228,6 @@ class LoadBalancerFlows(object): + created/allocated amphorae. + :return: Post amphorae association subflow + """ +- +- # Note: If any task in this flow failed, the created amphorae will be +- # left ''incorrectly'' allocated to the loadbalancer. Likely, +- # the get_new_LB_networking_subflow is the most prune to failure +- # shall deallocate the amphora from its loadbalancer and put it in a +- # READY state. +- + sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW + post_create_LB_flow = linear_flow.Flow(sf_name) + post_create_LB_flow.add( +@@ -192,6 +237,10 @@ class LoadBalancerFlows(object): + provides=constants.LOADBALANCER)) + + if topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( ++ requires=constants.LOADBALANCER_ID, ++ provides=constants.AMPHORAE)) ++ + vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix) + post_create_LB_flow.add(vrrp_subflow) + +@@ -208,9 +257,10 @@ class LoadBalancerFlows(object): + + Because task flow doesn't support loops we store each listener + we want to delete in the store part and then rebind ++ + :param lb: load balancer + :return: (flow, store) -- flow for the deletion and store with all +- the listeners stored properly ++ the listeners stored properly + """ + listeners_delete_flow = unordered_flow.Flow('listener_delete_flow') + store = {} +@@ -234,6 +284,7 @@ class LoadBalancerFlows(object): + + Because task flow doesn't support loops we store each pool + we want to delete in the store part and then rebind ++ + :param lb: load balancer + :return: (flow, store) -- flow for the deletion and store with all + the listeners stored properly +@@ -286,41 +337,6 @@ class LoadBalancerFlows(object): + """ + return self._get_delete_load_balancer_flow(lb, True) + +- def get_new_LB_networking_subflow(self): +- """Create a sub-flow to setup networking. +- +- :returns: The flow to setup networking for a new amphora +- """ +- +- new_LB_net_subflow = linear_flow.Flow(constants. 
+- LOADBALANCER_NETWORKING_SUBFLOW) +- new_LB_net_subflow.add(network_tasks.AllocateVIP( +- requires=constants.LOADBALANCER, +- provides=constants.VIP)) +- new_LB_net_subflow.add(database_tasks.UpdateVIPAfterAllocation( +- requires=(constants.LOADBALANCER_ID, constants.VIP), +- provides=constants.LOADBALANCER)) +- new_LB_net_subflow.add(network_tasks.PlugVIP( +- requires=constants.LOADBALANCER, +- provides=constants.AMPS_DATA)) +- new_LB_net_subflow.add(network_tasks.ApplyQos( +- requires=(constants.LOADBALANCER, constants.AMPS_DATA, +- constants.UPDATE_DICT))) +- new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData( +- requires=constants.AMPS_DATA)) +- new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer( +- name=constants.RELOAD_LB_AFTER_PLUG_VIP, +- requires=constants.LOADBALANCER_ID, +- provides=constants.LOADBALANCER)) +- new_LB_net_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( +- requires=constants.LOADBALANCER, +- provides=constants.AMPHORAE_NETWORK_CONFIG)) +- new_LB_net_subflow.add(amphora_driver_tasks.AmphoraePostVIPPlug( +- requires=(constants.LOADBALANCER, +- constants.AMPHORAE_NETWORK_CONFIG))) +- +- return new_LB_net_subflow +- + def get_update_load_balancer_flow(self): + """Creates a flow to update a load balancer. + +@@ -339,3 +355,335 @@ class LoadBalancerFlows(object): + requires=constants.LOADBALANCER)) + + return update_LB_flow ++ ++ def get_failover_LB_flow(self, amps, lb): ++ """Failover a load balancer. ++ ++ 1. Validate the VIP port is correct and present. ++ 2. Build a replacement amphora. ++ 3. Delete the failed amphora. ++ 4. Configure the replacement amphora listeners. ++ 5. Configure VRRP for the listeners. ++ 6. Build the second replacement amphora. ++ 7. Delete the second failed amphora. ++ 8. Delete any extraneous amphora. ++ 9. Configure the listeners on the new amphorae. ++ 10. Configure the VRRP on the new amphorae. ++ 11. Reload the listener configurations to pick up VRRP changes. ++ 12. Mark the load balancer back to ACTIVE. ++ ++ :returns: The flow that will provide the failover. ++ """ ++ # Pick one amphora to be failed over if any exist. 
++ failed_amp = None ++ if amps: ++ failed_amp = amps.pop() ++ ++ failover_LB_flow = linear_flow.Flow( ++ constants.FAILOVER_LOADBALANCER_FLOW) ++ ++ # Revert LB to provisioning_status ERROR if this flow goes wrong ++ failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( ++ requires=constants.LOADBALANCER)) ++ ++ # Setup timeouts for our requests to the amphorae ++ timeout_dict = { ++ constants.CONN_MAX_RETRIES: ++ CONF.haproxy_amphora.active_connection_max_retries, ++ constants.CONN_RETRY_INTERVAL: ++ CONF.haproxy_amphora.active_connection_rety_interval} ++ ++ if failed_amp: ++ if failed_amp.role in (constants.ROLE_MASTER, ++ constants.ROLE_BACKUP): ++ amp_role = 'master_or_backup' ++ elif failed_amp.role == constants.ROLE_STANDALONE: ++ amp_role = 'standalone' ++ elif failed_amp.role is None: ++ amp_role = 'spare' ++ else: ++ amp_role = 'undefined' ++ LOG.info("Performing failover for amphora: %s", ++ {"id": failed_amp.id, ++ "load_balancer_id": lb.id, ++ "lb_network_ip": failed_amp.lb_network_ip, ++ "compute_id": failed_amp.compute_id, ++ "role": amp_role}) ++ ++ failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amp})) ++ ++ failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amp})) ++ ++ # Check that the VIP port exists and is ok ++ failover_LB_flow.add( ++ network_tasks.AllocateVIP(requires=constants.LOADBALANCER, ++ provides=constants.VIP)) ++ ++ # Update the database with the VIP information ++ failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation( ++ requires=(constants.LOADBALANCER_ID, constants.VIP), ++ provides=constants.LOADBALANCER)) ++ ++ # Make sure the SG has the correct rules and re-apply to the ++ # VIP port. It is not used on the VIP port, but will help lock ++ # the SG as in use. ++ failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup( ++ requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) ++ ++ new_amp_role = constants.ROLE_STANDALONE ++ if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ new_amp_role = constants.ROLE_BACKUP ++ ++ # Get a replacement amphora and plug all of the networking. ++ # ++ # Do this early as the compute services have been observed to be ++ # unreliable. The community decided the chance that deleting first ++ # would open resources for an instance is less likely than the compute ++ # service failing to boot an instance for other reasons. 
++ if failed_amp: ++ failed_vrrp_is_ipv6 = False ++ if failed_amp.vrrp_ip: ++ failed_vrrp_is_ipv6 = utils.is_ipv6(failed_amp.vrrp_ip) ++ failover_LB_flow.add( ++ self.amp_flows.get_amphora_for_lb_failover_subflow( ++ prefix=constants.FAILOVER_LOADBALANCER_FLOW, ++ role=new_amp_role, ++ failed_amp_vrrp_port_id=failed_amp.vrrp_port_id, ++ is_vrrp_ipv6=failed_vrrp_is_ipv6)) ++ else: ++ failover_LB_flow.add( ++ self.amp_flows.get_amphora_for_lb_failover_subflow( ++ prefix=constants.FAILOVER_LOADBALANCER_FLOW, ++ role=new_amp_role)) ++ ++ if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB( ++ name=constants.MARK_AMP_BACKUP_INDB, ++ requires=constants.AMPHORA)) ++ ++ # Delete the failed amp ++ if failed_amp: ++ failover_LB_flow.add( ++ self.amp_flows.get_delete_amphora_flow(failed_amp)) ++ ++ # Update the data stored in the flow from the database ++ failover_LB_flow.add(database_tasks.ReloadLoadBalancer( ++ requires=constants.LOADBALANCER_ID, ++ provides=constants.LOADBALANCER)) ++ ++ # Configure the listener(s) ++ # We will run update on this amphora again later if this is ++ # an active/standby load balancer because we want this amp ++ # functional as soon as possible. It must run again to update ++ # the configurations for the new peers. ++ failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate( ++ name=constants.AMP_LISTENER_UPDATE, ++ requires=(constants.LOADBALANCER, constants.AMPHORA), ++ inject={constants.TIMEOUT_DICT: timeout_dict})) ++ ++ # Bring up the new "backup" amphora VIP now to reduce the outage ++ # on the final failover. This dropped the outage from 8-9 seconds ++ # to less than one in my lab. ++ # This does mean some steps have to be repeated later to reconfigure ++ # for the second amphora as a peer. ++ if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ ++ failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB( ++ name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, ++ requires=constants.LOADBALANCER_ID)) ++ ++ failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( ++ name=(new_amp_role + '-' + ++ constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID), ++ requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), ++ provides=constants.FIRST_AMP_NETWORK_CONFIGS)) ++ ++ failover_LB_flow.add( ++ amphora_driver_tasks.AmphoraUpdateVRRPInterface( ++ name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF, ++ requires=constants.AMPHORA, ++ inject={constants.TIMEOUT_DICT: timeout_dict}, ++ provides=constants.FIRST_AMP_VRRP_INTERFACE)) ++ ++ failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate( ++ name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE, ++ requires=(constants.LOADBALANCER_ID, constants.AMPHORA), ++ rebind={constants.AMPHORAE_NETWORK_CONFIG: ++ constants.FIRST_AMP_NETWORK_CONFIGS, ++ constants.AMP_VRRP_INT: ++ constants.FIRST_AMP_VRRP_INTERFACE}, ++ inject={constants.TIMEOUT_DICT: timeout_dict})) ++ ++ failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart( ++ name=new_amp_role + '-' + constants.AMP_VRRP_START, ++ requires=constants.AMPHORA, ++ inject={constants.TIMEOUT_DICT: timeout_dict})) ++ ++ # Start the listener. This needs to be done here because ++ # it will create the required haproxy check scripts for ++ # the VRRP deployed above. ++ # A "V" or newer amphora-agent will remove the need for this ++ # task here. 
++ # TODO(johnsom) Remove this in the "X" cycle ++ failover_LB_flow.add(amphora_driver_tasks.ListenersStart( ++ name=new_amp_role + '-' + constants.AMP_LISTENER_START, ++ requires=(constants.LOADBALANCER, constants.AMPHORA))) ++ ++ # #### Work on standby amphora if needed ##### ++ ++ new_amp_role = constants.ROLE_MASTER ++ failed_amp = None ++ if amps: ++ failed_amp = amps.pop() ++ ++ if failed_amp: ++ if failed_amp.role in (constants.ROLE_MASTER, ++ constants.ROLE_BACKUP): ++ amp_role = 'master_or_backup' ++ elif failed_amp.role == constants.ROLE_STANDALONE: ++ amp_role = 'standalone' ++ elif failed_amp.role is None: ++ amp_role = 'spare' ++ else: ++ amp_role = 'undefined' ++ LOG.info("Performing failover for amphora: %s", ++ {"id": failed_amp.id, ++ "load_balancer_id": lb.id, ++ "lb_network_ip": failed_amp.lb_network_ip, ++ "compute_id": failed_amp.compute_id, ++ "role": amp_role}) ++ ++ failover_LB_flow.add( ++ database_tasks.MarkAmphoraPendingDeleteInDB( ++ name=(new_amp_role + '-' + ++ constants.MARK_AMPHORA_PENDING_DELETE), ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amp})) ++ ++ failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( ++ name=(new_amp_role + '-' + ++ constants.MARK_AMPHORA_HEALTH_BUSY), ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amp})) ++ ++ # Get a replacement amphora and plug all of the networking. ++ # ++ # Do this early as the compute services have been observed to be ++ # unreliable. The community decided the chance that deleting first ++ # would open resources for an instance is less likely than the ++ # compute service failing to boot an instance for other reasons. ++ failover_LB_flow.add( ++ self.amp_flows.get_amphora_for_lb_failover_subflow( ++ prefix=(new_amp_role + '-' + ++ constants.FAILOVER_LOADBALANCER_FLOW), ++ role=new_amp_role)) ++ ++ failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB( ++ name=constants.MARK_AMP_MASTER_INDB, ++ requires=constants.AMPHORA)) ++ ++ # Delete the failed amp ++ if failed_amp: ++ failover_LB_flow.add( ++ self.amp_flows.get_delete_amphora_flow( ++ failed_amp)) ++ failover_LB_flow.add( ++ database_tasks.DisableAmphoraHealthMonitoring( ++ name=(new_amp_role + '-' + ++ constants.DISABLE_AMP_HEALTH_MONITORING), ++ requires=constants.AMPHORA, ++ inject={constants.AMPHORA: failed_amp})) ++ ++ # Remove any extraneous amphora ++ # Note: This runs in all topology situations. ++ # It should run before the act/stdby final listener update so ++ # that we don't bother attempting to update dead amphorae. ++ delete_extra_amps_flow = unordered_flow.Flow( ++ constants.DELETE_EXTRA_AMPHORAE_FLOW) ++ for amp in amps: ++ LOG.debug('Found extraneous amphora %s on load balancer %s. ' ++ 'Deleting.', amp.id, lb.id) ++ delete_extra_amps_flow.add( ++ self.amp_flows.get_delete_amphora_flow(amp)) ++ ++ failover_LB_flow.add(delete_extra_amps_flow) ++ ++ if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: ++ # Update the data stored in the flow from the database ++ failover_LB_flow.add(database_tasks.ReloadLoadBalancer( ++ name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, ++ requires=constants.LOADBALANCER_ID, ++ provides=constants.LOADBALANCER)) ++ ++ failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( ++ name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB, ++ requires=constants.LOADBALANCER_ID, ++ provides=constants.AMPHORAE)) ++ ++ # Listeners update needs to be run on all amphora to update ++ # their peer configurations. 
So parallelize this with an ++ # unordered subflow. ++ update_amps_subflow = unordered_flow.Flow( ++ constants.UPDATE_AMPS_SUBFLOW) ++ ++ # Setup parallel flows for each amp. We don't know the new amp ++ # details at flow creation time, so setup a subflow for each ++ # amp on the LB, they let the task index into a list of amps ++ # to find the amphora it should work on. ++ update_amps_subflow.add( ++ amphora_driver_tasks.AmphoraIndexListenerUpdate( ++ name=(constants.AMPHORA + '-0-' + ++ constants.AMP_LISTENER_UPDATE), ++ requires=(constants.LOADBALANCER, constants.AMPHORAE), ++ inject={constants.AMPHORA_INDEX: 0, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ update_amps_subflow.add( ++ amphora_driver_tasks.AmphoraIndexListenerUpdate( ++ name=(constants.AMPHORA + '-1-' + ++ constants.AMP_LISTENER_UPDATE), ++ requires=(constants.LOADBALANCER, constants.AMPHORAE), ++ inject={constants.AMPHORA_INDEX: 1, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ ++ failover_LB_flow.add(update_amps_subflow) ++ ++ # Configure and enable keepalived in the amphora ++ failover_LB_flow.add(self.amp_flows.get_vrrp_subflow( ++ new_amp_role + '-' + constants.GET_VRRP_SUBFLOW, ++ timeout_dict, create_vrrp_group=False)) ++ ++ # #### End of standby #### ++ ++ # Reload the listener. This needs to be done here because ++ # it will create the required haproxy check scripts for ++ # the VRRP deployed above. ++ # A "V" or newer amphora-agent will remove the need for this ++ # task here. ++ # TODO(johnsom) Remove this in the "X" cycle ++ failover_LB_flow.add( ++ amphora_driver_tasks.AmphoraIndexListenersReload( ++ name=(new_amp_role + '-' + ++ constants.AMPHORA_RELOAD_LISTENER), ++ requires=(constants.LOADBALANCER, constants.AMPHORAE), ++ inject={constants.AMPHORA_INDEX: 1, ++ constants.TIMEOUT_DICT: timeout_dict})) ++ ++ # Remove any extraneous ports ++ # Note: Nova sometimes fails to delete ports attached to an instance. ++ # For example, if you create an LB with a listener, then ++ # 'openstack server delete' the amphora, you will see the vrrp ++ # port attached to that instance will remain after the instance ++ # is deleted. ++ # TODO(johnsom) Fix this as part of ++ # https://storyboard.openstack.org/#!/story/2007077 ++ ++ # Mark LB ACTIVE ++ failover_LB_flow.add( ++ database_tasks.MarkLBActiveInDB(mark_subobjects=True, ++ requires=constants.LOADBALANCER)) ++ ++ return failover_LB_flow +Index: octavia-5.0.1/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py ++++ octavia-5.0.1/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +@@ -16,7 +16,6 @@ + from cryptography import fernet + from oslo_config import cfg + from oslo_log import log as logging +-import six + from stevedore import driver as stevedore_driver + from taskflow import task + from taskflow.types import failure +@@ -52,7 +51,26 @@ class BaseAmphoraTask(task.Task): + class AmpListenersUpdate(BaseAmphoraTask): + """Task to update the listeners on one amphora.""" + +- def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()): ++ def execute(self, loadbalancer, amphora, timeout_dict=None): ++ # Note, we don't want this to cause a revert as it may be used ++ # in a failover flow with both amps failing. Skip it and let ++ # health manager fix it. 
++ try: ++ self.amphora_driver.update_amphora_listeners( ++ loadbalancer, amphora, timeout_dict) ++ except Exception as e: ++ LOG.error('Failed to update listeners on amphora %s. Skipping ' ++ 'this amphora as it is failing to update due to: %s', ++ amphora.id, str(e)) ++ self.amphora_repo.update(db_apis.get_session(), amphora.id, ++ status=constants.ERROR) ++ ++ ++class AmphoraIndexListenerUpdate(BaseAmphoraTask): ++ """Task to update the listeners on one amphora.""" ++ ++ def execute(self, loadbalancer, amphora_index, amphorae, ++ timeout_dict=None): + # Note, we don't want this to cause a revert as it may be used + # in a failover flow with both amps failing. Skip it and let + # health manager fix it. +@@ -101,6 +119,24 @@ class ListenersStart(BaseAmphoraTask): + self.task_utils.mark_listener_prov_status_error(listener.id) + + ++class AmphoraIndexListenersReload(BaseAmphoraTask): ++ """Task to reload all listeners on an amphora.""" ++ ++ def execute(self, loadbalancer, amphorae, amphora_index, ++ timeout_dict=None): ++ """Execute listener reload routines for listeners on an amphora.""" ++ if loadbalancer.listeners: ++ self.amphora_driver.reload( ++ loadbalancer, amphorae[amphora_index], timeout_dict) ++ ++ def revert(self, loadbalancer, *args, **kwargs): ++ """Handle failed listeners reloads.""" ++ ++ LOG.warning("Reverting listener reload.") ++ for listener in loadbalancer.listeners: ++ self.task_utils.mark_listener_prov_status_error(listener.id) ++ ++ + class ListenerDelete(BaseAmphoraTask): + """Task to delete the listener on the vip.""" + +@@ -175,7 +211,11 @@ class AmphoraePostNetworkPlug(BaseAmphor + def execute(self, loadbalancer, added_ports): + """Execute post_network_plug routine.""" + amp_post_plug = AmphoraPostNetworkPlug() +- for amphora in loadbalancer.amphorae: ++ # We need to make sure we have the fresh list of amphora ++ amphorae = self.amphora_repo.get_all( ++ db_apis.get_session(), load_balancer_id=loadbalancer.id, ++ status=constants.AMPHORA_ALLOCATED)[0] ++ for amphora in amphorae: + if amphora.id in added_ports: + amp_post_plug.execute(amphora, added_ports[amphora.id]) + +@@ -184,10 +224,11 @@ class AmphoraePostNetworkPlug(BaseAmphor + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting post network plug.") +- for amphora in six.moves.filter( +- lambda amp: amp.status == constants.AMPHORA_ALLOCATED, +- loadbalancer.amphorae): + ++ amphorae = self.amphora_repo.get_all( ++ db_apis.get_session(), load_balancer_id=loadbalancer.id, ++ status=constants.AMPHORA_ALLOCATED)[0] ++ for amphora in amphorae: + self.task_utils.mark_amphora_status_error(amphora.id) + + +@@ -242,64 +283,97 @@ class AmphoraCertUpload(BaseAmphoraTask) + class AmphoraUpdateVRRPInterface(BaseAmphoraTask): + """Task to get and update the VRRP interface device name from amphora.""" + +- def execute(self, loadbalancer): +- """Execute post_vip_routine.""" +- amps = [] +- timeout_dict = { +- constants.CONN_MAX_RETRIES: +- CONF.haproxy_amphora.active_connection_max_retries, +- constants.CONN_RETRY_INTERVAL: +- CONF.haproxy_amphora.active_connection_rety_interval} +- for amp in six.moves.filter( +- lambda amp: amp.status == constants.AMPHORA_ALLOCATED, +- loadbalancer.amphorae): +- +- try: +- interface = self.amphora_driver.get_vrrp_interface( +- amp, timeout_dict=timeout_dict) +- except Exception as e: +- # This can occur when an active/standby LB has no listener +- LOG.error('Failed to get amphora VRRP interface on amphora ' +- '%s. 
Skipping this amphora as it is failing due to: ' +- '%s', amp.id, str(e)) +- self.amphora_repo.update(db_apis.get_session(), amp.id, +- status=constants.ERROR) +- continue +- +- self.amphora_repo.update(db_apis.get_session(), amp.id, +- vrrp_interface=interface) +- amps.append(self.amphora_repo.get(db_apis.get_session(), +- id=amp.id)) +- loadbalancer.amphorae = amps +- return loadbalancer ++ def execute(self, amphora, timeout_dict=None): ++ try: ++ interface = self.amphora_driver.get_interface_from_ip( ++ amphora, amphora.vrrp_ip, timeout_dict=timeout_dict) ++ except Exception as e: ++ # This can occur when an active/standby LB has no listener ++ LOG.error('Failed to get amphora VRRP interface on amphora ' ++ '%s. Skipping this amphora as it is failing due to: ' ++ '%s', amphora.id, str(e)) ++ self.amphora_repo.update(db_apis.get_session(), amphora.id, ++ status=constants.ERROR) ++ return None + +- def revert(self, result, loadbalancer, *args, **kwargs): +- """Handle a failed amphora vip plug notification.""" +- if isinstance(result, failure.Failure): +- return +- LOG.warning("Reverting Get Amphora VRRP Interface.") +- for amp in six.moves.filter( +- lambda amp: amp.status == constants.AMPHORA_ALLOCATED, +- loadbalancer.amphorae): +- +- try: +- self.amphora_repo.update(db_apis.get_session(), amp.id, +- vrrp_interface=None) +- except Exception as e: +- LOG.error("Failed to update amphora %(amp)s " +- "VRRP interface to None due to: %(except)s", +- {'amp': amp.id, 'except': e}) ++ self.amphora_repo.update(db_apis.get_session(), amphora.id, ++ vrrp_interface=interface) ++ return interface ++ ++ ++class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): ++ """Task to get and update the VRRP interface device name from amphora.""" ++ ++ def execute(self, amphorae, amphora_index, timeout_dict=None): ++ amphora_id = amphorae[amphora_index].id ++ try: ++ interface = self.amphora_driver.get_interface_from_ip( ++ amphorae[amphora_index], amphorae[amphora_index].vrrp_ip, ++ timeout_dict=timeout_dict) ++ except Exception as e: ++ # This can occur when an active/standby LB has no listener ++ LOG.error('Failed to get amphora VRRP interface on amphora ' ++ '%s. Skipping this amphora as it is failing due to: ' ++ '%s', amphora_id, str(e)) ++ self.amphora_repo.update(db_apis.get_session(), amphora_id, ++ status=constants.ERROR) ++ return None ++ ++ self.amphora_repo.update(db_apis.get_session(), amphora_id, ++ vrrp_interface=interface) ++ return interface + + + class AmphoraVRRPUpdate(BaseAmphoraTask): +- """Task to update the VRRP configuration of the loadbalancer amphorae.""" ++ """Task to update the VRRP configuration of an amphora.""" + +- def execute(self, loadbalancer, amphorae_network_config): ++ def execute(self, loadbalancer_id, amphorae_network_config, amphora, ++ amp_vrrp_int, timeout_dict=None): + """Execute update_vrrp_conf.""" +- self.amphora_driver.update_vrrp_conf(loadbalancer, +- amphorae_network_config) +- LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae", +- loadbalancer.id) ++ loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) ++ # Note, we don't want this to cause a revert as it may be used ++ # in a failover flow with both amps failing. Skip it and let ++ # health manager fix it. ++ amphora.vrrp_interface = amp_vrrp_int ++ try: ++ self.amphora_driver.update_vrrp_conf( ++ loadbalancer, amphorae_network_config, amphora, timeout_dict) ++ except Exception as e: ++ LOG.error('Failed to update VRRP configuration amphora %s. 
' ++ 'Skipping this amphora as it is failing to update due ' ++ 'to: %s', amphora.id, str(e)) ++ self.amphora_repo.update(db_apis.get_session(), amphora.id, ++ status=constants.ERROR) ++ ++ LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora.id) ++ ++ ++class AmphoraIndexVRRPUpdate(BaseAmphoraTask): ++ """Task to update the VRRP configuration of an amphora.""" ++ ++ def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, ++ amphorae, amp_vrrp_int, timeout_dict=None): ++ """Execute update_vrrp_conf.""" ++ loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) ++ # Note, we don't want this to cause a revert as it may be used ++ # in a failover flow with both amps failing. Skip it and let ++ # health manager fix it. ++ amphora_id = amphorae[amphora_index].id ++ amphorae[amphora_index].vrrp_interface = amp_vrrp_int ++ try: ++ self.amphora_driver.update_vrrp_conf( ++ loadbalancer, amphorae_network_config, amphorae[amphora_index], ++ timeout_dict) ++ except Exception as e: ++ LOG.error('Failed to update VRRP configuration amphora %s. ' ++ 'Skipping this amphora as it is failing to update due ' ++ 'to: %s', amphora_id, str(e)) ++ self.amphora_repo.update(db_apis.get_session(), amphora_id, ++ status=constants.ERROR) ++ ++ LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) + + + class AmphoraVRRPStop(BaseAmphoraTask): +@@ -312,12 +386,26 @@ class AmphoraVRRPStop(BaseAmphoraTask): + + + class AmphoraVRRPStart(BaseAmphoraTask): +- """Task to start keepalived of all amphorae of a LB.""" ++ """Task to start keepalived on an amphora. + +- def execute(self, loadbalancer): +- self.amphora_driver.start_vrrp_service(loadbalancer) +- LOG.debug("Started VRRP of loadbalancer %s amphorae", +- loadbalancer.id) ++ This will reload keepalived if it is already running. ++ """ ++ ++ def execute(self, amphora, timeout_dict=None): ++ self.amphora_driver.start_vrrp_service(amphora, timeout_dict) ++ LOG.debug("Started VRRP on amphora %s.", amphora.id) ++ ++ ++class AmphoraIndexVRRPStart(BaseAmphoraTask): ++ """Task to start keepalived on an amphora. ++ ++ This will reload keepalived if it is already running. 
++ """ ++ ++ def execute(self, amphora_index, amphorae, timeout_dict=None): ++ self.amphora_driver.start_vrrp_service(amphorae[amphora_index], ++ timeout_dict) ++ LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index].id) + + + class AmphoraComputeConnectivityWait(BaseAmphoraTask): +Index: octavia-5.0.1/octavia/controller/worker/v1/tasks/compute_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/tasks/compute_tasks.py ++++ octavia-5.0.1/octavia/controller/worker/v1/tasks/compute_tasks.py +@@ -21,6 +21,7 @@ from oslo_log import log as logging + from stevedore import driver as stevedore_driver + from taskflow import task + from taskflow.types import failure ++import tenacity + + from octavia.amphorae.backends.agent import agent_jinja_cfg + from octavia.common import constants +@@ -50,9 +51,9 @@ class BaseComputeTask(task.Task): + class ComputeCreate(BaseComputeTask): + """Create the compute instance for a new amphora.""" + +- def execute(self, amphora_id, config_drive_files=None, ++ def execute(self, amphora_id, server_group_id, config_drive_files=None, + build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, +- server_group_id=None, ports=None, flavor=None): ++ ports=None, flavor=None): + """Create an amphora + + :returns: an amphora +@@ -142,9 +143,9 @@ class ComputeCreate(BaseComputeTask): + + + class CertComputeCreate(ComputeCreate): +- def execute(self, amphora_id, server_pem, ++ def execute(self, amphora_id, server_pem, server_group_id, + build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, +- server_group_id=None, ports=None, flavor=None): ++ ports=None, flavor=None): + """Create an amphora + + :returns: an amphora +@@ -183,15 +184,50 @@ class DeleteAmphoraeOnLoadBalancer(BaseC + + + class ComputeDelete(BaseComputeTask): +- def execute(self, amphora): +- LOG.debug("Compute Delete execute for amphora with id %s", amphora.id) ++ ++ @tenacity.retry(retry=tenacity.retry_if_exception_type(), ++ stop=tenacity.stop_after_attempt(CONF.compute.max_retries), ++ wait=tenacity.wait_exponential( ++ multiplier=CONF.compute.retry_backoff, ++ min=CONF.compute.retry_interval, ++ max=CONF.compute.retry_max), reraise=True) ++ def execute(self, amphora, passive_failure=False): ++ if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: ++ LOG.debug('Compute delete execute for amphora with ID %s and ' ++ 'compute ID: %s', amphora.id, amphora.compute_id) ++ else: ++ LOG.warning('Retrying compute delete of %s attempt %s of %s.', ++ amphora.compute_id, ++ self.execute.retry.statistics[ ++ constants.ATTEMPT_NUMBER], ++ self.execute.retry.stop.max_attempt_number) ++ # Let the Taskflow engine know we are working and alive ++ # Don't use get with a default for 'attempt_number', we need to fail ++ # if that number is missing. ++ self.update_progress( ++ self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / ++ self.execute.retry.stop.max_attempt_number) + + try: + self.compute.delete(amphora.compute_id) + except Exception: +- LOG.exception("Compute delete for amphora id: %s failed", +- amphora.id) +- raise ++ if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != ++ self.execute.retry.stop.max_attempt_number): ++ LOG.warning('Compute delete for amphora id: %s failed. ' ++ 'Retrying.', amphora.id) ++ raise ++ if passive_failure: ++ LOG.exception('Compute delete for compute ID: %s on amphora ' ++ 'ID: %s failed. 
This resource will be abandoned ' ++ 'and should manually be cleaned up once the ' ++ 'compute service is functional.', ++ amphora.compute_id, amphora.id) ++ else: ++ LOG.exception('Compute delete for compute ID: %s on amphora ' ++ 'ID: %s failed. The compute service has failed. ' ++ 'Aborting and reverting.', amphora.compute_id, ++ amphora.id) ++ raise + + + class ComputeActiveWait(BaseComputeTask): +@@ -256,3 +292,31 @@ class NovaServerGroupDelete(BaseComputeT + self.compute.delete_server_group(server_group_id) + else: + return ++ ++ ++class AttachPort(BaseComputeTask): ++ def execute(self, amphora, port): ++ """Attach a port to an amphora instance. ++ ++ :param amphora: The amphora to attach the port to. ++ :param port: The port to attach to the amphora. ++ :returns: None ++ """ ++ LOG.debug('Attaching port: %s to compute: %s', ++ port.id, amphora.compute_id) ++ self.compute.attach_network_or_port(amphora.compute_id, ++ port_id=port.id) ++ ++ def revert(self, amphora, port, *args, **kwargs): ++ """Revert our port attach. ++ ++ :param amphora: The amphora to detach the port from. ++ :param port: The port to attach to the amphora. ++ """ ++ LOG.warning('Reverting port: %s attach to compute: %s', ++ port.id, amphora.compute_id) ++ try: ++ self.compute.detach_port(amphora.compute_id, port.id) ++ except Exception as e: ++ LOG.error('Failed to detach port %s from compute %s for revert ' ++ 'due to %s.', port.id, amphora.compute_id, str(e)) +Index: octavia-5.0.1/octavia/controller/worker/v1/tasks/database_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/tasks/database_tasks.py ++++ octavia-5.0.1/octavia/controller/worker/v1/tasks/database_tasks.py +@@ -449,20 +449,21 @@ class UpdateAmphoraVIPData(BaseDatabaseT + class UpdateAmpFailoverDetails(BaseDatabaseTask): + """Update amphora failover details in the database.""" + +- def execute(self, amphora, amp_data): ++ def execute(self, amphora, vip, base_port): + """Update amphora failover details in the database. + + :param amphora: The amphora to update +- :param amp_data: data_models.Amphora object with update data ++ :param vip: The VIP object associated with this amphora. ++ :param base_port: The base port object associated with the amphora. + :returns: None + """ + # role and vrrp_priority will be updated later. + self.repos.amphora.update(db_apis.get_session(), amphora.id, +- vrrp_ip=amp_data.vrrp_ip, +- ha_ip=amp_data.ha_ip, +- vrrp_port_id=amp_data.vrrp_port_id, +- ha_port_id=amp_data.ha_port_id, +- vrrp_id=amp_data.vrrp_id) ++ vrrp_ip=base_port.fixed_ips[0].ip_address, ++ ha_ip=vip.ip_address, ++ vrrp_port_id=base_port.id, ++ ha_port_id=vip.port_id, ++ vrrp_id=1) + + + class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): +@@ -1558,15 +1559,17 @@ class GetAmphoraDetails(BaseDatabaseTask + + + class GetAmphoraeFromLoadbalancer(BaseDatabaseTask): +- """Task to pull the listeners from a loadbalancer.""" ++ """Task to pull the amphorae from a loadbalancer.""" + +- def execute(self, loadbalancer): ++ def execute(self, loadbalancer_id): + """Pull the amphorae from a loadbalancer. 
+ +- :param loadbalancer: Load balancer which listeners are required ++ :param loadbalancer_id: Load balancer ID to get amphorae from + :returns: A list of Listener objects + """ + amphorae = [] ++ loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) + for amp in loadbalancer.amphorae: + a = self.amphora_repo.get(db_apis.get_session(), id=amp.id, + show_deleted=False) +@@ -1593,6 +1596,22 @@ class GetListenersFromLoadbalancer(BaseD + return listeners + + ++class GetLoadBalancer(BaseDatabaseTask): ++ """Get an load balancer object from the database.""" ++ ++ def execute(self, loadbalancer_id, *args, **kwargs): ++ """Get an load balancer object from the database. ++ ++ :param loadbalancer_id: The load balancer ID to lookup ++ :returns: The load balancer object ++ """ ++ ++ LOG.debug("Get load balancer from DB for load balancer id: %s", ++ loadbalancer_id) ++ return self.loadbalancer_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) ++ ++ + class GetVipFromLoadbalancer(BaseDatabaseTask): + """Task to pull the vip from a loadbalancer.""" + +@@ -1608,25 +1627,23 @@ class GetVipFromLoadbalancer(BaseDatabas + class CreateVRRPGroupForLB(BaseDatabaseTask): + """Create a VRRP group for a load balancer.""" + +- def execute(self, loadbalancer): ++ def execute(self, loadbalancer_id): + """Create a VRRP group for a load balancer. + +- :param loadbalancer: Load balancer for which a VRRP group ++ :param loadbalancer_id: Load balancer ID for which a VRRP group + should be created +- :returns: Updated load balancer + """ + try: +- loadbalancer.vrrp_group = self.repos.vrrpgroup.create( ++ self.repos.vrrpgroup.create( + db_apis.get_session(), +- load_balancer_id=loadbalancer.id, +- vrrp_group_name=str(loadbalancer.id).replace('-', ''), ++ load_balancer_id=loadbalancer_id, ++ vrrp_group_name=str(loadbalancer_id).replace('-', ''), + vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, + vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7], + advert_int=CONF.keepalived_vrrp.vrrp_advert_int) + except odb_exceptions.DBDuplicateEntry: + LOG.debug('VRRP_GROUP entry already exists for load balancer, ' + 'skipping create.') +- return loadbalancer + + + class DisableAmphoraHealthMonitoring(BaseDatabaseTask): +Index: octavia-5.0.1/octavia/controller/worker/v1/tasks/network_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v1/tasks/network_tasks.py ++++ octavia-5.0.1/octavia/controller/worker/v1/tasks/network_tasks.py +@@ -12,16 +12,23 @@ + # License for the specific language governing permissions and limitations + # under the License. 
+ # ++from __future__ import division ++ ++import time + + from oslo_config import cfg + from oslo_log import log as logging ++from oslo_utils import excutils + import six + from taskflow import task + from taskflow.types import failure ++import tenacity + + from octavia.common import constants + from octavia.common import utils + from octavia.controller.worker import task_utils ++from octavia.db import api as db_apis ++from octavia.db import repositories + from octavia.network import base + from octavia.network import data_models as n_data_models + +@@ -36,6 +43,7 @@ class BaseNetworkTask(task.Task): + super(BaseNetworkTask, self).__init__(**kwargs) + self._network_driver = None + self.task_utils = task_utils.TaskUtils() ++ self.lb_repo = repositories.LoadBalancerRepository() + + @property + def network_driver(self): +@@ -48,14 +56,13 @@ class CalculateAmphoraDelta(BaseNetworkT + + default_provides = constants.DELTA + +- def execute(self, loadbalancer, amphora): ++ def execute(self, loadbalancer, amphora, vrrp_port=None): + LOG.debug("Calculating network delta for amphora id: %s", amphora.id) + +- # Figure out what networks we want +- # seed with lb network(s) +- vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id) +- desired_network_ids = {vrrp_port.network_id}.union( +- CONF.controller_worker.amp_boot_network_list) ++ if vrrp_port is None: ++ vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id) ++ management_nets = CONF.controller_worker.amp_boot_network_list ++ desired_network_ids = {vrrp_port.network_id}.union(management_nets) + + for pool in loadbalancer.pools: + member_networks = [ +@@ -354,12 +361,19 @@ class PlugVIP(BaseNetworkTask): + class UpdateVIPSecurityGroup(BaseNetworkTask): + """Task to setup SG for LB.""" + +- def execute(self, loadbalancer): +- """Task to setup SG for LB.""" ++ def execute(self, loadbalancer_id): ++ """Task to setup SG for LB. ++ ++ Task is idempotent and safe to retry. 
++ """ ++ ++ LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer_id) + +- LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer.id) ++ loadbalancer = self.lb_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) + +- self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip) ++ return self.network_driver.update_vip_sg(loadbalancer, ++ loadbalancer.vip) + + + class GetSubnetFromVIP(BaseNetworkTask): +@@ -493,11 +507,26 @@ class GetAmphoraNetworkConfigs(BaseNetwo + amphora=amphora) + + ++class GetAmphoraNetworkConfigsByID(BaseNetworkTask): ++ """Task to retrieve amphora network details.""" ++ ++ def execute(self, loadbalancer_id, amphora_id=None): ++ LOG.debug("Retrieving vip network details.") ++ amp_repo = repositories.AmphoraRepository() ++ loadbalancer = self.lb_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) ++ amphora = amp_repo.get(db_apis.get_session(), id=amphora_id) ++ return self.network_driver.get_network_configs(loadbalancer, ++ amphora=amphora) ++ ++ + class GetAmphoraeNetworkConfigs(BaseNetworkTask): + """Task to retrieve amphorae network details.""" + +- def execute(self, loadbalancer): ++ def execute(self, loadbalancer_id): + LOG.debug("Retrieving vip network details.") ++ loadbalancer = self.lb_repo.get(db_apis.get_session(), ++ id=loadbalancer_id) + return self.network_driver.get_network_configs(loadbalancer) + + +@@ -546,36 +575,6 @@ class PlugPorts(BaseNetworkTask): + self.network_driver.plug_port(amphora, port) + + +-class PlugVIPPort(BaseNetworkTask): +- """Task to plug a VIP into a compute instance.""" +- +- def execute(self, amphora, amphorae_network_config): +- vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port +- LOG.debug('Plugging VIP VRRP port ID: %(port_id)s into compute ' +- 'instance: %(compute_id)s.', +- {'port_id': vrrp_port.id, 'compute_id': amphora.compute_id}) +- self.network_driver.plug_port(amphora, vrrp_port) +- +- def revert(self, result, amphora, amphorae_network_config, +- *args, **kwargs): +- vrrp_port = None +- try: +- vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port +- self.network_driver.unplug_port(amphora, vrrp_port) +- except Exception: +- LOG.warning('Failed to unplug vrrp port: %(port)s from amphora: ' +- '%(amp)s', {'port': vrrp_port.id, 'amp': amphora.id}) +- +- +-class WaitForPortDetach(BaseNetworkTask): +- """Task to wait for the neutron ports to detach from an amphora.""" +- +- def execute(self, amphora): +- LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.', +- {'amp_id': amphora.id}) +- self.network_driver.wait_for_port_detach(amphora) +- +- + class ApplyQos(BaseNetworkTask): + """Apply Quality of Services to the VIP""" + +@@ -657,3 +656,146 @@ class ApplyQosAmphora(BaseNetworkTask): + except Exception as e: + LOG.error('Failed to remove QoS policy: %s from port: %s due ' + 'to error: %s', orig_qos_id, amp_data.vrrp_port_id, e) ++ ++ ++class DeletePort(BaseNetworkTask): ++ """Task to delete a network port.""" ++ ++ @tenacity.retry(retry=tenacity.retry_if_exception_type(), ++ stop=tenacity.stop_after_attempt( ++ CONF.networking.max_retries), ++ wait=tenacity.wait_exponential( ++ multiplier=CONF.networking.retry_backoff, ++ min=CONF.networking.retry_interval, ++ max=CONF.networking.retry_max), reraise=True) ++ def execute(self, port_id, passive_failure=False): ++ """Delete the network port.""" ++ if port_id is None: ++ return ++ if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: ++ LOG.debug("Deleting network port %s", port_id) ++ else: ++ 
LOG.warning('Retrying network port %s delete attempt %s of %s.', ++ port_id, ++ self.execute.retry.statistics[ ++ constants.ATTEMPT_NUMBER], ++ self.execute.retry.stop.max_attempt_number) ++ # Let the Taskflow engine know we are working and alive ++ # Don't use get with a default for 'attempt_number', we need to fail ++ # if that number is missing. ++ self.update_progress( ++ self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / ++ self.execute.retry.stop.max_attempt_number) ++ try: ++ self.network_driver.delete_port(port_id) ++ except Exception: ++ if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != ++ self.execute.retry.stop.max_attempt_number): ++ LOG.warning('Network port delete for port id: %s failed. ' ++ 'Retrying.', port_id) ++ raise ++ if passive_failure: ++ LOG.exception('Network port delete for port ID: %s failed. ' ++ 'This resource will be abandoned and should ' ++ 'manually be cleaned up once the ' ++ 'network service is functional.', port_id) ++ # Let's at least attempt to disable it so if the instance ++ # comes back from the dead it doesn't conflict with anything. ++ try: ++ self.network_driver.admin_down_port(port_id) ++ LOG.info('Successfully disabled (admin down) network port ' ++ '%s that failed to delete.', port_id) ++ except Exception: ++ LOG.warning('Attempt to disable (admin down) network port ' ++ '%s failed. The network service has failed. ' ++ 'Continuing.', port_id) ++ else: ++ LOG.exception('Network port delete for port ID: %s failed. ' ++ 'The network service has failed. ' ++ 'Aborting and reverting.', port_id) ++ raise ++ ++ ++class CreateVIPBasePort(BaseNetworkTask): ++ """Task to create the VIP base port for an amphora.""" ++ ++ @tenacity.retry(retry=tenacity.retry_if_exception_type(), ++ stop=tenacity.stop_after_attempt( ++ CONF.networking.max_retries), ++ wait=tenacity.wait_exponential( ++ multiplier=CONF.networking.retry_backoff, ++ min=CONF.networking.retry_interval, ++ max=CONF.networking.retry_max), reraise=True) ++ def execute(self, vip, vip_sg_id, amphora_id): ++ port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id ++ fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}] ++ sg_id = [] ++ if vip_sg_id: ++ sg_id = [vip_sg_id] ++ port = self.network_driver.create_port( ++ vip.network_id, name=port_name, fixed_ips=fixed_ips, ++ secondary_ips=[vip.ip_address], security_group_ids=sg_id, ++ qos_policy_id=vip.qos_policy_id) ++ LOG.info('Created port %s with ID %s for amphora %s', ++ port_name, port.id, amphora_id) ++ return port ++ ++ def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs): ++ if isinstance(result, failure.Failure): ++ return ++ try: ++ port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id ++ for port in result: ++ self.network_driver.delete_port(port.id) ++ LOG.info('Deleted port %s with ID %s for amphora %s due to a ' ++ 'revert.', port_name, port.id, amphora_id) ++ except Exception as e: ++ LOG.error('Failed to delete port %s. Resources may still be in ' ++ 'use for a port intended for amphora %s due to error ' ++ '%s. 
Search for a port named %s', ++ result, amphora_id, str(e), port_name) ++ ++ ++class AdminDownPort(BaseNetworkTask): ++ ++ def execute(self, port_id): ++ try: ++ self.network_driver.set_port_admin_state_up(port_id, False) ++ except base.PortNotFound: ++ return ++ for i in range(CONF.networking.max_retries): ++ port = self.network_driver.get_port(port_id) ++ if port.status == constants.DOWN: ++ LOG.debug('Disabled port: %s', port_id) ++ return ++ LOG.debug('Port %s is %s instead of DOWN, waiting.', ++ port_id, port.status) ++ time.sleep(CONF.networking.retry_interval) ++ LOG.error('Port %s failed to go DOWN. Port status is still %s. ' ++ 'Ignoring and continuing.', port_id, port.status) ++ ++ def revert(self, result, port_id, *args, **kwargs): ++ if isinstance(result, failure.Failure): ++ return ++ try: ++ self.network_driver.set_port_admin_state_up(port_id, True) ++ except Exception as e: ++ LOG.error('Failed to bring port %s admin up on revert due to: %s.', ++ port_id, str(e)) ++ ++ ++class GetVIPSecurityGroupID(BaseNetworkTask): ++ ++ def execute(self, loadbalancer_id): ++ sg_name = utils.get_vip_security_group_name(loadbalancer_id) ++ try: ++ security_group = self.network_driver.get_security_group(sg_name) ++ if security_group: ++ return security_group.id ++ except base.SecurityGroupNotFound: ++ with excutils.save_and_reraise_exception() as ctxt: ++ if self.network_driver.sec_grp_enabled: ++ LOG.error('VIP security group %s was not found.', sg_name) ++ else: ++ ctxt.reraise = False ++ return None +Index: octavia-5.0.1/octavia/controller/worker/v1/tasks/retry_tasks.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/controller/worker/v1/tasks/retry_tasks.py +@@ -0,0 +1,74 @@ ++# Copyright 2019 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++import time ++ ++from oslo_log import log as logging ++from taskflow import retry ++ ++LOG = logging.getLogger(__name__) ++ ++ ++class SleepingRetryTimesController(retry.Times): ++ """A retry controller to attempt subflow retries a number of times. ++ ++ This retry controller overrides the Times on_failure to inject a ++ sleep interval between retries. ++ It also adds a log message when all of the retries are exhausted. ++ ++ :param attempts: number of attempts to retry the associated subflow ++ before giving up ++ :type attempts: int ++ :param name: Meaningful name for this atom, should be something that is ++ distinguishable and understandable for notification, ++ debugging, storing and any other similar purposes. ++ :param provides: A set, string or list of items that ++ this will be providing (or could provide) to others, used ++ to correlate and associate the thing/s this atom ++ produces, if it produces anything at all. ++ :param requires: A set or list of required inputs for this atom's ++ ``execute`` method. 
++ :param rebind: A dict of key/value pairs used to define argument ++ name conversions for inputs to this atom's ``execute`` ++ method. ++ :param revert_all: when provided this will cause the full flow to revert ++ when the number of attempts that have been tried ++ has been reached (when false, it will only locally ++ revert the associated subflow) ++ :type revert_all: bool ++ :param interval: Interval, in seconds, between retry attempts. ++ :type interval: int ++ """ ++ ++ def __init__(self, attempts=1, name=None, provides=None, requires=None, ++ auto_extract=True, rebind=None, revert_all=False, interval=1): ++ super(SleepingRetryTimesController, self).__init__( ++ attempts, name, provides, requires, auto_extract, rebind, ++ revert_all) ++ self._interval = interval ++ ++ def on_failure(self, history, *args, **kwargs): ++ if len(history) < self._attempts: ++ LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds and ' ++ 'retrying.', ++ self.name[self.name.startswith('retry-') and ++ len('retry-'):], len(history), ++ self._attempts, self._interval) ++ time.sleep(self._interval) ++ return retry.RETRY ++ return self._revert_action ++ ++ def revert(self, history, *args, **kwargs): ++ LOG.error('%s retries with interval %s seconds have failed for %s. ' ++ 'Giving up.', len(history), self._interval, self.name) +Index: octavia-5.0.1/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py ++++ octavia-5.0.1/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +@@ -276,8 +276,8 @@ class AmphoraUpdateVRRPInterface(BaseAmp + loadbalancer.amphorae): + + try: +- interface = self.amphora_driver.get_vrrp_interface( +- amp, timeout_dict=timeout_dict) ++ interface = self.amphora_driver.get_interface_from_ip( ++ amp, amp.vrrp_ip, timeout_dict=timeout_dict) + except Exception as e: + # This can occur when an active/standby LB has no listener + LOG.error('Failed to get amphora VRRP interface on amphora ' +Index: octavia-5.0.1/octavia/network/base.py +=================================================================== +--- octavia-5.0.1.orig/octavia/network/base.py ++++ octavia-5.0.1/octavia/network/base.py +@@ -79,6 +79,14 @@ class QosPolicyNotFound(NetworkException + pass + + ++class SecurityGroupNotFound(NetworkException): ++ pass ++ ++ ++class CreatePortException(NetworkException): ++ pass ++ ++ + @six.add_metaclass(abc.ABCMeta) + class AbstractNetworkDriver(object): + """This class defines the methods for a fully functional network driver. +@@ -100,6 +108,24 @@ class AbstractNetworkDriver(object): + """ + + @abc.abstractmethod ++ def create_port(self, network_id, name=None, fixed_ips=(), ++ secondary_ips=(), security_group_ids=(), ++ admin_state_up=True, qos_policy_id=None): ++ """Creates a network port. ++ ++ fixed_ips = [{'subnet_id': , ('ip_address': ')},] ++ ip_address is optional in the fixed_ips dictionary. ++ ++ :param network_id: The network the port should be created on. ++ :param name: The name to apply to the port. ++ :param fixed_ips: A list of fixed IP dicts. ++ :param secondary_ips: A list of secondary IPs to add to the port. ++ :param security_group_ids: A list of security group IDs for the port. ++ :param qos_policy_id: The QoS policy ID to apply to the port. ++ :returns port: A port data model object. 
++ """ ++ ++ @abc.abstractmethod + def deallocate_vip(self, vip): + """Removes any resources that reserved this virtual ip. + +@@ -110,6 +136,14 @@ class AbstractNetworkDriver(object): + """ + + @abc.abstractmethod ++ def delete_port(self, port_id): ++ """Delete a network port. ++ ++ :param port_id: The port ID to delete. ++ :returns: None ++ """ ++ ++ @abc.abstractmethod + def plug_vip(self, load_balancer, vip): + """Plugs a virtual ip as the frontend connection of a load balancer. + +@@ -253,6 +287,15 @@ class AbstractNetworkDriver(object): + """ + + @abc.abstractmethod ++ def get_security_group(self, sg_name): ++ """Retrieves the security group by it's name. ++ ++ :param sg_name: The security group name. ++ :return: octavia.network.data_models.SecurityGroup, None if not enabled ++ :raises: NetworkException, SecurityGroupNotFound ++ """ ++ ++ @abc.abstractmethod + def failover_preparation(self, amphora): + """Prepare an amphora for failover. + +@@ -348,3 +391,12 @@ class AbstractNetworkDriver(object): + :return: octavia.network.data_models.Network_IP_Availability + :raises: NetworkException, NetworkNotFound + """ ++ ++ @abc.abstractmethod ++ def set_port_admin_state_up(self, port_id, state): ++ """Set the admin state of a port. True is up, False is down. ++ ++ :param port_id: The port ID to update. ++ :param state: True for up, False for down. ++ :returns: None ++ """ +Index: octavia-5.0.1/octavia/network/data_models.py +=================================================================== +--- octavia-5.0.1.orig/octavia/network/data_models.py ++++ octavia-5.0.1/octavia/network/data_models.py +@@ -76,7 +76,7 @@ class Port(data_models.BaseDataModel): + def __init__(self, id=None, name=None, device_id=None, device_owner=None, + mac_address=None, network_id=None, status=None, + project_id=None, admin_state_up=None, fixed_ips=None, +- network=None, qos_policy_id=None): ++ network=None, qos_policy_id=None, security_group_ids=None): + self.id = id + self.name = name + self.device_id = device_id +@@ -89,6 +89,7 @@ class Port(data_models.BaseDataModel): + self.fixed_ips = fixed_ips or [] + self.network = network + self.qos_policy_id = qos_policy_id ++ self.security_group_ids = security_group_ids or [] + + def get_subnet_id(self, fixed_ip_address): + for fixed_ip in self.fixed_ips: +@@ -163,3 +164,16 @@ class Network_IP_Availability(data_model + self.total_ips = total_ips + self.used_ips = used_ips + self.subnet_ip_availability = subnet_ip_availability ++ ++ ++class SecurityGroup(data_models.BaseDataModel): ++ ++ def __init__(self, id=None, project_id=None, name=None, description=None, ++ security_group_rule_ids=None, tags=None, stateful=None): ++ self.id = id ++ self.project_id = project_id ++ self.name = name ++ self.description = description ++ self.security_group_rule_ids = security_group_rule_ids or [] ++ self.tags = tags or [] ++ self.stateful = stateful +Index: octavia-5.0.1/octavia/network/drivers/neutron/allowed_address_pairs.py +=================================================================== +--- octavia-5.0.1.orig/octavia/network/drivers/neutron/allowed_address_pairs.py ++++ octavia-5.0.1/octavia/network/drivers/neutron/allowed_address_pairs.py +@@ -11,7 +11,6 @@ + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. 
+- + import ipaddress + import time + +@@ -25,6 +24,7 @@ from stevedore import driver as stevedor + from octavia.common import constants + from octavia.common import data_models + from octavia.common import exceptions ++from octavia.common import utils as common_utils + from octavia.i18n import _ + from octavia.network import base + from octavia.network import data_models as n_data_models +@@ -34,7 +34,6 @@ from octavia.network.drivers.neutron imp + LOG = logging.getLogger(__name__) + AAP_EXT_ALIAS = 'allowed-address-pairs' + PROJECT_ID_ALIAS = 'project-id' +-VIP_SECURITY_GRP_PREFIX = 'lb-' + OCTAVIA_OWNER = 'Octavia' + + CONF = cfg.CONF +@@ -85,11 +84,12 @@ class AllowedAddressPairsDriver(neutron_ + def _plug_amphora_vip(self, amphora, subnet): + # We need a vip port owned by Octavia for Act/Stby and failover + try: +- port = {'port': {'name': 'octavia-lb-vrrp-' + amphora.id, +- 'network_id': subnet.network_id, +- 'fixed_ips': [{'subnet_id': subnet.id}], +- 'admin_state_up': True, +- 'device_owner': OCTAVIA_OWNER}} ++ port = {constants.PORT: { ++ constants.NAME: 'octavia-lb-vrrp-' + amphora.id, ++ constants.NETWORK_ID: subnet.network_id, ++ constants.FIXED_IPS: [{'subnet_id': subnet.id}], ++ constants.ADMIN_STATE_UP: True, ++ constants.DEVICE_OWNER: OCTAVIA_OWNER}} + new_port = self.neutron_client.create_port(port) + new_port = utils.convert_port_dict_to_model(new_port) + +@@ -136,10 +136,11 @@ class AllowedAddressPairsDriver(neutron_ + raise base.PlugVIPException(message) + + def _get_lb_security_group(self, load_balancer_id): +- sec_grp_name = VIP_SECURITY_GRP_PREFIX + load_balancer_id ++ sec_grp_name = common_utils.get_vip_security_group_name( ++ load_balancer_id) + sec_grps = self.neutron_client.list_security_groups(name=sec_grp_name) +- if sec_grps and sec_grps.get('security_groups'): +- return sec_grps.get('security_groups')[0] ++ if sec_grps and sec_grps.get(constants.SECURITY_GROUPS): ++ return sec_grps.get(constants.SECURITY_GROUPS)[0] + return None + + def _get_ethertype_for_ip(self, ip): +@@ -197,7 +198,7 @@ class AllowedAddressPairsDriver(neutron_ + rule.get('protocol', '').lower() in ['tcp', 'udp'] and + (rule.get('port_range_max'), rule.get('protocol'), + rule.get('remote_ip_prefix')) in del_ports): +- rule_id = rule.get('id') ++ rule_id = rule.get(constants.ID) + try: + self.neutron_client.delete_security_group_rule(rule_id) + except neutron_client_exceptions.NotFound: +@@ -237,19 +238,11 @@ class AllowedAddressPairsDriver(neutron_ + except Exception as e: + raise base.PlugVIPException(str(e)) + +- def _update_vip_security_group(self, load_balancer, vip): +- sec_grp = self._get_lb_security_group(load_balancer.id) +- if not sec_grp: +- sec_grp_name = VIP_SECURITY_GRP_PREFIX + load_balancer.id +- sec_grp = self._create_security_group(sec_grp_name) +- self._update_security_group_rules(load_balancer, sec_grp.get('id')) +- self._add_vip_security_group_to_port(load_balancer.id, vip.port_id, +- sec_grp.get('id')) +- + def _add_vip_security_group_to_port(self, load_balancer_id, port_id, + sec_grp_id=None): + sec_grp_id = (sec_grp_id or +- self._get_lb_security_group(load_balancer_id).get('id')) ++ self._get_lb_security_group(load_balancer_id).get( ++ constants.ID)) + try: + self._add_security_group_to_port(sec_grp_id, port_id) + except base.PortNotFound: +@@ -288,10 +281,10 @@ class AllowedAddressPairsDriver(neutron_ + if self.sec_grp_enabled: + sec_grp = self._get_lb_security_group(vip.load_balancer.id) + if sec_grp: +- sec_grp_id = sec_grp.get('id') ++ sec_grp_id = 
sec_grp.get(constants.ID)
+ LOG.info(
+ "Removing security group %(sg)s from port %(port)s",
+- {'sg': sec_grp_id, 'port': vip.port_id})
++ {'sg': sec_grp_id, constants.PORT: vip.port_id})
+ raw_port = None
+ try:
+ if port:
+@@ -302,10 +295,11 @@ class AllowedAddressPairsDriver(neutron_
+ 'group.', port.id)
+ if raw_port:
+ sec_grps = raw_port.get(
+- 'port', {}).get('security_groups', [])
++ constants.PORT, {}).get(constants.SECURITY_GROUPS, [])
+ if sec_grp_id in sec_grps:
+ sec_grps.remove(sec_grp_id)
+- port_update = {'port': {'security_groups': sec_grps}}
++ port_update = {constants.PORT: {
++ constants.SECURITY_GROUPS: sec_grps}}
+ try:
+ self.neutron_client.update_port(port.id,
+ port_update)
+@@ -325,7 +319,7 @@ class AllowedAddressPairsDriver(neutron_
+ 'pass: %s', sec_grp_id)
+ extra_ports = self._get_ports_by_security_group(sec_grp_id)
+ for extra_port in extra_ports:
+- port_id = extra_port.get('id')
++ port_id = extra_port.get(constants.ID)
+ try:
+ LOG.warning('Deleting extra port %s on security '
+ 'group %s...', port_id, sec_grp_id)
+@@ -378,7 +372,17 @@ class AllowedAddressPairsDriver(neutron_
+
+ def update_vip_sg(self, load_balancer, vip):
+ if self.sec_grp_enabled:
+- self._update_vip_security_group(load_balancer, vip)
++ sec_grp = self._get_lb_security_group(load_balancer.id)
++ if not sec_grp:
++ sec_grp_name = common_utils.get_vip_security_group_name(
++ load_balancer.id)
++ sec_grp = self._create_security_group(sec_grp_name)
++ self._update_security_group_rules(load_balancer,
++ sec_grp.get(constants.ID))
++ self._add_vip_security_group_to_port(load_balancer.id, vip.port_id,
++ sec_grp.get(constants.ID))
++ return sec_grp.get(constants.ID)
++ return None
+
+ def plug_aap_port(self, load_balancer, vip, amphora, subnet):
+ interface = self._get_plugged_interface(
+@@ -417,18 +421,79 @@ class AllowedAddressPairsDriver(neutron_
+ amphora, subnet))
+ return plugged_amphorae
+
++ def _validate_fixed_ip(self, fixed_ips, subnet_id, ip_address):
++ """Validate an IP address exists in a fixed_ips dict
++
++ :param fixed_ips: A port fixed_ips dict
++ :param subnet_id: The subnet that should contain the IP
++ :param ip_address: The IP address to validate
++ :returns: True if the ip address is in the dict, False if not
++ """
++ for fixed_ip in fixed_ips:
++ normalized_fixed_ip = ipaddress.ip_address(
++ six.text_type(fixed_ip.ip_address)).compressed
++ normalized_ip = ipaddress.ip_address(
++ six.text_type(ip_address)).compressed
++ if (fixed_ip.subnet_id == subnet_id and
++ normalized_fixed_ip == normalized_ip):
++ return True
++ return False
++
++ @staticmethod
++ def _fixed_ips_to_list_of_dicts(fixed_ips):
++ list_of_dicts = []
++ for fixed_ip in fixed_ips:
++ list_of_dicts.append(fixed_ip.to_dict())
++ return list_of_dicts
++
+ def allocate_vip(self, load_balancer):
+ if load_balancer.vip.port_id:
+- LOG.info('Port %s already exists. Nothing to be done.',
+- load_balancer.vip.port_id)
+- port = self.get_port(load_balancer.vip.port_id)
+- return self._port_to_vip(port, load_balancer)
++ try:
++ port = self.get_port(load_balancer.vip.port_id)
++ fixed_ip_found = self._validate_fixed_ip(
++ port.fixed_ips, load_balancer.vip.subnet_id,
++ load_balancer.vip.ip_address)
++ if (port.network_id == load_balancer.vip.network_id and
++ fixed_ip_found):
++ LOG.info('Port %s already exists. Nothing to be done.',
++ load_balancer.vip.port_id)
++ return self._port_to_vip(port, load_balancer)
++ LOG.error('Neutron VIP mis-match. Expected ip %s on '
++ 'subnet %s in network %s. 
Neutron has fixed_ips %s ' ++ 'in network %s. Deleting and recreating the VIP ' ++ 'port.', load_balancer.vip.ip_address, ++ load_balancer.vip.subnet_id, ++ load_balancer.vip.network_id, ++ self._fixed_ips_to_list_of_dicts(port.fixed_ips), ++ port.network_id) ++ if load_balancer.vip.octavia_owned: ++ self.delete_port(load_balancer.vip.port_id) ++ else: ++ raise base.AllocateVIPException( ++ 'VIP port {0} is broken, but is owned by project {1} ' ++ 'so will not be recreated. Aborting VIP allocation.' ++ .format(port.id, port.project_id)) ++ except base.AllocateVIPException as e: ++ # Catch this explicitly because otherwise we blame Neutron ++ LOG.error(getattr(e, constants.MESSAGE, None)) ++ raise ++ except base.PortNotFound: ++ LOG.warning('VIP port %s is missing from neutron. Rebuilding.', ++ load_balancer.vip.port_id) ++ except Exception as e: ++ message = _('Neutron is failing to service requests due to: ' ++ '{}. Aborting.').format(str(e)) ++ LOG.error(message) ++ raise base.AllocateVIPException( ++ message, ++ orig_msg=getattr(e, constants.MESSAGE, None), ++ orig_code=getattr(e, constants.STATUS_CODE, None),) + + fixed_ip = {} + if load_balancer.vip.subnet_id: + fixed_ip['subnet_id'] = load_balancer.vip.subnet_id + if load_balancer.vip.ip_address: +- fixed_ip['ip_address'] = load_balancer.vip.ip_address ++ fixed_ip[constants.IP_ADDRESS] = load_balancer.vip.ip_address + + # Make sure we are backward compatible with older neutron + if self._check_extension_enabled(PROJECT_ID_ALIAS): +@@ -437,29 +502,30 @@ class AllowedAddressPairsDriver(neutron_ + project_id_key = 'tenant_id' + + # It can be assumed that network_id exists +- port = {'port': {'name': 'octavia-lb-' + load_balancer.id, +- 'network_id': load_balancer.vip.network_id, +- 'admin_state_up': False, +- 'device_id': 'lb-{0}'.format(load_balancer.id), +- 'device_owner': OCTAVIA_OWNER, +- project_id_key: load_balancer.project_id}} ++ port = {constants.PORT: { ++ constants.NAME: 'octavia-lb-' + load_balancer.id, ++ constants.NETWORK_ID: load_balancer.vip.network_id, ++ constants.ADMIN_STATE_UP: False, ++ 'device_id': 'lb-{0}'.format(load_balancer.id), ++ constants.DEVICE_OWNER: OCTAVIA_OWNER, ++ project_id_key: load_balancer.project_id}} + + if fixed_ip: +- port['port']['fixed_ips'] = [fixed_ip] ++ port[constants.PORT][constants.FIXED_IPS] = [fixed_ip] + try: + new_port = self.neutron_client.create_port(port) + except Exception as e: + message = _('Error creating neutron port on network ' +- '{network_id}.').format( +- network_id=load_balancer.vip.network_id) ++ '{network_id} due to {e}.').format( ++ network_id=load_balancer.vip.network_id, e=str(e)) + LOG.exception(message) + raise base.AllocateVIPException( + message, +- orig_msg=getattr(e, 'message', None), +- orig_code=getattr(e, 'status_code', None), ++ orig_msg=getattr(e, constants.MESSAGE, None), ++ orig_code=getattr(e, constants.STATUS_CODE, None), + ) + new_port = utils.convert_port_dict_to_model(new_port) +- return self._port_to_vip(new_port, load_balancer) ++ return self._port_to_vip(new_port, load_balancer, octavia_owned=True) + + def unplug_aap_port(self, vip, amphora, subnet): + interface = self._get_plugged_interface( +@@ -475,8 +541,8 @@ class AllowedAddressPairsDriver(neutron_ + except Exception: + pass + try: +- aap_update = {'port': { +- 'allowed_address_pairs': [] ++ aap_update = {constants.PORT: { ++ constants.ALLOWED_ADDRESS_PAIRS: [] + }} + self.neutron_client.update_port(interface.port_id, + aap_update) +@@ -497,8 +563,8 @@ class 
AllowedAddressPairsDriver(neutron_ + pass + except Exception as e: + LOG.error('Failed to delete port. Resources may still be in ' +- 'use for port: %(port)s due to error: %s(except)s', +- {'port': amphora.vrrp_port_id, 'except': e}) ++ 'use for port: %(port)s due to error: %(except)s', ++ {constants.PORT: amphora.vrrp_port_id, 'except': e}) + + def unplug_vip(self, load_balancer, vip): + try: +@@ -518,7 +584,7 @@ class AllowedAddressPairsDriver(neutron_ + interface = self.compute.attach_network_or_port( + compute_id=compute_id, network_id=network_id, + ip_address=ip_address) +- except nova_client_exceptions.NotFound as e: ++ except exceptions.NotFound as e: + if 'Instance' in str(e): + raise base.AmphoraNotFound(str(e)) + if 'Network' in str(e): +@@ -550,7 +616,8 @@ class AllowedAddressPairsDriver(neutron_ + def update_vip(self, load_balancer, for_delete=False): + sec_grp = self._get_lb_security_group(load_balancer.id) + if sec_grp: +- self._update_security_group_rules(load_balancer, sec_grp.get('id')) ++ self._update_security_group_rules(load_balancer, ++ sec_grp.get(constants.ID)) + elif not for_delete: + raise exceptions.MissingVIPSecurityGroup(lb_id=load_balancer.id) + else: +@@ -579,8 +646,8 @@ class AllowedAddressPairsDriver(neutron_ + + for port in ports: + try: +- self.neutron_client.update_port(port.id, +- {'port': {'dns_name': ''}}) ++ self.neutron_client.update_port( ++ port.id, {constants.PORT: {'dns_name': ''}}) + + except (neutron_client_exceptions.NotFound, + neutron_client_exceptions.PortNotFoundClient): +@@ -593,7 +660,7 @@ class AllowedAddressPairsDriver(neutron_ + ip_address=None, port_id=port.id) + plugged_interface = self._nova_interface_to_octavia_interface( + amphora.compute_id, interface) +- except nova_client_exceptions.NotFound as e: ++ except exceptions.NotFound as e: + if 'Instance' in str(e): + raise base.AmphoraNotFound(str(e)) + if 'Network' in str(e): +@@ -652,6 +719,7 @@ class AllowedAddressPairsDriver(neutron_ + vip_subnet, vip_port) + return amp_configs + ++ # TODO(johnsom) This may be dead code now. Remove in failover for v2 patch + def wait_for_port_detach(self, amphora): + """Waits for the amphora ports device_id to be unset. + +@@ -681,14 +749,14 @@ class AllowedAddressPairsDriver(neutron_ + for port in ports: + try: + neutron_port = self.neutron_client.show_port( +- port.id).get('port') ++ port.id).get(constants.PORT) + device_id = neutron_port['device_id'] + start = int(time.time()) + + while device_id: + time.sleep(CONF.networking.retry_interval) + neutron_port = self.neutron_client.show_port( +- port.id).get('port') ++ port.id).get(constants.PORT) + device_id = neutron_port['device_id'] + + timed_out = int(time.time()) - start >= port_detach_timeout +@@ -702,3 +770,106 @@ class AllowedAddressPairsDriver(neutron_ + except (neutron_client_exceptions.NotFound, + neutron_client_exceptions.PortNotFoundClient): + pass ++ ++ def delete_port(self, port_id): ++ """delete a neutron port. ++ ++ :param port_id: The port ID to delete. ++ :returns: None ++ """ ++ try: ++ self.neutron_client.delete_port(port_id) ++ except (neutron_client_exceptions.NotFound, ++ neutron_client_exceptions.PortNotFoundClient): ++ LOG.debug('VIP instance port %s already deleted. Skipping.', ++ port_id) ++ except Exception as e: ++ raise exceptions.NetworkServiceError(net_error=str(e)) ++ ++ def set_port_admin_state_up(self, port_id, state): ++ """Set the admin state of a port. True is up, False is down. ++ ++ :param port_id: The port ID to update. 
++ :param state: True for up, False for down.
++ :returns: None
++ """
++ try:
++ self.neutron_client.update_port(
++ port_id, {constants.PORT: {constants.ADMIN_STATE_UP: state}})
++ except (neutron_client_exceptions.NotFound,
++ neutron_client_exceptions.PortNotFoundClient) as e:
++ raise base.PortNotFound(str(e))
++ except Exception as e:
++ raise exceptions.NetworkServiceError(net_error=str(e))
++
++ def create_port(self, network_id, name=None, fixed_ips=(),
++ secondary_ips=(), security_group_ids=(),
++ admin_state_up=True, qos_policy_id=None):
++ """Creates a network port.
++
++ fixed_ips = [{'subnet_id': , ('ip_address': ')},]
++ ip_address is optional in the fixed_ips dictionary.
++
++ :param network_id: The network the port should be created on.
++ :param name: The name to apply to the port.
++ :param fixed_ips: A list of fixed IP dicts.
++ :param secondary_ips: A list of secondary IPs to add to the port.
++ :param security_group_ids: A list of security group IDs for the port.
++ :param qos_policy_id: The QoS policy ID to apply to the port.
++ :returns port: A port data model object.
++ """
++ try:
++ aap_list = []
++ for ip in secondary_ips:
++ aap_list.append({constants.IP_ADDRESS: ip})
++ port = {constants.NETWORK_ID: network_id,
++ constants.ADMIN_STATE_UP: admin_state_up,
++ constants.DEVICE_OWNER: OCTAVIA_OWNER}
++ if aap_list:
++ port[constants.ALLOWED_ADDRESS_PAIRS] = aap_list
++ if fixed_ips:
++ port[constants.FIXED_IPS] = fixed_ips
++ if name:
++ port[constants.NAME] = name
++ if qos_policy_id:
++ port[constants.QOS_POLICY_ID] = qos_policy_id
++ if security_group_ids:
++ port[constants.SECURITY_GROUPS] = security_group_ids
++
++ new_port = self.neutron_client.create_port({constants.PORT: port})
++
++ LOG.debug('Created port: %(port)s', {constants.PORT: new_port})
++
++ return utils.convert_port_dict_to_model(new_port)
++ except Exception as e:
++ message = _('Error creating a port on network '
++ '{network_id} due to {error}.').format(
++ network_id=network_id, error=str(e))
++ LOG.exception(message)
++ raise base.CreatePortException(message)
++
++ def get_security_group(self, sg_name):
++ """Retrieves the security group by its name.
++
++ :param sg_name: The security group name. 
++ :return: octavia.network.data_models.SecurityGroup, None if not enabled ++ :raises: NetworkException, SecurityGroupNotFound ++ """ ++ try: ++ if self.sec_grp_enabled and sg_name: ++ sec_grps = self.neutron_client.list_security_groups( ++ name=sg_name) ++ if sec_grps and sec_grps.get(constants.SECURITY_GROUPS): ++ sg_dict = sec_grps.get(constants.SECURITY_GROUPS)[0] ++ return utils.convert_security_group_dict_to_model(sg_dict) ++ message = _('Security group {name} not found.').format( ++ name=sg_name) ++ raise base.SecurityGroupNotFound(message) ++ return None ++ except base.SecurityGroupNotFound: ++ raise ++ except Exception as e: ++ message = _('Error when getting security group {name} due to ' ++ '{error}').format(name=sg_name, error=str(e)) ++ LOG.exception(message) ++ raise base.NetworkException(message) +Index: octavia-5.0.1/octavia/network/drivers/neutron/base.py +=================================================================== +--- octavia-5.0.1.orig/octavia/network/drivers/neutron/base.py ++++ octavia-5.0.1/octavia/network/drivers/neutron/base.py +@@ -71,18 +71,26 @@ class BaseNeutronDriver(base.AbstractNet + self._check_extension_cache[extension_alias] = False + return self._check_extension_cache[extension_alias] + +- def _port_to_vip(self, port, load_balancer): ++ def _port_to_vip(self, port, load_balancer, octavia_owned=False): + fixed_ip = None + for port_fixed_ip in port.fixed_ips: + if port_fixed_ip.subnet_id == load_balancer.vip.subnet_id: + fixed_ip = port_fixed_ip + break +- return data_models.Vip(ip_address=fixed_ip.ip_address, +- subnet_id=fixed_ip.subnet_id, ++ if fixed_ip: ++ return data_models.Vip(ip_address=fixed_ip.ip_address, ++ subnet_id=fixed_ip.subnet_id, ++ network_id=port.network_id, ++ port_id=port.id, ++ load_balancer=load_balancer, ++ load_balancer_id=load_balancer.id, ++ octavia_owned=octavia_owned) ++ return data_models.Vip(ip_address=None, subnet_id=None, + network_id=port.network_id, + port_id=port.id, + load_balancer=load_balancer, +- load_balancer_id=load_balancer.id) ++ load_balancer_id=load_balancer.id, ++ octavia_owned=octavia_owned) + + def _nova_interface_to_octavia_interface(self, compute_id, nova_interface): + fixed_ips = [utils.convert_fixed_ip_dict_to_model(fixed_ip) +@@ -112,6 +120,7 @@ class BaseNeutronDriver(base.AbstractNet + + def _add_security_group_to_port(self, sec_grp_id, port_id): + port_update = {'port': {'security_groups': [sec_grp_id]}} ++ # Note: Neutron accepts the SG even if it already exists + try: + self.neutron_client.update_port(port_id, port_update) + except neutron_client_exceptions.PortNotFoundClient as e: +Index: octavia-5.0.1/octavia/network/drivers/neutron/utils.py +=================================================================== +--- octavia-5.0.1.orig/octavia/network/drivers/neutron/utils.py ++++ octavia-5.0.1/octavia/network/drivers/neutron/utils.py +@@ -13,6 +13,7 @@ + # under the License. 
+ + ++from octavia.common import constants + from octavia.network import data_models as network_models + + +@@ -22,9 +23,10 @@ def convert_subnet_dict_to_model(subnet_ + host_routes = [network_models.HostRoute(nexthop=hr.get('nexthop'), + destination=hr.get('destination')) + for hr in subnet_hrs] +- return network_models.Subnet(id=subnet.get('id'), name=subnet.get('name'), ++ return network_models.Subnet(id=subnet.get(constants.ID), ++ name=subnet.get(constants.NAME), + network_id=subnet.get('network_id'), +- project_id=subnet.get('tenant_id'), ++ project_id=subnet.get(constants.TENANT_ID), + gateway_ip=subnet.get('gateway_ip'), + cidr=subnet.get('cidr'), + ip_version=subnet.get('ip_version'), +@@ -38,27 +40,28 @@ def convert_port_dict_to_model(port_dict + ip_address=fixed_ip.get('ip_address')) + for fixed_ip in port.get('fixed_ips', [])] + return network_models.Port( +- id=port.get('id'), +- name=port.get('name'), ++ id=port.get(constants.ID), ++ name=port.get(constants.NAME), + device_id=port.get('device_id'), + device_owner=port.get('device_owner'), + mac_address=port.get('mac_address'), + network_id=port.get('network_id'), + status=port.get('status'), +- project_id=port.get('tenant_id'), ++ project_id=port.get(constants.TENANT_ID), + admin_state_up=port.get('admin_state_up'), + fixed_ips=fixed_ips, +- qos_policy_id=port.get('qos_policy_id') ++ qos_policy_id=port.get('qos_policy_id'), ++ security_group_ids=port.get(constants.SECURITY_GROUPS, []) + ) + + + def convert_network_dict_to_model(network_dict): + nw = network_dict.get('network', network_dict) + return network_models.Network( +- id=nw.get('id'), +- name=nw.get('name'), ++ id=nw.get(constants.ID), ++ name=nw.get(constants.NAME), + subnets=nw.get('subnets'), +- project_id=nw.get('tenant_id'), ++ project_id=nw.get(constants.TENANT_ID), + admin_state_up=nw.get('admin_state_up'), + mtu=nw.get('mtu'), + provider_network_type=nw.get('provider:network_type'), +@@ -76,16 +79,17 @@ def convert_fixed_ip_dict_to_model(fixed + + def convert_qos_policy_dict_to_model(qos_policy_dict): + qos_policy = qos_policy_dict.get('policy', qos_policy_dict) +- return network_models.QosPolicy(id=qos_policy.get('id')) ++ return network_models.QosPolicy(id=qos_policy.get(constants.ID)) + + + # We can't use "floating_ip" because we need to match the neutron client method + def convert_floatingip_dict_to_model(floating_ip_dict): + floating_ip = floating_ip_dict.get('floatingip', floating_ip_dict) + return network_models.FloatingIP( +- id=floating_ip.get('id'), +- description=floating_ip.get('description'), +- project_id=floating_ip.get('project_id', floating_ip.get('tenant_id')), ++ id=floating_ip.get(constants.ID), ++ description=floating_ip.get(constants.DESCRIPTION), ++ project_id=floating_ip.get(constants.PROJECT_ID, ++ floating_ip.get(constants.TENANT_ID)), + status=floating_ip.get('status'), + router_id=floating_ip.get('router_id'), + port_id=floating_ip.get('port_id'), +@@ -103,3 +107,18 @@ def convert_network_ip_availability_dict + ip_avail = network_models.Network_IP_Availability.from_dict(nw_ip_avail) + ip_avail.subnet_ip_availability = nw_ip_avail.get('subnet_ip_availability') + return ip_avail ++ ++ ++def convert_security_group_dict_to_model(security_group_dict): ++ sg_rule_ids = [rule.get(constants.ID) for rule in ++ security_group_dict.get(constants.SECURITY_GROUP_RULES, [])] ++ return network_models.SecurityGroup( ++ id=security_group_dict.get(constants.ID), ++ project_id=security_group_dict.get( ++ constants.PROJECT_ID, ++ 
security_group_dict.get(constants.TENANT_ID)), ++ name=security_group_dict.get(constants.NAME), ++ description=security_group_dict.get(constants.DESCRIPTION), ++ security_group_rule_ids=sg_rule_ids, ++ tags=security_group_dict.get(constants.TAGS, []), ++ stateful=security_group_dict.get('stateful')) +Index: octavia-5.0.1/octavia/network/drivers/noop_driver/driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/network/drivers/noop_driver/driver.py ++++ octavia-5.0.1/octavia/network/drivers/noop_driver/driver.py +@@ -201,6 +201,12 @@ class NoopManager(object): + network_id, device_id, 'get_port_by_net_id_device_id') + return network_models.Port(id=uuidutils.generate_uuid()) + ++ def get_security_group(self, sg_name): ++ LOG.debug("Network %s no-op, get_security_group name %s", ++ self.__class__.__name__, sg_name) ++ self.networkconfigconfig[(sg_name)] = (sg_name, 'get_security_group') ++ return network_models.SecurityGroup(id=uuidutils.generate_uuid()) ++ + def failover_preparation(self, amphora): + LOG.debug("failover %s no-op, failover_preparation, amphora id %s", + self.__class__.__name__, amphora.id) +@@ -279,6 +285,53 @@ class NoopManager(object): + ip_avail.subnet_ip_availability = subnet_ip_availability + return ip_avail + ++ def delete_port(self, port_id): ++ LOG.debug("Network %s no-op, delete_port port_id %s", ++ self.__class__.__name__, port_id) ++ self.networkconfigconfig[port_id] = (port_id, 'delete_port') ++ ++ def set_port_admin_state_up(self, port_id, state): ++ LOG.debug("Network %s no-op, set_port_admin_state_up port_id %s, " ++ "state %s", self.__class__.__name__, port_id, state) ++ self.networkconfigconfig[(port_id, state)] = (port_id, state, ++ 'admin_down_port') ++ ++ def create_port(self, network_id, name=None, fixed_ips=(), ++ secondary_ips=(), security_group_ids=(), ++ admin_state_up=True, qos_policy_id=None): ++ LOG.debug("Network %s no-op, create_port network_id %s", ++ self.__class__.__name__, network_id) ++ if not name: ++ name = 'no-op-port' ++ port_id = uuidutils.generate_uuid() ++ project_id = uuidutils.generate_uuid() ++ ++ fixed_ip_obj_list = [] ++ for fixed_ip in fixed_ips: ++ if fixed_ip and not fixed_ip.get('ip_address'): ++ fixed_ip_obj_list.append( ++ network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'), ++ ip_address='198.51.100.56')) ++ else: ++ fixed_ip_obj_list.append( ++ network_models.FixedIP( ++ subnet_id=fixed_ip.get('subnet_id'), ++ ip_address=fixed_ip.get('ip_address'))) ++ if not fixed_ip_obj_list: ++ fixed_ip_obj_list = [network_models.FixedIP( ++ subnet_id=uuidutils.generate_uuid(), ++ ip_address='198.51.100.56')] ++ ++ self.networkconfigconfig[(network_id, 'create_port')] = ( ++ network_id, name, fixed_ip_obj_list, secondary_ips, ++ security_group_ids, admin_state_up, qos_policy_id) ++ return network_models.Port( ++ id=port_id, name=name, device_id='no-op-device-id', ++ device_owner='Octavia', mac_address='00:00:5E:00:53:05', ++ network_id=network_id, status='UP', project_id=project_id, ++ admin_state_up=admin_state_up, fixed_ips=fixed_ip_obj_list, ++ qos_policy_id=qos_policy_id, security_group_ids=security_group_ids) ++ + + class NoopNetworkDriver(driver_base.AbstractNetworkDriver): + def __init__(self): +@@ -334,6 +387,9 @@ class NoopNetworkDriver(driver_base.Abst + def get_port_by_net_id_device_id(self, network_id, device_id): + return self.driver.get_port_by_net_id_device_id(network_id, device_id) + ++ def get_security_group(self, sg_name): ++ return 
self.driver.get_security_group(sg_name) ++ + def failover_preparation(self, amphora): + self.driver.failover_preparation(amphora) + +@@ -363,3 +419,16 @@ class NoopNetworkDriver(driver_base.Abst + + def get_network_ip_availability(self, network): + return self.driver.get_network_ip_availability(network) ++ ++ def delete_port(self, port_id): ++ self.driver.delete_port(port_id) ++ ++ def set_port_admin_state_up(self, port_id, state): ++ self.driver.set_port_admin_state_up(port_id, state) ++ ++ def create_port(self, network_id, name=None, fixed_ips=(), ++ secondary_ips=(), security_group_ids=(), ++ admin_state_up=True, qos_policy_id=None): ++ return self.driver.create_port( ++ network_id, name, fixed_ips, secondary_ips, security_group_ids, ++ admin_state_up, qos_policy_id) +Index: octavia-5.0.1/octavia/opts.py +=================================================================== +--- octavia-5.0.1.orig/octavia/opts.py ++++ octavia-5.0.1/octavia/opts.py +@@ -28,6 +28,7 @@ def list_opts(): + itertools.chain(octavia.common.config.core_opts)), + ('api_settings', octavia.common.config.api_opts), + ('amphora_agent', octavia.common.config.amphora_agent_opts), ++ ('compute', octavia.common.config.compute_opts), + ('networking', octavia.common.config.networking_opts), + ('oslo_messaging', octavia.common.config.oslo_messaging_opts), + ('haproxy_amphora', octavia.common.config.haproxy_amphora_opts), +Index: octavia-5.0.1/octavia/tests/common/constants.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/common/constants.py ++++ octavia-5.0.1/octavia/tests/common/constants.py +@@ -65,6 +65,52 @@ MOCK_DEVICE_ID2 = 'Moctavia124' + MOCK_SECURITY_GROUP_ID = 'security-group-1' + MOCK_SECURITY_GROUP_NAME = 'SecurityGroup1' + ++MOCK_SECURITY_GROUP = { ++ "id": MOCK_SECURITY_GROUP_ID, ++ "name": MOCK_SECURITY_GROUP_NAME, ++ "tenant_id": MOCK_PROJECT_ID, ++ "description": "", ++ "security_group_rules": [{ ++ "id": "85f1c72b-cdd4-484f-a9c8-b3205f4e6f53", ++ "tenant_id": MOCK_PROJECT_ID, ++ "security_group_id": MOCK_SECURITY_GROUP_ID, ++ "ethertype": "IPv4", ++ "direction": "ingress", ++ "protocol": "tcp", ++ "port_range_min": 80, ++ "port_range_max": 80, ++ "remote_ip_prefix": None, ++ "remote_group_id": None, ++ "description": "", ++ "tags": [], ++ "created_at": "2020-03-12T20:44:48Z", ++ "updated_at": "2020-03-12T20:44:48Z", ++ "revision_number": 0, ++ "project_id": MOCK_PROJECT_ID ++ }, { ++ "id": "aa16ae5f-eac2-40b5-994b-5169a06228a4", ++ "tenant_id": MOCK_PROJECT_ID, ++ "security_group_id": "6530d536-3083-4d5c-a4a9-272ac7b8f3de", ++ "ethertype": "IPv4", ++ "direction": "egress", ++ "protocol": None, ++ "port_range_min": None, ++ "port_range_max": None, ++ "remote_ip_prefix": None, ++ "remote_group_id": None, ++ "description": None, ++ "tags": [], ++ "created_at": "2020-03-12T20:43:31Z", ++ "updated_at": "2020-03-12T20:43:31Z", ++ "revision_number": 0, ++ "project_id": MOCK_PROJECT_ID, ++ }], ++ "tags": [], ++ "created_at": "2020-03-12T20:43:31Z", ++ "updated_at": "2020-03-12T20:44:48Z", ++ "revision_number": 3, ++ "project_id": MOCK_PROJECT_ID} ++ + MOCK_ADMIN_STATE_UP = True + MOCK_STATUS = 'ACTIVE' + MOCK_MTU = 1500 +Index: octavia-5.0.1/octavia/tests/common/data_model_helpers.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/common/data_model_helpers.py ++++ octavia-5.0.1/octavia/tests/common/data_model_helpers.py +@@ -27,7 +27,8 @@ def generate_load_balancer_tree(): + LB_SEED = 0 + + +-def 
generate_load_balancer(vip=None, amphorae=None): ++def generate_load_balancer(vip=None, amphorae=None, ++ topology=constants.TOPOLOGY_SINGLE): + amphorae = amphorae or [] + global LB_SEED + LB_SEED += 1 +@@ -36,6 +37,7 @@ def generate_load_balancer(vip=None, amp + name='lb{0}'.format(LB_SEED), + description='lb{0}'.format(LB_SEED), + vip=vip, ++ topology=topology, + amphorae=amphorae) + for amp in lb.amphorae: + amp.load_balancer = lb +Index: octavia-5.0.1/octavia/tests/common/sample_data_models.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/common/sample_data_models.py ++++ octavia-5.0.1/octavia/tests/common/sample_data_models.py +@@ -569,14 +569,16 @@ class SampleDriverDataModels(object): + constants.NETWORK_ID: self.network_id, + constants.PORT_ID: self.port_id, + lib_consts.SUBNET_ID: self.subnet_id, +- constants.QOS_POLICY_ID: self.qos_policy_id} ++ constants.QOS_POLICY_ID: self.qos_policy_id, ++ constants.OCTAVIA_OWNED: None} + + self.provider_vip_dict = { + lib_consts.VIP_ADDRESS: self.ip_address, + lib_consts.VIP_NETWORK_ID: self.network_id, + lib_consts.VIP_PORT_ID: self.port_id, + lib_consts.VIP_SUBNET_ID: self.subnet_id, +- lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id} ++ lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, ++ constants.OCTAVIA_OWNED: None} + + self.db_vip = data_models.Vip( + ip_address=self.ip_address, +Index: octavia-5.0.1/octavia/tests/common/sample_network_data.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/common/sample_network_data.py +@@ -0,0 +1,198 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++import collections ++ ++ ++def create_iproute_ipv4_address(ip_address, broadcast_address, interface_name): ++ """Returns a netlink/iproute (pyroute2) IPv4 address.""" ++ Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) ++ return ( ++ {'family': 2, 'prefixlen': 24, 'flags': 0, 'scope': 0, 'index': 2, ++ 'attrs': [('IFA_ADDRESS', ip_address), ('IFA_LOCAL', ip_address), ++ ('IFA_BROADCAST', broadcast_address), ++ ('IFA_LABEL', interface_name), ('IFA_FLAGS', 0), ++ ('IFA_CACHEINFO', {'ifa_preferred': 49256, ++ 'ifa_valid': 49256, 'cstamp': 1961, ++ 'tstamp': 73441020})], ++ 'header': {'length': 88, 'type': 20, 'flags': 2, ++ 'sequence_number': 258, 'pid': 7590, 'error': None, ++ 'stats': Stats(qsize=0, delta=0, delay=0)}, ++ 'event': 'RTM_NEWADDR'},) ++ ++ ++def create_iproute_ipv6_address(ip_address, interface_name): ++ """Returns a netlink/iproute (pyroute2) IPv6 address.""" ++ Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) ++ return ( ++ {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2, ++ 'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503, ++ 'ifa_valid': 2591703, 'cstamp': 2038, ++ 'tstamp': 77073215}), ++ ('IFA_ADDRESS', '2001:db8:ffff:ffff:ffff:ffff:ffff:ffff'), ++ ('IFA_FLAGS', 768)], ++ 'header': {'length': 72, 'type': 20, 'flags': 2, ++ 'sequence_number': 257, 'pid': 7590, 'error': None, ++ 'stats': Stats(qsize=0, delta=0, delay=0)}, ++ 'event': 'RTM_NEWADDR'}, ++ {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2, ++ 'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503, ++ 'ifa_valid': 2591703, 'cstamp': 2038, ++ 'tstamp': 77073215}), ++ ('IFA_ADDRESS', ip_address), ('IFA_FLAGS', 768)], ++ 'header': {'length': 72, 'type': 20, 'flags': 2, ++ 'sequence_number': 257, 'pid': 7590, 'error': None, ++ 'stats': Stats(qsize=0, delta=0, delay=0)}, ++ 'event': 'RTM_NEWADDR'},) ++ ++ ++def create_iproute_interface(interface_name): ++ """Returns a netlink/iproute (pyroute2) interface.""" ++ Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) ++ return [{ ++ 'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, ++ 'change': 0, ++ 'attrs': [('IFLA_TXQLEN', 1000), ('IFLA_IFNAME', interface_name), ++ ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0), ++ ('IFLA_MTU', 1500), ('IFLA_GROUP', 0), ++ ('IFLA_PROMISCUITY', 0), ('IFLA_NUM_TX_QUEUES', 1), ++ ('IFLA_GSO_MAX_SEGS', 65535), ++ ('IFLA_GSO_MAX_SIZE', 65536), ('IFLA_NUM_RX_QUEUES', 1), ++ ('IFLA_CARRIER', 1), ('IFLA_QDISC', 'fq_codel'), ++ ('IFLA_CARRIER_CHANGES', 2), ('IFLA_PROTO_DOWN', 0), ++ ('IFLA_CARRIER_UP_COUNT', 1), ++ ('IFLA_CARRIER_DOWN_COUNT', 1), ++ ('IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, ++ 'irq': 0, 'dma': 0, 'port': 0}), ++ ('IFLA_ADDRESS', '52:54:00:cf:37:9e'), ++ ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'), ++ ('IFLA_STATS64', { ++ 'rx_packets': 756091, 'tx_packets': 780292, ++ 'rx_bytes': 234846748, 'tx_bytes': 208583687, ++ 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, ++ 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, ++ 'rx_length_errors': 0, 'rx_over_errors': 0, ++ 'rx_crc_errors': 0, 'rx_frame_errors': 0, ++ 'rx_fifo_errors': 0, 'rx_missed_errors': 0, ++ 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, ++ 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, ++ 'tx_window_errors': 0, 'rx_compressed': 0, ++ 'tx_compressed': 0}), ++ ('IFLA_STATS', { ++ 'rx_packets': 756091, 'tx_packets': 780292, ++ 'rx_bytes': 234846748, 'tx_bytes': 208583687, ++ 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, ++ 
'tx_dropped': 0, 'multicast': 0, 'collisions': 0, ++ 'rx_length_errors': 0, 'rx_over_errors': 0, ++ 'rx_crc_errors': 0, 'rx_frame_errors': 0, ++ 'rx_fifo_errors': 0, 'rx_missed_errors': 0, ++ 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, ++ 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, ++ 'tx_window_errors': 0, 'rx_compressed': 0, ++ 'tx_compressed': 0}), ++ ('IFLA_XDP', '05:00:02:00:00:00:00:00'), ++ ('IFLA_AF_SPEC', { ++ 'attrs': [ ++ ('AF_INET', { ++ 'dummy': 65664, 'forwarding': 1, ++ 'mc_forwarding': 0, 'proxy_arp': 0, ++ 'accept_redirects': 1, ++ 'secure_redirects': 1, ++ 'send_redirects': 1, 'shared_media': 1, ++ 'rp_filter': 1, 'accept_source_route': 1, ++ 'bootp_relay': 0, 'log_martians': 0, ++ 'tag': 0, 'arpfilter': 0, 'medium_id': 0, ++ 'noxfrm': 0, 'nopolicy': 0, ++ 'force_igmp_version': 0, 'arp_announce': 0, ++ 'arp_ignore': 0, 'promote_secondaries': 0, ++ 'arp_accept': 0, 'arp_notify': 0, ++ 'accept_local': 0, 'src_vmark': 0, ++ 'proxy_arp_pvlan': 0, 'route_localnet': 0, ++ 'igmpv2_unsolicited_report_interval': 10000, ++ 'igmpv3_unsolicited_report_interval': 1000}), ++ ('AF_INET6', { ++ 'attrs': [('IFLA_INET6_FLAGS', 2147483648), ++ ('IFLA_INET6_CACHEINFO', { ++ 'max_reasm_len': 65535, ++ 'tstamp': 1859, ++ 'reachable_time': 30708, ++ 'retrans_time': 1000}), ++ ('IFLA_INET6_CONF', { ++ 'forwarding': 1, 'hop_limit': 64, ++ 'mtu': 1500, 'accept_ra': 2, ++ 'accept_redirects': 1, ++ 'autoconf': 1, ++ 'dad_transmits': 1, ++ 'router_solicitations': 4294967295, ++ 'router_solicitation_interval': ++ 4000, ++ 'router_solicitation_delay': 1000, ++ 'use_tempaddr': 0, ++ 'temp_valid_lft': 604800, ++ 'temp_preferred_lft': 86400, ++ 'regen_max_retry': 3, ++ 'max_desync_factor': 600, ++ 'max_addresses': 16, ++ 'force_mld_version': 0, ++ 'accept_ra_defrtr': 1, ++ 'accept_ra_pinfo': 1, ++ 'accept_ra_rtr_pref': 1, ++ 'router_probe_interval': 60000, ++ 'accept_ra_rt_info_max_plen': 0, ++ 'proxy_ndp': 0, ++ 'optimistic_dad': 0, ++ 'accept_source_route': 0, ++ 'mc_forwarding': 0, ++ 'disable_ipv6': 0, ++ 'accept_dad': 1, 'force_tllao': 0, ++ 'ndisc_notify': 0}), ++ ('IFLA_INET6_STATS', { ++ 'num': 37, 'inpkts': 57817, ++ 'inoctets': 144065857, ++ 'indelivers': 36758, ++ 'outforwdatagrams': 0, ++ 'outpkts': 35062, ++ 'outoctets': 4796485, ++ 'inhdrerrors': 0, ++ 'intoobigerrors': 0, ++ 'innoroutes': 0, 'inaddrerrors': 0, ++ 'inunknownprotos': 0, ++ 'intruncatedpkts': 0, ++ 'indiscards': 0, ++ 'outdiscards': 0, 'outnoroutes': 0, ++ 'reasmtimeout': 0, 'reasmreqds': 0, ++ 'reasmoks': 0, 'reasmfails': 0, ++ 'fragoks': 0, 'fragfails': 0, ++ 'fragcreates': 0, ++ 'inmcastpkts': 23214, ++ 'outmcastpkts': 6546, ++ 'inbcastpkts': 0, ++ 'outbcastpkts': 0, ++ 'inmcastoctets': 2255059, ++ 'outmcastoctets': 589090, ++ 'inbcastoctets': 0, ++ 'outbcastoctets': 0, ++ 'csumerrors': 0, ++ 'noectpkts': 57860, ++ 'ect1pkts': 0, 'ect0pkts': 0, ++ 'cepkts': 0}), ++ ('IFLA_INET6_ICMP6STATS', { ++ 'num': 6, 'inmsgs': 2337, ++ 'inerrors': 0, 'outmsgs': 176, ++ 'outerrors': 0, 'csumerrors': 0}), ++ ('IFLA_INET6_TOKEN', '::'), ++ ('IFLA_INET6_ADDR_GEN_MODE', 0)]})]})], ++ 'header': {'length': 1304, 'type': 16, 'flags': 0, ++ 'sequence_number': 261, 'pid': 7590, 'error': None, ++ 'stats': Stats(qsize=0, delta=0, delay=0)}, ++ 'state': 'up', 'event': 'RTM_NEWLINK'}] +Index: octavia-5.0.1/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py +=================================================================== +--- 
octavia-5.0.1.orig/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py ++++ octavia-5.0.1/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py +@@ -19,6 +19,8 @@ import subprocess + import flask + import mock + ++from oslo_config import cfg ++from oslo_config import fixture as oslo_fixture + from oslo_utils import uuidutils + + from octavia.amphorae.backends.agent.api_server import keepalivedlvs +@@ -186,10 +188,14 @@ class KeepalivedLvsTestCase(base.TestCas + self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, + m_os_sysinit, m_copy2, mock_netns, mock_install_netns, + mock_systemctl): +- m_exists.side_effect = [False, False, True, True, True, False, False] ++ m_exists.side_effect = [False, False, True, True, False, False, False] + cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) + m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open + ++ conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) ++ conf.config(group='controller_worker', ++ loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY) ++ + with mock.patch('os.open') as m_open, mock.patch.object(os, + 'fdopen', + m) as m_fdopen: +@@ -248,10 +254,10 @@ class KeepalivedLvsTestCase(base.TestCas + def test_upload_udp_listener_config_start_service_failure( + self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, + m_os_sysinit, m_copy2, mock_install_netns, mock_systemctl): +- m_exists.side_effect = [False, False, True, True, True, False] +- m_check_output.side_effect = subprocess.CalledProcessError(1, 'blah!') ++ m_exists.side_effect = [False, False, True, True, False] + cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) + m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open ++ mock_systemctl.side_effect = [mock.DEFAULT, Exception('boom')] + + with mock.patch('os.open') as m_open, mock.patch.object(os, + 'fdopen', +Index: octavia-5.0.1/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py ++++ octavia-5.0.1/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +@@ -271,8 +271,8 @@ class TestServerTestCase(base.TestCase): + + @mock.patch('os.listdir') + @mock.patch('os.path.exists') +- @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' +- 'Loadbalancer.vrrp_check_script_update') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'vrrp_check_script_update') + @mock.patch('subprocess.check_output') + def _test_start(self, distro, mock_subprocess, mock_vrrp, mock_exists, + mock_listdir): +@@ -347,8 +347,8 @@ class TestServerTestCase(base.TestCase): + + @mock.patch('os.listdir') + @mock.patch('os.path.exists') +- @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' +- 'Loadbalancer.vrrp_check_script_update') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'vrrp_check_script_update') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' + 'Loadbalancer._check_haproxy_status') + @mock.patch('subprocess.check_output') +@@ -461,8 +461,8 @@ class TestServerTestCase(base.TestCase): + @mock.patch('os.listdir') + @mock.patch('os.path.exists') + @mock.patch('subprocess.check_output') +- @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' 
+- 'Loadbalancer.vrrp_check_script_update') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'vrrp_check_script_update') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + + 'get_haproxy_pid') + @mock.patch('shutil.rmtree') +@@ -2322,6 +2322,8 @@ class TestServerTestCase(base.TestCase): + self._test_upload_keepalived_config(consts.INIT_SYSVINIT, + consts.UBUNTU, mock_init_system) + ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'vrrp_check_script_update') + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + @mock.patch('os.rename') +@@ -2330,7 +2332,8 @@ class TestServerTestCase(base.TestCase): + def _test_upload_keepalived_config(self, init_system, distro, + mock_init_system, mock_remove, + mock_subprocess, mock_rename, +- mock_makedirs, mock_exists): ++ mock_makedirs, mock_exists, ++ mock_vrrp_check): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC +@@ -2353,8 +2356,11 @@ class TestServerTestCase(base.TestCase): + mock_open.assert_called_with(cfg_path, flags, mode) + mock_fdopen.assert_called_with(123, 'wb') + self.assertEqual(200, rv.status_code) ++ mock_vrrp_check.assert_called_once_with(None, ++ consts.AMP_ACTION_START) + + mock_exists.return_value = False ++ mock_vrrp_check.reset_mock() + script_path = util.keepalived_check_script_path() + m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open + +@@ -2372,6 +2378,8 @@ class TestServerTestCase(base.TestCase): + mock_open.assert_called_with(script_path, flags, mode) + mock_fdopen.assert_called_with(123, 'w') + self.assertEqual(200, rv.status_code) ++ mock_vrrp_check.assert_called_once_with(None, ++ consts.AMP_ACTION_START) + + def test_ubuntu_manage_service_vrrp(self): + self._test_manage_service_vrrp(consts.UBUNTU) +Index: octavia-5.0.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py +@@ -15,6 +15,8 @@ + import subprocess + + import mock ++from oslo_config import cfg ++from oslo_config import fixture as oslo_fixture + from oslo_utils import uuidutils + + from octavia.amphorae.backends.agent.api_server import loadbalancer +@@ -23,6 +25,7 @@ from octavia.common import constants as + from octavia.tests.common import utils as test_utils + import octavia.tests.unit.base as base + ++CONF = cfg.CONF + LISTENER_ID1 = uuidutils.generate_uuid() + LB_ID1 = uuidutils.generate_uuid() + +@@ -34,39 +37,6 @@ class ListenerTestCase(base.TestCase): + self.mock_platform.return_value = "ubuntu" + self.test_loadbalancer = loadbalancer.Loadbalancer() + +- @mock.patch('os.makedirs') +- @mock.patch('os.path.exists') +- @mock.patch('os.listdir') +- @mock.patch('os.path.join') +- @mock.patch('octavia.amphorae.backends.agent.api_server.util.' +- 'get_loadbalancers') +- @mock.patch('octavia.amphorae.backends.agent.api_server.util' +- '.haproxy_sock_path') +- def test_vrrp_check_script_update(self, mock_sock_path, mock_get_lbs, +- mock_join, mock_listdir, mock_exists, +- mock_makedirs): +- mock_get_lbs.return_value = ['abc', LB_ID1] +- mock_sock_path.return_value = 'listener.sock' +- mock_exists.return_value = False +- cmd = 'haproxy-vrrp-check ' + ' '.join(['listener.sock']) + '; exit $?' 
+- +- path = agent_util.keepalived_dir() +- m = self.useFixture(test_utils.OpenFixture(path)).mock_open +- +- self.test_loadbalancer.vrrp_check_script_update(LB_ID1, 'stop') +- handle = m() +- handle.write.assert_called_once_with(cmd) +- +- mock_get_lbs.return_value = ['abc', LB_ID1] +- cmd = ('haproxy-vrrp-check ' + ' '.join(['listener.sock', +- 'listener.sock']) + '; exit ' +- '$?') +- +- m = self.useFixture(test_utils.OpenFixture(path)).mock_open +- self.test_loadbalancer.vrrp_check_script_update(LB_ID1, 'start') +- handle = m() +- handle.write.assert_called_once_with(cmd) +- + @mock.patch('os.path.exists') + @mock.patch('octavia.amphorae.backends.agent.api_server' + + '.util.get_haproxy_pid') +@@ -89,8 +59,8 @@ class ListenerTestCase(base.TestCase): + + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' + 'Loadbalancer._check_haproxy_status') +- @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' +- 'Loadbalancer.vrrp_check_script_update') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'vrrp_check_script_update') + @mock.patch('os.path.exists') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' + 'Loadbalancer._check_lb_exists') +@@ -100,6 +70,8 @@ class ListenerTestCase(base.TestCase): + mock_check_status): + listener_id = uuidutils.generate_uuid() + ++ conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) ++ + mock_path_exists.side_effect = [False, True, True, False, False] + mock_check_status.side_effect = ['bogus', consts.OFFLINE] + +@@ -122,6 +94,9 @@ class ListenerTestCase(base.TestCase): + self.assertEqual(ref_details, result.json['details']) + + # Happy path - VRRP - RELOAD ++ conf.config(group="controller_worker", ++ loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY) ++ + mock_lb_exists.reset_mock() + mock_vrrp_update.reset_mock() + mock_check_output.reset_mock() +@@ -168,6 +143,9 @@ class ListenerTestCase(base.TestCase): + self.assertEqual(ref_details, result.json['details']) + + # Unhappy path - Not already running ++ conf.config(group="controller_worker", ++ loadbalancer_topology=consts.TOPOLOGY_SINGLE) ++ + mock_lb_exists.reset_mock() + mock_vrrp_update.reset_mock() + mock_check_output.reset_mock() +Index: octavia-5.0.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +@@ -18,6 +18,7 @@ import shutil + + import mock + from oslo_config import fixture as oslo_fixture ++import six + + from octavia.amphorae.backends.agent.api_server import osutils + from octavia.common import config +@@ -204,13 +205,13 @@ class TestOSUtils(base.TestCase): + # matches the result of any arbitrary IPv4->prefixlen conversion + SUBNET_CIDR_IPV6 = u'2001:db8::/70' + +- ip = ipaddress.ip_address(FIXED_IP) ++ ip = ipaddress.ip_address(six.text_type(FIXED_IP)) + network = ipaddress.ip_network(SUBNET_CIDR) + broadcast = network.broadcast_address.exploded + netmask = network.netmask.exploded + netmask_prefix = utils.netmask_to_prefix(netmask) + +- ipv6 = ipaddress.ip_address(FIXED_IP_IPV6) ++ ipv6 = ipaddress.ip_address(six.text_type(FIXED_IP_IPV6)) + networkv6 = ipaddress.ip_network(SUBNET_CIDR_IPV6) + broadcastv6 = networkv6.broadcast_address.exploded + netmaskv6 = networkv6.prefixlen +Index: 
octavia-5.0.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +@@ -31,6 +31,7 @@ BASE_AMP_PATH = '/var/lib/octavia' + BASE_CRT_PATH = BASE_AMP_PATH + '/certs' + CONF = cfg.CONF + LISTENER_ID1 = uuidutils.generate_uuid() ++LB_ID1 = uuidutils.generate_uuid() + + + class TestUtil(base.TestCase): +@@ -279,3 +280,130 @@ class TestUtil(base.TestCase): + self.useFixture(test_utils.OpenFixture(path, fake_cfg)) + self.assertRaises(util.ParsingError, util.parse_haproxy_file, + LISTENER_ID1) ++ ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'get_udp_listeners') ++ @mock.patch('os.makedirs') ++ @mock.patch('os.path.exists') ++ @mock.patch('os.listdir') ++ @mock.patch('os.path.join') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'get_loadbalancers') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util' ++ '.haproxy_sock_path') ++ def test_vrrp_check_script_update(self, mock_sock_path, mock_get_lbs, ++ mock_join, mock_listdir, mock_exists, ++ mock_makedirs, mock_get_listeners): ++ mock_get_lbs.return_value = ['abc', LB_ID1] ++ mock_sock_path.return_value = 'listener.sock' ++ mock_exists.side_effect = [False, False, True] ++ mock_get_lbs.side_effect = [['abc', LB_ID1], ['abc', LB_ID1], []] ++ mock_get_listeners.return_value = [] ++ ++ # Test the stop action path ++ cmd = 'haproxy-vrrp-check ' + ' '.join(['listener.sock']) + '; exit $?' ++ path = util.keepalived_dir() ++ m = self.useFixture(test_utils.OpenFixture(path)).mock_open ++ ++ util.vrrp_check_script_update(LB_ID1, 'stop') ++ ++ handle = m() ++ handle.write.assert_called_once_with(cmd) ++ ++ # Test the start action path ++ cmd = ('haproxy-vrrp-check ' + ' '.join(['listener.sock', ++ 'listener.sock']) + '; exit ' ++ '$?') ++ m = self.useFixture(test_utils.OpenFixture(path)).mock_open ++ util.vrrp_check_script_update(LB_ID1, 'start') ++ handle = m() ++ handle.write.assert_called_once_with(cmd) ++ ++ # Test the path with existing keepalived directory and no LBs ++ mock_makedirs.reset_mock() ++ cmd = 'exit 1' ++ m = self.useFixture(test_utils.OpenFixture(path)).mock_open ++ ++ util.vrrp_check_script_update(LB_ID1, 'start') ++ ++ handle = m() ++ handle.write.assert_called_once_with(cmd) ++ mock_makedirs.assert_has_calls( ++ [mock.call(util.keepalived_dir()), ++ mock.call(util.keepalived_check_scripts_dir())]) ++ ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.config_path') ++ def test_get_haproxy_vip_addresses(self, mock_cfg_path): ++ FAKE_PATH = 'fake_path' ++ mock_cfg_path.return_value = FAKE_PATH ++ self.useFixture( ++ test_utils.OpenFixture(FAKE_PATH, 'no match')).mock_open() ++ ++ # Test with no matching lines in the config file ++ self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1)) ++ mock_cfg_path.assert_called_once_with(LB_ID1) ++ ++ # Test with a matching bind line ++ mock_cfg_path.reset_mock() ++ test_data = 'no match\nbind 203.0.113.43:1\nbogus line' ++ self.useFixture( ++ test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() ++ expected_result = ['203.0.113.43'] ++ self.assertEqual(expected_result, ++ util.get_haproxy_vip_addresses(LB_ID1)) ++ mock_cfg_path.assert_called_once_with(LB_ID1) ++ ++ # Test with a matching bind line multiple binds ++ mock_cfg_path.reset_mock() ++ test_data = 'no 
match\nbind 203.0.113.44:1234, 203.0.113.45:4321' ++ self.useFixture( ++ test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() ++ expected_result = ['203.0.113.44', '203.0.113.45'] ++ self.assertEqual(expected_result, ++ util.get_haproxy_vip_addresses(LB_ID1)) ++ mock_cfg_path.assert_called_once_with(LB_ID1) ++ ++ # Test with a bogus bind line ++ mock_cfg_path.reset_mock() ++ test_data = 'no match\nbind\nbogus line' ++ self.useFixture( ++ test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() ++ self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1)) ++ mock_cfg_path.assert_called_once_with(LB_ID1) ++ ++ @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' ++ 'send_ip_advertisement') ++ @mock.patch('octavia.amphorae.backends.utils.network_utils.' ++ 'get_interface_name') ++ @mock.patch('octavia.amphorae.backends.agent.api_server.util.' ++ 'get_haproxy_vip_addresses') ++ def test_send_vip_advertisements(self, mock_get_vip_addrs, ++ mock_get_int_name, mock_send_advert): ++ mock_get_vip_addrs.side_effect = [[], ['203.0.113.46'], ++ Exception('boom')] ++ mock_get_int_name.return_value = 'fake0' ++ ++ # Test no VIPs ++ util.send_vip_advertisements(LB_ID1) ++ mock_get_vip_addrs.assert_called_once_with(LB_ID1) ++ mock_get_int_name.assert_not_called() ++ mock_send_advert.assert_not_called() ++ ++ # Test with a VIP ++ mock_get_vip_addrs.reset_mock() ++ mock_get_int_name.reset_mock() ++ mock_send_advert.reset_mock() ++ util.send_vip_advertisements(LB_ID1) ++ mock_get_vip_addrs.assert_called_once_with(LB_ID1) ++ mock_get_int_name.assert_called_once_with( ++ '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE) ++ mock_send_advert.assert_called_once_with( ++ 'fake0', '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE) ++ ++ # Test with an exception (should not raise) ++ mock_get_vip_addrs.reset_mock() ++ mock_get_int_name.reset_mock() ++ mock_send_advert.reset_mock() ++ util.send_vip_advertisements(LB_ID1) ++ mock_get_int_name.assert_not_called() ++ mock_send_advert.assert_not_called() +Index: octavia-5.0.1/octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py +@@ -0,0 +1,213 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++from binascii import a2b_hex ++import socket ++from struct import pack ++ ++import mock ++ ++from octavia.amphorae.backends.utils import ip_advertisement ++from octavia.common import constants ++import octavia.tests.unit.base as base ++ ++ ++class TestIPAdvertisement(base.TestCase): ++ ++ def setUp(self): ++ super(TestIPAdvertisement, self).setUp() ++ ++ @mock.patch('octavia.amphorae.backends.utils.network_namespace.' 
++ 'NetworkNamespace') ++ @mock.patch('socket.AF_PACKET', create=True) ++ @mock.patch('socket.socket') ++ def test_garp(self, mock_socket, mock_socket_packet, mock_netns): ++ ARP_ETHERTYPE = 0x0806 ++ EXPECTED_PACKET_DATA = (b'\xff\xff\xff\xff\xff\xff\x00\x00^\x00S3\x08' ++ b'\x06\x00\x01\x08\x00\x06\x04\x00\x01\x00' ++ b'\x00^\x00S3\xcb\x00q\x02\xff\xff\xff\xff' ++ b'\xff\xff\xcb\x00q\x02') ++ FAKE_INTERFACE = 'fake0' ++ FAKE_MAC = '00005E005333' ++ FAKE_NETNS = 'fake_netns' ++ ++ mock_garp_socket = mock.MagicMock() ++ mock_garp_socket.getsockname.return_value = [None, None, None, None, ++ a2b_hex(FAKE_MAC)] ++ mock_socket.return_value = mock_garp_socket ++ ++ # Test with a network namespace ++ ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2', net_ns=FAKE_NETNS) ++ ++ mock_netns.assert_called_once_with(FAKE_NETNS) ++ mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE, ++ ARP_ETHERTYPE)) ++ mock_garp_socket.getsockname.assert_called_once_with() ++ mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA) ++ mock_garp_socket.close.assert_called_once_with() ++ ++ # Test without a network namespace ++ mock_netns.reset_mock() ++ mock_garp_socket.reset_mock() ++ ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2') ++ ++ mock_netns.assert_not_called() ++ mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE, ++ ARP_ETHERTYPE)) ++ mock_garp_socket.getsockname.assert_called_once_with() ++ mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA) ++ mock_garp_socket.close.assert_called_once_with() ++ ++ def test_calculate_icmpv6_checksum(self): ++ TEST_PACKET1 = ( ++ b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02' ++ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00' ++ b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00' ++ b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00' ++ b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00') ++ TEST_PACKET2 = ( ++ b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02' ++ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00' ++ b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00' ++ b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00' ++ b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00\x01') ++ ++ self.assertEqual( ++ 35645, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET1)) ++ self.assertEqual( ++ 35389, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET2)) ++ ++ @mock.patch('fcntl.ioctl') ++ @mock.patch('octavia.amphorae.backends.utils.network_namespace.' 
++ 'NetworkNamespace') ++ @mock.patch('socket.socket') ++ def test_neighbor_advertisement(self, mock_socket, mock_netns, mock_ioctl): ++ ALL_NODES_ADDR = 'ff02::1' ++ EXPECTED_PACKET_DATA = (b'\x88\x00\x1dk\xa0\x00\x00\x00 \x01\r\xb8\x00' ++ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003' ++ b'\x02\x01') ++ FAKE_INTERFACE = 'fake0' ++ FAKE_MAC = '00005E005333' ++ FAKE_NETNS = 'fake_netns' ++ ICMPV6_PROTO = socket.getprotobyname(constants.IPV6_ICMP) ++ SIOCGIFHWADDR = 0x8927 ++ SOURCE_IP = '2001:db8::33' ++ ++ mock_na_socket = mock.MagicMock() ++ mock_socket.return_value = mock_na_socket ++ mock_ioctl.return_value = a2b_hex(FAKE_MAC) ++ ++ # Test with a network namespace ++ ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP, ++ net_ns=FAKE_NETNS) ++ ++ mock_netns.assert_called_once_with(FAKE_NETNS) ++ mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW, ++ ICMPV6_PROTO) ++ mock_na_socket.setsockopt.assert_called_once_with( ++ socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) ++ mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0)) ++ mock_ioctl.assert_called_once_with( ++ mock_na_socket.fileno(), SIOCGIFHWADDR, ++ pack('256s', bytes(FAKE_INTERFACE.encode('utf-8')))) ++ mock_na_socket.sendto.assert_called_once_with( ++ EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0)) ++ mock_na_socket.close.assert_called_once_with() ++ ++ # Test without a network namespace ++ mock_na_socket.reset_mock() ++ mock_netns.reset_mock() ++ mock_ioctl.reset_mock() ++ mock_socket.reset_mock() ++ ++ ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP) ++ ++ mock_netns.assert_not_called() ++ mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW, ++ ICMPV6_PROTO) ++ mock_na_socket.setsockopt.assert_called_once_with( ++ socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) ++ mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0)) ++ mock_ioctl.assert_called_once_with( ++ mock_na_socket.fileno(), SIOCGIFHWADDR, ++ pack('256s', bytes(FAKE_INTERFACE.encode('utf-8')))) ++ mock_na_socket.sendto.assert_called_once_with( ++ EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0)) ++ mock_na_socket.close.assert_called_once_with() ++ ++ @mock.patch('octavia.common.utils.is_ipv6') ++ @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.garp') ++ @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' 
++ 'neighbor_advertisement') ++ def test_send_ip_advertisement(self, mock_na, mock_garp, mock_is_ipv6): ++ FAKE_INTERFACE = 'fake0' ++ FAKE_NETNS = 'fake_netns' ++ IPV4_ADDRESS = '203.0.113.9' ++ IPV6_ADDRESS = '2001:db8::33' ++ ++ mock_is_ipv6.side_effect = [mock.DEFAULT, mock.DEFAULT, False] ++ ++ # Test IPv4 advertisement ++ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS) ++ ++ mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS, None) ++ mock_na.assert_not_called() ++ ++ # Test IPv4 advertisement with a network namespace ++ mock_garp.reset_mock() ++ mock_na.reset_mock() ++ ++ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS, ++ net_ns=FAKE_NETNS) ++ ++ mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS, ++ FAKE_NETNS) ++ mock_na.assert_not_called() ++ ++ # Test IPv6 advertisement ++ mock_garp.reset_mock() ++ mock_na.reset_mock() ++ ++ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS) ++ ++ mock_garp.assert_not_called() ++ mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS, None) ++ ++ # Test IPv6 advertisement with a network namespace ++ mock_garp.reset_mock() ++ mock_na.reset_mock() ++ ++ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS, ++ net_ns=FAKE_NETNS) ++ ++ mock_garp.assert_not_called() ++ mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS, ++ FAKE_NETNS) ++ ++ # Test bogus IP ++ mock_garp.reset_mock() ++ mock_na.reset_mock() ++ ++ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, 'not an IP') ++ ++ mock_garp.assert_not_called() ++ mock_na.assert_not_called() ++ ++ # Test unknown IP version ++ mock_garp.reset_mock() ++ mock_na.reset_mock() ++ ++ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS) ++ ++ mock_garp.assert_not_called() ++ mock_na.assert_not_called() +Index: octavia-5.0.1/octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py +@@ -0,0 +1,117 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++import random ++ ++import mock ++ ++from octavia.amphorae.backends.utils import network_namespace ++from octavia.tests.common import utils as test_utils ++import octavia.tests.unit.base as base ++ ++ ++class TestNetworkNamespace(base.TestCase): ++ ++ def setUp(self): ++ super(TestNetworkNamespace, self).setUp() ++ ++ @mock.patch('ctypes.get_errno') ++ @mock.patch('ctypes.CDLL') ++ def test_error_handler(self, mock_cdll, mock_get_errno): ++ FAKE_NETNS = 'fake-netns' ++ netns = network_namespace.NetworkNamespace(FAKE_NETNS) ++ ++ # Test result 0 ++ netns._error_handler(0, None, None) ++ ++ mock_get_errno.assert_not_called() ++ ++ # Test result -1 ++ mock_get_errno.reset_mock() ++ ++ self.assertRaises(OSError, netns._error_handler, -1, None, None) ++ ++ mock_get_errno.assert_called_once_with() ++ ++ @mock.patch('os.getpid') ++ @mock.patch('ctypes.CDLL') ++ def test_init(self, mock_cdll, mock_getpid): ++ FAKE_NETNS = 'fake-netns' ++ FAKE_PID = random.randrange(100000) ++ mock_cdll_obj = mock.MagicMock() ++ mock_cdll.return_value = mock_cdll_obj ++ mock_getpid.return_value = FAKE_PID ++ expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID) ++ expected_target_netns = '/var/run/netns/{netns}'.format( ++ netns=FAKE_NETNS) ++ ++ netns = network_namespace.NetworkNamespace(FAKE_NETNS) ++ ++ self.assertEqual(expected_current_netns, netns.current_netns) ++ self.assertEqual(expected_target_netns, netns.target_netns) ++ self.assertEqual(mock_cdll_obj.setns, netns.set_netns) ++ self.assertEqual(netns.set_netns.errcheck, netns._error_handler) ++ ++ @mock.patch('os.getpid') ++ @mock.patch('ctypes.CDLL') ++ def test_enter(self, mock_cdll, mock_getpid): ++ CLONE_NEWNET = 0x40000000 ++ FAKE_NETNS = 'fake-netns' ++ FAKE_PID = random.randrange(100000) ++ current_netns_fd = random.randrange(100000) ++ target_netns_fd = random.randrange(100000) ++ mock_getpid.return_value = FAKE_PID ++ mock_cdll_obj = mock.MagicMock() ++ mock_cdll.return_value = mock_cdll_obj ++ expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID) ++ expected_target_netns = '/var/run/netns/{netns}'.format( ++ netns=FAKE_NETNS) ++ ++ netns = network_namespace.NetworkNamespace(FAKE_NETNS) ++ ++ current_mock_open = self.useFixture( ++ test_utils.OpenFixture(expected_current_netns)).mock_open ++ current_mock_open.return_value = current_netns_fd ++ ++ target_mock_open = self.useFixture( ++ test_utils.OpenFixture(expected_target_netns)).mock_open ++ handle = target_mock_open() ++ handle.fileno.return_value = target_netns_fd ++ ++ netns.__enter__() ++ ++ self.assertEqual(current_netns_fd, netns.current_netns_fd) ++ netns.set_netns.assert_called_once_with(target_netns_fd, CLONE_NEWNET) ++ ++ @mock.patch('os.getpid') ++ @mock.patch('ctypes.CDLL') ++ def test_exit(self, mock_cdll, mock_getpid): ++ CLONE_NEWNET = 0x40000000 ++ FAKE_NETNS = 'fake-netns' ++ FAKE_PID = random.randrange(100000) ++ current_netns_fileno = random.randrange(100000) ++ mock_getpid.return_value = FAKE_PID ++ mock_cdll_obj = mock.MagicMock() ++ mock_cdll.return_value = mock_cdll_obj ++ mock_current_netns_fd = mock.MagicMock() ++ mock_current_netns_fd.fileno.return_value = current_netns_fileno ++ ++ netns = network_namespace.NetworkNamespace(FAKE_NETNS) ++ ++ netns.current_netns_fd = mock_current_netns_fd ++ ++ netns.__exit__() ++ ++ netns.set_netns.assert_called_once_with(current_netns_fileno, ++ CLONE_NEWNET) ++ mock_current_netns_fd.close.assert_called_once_with() +Index: octavia-5.0.1/octavia/tests/unit/amphorae/backends/utils/test_network_utils.py 
+=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/unit/amphorae/backends/utils/test_network_utils.py +@@ -0,0 +1,140 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++import mock ++ ++from octavia.amphorae.backends.utils import network_utils ++from octavia.common import exceptions ++from octavia.tests.common import sample_network_data ++import octavia.tests.unit.base as base ++ ++ ++class TestNetworkUtils(base.TestCase): ++ ++ def setUp(self): ++ super(TestNetworkUtils, self).setUp() ++ ++ def test_find_interface(self): ++ FAKE_INTERFACE = 'fake0' ++ IPV4_ADDRESS = '203.0.113.55' ++ BROADCAST_ADDRESS = '203.0.113.55' ++ IPV6_ADDRESS = '2001:db8::55' ++ SAMPLE_IPV4_ADDR = sample_network_data.create_iproute_ipv4_address( ++ IPV4_ADDRESS, BROADCAST_ADDRESS, FAKE_INTERFACE) ++ SAMPLE_IPV6_ADDR = sample_network_data.create_iproute_ipv6_address( ++ IPV6_ADDRESS, FAKE_INTERFACE) ++ SAMPLE_INTERFACE = sample_network_data.create_iproute_interface( ++ FAKE_INTERFACE) ++ BROKEN_INTERFACE = [{'attrs': []}] ++ ++ mock_ip_addr = mock.MagicMock() ++ mock_rtnl_api = mock.MagicMock() ++ mock_rtnl_api.get_addr.side_effect = [[], SAMPLE_IPV4_ADDR, ++ SAMPLE_IPV6_ADDR, ++ SAMPLE_IPV6_ADDR] ++ mock_rtnl_api.get_links.side_effect = [SAMPLE_INTERFACE, ++ SAMPLE_INTERFACE, ++ BROKEN_INTERFACE] ++ ++ # Test no match ++ IPV4_ADDRESS = '203.0.113.55' ++ mock_ip_addr.version = 4 ++ ++ self.assertIsNone(network_utils._find_interface(IPV4_ADDRESS, ++ mock_rtnl_api, ++ IPV4_ADDRESS)) ++ ++ # Test with IPv4 address ++ mock_rtnl_api.reset_mock() ++ mock_ip_addr.version = 4 ++ ++ result = network_utils._find_interface(IPV4_ADDRESS, mock_rtnl_api, ++ IPV4_ADDRESS) ++ ++ self.assertEqual(FAKE_INTERFACE, result) ++ mock_rtnl_api.get_addr.assert_called_once_with(address=IPV4_ADDRESS) ++ mock_rtnl_api.get_links.assert_called_once_with(2) ++ ++ # Test with IPv6 address ++ mock_rtnl_api.reset_mock() ++ mock_ip_addr.version = 6 ++ ++ result = network_utils._find_interface(IPV6_ADDRESS, mock_rtnl_api, ++ IPV6_ADDRESS) ++ ++ self.assertEqual(FAKE_INTERFACE, result) ++ mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS) ++ mock_rtnl_api.get_links.assert_called_once_with(2) ++ ++ # Test with a broken interface ++ mock_rtnl_api.reset_mock() ++ mock_ip_addr.version = 6 ++ ++ self.assertIsNone(network_utils._find_interface(IPV6_ADDRESS, ++ mock_rtnl_api, ++ IPV6_ADDRESS)) ++ mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS) ++ mock_rtnl_api.get_links.assert_called_once_with(2) ++ ++ @mock.patch('octavia.amphorae.backends.utils.network_utils.' 
++ '_find_interface') ++ @mock.patch('pyroute2.IPRoute', create=True) ++ @mock.patch('pyroute2.NetNS', create=True) ++ def test_get_interface_name(self, mock_netns, mock_ipr, mock_find_int): ++ FAKE_INTERFACE = 'fake0' ++ FAKE_NETNS = 'fake-ns' ++ IPV4_ADDRESS = '203.0.113.64' ++ ++ mock_ipr_enter_obj = mock.MagicMock() ++ mock_ipr_obj = mock.MagicMock() ++ mock_ipr_obj.__enter__.return_value = mock_ipr_enter_obj ++ mock_ipr.return_value = mock_ipr_obj ++ ++ mock_netns_enter_obj = mock.MagicMock() ++ mock_netns_obj = mock.MagicMock() ++ mock_netns_obj.__enter__.return_value = mock_netns_enter_obj ++ mock_netns.return_value = mock_netns_obj ++ ++ mock_find_int.side_effect = [FAKE_INTERFACE, FAKE_INTERFACE, None] ++ ++ # Test a bogus IP address ++ self.assertRaises(exceptions.InvalidIPAddress, ++ network_utils.get_interface_name, 'not an IP', None) ++ ++ # Test with no network namespace ++ result = network_utils.get_interface_name(IPV4_ADDRESS) ++ ++ self.assertEqual(FAKE_INTERFACE, result) ++ mock_ipr.assert_called_once_with() ++ mock_find_int.assert_called_once_with(IPV4_ADDRESS, mock_ipr_enter_obj, ++ IPV4_ADDRESS) ++ ++ # Test with network namespace ++ mock_ipr.reset_mock() ++ mock_find_int.reset_mock() ++ ++ result = network_utils.get_interface_name(IPV4_ADDRESS, ++ net_ns=FAKE_NETNS) ++ self.assertEqual(FAKE_INTERFACE, result) ++ mock_netns.assert_called_once_with(FAKE_NETNS) ++ mock_find_int.assert_called_once_with(IPV4_ADDRESS, ++ mock_netns_enter_obj, ++ IPV4_ADDRESS) ++ ++ # Test no interface found ++ mock_ipr.reset_mock() ++ mock_find_int.reset_mock() ++ ++ self.assertRaises( ++ exceptions.NotFound, network_utils.get_interface_name, ++ IPV4_ADDRESS, net_ns=FAKE_NETNS) +Index: octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py +@@ -0,0 +1,52 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++import mock ++ ++from octavia.amphorae.drivers.haproxy import exceptions ++import octavia.tests.unit.base as base ++ ++ ++class TestHAProxyExceptions(base.TestCase): ++ ++ def setUp(self): ++ super(TestHAProxyExceptions, self).setUp() ++ ++ @mock.patch('octavia.amphorae.drivers.haproxy.exceptions.LOG') ++ def test_check_exception(self, mock_logger): ++ ++ response_mock = mock.MagicMock() ++ ++ # Test exception that should raise and log ++ response_mock.status_code = 404 ++ ++ self.assertRaises(exceptions.NotFound, exceptions.check_exception, ++ response_mock) ++ mock_logger.error.assert_called_once() ++ ++ # Test exception that should raise but not log ++ mock_logger.reset_mock() ++ response_mock.status_code = 403 ++ ++ self.assertRaises(exceptions.Forbidden, exceptions.check_exception, ++ response_mock, log_error=False) ++ mock_logger.error.assert_not_called() ++ ++ # Test exception that should be ignored ++ mock_logger.reset_mock() ++ response_mock.status_code = 401 ++ ++ result = exceptions.check_exception(response_mock, ignore=[401]) ++ ++ mock_logger.error.assert_not_called() ++ self.assertEqual(response_mock, result) +Index: octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py +@@ -451,7 +451,30 @@ class TestHaproxyAmphoraLoadBalancerDriv + self.driver.start(loadbalancer) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( +- amp1, listener.id) ++ amp1, listener.id, None) ++ ++ def test_reload(self): ++ amp1 = mock.MagicMock() ++ amp1.api_version = API_VERSION ++ amp2 = mock.MagicMock() ++ amp2.api_version = API_VERSION ++ amp2.status = constants.DELETED ++ loadbalancer = mock.MagicMock() ++ loadbalancer.id = uuidutils.generate_uuid() ++ loadbalancer.amphorae = [amp1, amp2] ++ loadbalancer.vip = self.sv ++ listener = mock.MagicMock() ++ listener.id = uuidutils.generate_uuid() ++ listener.protocol = constants.PROTOCOL_HTTP ++ loadbalancer.listeners = [listener] ++ listener.load_balancer = loadbalancer ++ self.driver.clients[ ++ API_VERSION].reload_listener.__name__ = 'reload_listener' ++ # Execute driver method ++ self.driver.reload(loadbalancer) ++ self.driver.clients[ ++ API_VERSION].reload_listener.assert_called_once_with( ++ amp1, listener.id, None) + + def test_start_with_amphora(self): + # Execute driver method +@@ -461,7 +484,7 @@ class TestHaproxyAmphoraLoadBalancerDriv + self.driver.start(self.lb, self.amp) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( +- self.amp, self.sl.id) ++ self.amp, self.sl.id, None) + + self.driver.clients[API_VERSION].start_listener.reset_mock() + amp.status = constants.DELETED +@@ -475,7 +498,7 @@ class TestHaproxyAmphoraLoadBalancerDriv + self.driver.start(self.lb_udp) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( +- self.amp, self.sl_udp.id) ++ self.amp, self.sl_udp.id, None) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
+ 'HaproxyAmphoraLoadBalancerDriver._process_secret') +@@ -618,11 +641,6 @@ class TestHaproxyAmphoraLoadBalancerDriv + fixed_ips=expected_fixed_ips, + mtu=FAKE_MTU)) + +- def test_get_vrrp_interface(self): +- self.driver.get_vrrp_interface(self.amp) +- self.driver.clients[API_VERSION].get_interface.assert_called_once_with( +- self.amp, self.amp.vrrp_ip, timeout_dict=None) +- + def test_get_haproxy_versions(self): + ref_haproxy_versions = ['1', '6'] + result = self.driver._get_haproxy_versions(self.amp) +Index: octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +@@ -451,7 +451,30 @@ class TestHaproxyAmphoraLoadBalancerDriv + self.driver.start(loadbalancer) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( +- amp1, loadbalancer.id) ++ amp1, loadbalancer.id, None) ++ ++ def test_reload(self): ++ amp1 = mock.MagicMock() ++ amp1.api_version = API_VERSION ++ amp2 = mock.MagicMock() ++ amp2.api_version = API_VERSION ++ amp2.status = constants.DELETED ++ loadbalancer = mock.MagicMock() ++ loadbalancer.id = uuidutils.generate_uuid() ++ loadbalancer.amphorae = [amp1, amp2] ++ loadbalancer.vip = self.sv ++ listener = mock.MagicMock() ++ listener.id = uuidutils.generate_uuid() ++ listener.protocol = constants.PROTOCOL_HTTP ++ loadbalancer.listeners = [listener] ++ listener.load_balancer = loadbalancer ++ self.driver.clients[ ++ API_VERSION].reload_listener.__name__ = 'reload_listener' ++ # Execute driver method ++ self.driver.reload(loadbalancer) ++ self.driver.clients[ ++ API_VERSION].reload_listener.assert_called_once_with( ++ amp1, loadbalancer.id, None) + + def test_start_with_amphora(self): + # Execute driver method +@@ -461,7 +484,7 @@ class TestHaproxyAmphoraLoadBalancerDriv + self.driver.start(self.lb, self.amp) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( +- self.amp, self.lb.id) ++ self.amp, self.lb.id, None) + + self.driver.clients[API_VERSION].start_listener.reset_mock() + amp.status = constants.DELETED +@@ -475,7 +498,7 @@ class TestHaproxyAmphoraLoadBalancerDriv + self.driver.start(self.lb_udp) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( +- self.amp, self.sl_udp.id) ++ self.amp, self.sl_udp.id, None) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver._process_secret') +@@ -711,11 +734,6 @@ class TestHaproxyAmphoraLoadBalancerDriv + fixed_ips=expected_fixed_ips, + mtu=FAKE_MTU)) + +- def test_get_vrrp_interface(self): +- self.driver.get_vrrp_interface(self.amp) +- self.driver.clients[API_VERSION].get_interface.assert_called_once_with( +- self.amp, self.amp.vrrp_ip, timeout_dict=None) +- + def test_get_haproxy_versions(self): + ref_haproxy_versions = ['1', '6'] + result = self.driver._get_haproxy_versions(self.amp) +Index: octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_common.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_common.py +@@ -0,0 +1,83 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. 
++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++import mock ++ ++from octavia.amphorae.drivers.haproxy import exceptions as exc ++from octavia.amphorae.drivers.haproxy import rest_api_driver ++import octavia.tests.unit.base as base ++ ++ ++class TestHAProxyAmphoraDriver(base.TestCase): ++ ++ def setUp(self): ++ super(TestHAProxyAmphoraDriver, self).setUp() ++ self.driver = rest_api_driver.HaproxyAmphoraLoadBalancerDriver() ++ ++ @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' ++ 'HaproxyAmphoraLoadBalancerDriver.' ++ '_populate_amphora_api_version') ++ def test_get_interface_from_ip(self, mock_api_version): ++ FAKE_INTERFACE = 'fake0' ++ IP_ADDRESS = '203.0.113.42' ++ TIMEOUT_DICT = {'outa': 'time'} ++ amphora_mock = mock.MagicMock() ++ amphora_mock.api_version = '0' ++ client_mock = mock.MagicMock() ++ client_mock.get_interface.side_effect = [ ++ {'interface': FAKE_INTERFACE}, {'interface': FAKE_INTERFACE}, ++ {}, exc.NotFound] ++ self.driver.clients['0'] = client_mock ++ ++ # Test interface found no timeout ++ ++ result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) ++ ++ self.assertEqual(FAKE_INTERFACE, result) ++ mock_api_version.assert_called_once_with(amphora_mock, None) ++ client_mock.get_interface.assert_called_once_with( ++ amphora_mock, IP_ADDRESS, None, log_error=False) ++ ++ # Test interface found with timeout ++ mock_api_version.reset_mock() ++ client_mock.reset_mock() ++ ++ result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS, ++ timeout_dict=TIMEOUT_DICT) ++ ++ self.assertEqual(FAKE_INTERFACE, result) ++ mock_api_version.assert_called_once_with(amphora_mock, TIMEOUT_DICT) ++ client_mock.get_interface.assert_called_once_with( ++ amphora_mock, IP_ADDRESS, TIMEOUT_DICT, log_error=False) ++ ++ # Test no interface data ++ mock_api_version.reset_mock() ++ client_mock.reset_mock() ++ ++ result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) ++ ++ self.assertIsNone(result) ++ mock_api_version.assert_called_once_with(amphora_mock, None) ++ client_mock.get_interface.assert_called_once_with( ++ amphora_mock, IP_ADDRESS, None, log_error=False) ++ ++ # Test NotFound ++ mock_api_version.reset_mock() ++ client_mock.reset_mock() ++ ++ result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) ++ ++ self.assertIsNone(result) ++ mock_api_version.assert_called_once_with(amphora_mock, None) ++ client_mock.get_interface.assert_called_once_with( ++ amphora_mock, IP_ADDRESS, None, log_error=False) +Index: octavia-5.0.1/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +@@ -53,13 +53,27 @@ class TestVRRPRestDriver(base.TestCase): + + mock_templater.return_value = self.FAKE_CONFIG + +- self.keepalived_mixin.update_vrrp_conf(self.lb_mock, +- 
self.amphorae_network_config) ++ self.keepalived_mixin.update_vrrp_conf( ++ self.lb_mock, self.amphorae_network_config, self.amphora_mock) + + self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with( + self.amphora_mock, + self.FAKE_CONFIG) + ++ # Test amphora not in AMPHORA_ALLOCATED state ++ mock_templater.reset_mock() ++ self.clients[API_VERSION].upload_vrrp_config.reset_mock() ++ ready_amphora_mock = mock.MagicMock() ++ ready_amphora_mock.id = uuidutils.generate_uuid() ++ ready_amphora_mock.status = constants.AMPHORA_READY ++ ready_amphora_mock.api_version = API_VERSION ++ ++ self.keepalived_mixin.update_vrrp_conf( ++ self.lb_mock, self.amphorae_network_config, ready_amphora_mock) ++ ++ mock_templater.assert_not_called() ++ self.clients[API_VERSION].upload_vrrp_config.assert_not_called() ++ + def test_stop_vrrp_service(self): + + self.keepalived_mixin.stop_vrrp_service(self.lb_mock) +@@ -69,10 +83,21 @@ class TestVRRPRestDriver(base.TestCase): + + def test_start_vrrp_service(self): + +- self.keepalived_mixin.start_vrrp_service(self.lb_mock) ++ self.keepalived_mixin.start_vrrp_service(self.amphora_mock) + + self.clients[API_VERSION].start_vrrp.assert_called_once_with( +- self.amphora_mock) ++ self.amphora_mock, timeout_dict=None) ++ ++ # Test amphora not in AMPHORA_ALLOCATED state ++ self.clients[API_VERSION].start_vrrp.reset_mock() ++ ready_amphora_mock = mock.MagicMock() ++ ready_amphora_mock.id = uuidutils.generate_uuid() ++ ready_amphora_mock.status = constants.AMPHORA_READY ++ ready_amphora_mock.api_version = API_VERSION ++ ++ self.keepalived_mixin.start_vrrp_service(ready_amphora_mock) ++ ++ self.clients[API_VERSION].start_vrrp.assert_not_called() + + def test_reload_vrrp_service(self): + +Index: octavia-5.0.1/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py ++++ octavia-5.0.1/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py +@@ -49,7 +49,7 @@ class TestNoopAmphoraLoadBalancerDriver( + self.listener.id = uuidutils.generate_uuid() + self.listener.protocol_port = 80 + self.vip = data_models.Vip() +- self.vip.ip_address = "10.0.0.1" ++ self.vip.ip_address = "192.51.100.1" + self.amphora = data_models.Amphora() + self.amphora.id = self.FAKE_UUID_1 + self.load_balancer = data_models.LoadBalancer( +@@ -152,3 +152,12 @@ class TestNoopAmphoraLoadBalancerDriver( + 'update_amphora_agent_config'), + self.driver.driver.amphoraconfig[( + self.amphora.id, self.agent_config)]) ++ ++ def test_get_interface_from_ip(self): ++ result = self.driver.get_interface_from_ip(self.amphora, ++ '198.51.100.99') ++ self.assertEqual('noop0', result) ++ ++ result = self.driver.get_interface_from_ip(self.amphora, ++ '198.51.100.9') ++ self.assertIsNone(result) +Index: octavia-5.0.1/octavia/tests/unit/common/test_utils.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/common/test_utils.py ++++ octavia-5.0.1/octavia/tests/unit/common/test_utils.py +@@ -11,7 +11,11 @@ + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. 
++import mock + ++from oslo_utils import uuidutils ++ ++from octavia.common import constants + import octavia.common.utils as utils + import octavia.tests.unit.base as base + +@@ -21,6 +25,14 @@ class TestConfig(base.TestCase): + def test_get_hostname(self): + self.assertNotEqual(utils.get_hostname(), '') + ++ def test_is_ipv4(self): ++ self.assertTrue(utils.is_ipv4('192.0.2.10')) ++ self.assertTrue(utils.is_ipv4('169.254.0.10')) ++ self.assertTrue(utils.is_ipv4('0.0.0.0')) ++ self.assertFalse(utils.is_ipv4('::')) ++ self.assertFalse(utils.is_ipv4('2001:db8::1')) ++ self.assertFalse(utils.is_ipv4('fe80::225:90ff:fefb:53ad')) ++ + def test_is_ipv6(self): + self.assertFalse(utils.is_ipv6('192.0.2.10')) + self.assertFalse(utils.is_ipv6('169.254.0.10')) +@@ -75,3 +87,22 @@ class TestConfig(base.TestCase): + ] + for str, sha1 in str_to_sha1: + self.assertEqual(sha1, utils.base64_sha1_string(str)) ++ ++ @mock.patch('stevedore.driver.DriverManager') ++ def test_get_amphora_driver(self, mock_stevedore_driver): ++ FAKE_AMP_DRIVER = 'fake_amp_drvr' ++ driver_mock = mock.MagicMock() ++ driver_mock.driver = FAKE_AMP_DRIVER ++ mock_stevedore_driver.return_value = driver_mock ++ ++ result = utils.get_amphora_driver() ++ ++ self.assertEqual(FAKE_AMP_DRIVER, result) ++ ++ def test_get_vip_secuirty_group_name(self): ++ FAKE_LB_ID = uuidutils.generate_uuid() ++ self.assertIsNone(utils.get_vip_security_group_name(None)) ++ ++ expected_sg_name = constants.VIP_SECURITY_GROUP_PREFIX + FAKE_LB_ID ++ self.assertEqual(expected_sg_name, ++ utils.get_vip_security_group_name(FAKE_LB_ID)) +Index: octavia-5.0.1/octavia/tests/unit/compute/drivers/test_nova_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/compute/drivers/test_nova_driver.py ++++ octavia-5.0.1/octavia/tests/unit/compute/drivers/test_nova_driver.py +@@ -413,10 +413,72 @@ class TestNovaClient(base.TestCase): + server=self.compute_id, net_id=self.network_id, fixed_ip=None, + port_id=None) + +- def test_attach_network_or_port_exception(self): ++ def test_attach_network_or_port_conflict_exception(self): ++ self.manager.manager.interface_attach.side_effect = ( ++ nova_exceptions.Conflict('test_exception')) ++ interface_mock = mock.MagicMock() ++ interface_mock.id = self.port_id ++ bad_interface_mock = mock.MagicMock() ++ bad_interface_mock.id = uuidutils.generate_uuid() ++ self.manager.manager.interface_list.side_effect = [ ++ [interface_mock], [bad_interface_mock], [], Exception('boom')] ++ ++ # No port specified ++ self.assertRaises(exceptions.ComputeUnknownException, ++ self.manager.attach_network_or_port, ++ self.compute_id, self.network_id) ++ ++ # Port already attached ++ result = self.manager.attach_network_or_port(self.compute_id, ++ port_id=self.port_id) ++ self.assertEqual(interface_mock, result) ++ ++ # Port not found ++ self.assertRaises(exceptions.ComputePortInUseException, ++ self.manager.attach_network_or_port, ++ self.compute_id, port_id=self.port_id) ++ ++ # No ports attached ++ self.assertRaises(exceptions.ComputePortInUseException, ++ self.manager.attach_network_or_port, ++ self.compute_id, port_id=self.port_id) ++ ++ # Get attached ports list exception ++ self.assertRaises(exceptions.ComputeUnknownException, ++ self.manager.attach_network_or_port, ++ self.compute_id, port_id=self.port_id) ++ ++ def test_attach_network_or_port_general_not_found_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.NotFound('test_exception')] +- 
self.assertRaises(nova_exceptions.NotFound, ++ self.assertRaises(exceptions.NotFound, ++ self.manager.attach_network_or_port, ++ self.compute_id, self.network_id) ++ ++ def test_attach_network_or_port_instance_not_found_exception(self): ++ self.manager.manager.interface_attach.side_effect = [ ++ nova_exceptions.NotFound('Instance disappeared')] ++ self.assertRaises(exceptions.NotFound, ++ self.manager.attach_network_or_port, ++ self.compute_id, self.network_id) ++ ++ def test_attach_network_or_port_network_not_found_exception(self): ++ self.manager.manager.interface_attach.side_effect = [ ++ nova_exceptions.NotFound('Network disappeared')] ++ self.assertRaises(exceptions.NotFound, ++ self.manager.attach_network_or_port, ++ self.compute_id, self.network_id) ++ ++ def test_attach_network_or_port_port_not_found_exception(self): ++ self.manager.manager.interface_attach.side_effect = [ ++ nova_exceptions.NotFound('Port disappeared')] ++ self.assertRaises(exceptions.NotFound, ++ self.manager.attach_network_or_port, ++ self.compute_id, self.network_id) ++ ++ def test_attach_network_or_port_unknown_exception(self): ++ self.manager.manager.interface_attach.side_effect = [Exception('boom')] ++ self.assertRaises(exceptions.ComputeUnknownException, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +@@ -16,6 +16,7 @@ + import mock + from oslo_config import cfg + from oslo_config import fixture as oslo_fixture ++from oslo_utils import uuidutils + from taskflow.patterns import linear_flow as flow + + from octavia.common import constants +@@ -42,6 +43,7 @@ class TestAmphoraFlows(base.TestCase): + self.amp1 = data_models.Amphora(id=1) + self.amp2 = data_models.Amphora(id=2) + self.amp3 = data_models.Amphora(id=3, status=constants.DELETED) ++ self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid()) + self.lb = data_models.LoadBalancer( + id=4, amphorae=[self.amp1, self.amp2, self.amp3]) + +@@ -57,7 +59,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(2, len(amp_flow.requires)) ++ self.assertEqual(3, len(amp_flow.requires)) + + def test_get_create_amphora_flow_cert(self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() +@@ -71,7 +73,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(2, len(amp_flow.requires)) ++ self.assertEqual(3, len(amp_flow.requires)) + + def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver): + +@@ -89,7 +91,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(3, len(amp_flow.requires)) ++ self.assertEqual(4, len(amp_flow.requires)) + + def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver): + +@@ -109,7 +111,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(3, len(amp_flow.requires)) ++ self.assertEqual(4, 
len(amp_flow.requires)) + + def test_get_cert_master_create_amphora_for_lb_flow( + self, mock_get_net_driver): +@@ -130,7 +132,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(3, len(amp_flow.requires)) ++ self.assertEqual(4, len(amp_flow.requires)) + + def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow( + self, mock_get_net_driver): +@@ -143,7 +145,6 @@ class TestAmphoraFlows(base.TestCase): + + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) +- self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) +@@ -170,7 +171,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(3, len(amp_flow.requires)) ++ self.assertEqual(4, len(amp_flow.requires)) + + def test_get_cert_bogus_create_amphora_for_lb_flow( + self, mock_get_net_driver): +@@ -190,7 +191,7 @@ class TestAmphoraFlows(base.TestCase): + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) +- self.assertEqual(3, len(amp_flow.requires)) ++ self.assertEqual(4, len(amp_flow.requires)) + + def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow( + self, mock_get_net_driver): +@@ -202,7 +203,6 @@ class TestAmphoraFlows(base.TestCase): + + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) +- self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) +@@ -213,14 +213,13 @@ class TestAmphoraFlows(base.TestCase): + + def test_get_delete_amphora_flow(self, mock_get_net_driver): + +- amp_flow = self.AmpFlow.get_delete_amphora_flow() ++ amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4) + + self.assertIsInstance(amp_flow, flow.Flow) + +- self.assertIn(constants.AMPHORA, amp_flow.requires) +- ++ # This flow injects the required data at flow compile time. 
+ self.assertEqual(0, len(amp_flow.provides)) +- self.assertEqual(1, len(amp_flow.requires)) ++ self.assertEqual(0, len(amp_flow.requires)) + + def test_allocate_amp_to_lb_decider(self, mock_get_net_driver): + history = mock.MagicMock() +@@ -240,98 +239,113 @@ class TestAmphoraFlows(base.TestCase): + result = self.AmpFlow._create_new_amp_for_lb_decider(history) + self.assertFalse(result) + +- def test_get_failover_flow_allocated(self, mock_get_net_driver): ++ def test_get_failover_flow_act_stdby(self, mock_get_net_driver): ++ failed_amphora = data_models.Amphora( ++ id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER, ++ load_balancer_id=uuidutils.generate_uuid()) + +- amp_flow = self.AmpFlow.get_failover_flow( +- load_balancer=self.lb) ++ amp_flow = self.AmpFlow.get_failover_amphora_flow( ++ failed_amphora, 2) + + self.assertIsInstance(amp_flow, flow.Flow) + +- self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) ++ self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) ++ self.assertIn(constants.FLAVOR, amp_flow.requires) ++ self.assertIn(constants.LOADBALANCER, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) ++ self.assertIn(constants.VIP, amp_flow.requires) + +- self.assertIn(constants.AMP_DATA, amp_flow.provides) ++ self.assertIn(constants.ADDED_PORTS, amp_flow.provides) ++ self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) ++ self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) ++ self.assertIn(constants.BASE_PORT, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) +- self.assertIn(constants.LISTENERS, amp_flow.provides) ++ self.assertIn(constants.DELTA, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) ++ self.assertIn(constants.SERVER_PEM, amp_flow.provides) ++ self.assertIn(constants.VIP_SG_ID, amp_flow.provides) + +- self.assertEqual(4, len(amp_flow.requires)) +- self.assertEqual(12, len(amp_flow.provides)) +- +- amp_flow = self.AmpFlow.get_failover_flow( +- role=constants.ROLE_MASTER, load_balancer=self.lb) +- +- self.assertIsInstance(amp_flow, flow.Flow) +- +- self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) +- self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) +- +- self.assertIn(constants.AMP_DATA, amp_flow.provides) +- self.assertIn(constants.AMPHORA, amp_flow.provides) +- self.assertIn(constants.AMPHORA_ID, amp_flow.provides) +- self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) +- self.assertIn(constants.COMPUTE_ID, amp_flow.provides) +- self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) +- self.assertIn(constants.LISTENERS, amp_flow.provides) +- self.assertIn(constants.LOADBALANCER, amp_flow.provides) ++ self.assertEqual(6, len(amp_flow.requires)) ++ self.assertEqual(13, len(amp_flow.provides)) + +- self.assertEqual(4, len(amp_flow.requires)) +- self.assertEqual(12, len(amp_flow.provides)) ++ def test_get_failover_flow_standalone(self, mock_get_net_driver): ++ failed_amphora = data_models.Amphora( ++ id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE, ++ load_balancer_id=uuidutils.generate_uuid(), vrrp_ip='2001:3b8::32') + +- amp_flow = self.AmpFlow.get_failover_flow( +- role=constants.ROLE_BACKUP, load_balancer=self.lb) ++ amp_flow = self.AmpFlow.get_failover_amphora_flow( ++ 
failed_amphora, 1) + + self.assertIsInstance(amp_flow, flow.Flow) + +- self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) ++ self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) ++ self.assertIn(constants.FLAVOR, amp_flow.requires) ++ self.assertIn(constants.LOADBALANCER, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) ++ self.assertIn(constants.VIP, amp_flow.requires) + +- self.assertIn(constants.AMP_DATA, amp_flow.provides) ++ self.assertIn(constants.ADDED_PORTS, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) ++ self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) ++ self.assertIn(constants.BASE_PORT, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) +- self.assertIn(constants.LISTENERS, amp_flow.provides) ++ self.assertIn(constants.DELTA, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) ++ self.assertIn(constants.SERVER_PEM, amp_flow.provides) ++ self.assertIn(constants.VIP_SG_ID, amp_flow.provides) + +- self.assertEqual(4, len(amp_flow.requires)) ++ self.assertEqual(6, len(amp_flow.requires)) + self.assertEqual(12, len(amp_flow.provides)) + +- amp_flow = self.AmpFlow.get_failover_flow( +- role='BOGUSROLE', load_balancer=self.lb) ++ def test_get_failover_flow_bogus_role(self, mock_get_net_driver): ++ failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(), ++ role='bogus') ++ ++ amp_flow = self.AmpFlow.get_failover_amphora_flow( ++ failed_amphora, 1) + + self.assertIsInstance(amp_flow, flow.Flow) + +- self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) ++ # TODO(johnsom) Uncomment after amphora failover builds a replacement ++ # amphora. 
++ # self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) ++ # self.assertIn(constants.FLAVOR, amp_flow.requires) ++ # self.assertEqual(5, len(amp_flow.requires)) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + +- self.assertIn(constants.AMP_DATA, amp_flow.provides) +- self.assertIn(constants.AMPHORA, amp_flow.provides) +- self.assertIn(constants.AMPHORA_ID, amp_flow.provides) +- self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) +- self.assertIn(constants.COMPUTE_ID, amp_flow.provides) +- self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) +- self.assertIn(constants.LISTENERS, amp_flow.provides) +- self.assertIn(constants.LOADBALANCER, amp_flow.provides) ++ # self.assertIn(constants.AMPHORA, amp_flow.provides) ++ # self.assertIn(constants.AMPHORA_ID, amp_flow.provides) ++ # self.assertIn(constants.COMPUTE_ID, amp_flow.provides) ++ # self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) ++ # self.assertIn(constants.SERVER_PEM, amp_flow.provides) ++ # self.assertIn(constants.VIP_SG_ID, amp_flow.provides) ++ # self.assertEqual(6, len(amp_flow.provides)) + +- self.assertEqual(4, len(amp_flow.requires)) +- self.assertEqual(12, len(amp_flow.provides)) ++ self.assertEqual(1, len(amp_flow.requires)) ++ self.assertEqual(1, len(amp_flow.provides)) + + def test_get_failover_flow_spare(self, mock_get_net_driver): + +- amp_flow = self.AmpFlow.get_failover_flow() ++ amp_flow = self.AmpFlow.get_failover_amphora_flow(self.amp4, 0) + + self.assertIsInstance(amp_flow, flow.Flow) + +- self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) ++ # TODO(johnsom) Uncomment after amphora failover builds a replacement ++ # amphora. ++ # self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) ++ # self.assertEqual(5, len(amp_flow.requires)) ++ # self.assertEqual(6, len(amp_flow.provides)) ++ self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertEqual(1, len(amp_flow.requires)) +- self.assertEqual(0, len(amp_flow.provides)) ++ self.assertEqual(1, len(amp_flow.provides)) + + def test_cert_rotate_amphora_flow(self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() +@@ -350,12 +364,30 @@ class TestAmphoraFlows(base.TestCase): + + self.assertIsInstance(vrrp_subflow, flow.Flow) + +- self.assertIn(constants.LOADBALANCER, vrrp_subflow.provides) ++ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) ++ self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + +- self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires) ++ self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) ++ self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + + self.assertEqual(2, len(vrrp_subflow.provides)) +- self.assertEqual(1, len(vrrp_subflow.requires)) ++ self.assertEqual(2, len(vrrp_subflow.requires)) ++ ++ def test_get_vrrp_subflow_dont_create_vrrp_group( ++ self, mock_get_net_driver): ++ vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123', ++ create_vrrp_group=False) ++ ++ self.assertIsInstance(vrrp_subflow, flow.Flow) ++ ++ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) ++ self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) ++ ++ self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) ++ self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) ++ ++ self.assertEqual(2, len(vrrp_subflow.provides)) ++ self.assertEqual(2, len(vrrp_subflow.requires)) + + def test_get_post_map_lb_subflow(self, mock_get_net_driver): + +@@ -420,3 +452,64 @@ class 
TestAmphoraFlows(base.TestCase): + + self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(0, len(amp_flow.provides)) ++ ++ def test_get_amphora_for_lb_failover_flow_single(self, ++ mock_get_net_driver): ++ FAILED_PORT_ID = uuidutils.generate_uuid() ++ TEST_PREFIX = 'test_prefix' ++ ++ get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow( ++ TEST_PREFIX, role=constants.ROLE_STANDALONE, ++ failed_amp_vrrp_port_id=FAILED_PORT_ID, is_vrrp_ipv6=True) ++ ++ self.assertIsInstance(get_amp_flow, flow.Flow) ++ ++ self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires) ++ self.assertIn(constants.FLAVOR, get_amp_flow.requires) ++ self.assertIn(constants.LOADBALANCER, get_amp_flow.requires) ++ self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires) ++ self.assertIn(constants.VIP, get_amp_flow.requires) ++ self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires) ++ ++ self.assertIn(constants.ADDED_PORTS, get_amp_flow.provides) ++ self.assertIn(constants.AMPHORA, get_amp_flow.provides) ++ self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides) ++ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides) ++ self.assertIn(constants.BASE_PORT, get_amp_flow.provides) ++ self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides) ++ self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides) ++ self.assertIn(constants.DELTA, get_amp_flow.provides) ++ self.assertIn(constants.SERVER_PEM, get_amp_flow.provides) ++ ++ self.assertEqual(7, len(get_amp_flow.requires), get_amp_flow.requires) ++ self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides) ++ ++ def test_get_amphora_for_lb_failover_flow_act_stdby(self, ++ mock_get_net_driver): ++ TEST_PREFIX = 'test_prefix' ++ ++ get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow( ++ TEST_PREFIX, role=constants.ROLE_MASTER, ++ is_spare=False) ++ ++ self.assertIsInstance(get_amp_flow, flow.Flow) ++ ++ self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires) ++ self.assertIn(constants.FLAVOR, get_amp_flow.requires) ++ self.assertIn(constants.LOADBALANCER, get_amp_flow.requires) ++ self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires) ++ self.assertIn(constants.VIP, get_amp_flow.requires) ++ self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires) ++ ++ self.assertIn(constants.ADDED_PORTS, get_amp_flow.provides) ++ self.assertIn(constants.AMPHORA, get_amp_flow.provides) ++ self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides) ++ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides) ++ self.assertIn(constants.BASE_PORT, get_amp_flow.provides) ++ self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides) ++ self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides) ++ self.assertIn(constants.DELTA, get_amp_flow.provides) ++ self.assertIn(constants.SERVER_PEM, get_amp_flow.provides) ++ ++ self.assertEqual(7, len(get_amp_flow.requires), get_amp_flow.requires) ++ self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides) +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +@@ -16,6 +16,7 @@ + import mock + from oslo_config import cfg + from oslo_config import fixture as oslo_fixture ++from oslo_utils import uuidutils + from taskflow.patterns 
import linear_flow as flow + + from octavia.common import constants +@@ -156,7 +157,7 @@ class TestLoadBalancerFlows(base.TestCas + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + +- self.assertEqual(2, len(amp_flow.provides)) ++ self.assertEqual(4, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + # Test mark_active=False +@@ -169,7 +170,7 @@ class TestLoadBalancerFlows(base.TestCas + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + +- self.assertEqual(2, len(amp_flow.provides)) ++ self.assertEqual(4, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + def test_get_create_load_balancer_flows_single_listeners( +@@ -195,7 +196,7 @@ class TestLoadBalancerFlows(base.TestCas + self.assertIn(constants.AMP_DATA, create_flow.provides) + self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) + +- self.assertEqual(4, len(create_flow.requires)) ++ self.assertEqual(5, len(create_flow.requires)) + self.assertEqual(13, len(create_flow.provides), + create_flow.provides) + +@@ -223,6 +224,229 @@ class TestLoadBalancerFlows(base.TestCas + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + create_flow.provides) + +- self.assertEqual(4, len(create_flow.requires)) +- self.assertEqual(14, len(create_flow.provides), ++ self.assertEqual(5, len(create_flow.requires)) ++ self.assertEqual(16, len(create_flow.provides), + create_flow.provides) ++ ++ def _test_get_failover_LB_flow_single(self, amphorae): ++ lb_mock = mock.MagicMock() ++ lb_mock.id = uuidutils.generate_uuid() ++ lb_mock.topology = constants.TOPOLOGY_SINGLE ++ ++ failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock) ++ ++ self.assertIsInstance(failover_flow, flow.Flow) ++ ++ self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires) ++ self.assertIn(constants.FLAVOR, failover_flow.requires) ++ self.assertIn(constants.LOADBALANCER, failover_flow.requires) ++ self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires) ++ ++ self.assertIn(constants.ADDED_PORTS, failover_flow.provides) ++ self.assertIn(constants.AMPHORA, failover_flow.provides) ++ self.assertIn(constants.AMPHORA_ID, failover_flow.provides) ++ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, ++ failover_flow.provides) ++ self.assertIn(constants.BASE_PORT, failover_flow.provides) ++ self.assertIn(constants.COMPUTE_ID, failover_flow.provides) ++ self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides) ++ self.assertIn(constants.DELTA, failover_flow.provides) ++ self.assertIn(constants.LOADBALANCER, failover_flow.provides) ++ self.assertIn(constants.SERVER_PEM, failover_flow.provides) ++ self.assertIn(constants.VIP, failover_flow.provides) ++ self.assertIn(constants.VIP_SG_ID, failover_flow.provides) ++ ++ self.assertEqual(5, len(failover_flow.requires), ++ failover_flow.requires) ++ self.assertEqual(12, len(failover_flow.provides), ++ failover_flow.provides) ++ ++ def test_get_failover_LB_flow_no_amps_single(self, mock_get_net_driver): ++ self._test_get_failover_LB_flow_single([]) ++ ++ def test_get_failover_LB_flow_one_amp_single(self, mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = constants.ROLE_STANDALONE ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = None ++ amphora_mock.vrrp_ip = None ++ ++ 
self._test_get_failover_LB_flow_single([amphora_mock]) ++ ++ def test_get_failover_LB_flow_one_spare_amp_single(self, ++ mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = None ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = None ++ amphora_mock.vrrp_ip = None ++ ++ self._test_get_failover_LB_flow_single([amphora_mock]) ++ ++ def test_get_failover_LB_flow_one_bogus_amp_single(self, ++ mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = 'bogus' ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = None ++ amphora_mock.vrrp_ip = None ++ ++ self._test_get_failover_LB_flow_single([amphora_mock]) ++ ++ def test_get_failover_LB_flow_two_amp_single(self, mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.role = constants.ROLE_STANDALONE ++ amphora2_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora2_mock.compute_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_port_id = None ++ amphora2_mock.vrrp_ip = None ++ ++ self._test_get_failover_LB_flow_single([amphora_mock, amphora2_mock]) ++ ++ def _test_get_failover_LB_flow_no_amps_act_stdby(self, amphorae): ++ lb_mock = mock.MagicMock() ++ lb_mock.id = uuidutils.generate_uuid() ++ lb_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY ++ ++ failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock) ++ ++ self.assertIsInstance(failover_flow, flow.Flow) ++ ++ self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires) ++ self.assertIn(constants.FLAVOR, failover_flow.requires) ++ self.assertIn(constants.LOADBALANCER, failover_flow.requires) ++ self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires) ++ ++ self.assertIn(constants.ADDED_PORTS, failover_flow.provides) ++ self.assertIn(constants.AMP_VRRP_INT, failover_flow.provides) ++ self.assertIn(constants.AMPHORA, failover_flow.provides) ++ self.assertIn(constants.AMPHORA_ID, failover_flow.provides) ++ self.assertIn(constants.AMPHORAE, failover_flow.provides) ++ self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, ++ failover_flow.provides) ++ self.assertIn(constants.BASE_PORT, failover_flow.provides) ++ self.assertIn(constants.COMPUTE_ID, failover_flow.provides) ++ self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides) ++ self.assertIn(constants.DELTA, failover_flow.provides) ++ self.assertIn(constants.FIRST_AMP_NETWORK_CONFIGS, ++ failover_flow.provides) ++ self.assertIn(constants.FIRST_AMP_VRRP_INTERFACE, ++ failover_flow.provides) ++ self.assertIn(constants.LOADBALANCER, failover_flow.provides) ++ self.assertIn(constants.SERVER_PEM, failover_flow.provides) ++ self.assertIn(constants.VIP, failover_flow.provides) ++ self.assertIn(constants.VIP_SG_ID, failover_flow.provides) ++ ++ self.assertEqual(5, len(failover_flow.requires), ++ failover_flow.requires) ++ self.assertEqual(16, len(failover_flow.provides), ++ failover_flow.provides) ++ ++ def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver): ++ self._test_get_failover_LB_flow_no_amps_act_stdby([]) ++ ++ def test_get_failover_LB_flow_one_amps_act_stdby(self, amphorae): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = constants.ROLE_MASTER ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = None ++ 
amphora_mock.vrrp_ip = None ++ ++ self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock]) ++ ++ def test_get_failover_LB_flow_two_amps_act_stdby(self, ++ mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = constants.ROLE_MASTER ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_ip = '192.0.2.46' ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.role = constants.ROLE_BACKUP ++ amphora2_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora2_mock.compute_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_ip = '2001:db8::46' ++ ++ self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, ++ amphora2_mock]) ++ ++ def test_get_failover_LB_flow_three_amps_act_stdby(self, ++ mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = constants.ROLE_MASTER ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_ip = '192.0.2.46' ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.role = constants.ROLE_BACKUP ++ amphora2_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora2_mock.compute_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_ip = '2001:db8::46' ++ amphora3_mock = mock.MagicMock() ++ amphora3_mock.vrrp_ip = None ++ ++ self._test_get_failover_LB_flow_no_amps_act_stdby( ++ [amphora_mock, amphora2_mock, amphora3_mock]) ++ ++ def test_get_failover_LB_flow_two_amps_bogus_act_stdby( ++ self, mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = 'bogus' ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_ip = '192.0.2.46' ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.role = constants.ROLE_MASTER ++ amphora2_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora2_mock.compute_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_ip = '2001:db8::46' ++ ++ self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, ++ amphora2_mock]) ++ ++ def test_get_failover_LB_flow_two_amps_spare_act_stdby( ++ self, mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = None ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_ip = '192.0.2.46' ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.role = constants.ROLE_MASTER ++ amphora2_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora2_mock.compute_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_ip = '2001:db8::46' ++ ++ self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, ++ amphora2_mock]) ++ ++ def test_get_failover_LB_flow_two_amps_standalone_act_stdby( ++ self, mock_get_net_driver): ++ amphora_mock = mock.MagicMock() ++ amphora_mock.role = constants.ROLE_STANDALONE ++ amphora_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora_mock.compute_id = uuidutils.generate_uuid() ++ amphora_mock.vrrp_port_id = 
uuidutils.generate_uuid() ++ amphora_mock.vrrp_ip = '192.0.2.46' ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.role = constants.ROLE_MASTER ++ amphora2_mock.lb_network_id = uuidutils.generate_uuid() ++ amphora2_mock.compute_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() ++ amphora2_mock.vrrp_ip = '2001:db8::46' ++ ++ self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, ++ amphora2_mock]) +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +@@ -40,6 +40,7 @@ FAKE_CONFIG_FILE = 'fake config file' + _amphora_mock = mock.MagicMock() + _amphora_mock.id = AMP_ID + _amphora_mock.status = constants.AMPHORA_ALLOCATED ++_amphora_mock.vrrp_ip = '198.51.100.65' + _load_balancer_mock = mock.MagicMock() + _load_balancer_mock.id = LB_ID + _listener_mock = mock.MagicMock() +@@ -76,33 +77,52 @@ class TestAmphoraDriverTasks(base.TestCa + active_connection_rety_interval=CONN_RETRY_INTERVAL) + conf.config(group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) ++ self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, ++ constants.REQ_READ_TIMEOUT: 2, ++ constants.CONN_MAX_RETRIES: 3, ++ constants.CONN_RETRY_INTERVAL: 4} + super(TestAmphoraDriverTasks, self).setUp() + +- def test_amp_listener_update(self, +- mock_driver, +- mock_generate_uuid, +- mock_log, +- mock_get_session, +- mock_listener_repo_get, +- mock_listener_repo_update, +- mock_amphora_repo_update): +- +- timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, +- constants.REQ_READ_TIMEOUT: 2, +- constants.CONN_MAX_RETRIES: 3, +- constants.CONN_RETRY_INTERVAL: 4} ++ def test_amp_listeners_update(self, ++ mock_driver, ++ mock_generate_uuid, ++ mock_log, ++ mock_get_session, ++ mock_listener_repo_get, ++ mock_listener_repo_update, ++ mock_amphora_repo_update): + + amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate() ++ amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock, ++ self.timeout_dict) ++ ++ mock_driver.update_amphora_listeners.assert_called_once_with( ++ _load_balancer_mock, _amphora_mock, self.timeout_dict) ++ ++ mock_driver.update_amphora_listeners.side_effect = Exception('boom') ++ ++ amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock, ++ self.timeout_dict) ++ ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, AMP_ID, status=constants.ERROR) ++ ++ def test_amphorae_listeners_update( ++ self, mock_driver, mock_generate_uuid, mock_log, mock_get_session, ++ mock_listener_repo_get, mock_listener_repo_update, ++ mock_amphora_repo_update): ++ ++ amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate() + amp_list_update_obj.execute(_load_balancer_mock, 0, +- [_amphora_mock], timeout_dict) ++ [_amphora_mock], self.timeout_dict) + + mock_driver.update_amphora_listeners.assert_called_once_with( +- _load_balancer_mock, _amphora_mock, timeout_dict) ++ _load_balancer_mock, _amphora_mock, self.timeout_dict) + + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute(_load_balancer_mock, 0, +- [_amphora_mock], timeout_dict) ++ [_amphora_mock], self.timeout_dict) + + mock_amphora_repo_update.assert_called_once_with( + _session_mock, AMP_ID, status=constants.ERROR) +@@ 
-170,6 +190,36 @@ class TestAmphoraDriverTasks(base.TestCa + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') ++ def test_amphora_index_listeners_reload( ++ self, mock_prov_status_error, mock_driver, mock_generate_uuid, ++ mock_log, mock_get_session, mock_listener_repo_get, ++ mock_listener_repo_update, mock_amphora_repo_update): ++ amphora_mock = mock.MagicMock() ++ listeners_reload_obj = ( ++ amphora_driver_tasks.AmphoraIndexListenersReload()) ++ mock_lb = mock.MagicMock() ++ mock_listener = mock.MagicMock() ++ mock_listener.id = '12345' ++ ++ # Test no listeners ++ mock_lb.listeners = None ++ listeners_reload_obj.execute(mock_lb, None, 0) ++ mock_driver.reload.assert_not_called() ++ ++ # Test with listeners ++ mock_driver.start.reset_mock() ++ mock_lb.listeners = [mock_listener] ++ listeners_reload_obj.execute(mock_lb, [amphora_mock], 0, ++ timeout_dict=self.timeout_dict) ++ mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, ++ self.timeout_dict) ++ # Test revert ++ mock_lb.listeners = [mock_listener] ++ listeners_reload_obj.revert(mock_lb) ++ mock_prov_status_error.assert_called_once_with('12345') ++ ++ @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' ++ 'mark_listener_prov_status_error') + def test_listeners_start(self, + mock_prov_status_error, + mock_driver, +@@ -296,6 +346,12 @@ class TestAmphoraDriverTasks(base.TestCa + status=constants.ERROR) + self.assertIsNone(amp) + ++ # Test revert when this task failed ++ repo.AmphoraRepository.update.reset_mock() ++ amp = amphora_finalize_obj.revert( ++ failure.Failure.from_exception(Exception('boom')), _amphora_mock) ++ repo.AmphoraRepository.update.assert_not_called() ++ + def test_amphora_post_network_plug(self, + mock_driver, + mock_generate_uuid, +@@ -332,7 +388,14 @@ class TestAmphoraDriverTasks(base.TestCa + + self.assertIsNone(amp) + +- def test_amphorae_post_network_plug(self, mock_driver, ++ # Test revert when this task failed ++ repo.AmphoraRepository.update.reset_mock() ++ amp = amphora_post_network_plug_obj.revert( ++ failure.Failure.from_exception(Exception('boom')), _amphora_mock) ++ repo.AmphoraRepository.update.assert_not_called() ++ ++ @mock.patch('octavia.db.repositories.AmphoraRepository.get_all') ++ def test_amphorae_post_network_plug(self, mock_amp_get_all, mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, +@@ -342,7 +405,7 @@ class TestAmphoraDriverTasks(base.TestCa + mock_driver.get_network.return_value = _network_mock + _amphora_mock.id = AMP_ID + _amphora_mock.compute_id = COMPUTE_ID +- _LB_mock.amphorae = [_amphora_mock] ++ mock_amp_get_all.return_value = [[_amphora_mock], None] + amphora_post_network_plug_obj = (amphora_driver_tasks. + AmphoraePostNetworkPlug()) + +@@ -354,6 +417,14 @@ class TestAmphoraDriverTasks(base.TestCa + (mock_driver.post_network_plug. 
+ assert_called_once_with(_amphora_mock, port_mock)) + ++ # Test with no ports to plug ++ mock_driver.post_network_plug.reset_mock() ++ ++ _deltas_mock = {'0': [port_mock]} ++ ++ amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock) ++ mock_driver.post_network_plug.assert_not_called() ++ + # Test revert + amp = amphora_post_network_plug_obj.revert(None, _LB_mock, + _deltas_mock) +@@ -376,6 +447,13 @@ class TestAmphoraDriverTasks(base.TestCa + + self.assertIsNone(amp) + ++ # Test revert when this task failed ++ repo.AmphoraRepository.update.reset_mock() ++ amp = amphora_post_network_plug_obj.revert( ++ failure.Failure.from_exception(Exception('boom')), _amphora_mock, ++ None) ++ repo.AmphoraRepository.update.assert_not_called() ++ + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_amphora_post_vip_plug(self, + mock_loadbalancer_repo_update, +@@ -426,6 +504,13 @@ class TestAmphoraDriverTasks(base.TestCa + + self.assertIsNone(amp) + ++ # Test revert when this task failed ++ repo.AmphoraRepository.update.reset_mock() ++ amp = amphora_post_vip_plug_obj.revert( ++ failure.Failure.from_exception(Exception('boom')), _amphora_mock, ++ None) ++ repo.AmphoraRepository.update.assert_not_called() ++ + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_amphorae_post_vip_plug(self, + mock_loadbalancer_repo_update, +@@ -465,6 +550,13 @@ class TestAmphoraDriverTasks(base.TestCa + + self.assertIsNone(amp) + ++ # Test revert when this task failed ++ repo.AmphoraRepository.update.reset_mock() ++ amp = amphora_post_vip_plug_obj.revert( ++ failure.Failure.from_exception(Exception('boom')), _amphora_mock, ++ None) ++ repo.AmphoraRepository.update.assert_not_called() ++ + def test_amphora_cert_upload(self, + mock_driver, + mock_generate_uuid, +@@ -491,45 +583,59 @@ class TestAmphoraDriverTasks(base.TestCa + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): ++ FAKE_INTERFACE = 'fake0' + _LB_mock.amphorae = _amphorae_mock ++ mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, ++ Exception('boom')] + + timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, + constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} + + amphora_update_vrrp_interface_obj = ( + amphora_driver_tasks.AmphoraUpdateVRRPInterface()) +- amphora_update_vrrp_interface_obj.execute(_LB_mock) +- mock_driver.get_vrrp_interface.assert_called_once_with( +- _amphora_mock, timeout_dict=timeout_dict) ++ amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict) ++ mock_driver.get_interface_from_ip.assert_called_once_with( ++ _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict) ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE) + +- # Test revert +- mock_driver.reset_mock() ++ # Test with an exception ++ mock_amphora_repo_update.reset_mock() ++ amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict) ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, _amphora_mock.id, status=constants.ERROR) + ++ def test_amphora_index_update_vrrp_interface( ++ self, mock_driver, mock_generate_uuid, mock_log, mock_get_session, ++ mock_listener_repo_get, mock_listener_repo_update, ++ mock_amphora_repo_update): ++ FAKE_INTERFACE = 'fake0' + _LB_mock.amphorae = _amphorae_mock +- amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock) +- mock_amphora_repo_update.assert_called_with(_session_mock, +- _amphora_mock.id, +- 
vrrp_interface=None) ++ mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, ++ Exception('boom')] + +- mock_driver.reset_mock() +- mock_amphora_repo_update.reset_mock() ++ timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, ++ constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} + +- failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT")) +- amphora_update_vrrp_interface_obj.revert(failure_obj, _LB_mock) +- self.assertFalse(mock_amphora_repo_update.called) ++ amphora_update_vrrp_interface_obj = ( ++ amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface()) ++ amphora_update_vrrp_interface_obj.execute( ++ [_amphora_mock], 0, timeout_dict) ++ mock_driver.get_interface_from_ip.assert_called_once_with( ++ _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict) ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE) + +- # Test revert with exception +- mock_driver.reset_mock() ++ # Test with an exception + mock_amphora_repo_update.reset_mock() +- mock_amphora_repo_update.side_effect = Exception('fail') +- +- _LB_mock.amphorae = _amphorae_mock +- amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock) +- mock_amphora_repo_update.assert_called_with(_session_mock, +- _amphora_mock.id, +- vrrp_interface=None) ++ amphora_update_vrrp_interface_obj.execute( ++ [_amphora_mock], 0, timeout_dict) ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, _amphora_mock.id, status=constants.ERROR) + ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amphora_vrrp_update(self, ++ mock_lb_get, + mock_driver, + mock_generate_uuid, + mock_log, +@@ -538,11 +644,53 @@ class TestAmphoraDriverTasks(base.TestCa + mock_listener_repo_update, + mock_amphora_repo_update): + amphorae_network_config = mock.MagicMock() ++ mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, ++ Exception('boom')] ++ mock_lb_get.return_value = _LB_mock + amphora_vrrp_update_obj = ( + amphora_driver_tasks.AmphoraVRRPUpdate()) +- amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config) ++ amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, ++ _amphora_mock, 'fakeint0') ++ mock_driver.update_vrrp_conf.assert_called_once_with( ++ _LB_mock, amphorae_network_config, _amphora_mock, None) ++ ++ # Test with an exception ++ mock_amphora_repo_update.reset_mock() ++ amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, ++ _amphora_mock, 'fakeint0') ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, _amphora_mock.id, status=constants.ERROR) ++ ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') ++ def test_amphora_index_vrrp_update(self, ++ mock_lb_get, ++ mock_driver, ++ mock_generate_uuid, ++ mock_log, ++ mock_get_session, ++ mock_listener_repo_get, ++ mock_listener_repo_update, ++ mock_amphora_repo_update): ++ amphorae_network_config = mock.MagicMock() ++ mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, ++ Exception('boom')] ++ mock_lb_get.return_value = _LB_mock ++ amphora_vrrp_update_obj = ( ++ amphora_driver_tasks.AmphoraIndexVRRPUpdate()) ++ ++ amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, ++ 0, [_amphora_mock], 'fakeint0', ++ timeout_dict=self.timeout_dict) + mock_driver.update_vrrp_conf.assert_called_once_with( +- _LB_mock, amphorae_network_config) ++ _LB_mock, amphorae_network_config, _amphora_mock, ++ self.timeout_dict) ++ ++ # Test with an exception ++ 
mock_amphora_repo_update.reset_mock() ++ amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, ++ 0, [_amphora_mock], 'fakeint0') ++ mock_amphora_repo_update.assert_called_once_with( ++ _session_mock, _amphora_mock.id, status=constants.ERROR) + + def test_amphora_vrrp_stop(self, + mock_driver, +@@ -567,8 +715,25 @@ class TestAmphoraDriverTasks(base.TestCa + mock_amphora_repo_update): + amphora_vrrp_start_obj = ( + amphora_driver_tasks.AmphoraVRRPStart()) +- amphora_vrrp_start_obj.execute(_LB_mock) +- mock_driver.start_vrrp_service.assert_called_once_with(_LB_mock) ++ amphora_vrrp_start_obj.execute(_amphora_mock, ++ timeout_dict=self.timeout_dict) ++ mock_driver.start_vrrp_service.assert_called_once_with( ++ _amphora_mock, self.timeout_dict) ++ ++ def test_amphora_index_vrrp_start(self, ++ mock_driver, ++ mock_generate_uuid, ++ mock_log, ++ mock_get_session, ++ mock_listener_repo_get, ++ mock_listener_repo_update, ++ mock_amphora_repo_update): ++ amphora_vrrp_start_obj = ( ++ amphora_driver_tasks.AmphoraIndexVRRPStart()) ++ amphora_vrrp_start_obj.execute(0, [_amphora_mock], ++ timeout_dict=self.timeout_dict) ++ mock_driver.start_vrrp_service.assert_called_once_with( ++ _amphora_mock, self.timeout_dict) + + def test_amphora_compute_connectivity_wait(self, + mock_driver, +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +@@ -18,6 +18,7 @@ import mock + from oslo_config import cfg + from oslo_config import fixture as oslo_fixture + from oslo_utils import uuidutils ++import tenacity + + from octavia.common import constants + from octavia.common import exceptions +@@ -169,7 +170,8 @@ class TestComputeTasks(base.TestCase): + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() +- compute_id = createcompute.execute(_amphora_mock.id, ports=[_port]) ++ compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], ++ server_group_id=None) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( +@@ -421,19 +423,54 @@ class TestComputeTasks(base.TestCase): + @mock.patch('stevedore.driver.DriverManager.driver') + def test_delete_amphorae_on_load_balancer(self, mock_driver): + ++ mock_driver.delete.side_effect = [mock.DEFAULT, ++ exceptions.OctaviaException('boom')] ++ + delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer() ++ + delete_amps.execute(_load_balancer_mock) + + mock_driver.delete.assert_called_once_with(COMPUTE_ID) + ++ # Test compute driver exception is raised ++ self.assertRaises(exceptions.OctaviaException, delete_amps.execute, ++ _load_balancer_mock) ++ + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_delete(self, mock_driver): ++ mock_driver.delete.side_effect = [ ++ mock.DEFAULT, exceptions.OctaviaException('boom'), ++ mock.DEFAULT, exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom')] + + delete_compute = compute_tasks.ComputeDelete() ++ ++ # Limit the retry attempts for the test run to save time ++ delete_compute.execute.retry.stop = tenacity.stop_after_attempt(2) ++ + delete_compute.execute(_amphora_mock) + + mock_driver.delete.assert_called_once_with(COMPUTE_ID) + ++ # Test retry after a 
compute exception ++ mock_driver.reset_mock() ++ delete_compute.execute(_amphora_mock) ++ mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID), ++ mock.call(COMPUTE_ID)]) ++ ++ # Test passive failure ++ mock_driver.reset_mock() ++ delete_compute.execute(_amphora_mock, passive_failure=True) ++ mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID), ++ mock.call(COMPUTE_ID)]) ++ ++ # Test non-passive failure ++ mock_driver.reset_mock() ++ self.assertRaises(exceptions.OctaviaException, delete_compute.execute, ++ _amphora_mock, passive_failure=False) ++ + @mock.patch('stevedore.driver.DriverManager.driver') + def test_nova_server_group_create(self, mock_driver): + nova_sever_group_obj = compute_tasks.NovaServerGroupCreate() +@@ -479,3 +516,34 @@ class TestComputeTasks(base.TestCase): + sg_id = None + nova_sever_group_obj.execute(sg_id) + self.assertFalse(mock_driver.delete_server_group.called, sg_id) ++ ++ @mock.patch('stevedore.driver.DriverManager.driver') ++ def test_attach_port(self, mock_driver): ++ COMPUTE_ID = uuidutils.generate_uuid() ++ PORT_ID = uuidutils.generate_uuid() ++ amphora_mock = mock.MagicMock() ++ port_mock = mock.MagicMock() ++ amphora_mock.compute_id = COMPUTE_ID ++ port_mock.id = PORT_ID ++ ++ attach_port_obj = compute_tasks.AttachPort() ++ ++ # Test execute ++ attach_port_obj.execute(amphora_mock, port_mock) ++ ++ mock_driver.attach_network_or_port.assert_called_once_with( ++ COMPUTE_ID, port_id=PORT_ID) ++ ++ # Test revert ++ mock_driver.reset_mock() ++ ++ attach_port_obj.revert(amphora_mock, port_mock) ++ ++ mock_driver.detach_port.assert_called_once_with(COMPUTE_ID, PORT_ID) ++ ++ # Test rever exception ++ mock_driver.reset_mock() ++ mock_driver.detach_port.side_effect = [Exception('boom')] ++ ++ # should not raise ++ attach_port_obj.revert(amphora_mock, port_mock) +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +@@ -49,7 +49,7 @@ VIP_IP = '192.0.5.2' + VRRP_IP = '192.0.5.3' + HA_IP = '192.0.5.4' + AMP_ROLE = 'FAKE_ROLE' +-VRRP_ID = random.randrange(255) ++VRRP_ID = 1 + VRRP_PRIORITY = random.randrange(100) + CACHED_ZONE = 'zone1' + IMAGE_ID = uuidutils.generate_uuid() +@@ -489,9 +489,17 @@ class TestDatabaseTasks(base.TestCase): + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): ++ mock_base_port = mock.MagicMock() ++ mock_base_port.id = VRRP_PORT_ID ++ mock_fixed_ip = mock.MagicMock() ++ mock_fixed_ip.ip_address = VRRP_IP ++ mock_base_port.fixed_ips = [mock_fixed_ip] ++ mock_vip = mock.MagicMock() ++ mock_vip.ip_address = HA_IP ++ mock_vip.port_id = HA_PORT_ID + + update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails() +- update_amp_fo_details.execute(_amphora_mock, _amphora_mock) ++ update_amp_fo_details.execute(_amphora_mock, mock_vip, mock_base_port) + + mock_amphora_repo_update.assert_called_once_with( + 'TEST', +@@ -1760,9 +1768,11 @@ class TestDatabaseTasks(base.TestCase): + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role=None, vrrp_priority=None) + ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + def test_get_amphorae_from_loadbalancer(self, + mock_amphora_get, ++ mock_lb_get, + mock_generate_uuid, + 
mock_LOG, + mock_get_session, +@@ -1776,6 +1786,7 @@ class TestDatabaseTasks(base.TestCase): + amp2.id = uuidutils.generate_uuid() + lb = mock.MagicMock() + lb.amphorae = [amp1, amp2] ++ mock_lb_get.return_value = lb + + mock_amphora_get.side_effect = [_amphora_mock, None] + +@@ -1800,6 +1811,23 @@ class TestDatabaseTasks(base.TestCase): + mock_listener_get.assert_called_once_with('TEST', id=_listener_mock.id) + self.assertEqual([_listener_mock], result) + ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') ++ def test_get_loadbalancer(self, mock_lb_get, mock_generate_uuid, mock_LOG, ++ mock_get_session, mock_loadbalancer_repo_update, ++ mock_listener_repo_update, ++ mock_amphora_repo_update, ++ mock_amphora_repo_delete): ++ FAKE_LB = 'fake LB' ++ LB_ID = uuidutils.generate_uuid() ++ get_loadbalancer_obj = database_tasks.GetLoadBalancer() ++ ++ mock_lb_get.return_value = FAKE_LB ++ ++ result = get_loadbalancer_obj.execute(LB_ID) ++ ++ self.assertEqual(FAKE_LB, result) ++ mock_lb_get.assert_called_once_with('TEST', id=LB_ID) ++ + def test_get_vip_from_loadbalancer(self, + mock_generate_uuid, + mock_LOG, +@@ -1827,7 +1855,7 @@ class TestDatabaseTasks(base.TestCase): + mock_get_session.side_effect = ['TEST', + odb_exceptions.DBDuplicateEntry] + create_vrrp_group = database_tasks.CreateVRRPGroupForLB() +- create_vrrp_group.execute(_loadbalancer_mock) ++ create_vrrp_group.execute(_loadbalancer_mock.id) + mock_vrrp_group_create.assert_called_once_with( + 'TEST', load_balancer_id=LB_ID, + vrrp_group_name=LB_ID.replace('-', ''), +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +@@ -18,9 +18,11 @@ from oslo_config import cfg + from oslo_config import fixture as oslo_fixture + from oslo_utils import uuidutils + from taskflow.types import failure ++import tenacity + + from octavia.common import constants + from octavia.common import data_models as o_data_models ++from octavia.common import exceptions + from octavia.controller.worker.v1.tasks import network_tasks + from octavia.network import base as net_base + from octavia.network import data_models +@@ -79,11 +81,75 @@ class TestNetworkTasks(base.TestCase): + self.amphora_mock.id = AMPHORA_ID + self.amphora_mock.compute_id = COMPUTE_ID + self.amphora_mock.status = constants.AMPHORA_ALLOCATED +- conf = oslo_fixture.Config(cfg.CONF) +- conf.config(group="controller_worker", amp_boot_network_list=['netid']) +- ++ self.boot_net_id = NETWORK_ID ++ conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) ++ conf.config(group="controller_worker", ++ amp_boot_network_list=[self.boot_net_id]) ++ conf.config(group="networking", max_retries=1) + super(TestNetworkTasks, self).setUp() + ++ def test_calculate_amphora_delta(self, mock_get_net_driver): ++ DELETE_NETWORK_ID = uuidutils.generate_uuid() ++ MEMBER_NETWORK_ID = uuidutils.generate_uuid() ++ MEMBER_SUBNET_ID = uuidutils.generate_uuid() ++ VRRP_PORT_ID = uuidutils.generate_uuid() ++ mock_driver = mock.MagicMock() ++ mock_get_net_driver.return_value = mock_driver ++ member_mock = mock.MagicMock() ++ member_mock.subnet_id = MEMBER_SUBNET_ID ++ pool_mock = mock.MagicMock() ++ pool_mock.members = [member_mock] ++ lb_mock = mock.MagicMock() ++ lb_mock.pools = [pool_mock] ++ amphora_mock = mock.MagicMock() ++ 
amphora_mock.id = AMPHORA_ID ++ amphora_mock.compute_id = COMPUTE_ID ++ amphora_mock.vrrp_port_id = VRRP_PORT_ID ++ vrrp_port_mock = mock.MagicMock() ++ vrrp_port_mock.network_id = self.boot_net_id ++ mock_subnet = mock.MagicMock() ++ mock_subnet.network_id = MEMBER_NETWORK_ID ++ nic1_delete_mock = mock.MagicMock() ++ nic1_delete_mock.network_id = DELETE_NETWORK_ID ++ nic2_keep_mock = mock.MagicMock() ++ nic2_keep_mock.network_id = self.boot_net_id ++ ++ mock_driver.get_port.return_value = vrrp_port_mock ++ mock_driver.get_subnet.return_value = mock_subnet ++ mock_driver.get_plugged_networks.return_value = [nic1_delete_mock, ++ nic2_keep_mock] ++ ++ calc_amp_delta = network_tasks.CalculateAmphoraDelta() ++ ++ # Test vrrp_port_id is None ++ result = calc_amp_delta.execute(lb_mock, amphora_mock) ++ ++ self.assertEqual(AMPHORA_ID, result.amphora_id) ++ self.assertEqual(COMPUTE_ID, result.compute_id) ++ self.assertEqual(1, len(result.add_nics)) ++ self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id) ++ self.assertEqual(1, len(result.delete_nics)) ++ self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id) ++ mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID) ++ mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID) ++ mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) ++ ++ # Test with vrrp_port_id ++ mock_driver.reset_mock() ++ ++ result = calc_amp_delta.execute(lb_mock, amphora_mock, ++ vrrp_port=vrrp_port_mock) ++ ++ self.assertEqual(AMPHORA_ID, result.amphora_id) ++ self.assertEqual(COMPUTE_ID, result.compute_id) ++ self.assertEqual(1, len(result.add_nics)) ++ self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id) ++ self.assertEqual(1, len(result.delete_nics)) ++ self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id) ++ mock_driver.get_port.assert_not_called() ++ mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID) ++ mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) ++ + def test_calculate_delta(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver +@@ -661,12 +727,39 @@ class TestNetworkTasks(base.TestCase): + net_task.execute(lb) + mock_driver.update_vip.assert_called_once_with(lb, for_delete=True) + +- def test_get_amphorae_network_configs(self, mock_get_net_driver): ++ @mock.patch('octavia.db.api.get_session', return_value='TEST') ++ @mock.patch('octavia.db.repositories.AmphoraRepository.get') ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') ++ def test_get_amphora_network_configs_by_id( ++ self, mock_lb_get, mock_amp_get, ++ mock_get_session, mock_get_net_driver): ++ LB_ID = uuidutils.generate_uuid() ++ AMP_ID = uuidutils.generate_uuid() ++ mock_driver = mock.MagicMock() ++ mock_get_net_driver.return_value = mock_driver ++ mock_amp_get.return_value = 'mock amphora' ++ mock_lb_get.return_value = 'mock load balancer' ++ ++ net_task = network_tasks.GetAmphoraNetworkConfigsByID() ++ ++ net_task.execute(LB_ID, AMP_ID) ++ ++ mock_driver.get_network_configs.assert_called_once_with( ++ 'mock load balancer', amphora='mock amphora') ++ mock_amp_get.assert_called_once_with('TEST', id=AMP_ID) ++ mock_lb_get.assert_called_once_with('TEST', id=LB_ID) ++ ++ @mock.patch('octavia.db.api.get_session', return_value='TEST') ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') ++ def test_get_amphorae_network_configs(self, mock_lb_get, mock_get_session, ++ mock_get_net_driver): + mock_driver = 
mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + lb = o_data_models.LoadBalancer() ++ mock_lb_get.return_value = lb + net_task = network_tasks.GetAmphoraeNetworkConfigs() +- net_task.execute(lb) ++ net_task.execute(lb.id) ++ mock_lb_get.assert_called_once_with('TEST', id=lb.id) + mock_driver.get_network_configs.assert_called_once_with(lb) + + def test_failover_preparation_for_amphora(self, mock_get_net_driver): +@@ -738,41 +831,20 @@ class TestNetworkTasks(base.TestCase): + mock_driver.plug_port.assert_any_call(amphora, port1) + mock_driver.plug_port.assert_any_call(amphora, port2) + +- def test_plug_vip_port(self, mock_get_net_driver): +- mock_driver = mock.MagicMock() +- mock_get_net_driver.return_value = mock_driver +- vrrp_port = mock.MagicMock() +- +- amphorae_network_config = mock.MagicMock() +- amphorae_network_config.get().vrrp_port = vrrp_port +- +- plugvipport = network_tasks.PlugVIPPort() +- plugvipport.execute(self.amphora_mock, amphorae_network_config) +- mock_driver.plug_port.assert_any_call(self.amphora_mock, vrrp_port) +- +- # test revert +- plugvipport.revert(None, self.amphora_mock, amphorae_network_config) +- mock_driver.unplug_port.assert_any_call(self.amphora_mock, vrrp_port) +- +- def test_wait_for_port_detach(self, mock_get_net_driver): +- mock_driver = mock.MagicMock() +- mock_get_net_driver.return_value = mock_driver +- +- amphora = o_data_models.Amphora(id=AMPHORA_ID, +- lb_network_ip=IP_ADDRESS) +- +- waitforportdetach = network_tasks.WaitForPortDetach() +- waitforportdetach.execute(amphora) +- +- mock_driver.wait_for_port_detach.assert_called_once_with(amphora) +- +- def test_update_vip_sg(self, mock_get_net_driver): ++ @mock.patch('octavia.db.api.get_session', return_value='TEST') ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') ++ def test_update_vip_sg(self, mock_lb_get, mock_get_session, ++ mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver ++ mock_lb_get.return_value = self.load_balancer_mock + net = network_tasks.UpdateVIPSecurityGroup() + +- net.execute(LB) +- mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip) ++ net.execute(self.load_balancer_mock.id) ++ mock_lb_get.assert_called_once_with('TEST', ++ id=self.load_balancer_mock.id) ++ mock_driver.update_vip_sg.assert_called_once_with( ++ self.load_balancer_mock, self.load_balancer_mock.vip) + + def test_get_subnet_from_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() +@@ -799,3 +871,274 @@ class TestNetworkTasks(base.TestCase): + net.revert(AMPS_DATA[0], LB, self.amphora_mock, mockSubnet) + mock_driver.unplug_aap_port.assert_called_once_with( + LB.vip, self.amphora_mock, mockSubnet) ++ ++ @mock.patch('octavia.controller.worker.v1.tasks.network_tasks.DeletePort.' 
++ 'update_progress') ++ def test_delete_port(self, mock_update_progress, mock_get_net_driver): ++ PORT_ID = uuidutils.generate_uuid() ++ mock_driver = mock.MagicMock() ++ mock_get_net_driver.return_value = mock_driver ++ mock_driver.delete_port.side_effect = [ ++ mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT, ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom')] ++ mock_driver.admin_down_port.side_effect = [ ++ mock.DEFAULT, exceptions.OctaviaException('boom')] ++ ++ net_task = network_tasks.DeletePort() ++ ++ # Limit the retry attempts for the test run to save time ++ net_task.execute.retry.stop = tenacity.stop_after_attempt(2) ++ ++ # Test port ID is None (no-op) ++ net_task.execute(None) ++ ++ mock_update_progress.assert_not_called() ++ mock_driver.delete_port.assert_not_called() ++ ++ # Test successful delete ++ mock_update_progress.reset_mock() ++ mock_driver.reset_mock() ++ ++ net_task.execute(PORT_ID) ++ ++ mock_update_progress.assert_called_once_with(0.5) ++ mock_driver.delete_port.assert_called_once_with(PORT_ID) ++ ++ # Test exception and successful retry ++ mock_update_progress.reset_mock() ++ mock_driver.reset_mock() ++ ++ net_task.execute(PORT_ID) ++ ++ mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) ++ mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), ++ mock.call(PORT_ID)]) ++ ++ # Test passive failure ++ mock_update_progress.reset_mock() ++ mock_driver.reset_mock() ++ ++ net_task.execute(PORT_ID, passive_failure=True) ++ ++ mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) ++ mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), ++ mock.call(PORT_ID)]) ++ mock_driver.admin_down_port.assert_called_once_with(PORT_ID) ++ ++ # Test passive failure admin down failure ++ mock_update_progress.reset_mock() ++ mock_driver.reset_mock() ++ mock_driver.admin_down_port.reset_mock() ++ ++ net_task.execute(PORT_ID, passive_failure=True) ++ ++ mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) ++ mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), ++ mock.call(PORT_ID)]) ++ mock_driver.admin_down_port.assert_called_once_with(PORT_ID) ++ ++ # Test non-passive failure ++ mock_update_progress.reset_mock() ++ mock_driver.reset_mock() ++ mock_driver.admin_down_port.reset_mock() ++ ++ mock_driver.admin_down_port.side_effect = [ ++ exceptions.OctaviaException('boom')] ++ ++ self.assertRaises(exceptions.OctaviaException, net_task.execute, ++ PORT_ID) ++ ++ mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) ++ mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), ++ mock.call(PORT_ID)]) ++ mock_driver.admin_down_port.assert_not_called() ++ ++ def test_create_vip_base_port(self, mock_get_net_driver): ++ AMP_ID = uuidutils.generate_uuid() ++ PORT_ID = uuidutils.generate_uuid() ++ VIP_NETWORK_ID = uuidutils.generate_uuid() ++ VIP_QOS_ID = uuidutils.generate_uuid() ++ VIP_SG_ID = uuidutils.generate_uuid() ++ VIP_SUBNET_ID = uuidutils.generate_uuid() ++ VIP_IP_ADDRESS = '203.0.113.81' ++ mock_driver = mock.MagicMock() ++ mock_get_net_driver.return_value = mock_driver ++ vip_mock = mock.MagicMock() ++ vip_mock.ip_address = VIP_IP_ADDRESS ++ vip_mock.network_id = VIP_NETWORK_ID ++ vip_mock.qos_policy_id = VIP_QOS_ID ++ vip_mock.subnet_id = VIP_SUBNET_ID ++ port_mock = 
mock.MagicMock() ++ port_mock.id = PORT_ID ++ ++ mock_driver.create_port.side_effect = [ ++ port_mock, exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom'), ++ exceptions.OctaviaException('boom')] ++ mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')] ++ ++ net_task = network_tasks.CreateVIPBasePort() ++ ++ # Limit the retry attempts for the test run to save time ++ net_task.execute.retry.stop = tenacity.stop_after_attempt(2) ++ ++ # Test execute ++ result = net_task.execute(vip_mock, VIP_SG_ID, AMP_ID) ++ ++ self.assertEqual(port_mock, result) ++ mock_driver.create_port.assert_called_once_with( ++ VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID, ++ fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}], ++ secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID], ++ qos_policy_id=VIP_QOS_ID) ++ ++ # Test execute exception ++ mock_driver.reset_mock() ++ ++ self.assertRaises(exceptions.OctaviaException, net_task.execute, ++ vip_mock, None, AMP_ID) ++ ++ # Test revert when this task failed ++ mock_driver.reset_mock() ++ ++ net_task.revert(failure.Failure.from_exception(Exception('boom')), ++ vip_mock, VIP_SG_ID, AMP_ID) ++ ++ mock_driver.delete_port.assert_not_called() ++ ++ # Test revert ++ mock_driver.reset_mock() ++ ++ net_task.revert([port_mock], vip_mock, VIP_SG_ID, AMP_ID) ++ ++ mock_driver.delete_port.assert_called_once_with(PORT_ID) ++ ++ # Test revert exception ++ mock_driver.reset_mock() ++ ++ net_task.revert([port_mock], vip_mock, VIP_SG_ID, AMP_ID) ++ ++ mock_driver.delete_port.assert_called_once_with(PORT_ID) ++ ++ @mock.patch('time.sleep') ++ def test_admin_down_port(self, mock_sleep, mock_get_net_driver): ++ PORT_ID = uuidutils.generate_uuid() ++ mock_driver = mock.MagicMock() ++ mock_get_net_driver.return_value = mock_driver ++ port_down_mock = mock.MagicMock() ++ port_down_mock.status = constants.DOWN ++ port_up_mock = mock.MagicMock() ++ port_up_mock.status = constants.UP ++ mock_driver.set_port_admin_state_up.side_effect = [ ++ mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT, ++ Exception('boom')] ++ mock_driver.get_port.side_effect = [port_down_mock, port_up_mock] ++ ++ net_task = network_tasks.AdminDownPort() ++ ++ # Test execute ++ net_task.execute(PORT_ID) ++ ++ mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, ++ False) ++ mock_driver.get_port.assert_called_once_with(PORT_ID) ++ ++ # Test passive fail on port not found ++ mock_driver.reset_mock() ++ ++ net_task.execute(PORT_ID) ++ ++ mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, ++ False) ++ mock_driver.get_port.assert_not_called() ++ ++ # Test passive fail on port stays up ++ mock_driver.reset_mock() ++ ++ net_task.execute(PORT_ID) ++ ++ mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, ++ False) ++ mock_driver.get_port.assert_called_once_with(PORT_ID) ++ ++ # Test revert when this task failed ++ mock_driver.reset_mock() ++ ++ net_task.revert(failure.Failure.from_exception(Exception('boom')), ++ PORT_ID) ++ ++ mock_driver.set_port_admin_state_up.assert_not_called() ++ ++ # Test revert ++ mock_driver.reset_mock() ++ ++ net_task.revert(None, PORT_ID) ++ ++ mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, ++ True) ++ ++ # Test revert exception passive failure ++ mock_driver.reset_mock() ++ ++ net_task.revert(None, PORT_ID) ++ ++ mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, ++ True) ++ ++ 
@mock.patch('octavia.common.utils.get_vip_security_group_name') ++ def test_get_vip_security_group_id(self, mock_get_sg_name, ++ mock_get_net_driver): ++ LB_ID = uuidutils.generate_uuid() ++ SG_ID = uuidutils.generate_uuid() ++ SG_NAME = 'fake_SG_name' ++ mock_driver = mock.MagicMock() ++ mock_get_net_driver.return_value = mock_driver ++ mock_get_sg_name.return_value = SG_NAME ++ sg_mock = mock.MagicMock() ++ sg_mock.id = SG_ID ++ mock_driver.get_security_group.side_effect = [ ++ sg_mock, None, net_base.SecurityGroupNotFound, ++ net_base.SecurityGroupNotFound] ++ ++ net_task = network_tasks.GetVIPSecurityGroupID() ++ ++ # Test execute ++ result = net_task.execute(LB_ID) ++ ++ mock_driver.get_security_group.assert_called_once_with(SG_NAME) ++ mock_get_sg_name.assert_called_once_with(LB_ID) ++ ++ # Test execute with empty get subnet response ++ mock_driver.reset_mock() ++ mock_get_sg_name.reset_mock() ++ ++ result = net_task.execute(LB_ID) ++ ++ self.assertIsNone(result) ++ mock_get_sg_name.assert_called_once_with(LB_ID) ++ ++ # Test execute no security group found, security groups enabled ++ mock_driver.reset_mock() ++ mock_get_sg_name.reset_mock() ++ mock_driver.sec_grp_enabled = True ++ ++ self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute, ++ LB_ID) ++ mock_driver.get_security_group.assert_called_once_with(SG_NAME) ++ mock_get_sg_name.assert_called_once_with(LB_ID) ++ ++ # Test execute no security group found, security groups disabled ++ mock_driver.reset_mock() ++ mock_get_sg_name.reset_mock() ++ mock_driver.sec_grp_enabled = False ++ ++ result = net_task.execute(LB_ID) ++ ++ self.assertIsNone(result) ++ mock_driver.get_security_group.assert_called_once_with(SG_NAME) ++ mock_get_sg_name.assert_called_once_with(LB_ID) +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +=================================================================== +--- /dev/null ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +@@ -0,0 +1,47 @@ ++# Copyright 2020 Red Hat, Inc. All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++import mock ++ ++from taskflow import retry ++ ++from octavia.controller.worker.v1.tasks import retry_tasks ++import octavia.tests.unit.base as base ++ ++ ++class TestRetryTasks(base.TestCase): ++ ++ def setUp(self): ++ super(TestRetryTasks, self).setUp() ++ ++ @mock.patch('time.sleep') ++ def test_sleeping_retry_times_controller(self, mock_sleep): ++ retry_ctrlr = retry_tasks.SleepingRetryTimesController( ++ attempts=2, name='test_retry') ++ ++ # Test on_failure that should RETRY ++ history = ['boom'] ++ ++ result = retry_ctrlr.on_failure(history) ++ ++ self.assertEqual(retry.RETRY, result) ++ ++ # Test on_failure retries exhausted, should REVERT ++ history = ['boom', 'bang', 'pow'] ++ ++ result = retry_ctrlr.on_failure(history) ++ ++ self.assertEqual(retry.REVERT, result) ++ ++ # Test revert - should not raise ++ retry_ctrlr.revert(history) +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v1/test_controller_worker.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v1/test_controller_worker.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v1/test_controller_worker.py +@@ -21,6 +21,7 @@ from oslo_utils import uuidutils + from octavia.common import base_taskflow + from octavia.common import constants + from octavia.common import data_models ++from octavia.common import exceptions + from octavia.controller.worker.v1 import controller_worker + import octavia.tests.unit.base as base + +@@ -50,6 +51,8 @@ _vip_mock = mock.MagicMock() + _listener_mock = mock.MagicMock() + _load_balancer_mock = mock.MagicMock() + _load_balancer_mock.listeners = [_listener_mock] ++_load_balancer_mock.topology = constants.TOPOLOGY_SINGLE ++_load_balancer_mock.flavor_id = None + _member_mock = mock.MagicMock() + _pool_mock = mock.MagicMock() + _l7policy_mock = mock.MagicMock() +@@ -143,7 +146,8 @@ class TestControllerWorker(base.TestCase + 'TEST', + store={constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, +- constants.FLAVOR: None})) ++ constants.FLAVOR: None, ++ constants.SERVER_GROUP_ID: None})) + + _flow_mock.run.assert_called_once_with() + +@@ -152,38 +156,6 @@ class TestControllerWorker(base.TestCase + self.assertEqual(AMP_ID, amp) + + @mock.patch('octavia.controller.worker.v1.flows.' +- 'amphora_flows.AmphoraFlows.get_delete_amphora_flow', +- return_value='TEST') +- def test_delete_amphora(self, +- mock_get_delete_amp_flow, +- mock_api_get_session, +- mock_dyn_log_listener, +- mock_taskflow_load, +- mock_pool_repo_get, +- mock_member_repo_get, +- mock_l7rule_repo_get, +- mock_l7policy_repo_get, +- mock_listener_repo_get, +- mock_lb_repo_get, +- mock_health_mon_repo_get, +- mock_amp_repo_get): +- +- _flow_mock.reset_mock() +- +- cw = controller_worker.ControllerWorker() +- cw.delete_amphora(AMP_ID) +- +- mock_amp_repo_get.assert_called_once_with( +- _db_session, +- id=AMP_ID) +- +- (base_taskflow.BaseTaskFlowEngine._taskflow_load. +- assert_called_once_with('TEST', +- store={constants.AMPHORA: _amphora_mock})) +- +- _flow_mock.run.assert_called_once_with() +- +- @mock.patch('octavia.controller.worker.v1.flows.' + 'health_monitor_flows.HealthMonitorFlows.' 
+ 'get_create_health_monitor_flow', + return_value=_flow_mock) +@@ -423,7 +395,7 @@ class TestControllerWorker(base.TestCase + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, +- constants.FLAVOR: None ++ constants.FLAVOR: None, constants.SERVER_GROUP_ID: None + } + lb_mock = mock.MagicMock() + lb_mock.listeners = [] +@@ -470,7 +442,7 @@ class TestControllerWorker(base.TestCase + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, +- constants.FLAVOR: None ++ constants.FLAVOR: None, constants.SERVER_GROUP_ID: None + } + setattr(mock_lb_repo_get.return_value, 'topology', + constants.TOPOLOGY_ACTIVE_STANDBY) +@@ -517,7 +489,7 @@ class TestControllerWorker(base.TestCase + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, +- constants.FLAVOR: None ++ constants.FLAVOR: None, constants.SERVER_GROUP_ID: None + } + + cw = controller_worker.ControllerWorker() +@@ -570,7 +542,7 @@ class TestControllerWorker(base.TestCase + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, +- constants.FLAVOR: None ++ constants.FLAVOR: None, constants.SERVER_GROUP_ID: None + } + + cw = controller_worker.ControllerWorker() +@@ -1149,26 +1121,26 @@ class TestControllerWorker(base.TestCase + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + @mock.patch('octavia.controller.worker.v1.flows.' +- 'amphora_flows.AmphoraFlows.get_failover_flow', ++ 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') +- def test_failover_amphora(self, +- mock_update, +- mock_get_failover_flow, +- mock_get_flavor_meta, +- mock_api_get_session, +- mock_dyn_log_listener, +- mock_taskflow_load, +- mock_pool_repo_get, +- mock_member_repo_get, +- mock_l7rule_repo_get, +- mock_l7policy_repo_get, +- mock_listener_repo_get, +- mock_lb_repo_get, +- mock_health_mon_repo_get, +- mock_amp_repo_get): +- ++ def test_failover_amphora_lb_single(self, ++ mock_update, ++ mock_get_failover_flow, ++ mock_get_flavor_meta, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): + _flow_mock.reset_mock() ++ mock_lb_repo_get.return_value = _load_balancer_mock + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) +@@ -1176,22 +1148,175 @@ class TestControllerWorker(base.TestCase + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with( + _flow_mock, +- store={constants.FAILED_AMPHORA: _amphora_mock, ++ store={constants.FLAVOR: {'loadbalancer_topology': ++ _load_balancer_mock.topology}, ++ constants.LOADBALANCER: _load_balancer_mock, + constants.LOADBALANCER_ID: +- _amphora_mock.load_balancer_id, ++ _load_balancer_mock.id, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, +- constants.FLAVOR: {} ++ constants.SERVER_GROUP_ID: ++ _load_balancer_mock.server_group_id, ++ constants.VIP: _load_balancer_mock.vip + })) + + _flow_mock.run.assert_called_once_with() +- mock_update.assert_called_with(_db_session, LB_ID, +- provisioning_status=constants.ACTIVE) + +- @mock.patch('octavia.controller.worker.v1.controller_worker.' +- 'ControllerWorker._perform_amphora_failover') ++ @mock.patch('octavia.db.repositories.FlavorRepository.' ++ 'get_flavor_metadata_dict', return_value={}) ++ @mock.patch('octavia.controller.worker.v1.flows.' ++ 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', ++ return_value=_flow_mock) ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') ++ def test_failover_amphora_lb_act_stdby(self, ++ mock_update, ++ mock_get_failover_flow, ++ mock_get_flavor_meta, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ _flow_mock.reset_mock() ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.listeners = [_listener_mock] ++ load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY ++ load_balancer_mock.flavor_id = None ++ load_balancer_mock.vip = _vip_mock ++ ++ mock_lb_repo_get.return_value = load_balancer_mock ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_amphora(AMP_ID) ++ ++ (base_taskflow.BaseTaskFlowEngine._taskflow_load. ++ assert_called_once_with( ++ _flow_mock, ++ store={constants.FLAVOR: {'loadbalancer_topology': ++ load_balancer_mock.topology}, ++ constants.LOADBALANCER: load_balancer_mock, ++ constants.LOADBALANCER_ID: load_balancer_mock.id, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.SERVER_GROUP_ID: ++ load_balancer_mock.server_group_id, ++ constants.VIP: load_balancer_mock.vip ++ })) ++ ++ _flow_mock.run.assert_called_once_with() ++ ++ @mock.patch('octavia.db.repositories.FlavorRepository.' ++ 'get_flavor_metadata_dict', return_value={}) ++ @mock.patch('octavia.controller.worker.v1.flows.' 
++ 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', ++ return_value=_flow_mock) ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') ++ def test_failover_amphora_unknown_topology(self, ++ mock_update, ++ mock_get_failover_flow, ++ mock_get_flavor_meta, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ ++ _flow_mock.reset_mock() ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.listeners = [_listener_mock] ++ load_balancer_mock.topology = 'bogus' ++ load_balancer_mock.flavor_id = None ++ load_balancer_mock.vip = _vip_mock ++ ++ mock_lb_repo_get.return_value = load_balancer_mock ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_amphora(AMP_ID) ++ ++ (base_taskflow.BaseTaskFlowEngine._taskflow_load. ++ assert_called_once_with( ++ _flow_mock, ++ store={constants.FLAVOR: {'loadbalancer_topology': ++ load_balancer_mock.topology}, ++ constants.LOADBALANCER: load_balancer_mock, ++ constants.LOADBALANCER_ID: load_balancer_mock.id, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.SERVER_GROUP_ID: ++ load_balancer_mock.server_group_id, ++ constants.VIP: load_balancer_mock.vip ++ })) ++ ++ _flow_mock.run.assert_called_once_with() ++ ++ @mock.patch('octavia.db.repositories.FlavorRepository.' ++ 'get_flavor_metadata_dict', return_value={}) ++ @mock.patch('octavia.controller.worker.v1.flows.' ++ 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', ++ return_value=_flow_mock) ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') ++ def test_failover_amphora_with_flavor(self, ++ mock_update, ++ mock_get_failover_flow, ++ mock_get_flavor_meta, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ _flow_mock.reset_mock() ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.listeners = [_listener_mock] ++ load_balancer_mock.topology = constants.TOPOLOGY_SINGLE ++ load_balancer_mock.flavor_id = uuidutils.generate_uuid() ++ load_balancer_mock.vip = _vip_mock ++ mock_get_flavor_meta.return_value = {'taste': 'spicy'} ++ ++ mock_lb_repo_get.return_value = load_balancer_mock ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_amphora(AMP_ID) ++ ++ (base_taskflow.BaseTaskFlowEngine._taskflow_load. ++ assert_called_once_with( ++ _flow_mock, ++ store={constants.FLAVOR: {'loadbalancer_topology': ++ load_balancer_mock.topology, ++ 'taste': 'spicy'}, ++ constants.LOADBALANCER: load_balancer_mock, ++ constants.LOADBALANCER_ID: load_balancer_mock.id, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.SERVER_GROUP_ID: None, ++ constants.SERVER_GROUP_ID: ++ load_balancer_mock.server_group_id, ++ constants.VIP: load_balancer_mock.vip ++ })) ++ ++ _flow_mock.run.assert_called_once_with() ++ ++ @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.' 
++ 'AmphoraFlows.get_failover_amphora_flow') + def test_failover_amp_missing_amp(self, +- mock_perform_amp_failover, ++ mock_get_amp_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, +@@ -1209,14 +1334,11 @@ class TestControllerWorker(base.TestCase + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + +- mock_perform_amp_failover.assert_not_called() ++ mock_get_amp_failover.assert_not_called() + +- @mock.patch('octavia.controller.worker.v1.controller_worker.' +- 'ControllerWorker._perform_amphora_failover') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amp_flow_exception(self, + mock_update, +- mock_perform_amp_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, +@@ -1229,18 +1351,21 @@ class TestControllerWorker(base.TestCase + mock_health_mon_repo_get, + mock_amp_repo_get): + +- mock_perform_amp_failover.side_effect = TestException('boom') ++ mock_amphora = mock.MagicMock() ++ mock_amphora.id = AMP_ID ++ mock_amphora.load_balancer_id = LB_ID ++ mock_amp_repo_get.return_value = mock_amphora ++ ++ mock_lb_repo_get.side_effect = TestException('boom') + cw = controller_worker.ControllerWorker() +- self.assertRaises(TestException, cw.failover_amphora, AMP_ID) ++ cw.failover_amphora(AMP_ID) + mock_update.assert_called_with(_db_session, LB_ID, + provisioning_status=constants.ERROR) + +- @mock.patch('octavia.controller.worker.v1.controller_worker.' +- 'ControllerWorker._perform_amphora_failover') +- @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') ++ @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.' ++ 'AmphoraFlows.get_failover_amphora_flow') + def test_failover_amp_no_lb(self, +- mock_lb_update, +- mock_perform_amp_failover, ++ mock_get_failover_amp_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, +@@ -1252,23 +1377,35 @@ class TestControllerWorker(base.TestCase + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): +- +- amphora = mock.MagicMock() +- amphora.load_balancer_id = None +- mock_amp_repo_get.return_value = amphora ++ _flow_mock.run.reset_mock() ++ FAKE_FLOW = 'FAKE_FLOW' ++ mock_amphora = mock.MagicMock() ++ mock_amphora.load_balancer_id = None ++ mock_amphora.id = AMP_ID ++ mock_amphora.status = constants.AMPHORA_READY ++ mock_amp_repo_get.return_value = mock_amphora ++ mock_get_failover_amp_flow.return_value = FAKE_FLOW ++ expected_stored_params = {constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.FLAVOR: {}, ++ constants.LOADBALANCER: None, ++ constants.LOADBALANCER_ID: None, ++ constants.SERVER_GROUP_ID: None, ++ constants.VIP: None} + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + +- mock_lb_update.assert_not_called() +- mock_perform_amp_failover.assert_called_once_with( +- amphora, constants.LB_CREATE_FAILOVER_PRIORITY) ++ mock_get_failover_amp_flow.assert_called_once_with(mock_amphora, None) ++ (base_taskflow.BaseTaskFlowEngine._taskflow_load. ++ assert_called_once_with(FAKE_FLOW, store=expected_stored_params)) ++ _flow_mock.run.assert_called_once_with() + + @mock.patch( + 'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora', + return_value=None) + @mock.patch('octavia.controller.worker.v1.flows.' 
+- 'amphora_flows.AmphoraFlows.get_failover_flow', ++ 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', + return_value=_flow_mock) + def test_failover_spare_amphora(self, + mock_get_failover_flow, +@@ -1292,19 +1429,21 @@ class TestControllerWorker(base.TestCase + mock_amphora.id = AMP_ID + mock_amphora.status = constants.AMPHORA_READY + mock_amphora.load_balancer_id = None ++ mock_amp_repo_get.return_value = mock_amphora + + cw = controller_worker.ControllerWorker() +- cw._perform_amphora_failover(mock_amphora, +- constants.LB_CREATE_FAILOVER_PRIORITY) ++ cw.failover_amphora(AMP_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with( + _flow_mock, +- store={constants.FAILED_AMPHORA: mock_amphora, ++ store={constants.LOADBALANCER: None, + constants.LOADBALANCER_ID: None, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, +- constants.FLAVOR: {} ++ constants.FLAVOR: {}, ++ constants.SERVER_GROUP_ID: None, ++ constants.VIP: None + })) + + _flow_mock.run.assert_called_once_with() +@@ -1328,71 +1467,345 @@ class TestControllerWorker(base.TestCase + mock_amphora = mock.MagicMock() + mock_amphora.id = AMP_ID + mock_amphora.status = constants.DELETED ++ mock_amp_repo_get.return_value = mock_amphora + + cw = controller_worker.ControllerWorker() +- cw._perform_amphora_failover(mock_amphora, 10) ++ cw.failover_amphora(AMP_ID) + + mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID) + mock_taskflow_load.assert_not_called() + +- @mock.patch('octavia.controller.worker.v1.' +- 'controller_worker.ControllerWorker._perform_amphora_failover') ++ def test_get_amphorae_for_failover_single(self, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ amphora1_mock = mock.MagicMock() ++ amphora1_mock.status = constants.AMPHORA_ALLOCATED ++ amphora2_mock = mock.MagicMock() ++ amphora2_mock.status = constants.DELETED ++ ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.topology = constants.TOPOLOGY_SINGLE ++ load_balancer_mock.amphorae = [amphora1_mock, amphora2_mock] ++ ++ cw = controller_worker.ControllerWorker() ++ result = cw._get_amphorae_for_failover(load_balancer_mock) ++ ++ self.assertEqual([amphora1_mock], result) ++ ++ @mock.patch('octavia.common.utils.get_amphora_driver') ++ def test_get_amphorae_for_failover_act_stdby(self, ++ mock_get_amp_driver, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ # Note: This test uses three amphora even though we only have ++ # two per load balancer to properly test the ordering from ++ # this method. 
++ amp_driver_mock = mock.MagicMock() ++ amp_driver_mock.get_interface_from_ip.side_effect = [ ++ 'fake0', None, 'fake1'] ++ mock_get_amp_driver.return_value = amp_driver_mock ++ backup_amphora_mock = mock.MagicMock() ++ backup_amphora_mock.status = constants.AMPHORA_ALLOCATED ++ deleted_amphora_mock = mock.MagicMock() ++ deleted_amphora_mock.status = constants.DELETED ++ master_amphora_mock = mock.MagicMock() ++ master_amphora_mock.status = constants.AMPHORA_ALLOCATED ++ bogus_amphora_mock = mock.MagicMock() ++ bogus_amphora_mock.status = constants.AMPHORA_ALLOCATED ++ ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY ++ load_balancer_mock.amphorae = [ ++ master_amphora_mock, deleted_amphora_mock, backup_amphora_mock, ++ bogus_amphora_mock] ++ ++ cw = controller_worker.ControllerWorker() ++ result = cw._get_amphorae_for_failover(load_balancer_mock) ++ ++ self.assertEqual([master_amphora_mock, bogus_amphora_mock, ++ backup_amphora_mock], result) ++ ++ @mock.patch('octavia.common.utils.get_amphora_driver') ++ def test_get_amphorae_for_failover_act_stdby_net_split( ++ self, mock_get_amp_driver, mock_api_get_session, ++ mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, ++ mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, ++ mock_listener_repo_get, mock_lb_repo_get, mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ # Case where the amps can't see eachother and somehow end up with ++ # two amphora with an interface. This is highly unlikely as the ++ # higher priority amphora should get the IP in a net split, but ++ # let's test the code for this odd case. ++ # Note: This test uses three amphora even though we only have ++ # two per load balancer to properly test the ordering from ++ # this method. ++ amp_driver_mock = mock.MagicMock() ++ amp_driver_mock.get_interface_from_ip.side_effect = [ ++ 'fake0', 'fake1'] ++ mock_get_amp_driver.return_value = amp_driver_mock ++ backup_amphora_mock = mock.MagicMock() ++ backup_amphora_mock.status = constants.AMPHORA_ALLOCATED ++ deleted_amphora_mock = mock.MagicMock() ++ deleted_amphora_mock.status = constants.DELETED ++ master_amphora_mock = mock.MagicMock() ++ master_amphora_mock.status = constants.AMPHORA_ALLOCATED ++ ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY ++ load_balancer_mock.amphorae = [ ++ backup_amphora_mock, deleted_amphora_mock, master_amphora_mock] ++ ++ cw = controller_worker.ControllerWorker() ++ result = cw._get_amphorae_for_failover(load_balancer_mock) ++ ++ self.assertEqual([backup_amphora_mock, master_amphora_mock], result) ++ ++ def test_get_amphorae_for_failover_bogus_topology(self, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.topology = 'bogus' ++ ++ cw = controller_worker.ControllerWorker() ++ self.assertRaises(exceptions.InvalidTopology, ++ cw._get_amphorae_for_failover, ++ load_balancer_mock) ++ ++ @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' ++ 'LoadBalancerFlows.get_failover_LB_flow') ++ @mock.patch('octavia.controller.worker.v1.controller_worker.' 
++ 'ControllerWorker._get_amphorae_for_failover') ++ def test_failover_loadbalancer_single(self, ++ mock_get_amps_for_failover, ++ mock_get_failover_lb_flow, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ FAKE_FLOW = 'FAKE_FLOW' ++ _flow_mock.reset_mock() ++ mock_lb_repo_get.return_value = _load_balancer_mock ++ mock_get_amps_for_failover.return_value = [_amphora_mock] ++ mock_get_failover_lb_flow.return_value = FAKE_FLOW ++ ++ expected_flavor = {constants.LOADBALANCER_TOPOLOGY: ++ _load_balancer_mock.topology} ++ expected_flow_store = {constants.LOADBALANCER: _load_balancer_mock, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.LOADBALANCER_ID: ++ _load_balancer_mock.id, ++ constants.SERVER_GROUP_ID: ++ _load_balancer_mock.server_group_id, ++ constants.FLAVOR: expected_flavor} ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_loadbalancer(LB_ID) ++ ++ mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) ++ mock_get_amps_for_failover.assert_called_once_with(_load_balancer_mock) ++ mock_get_failover_lb_flow.assert_called_once_with([_amphora_mock], ++ _load_balancer_mock) ++ mock_taskflow_load.assert_called_once_with(FAKE_FLOW, ++ store=expected_flow_store) ++ _flow_mock.run.assert_called_once_with() ++ ++ @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' ++ 'LoadBalancerFlows.get_failover_LB_flow') ++ @mock.patch('octavia.controller.worker.v1.controller_worker.' ++ 'ControllerWorker._get_amphorae_for_failover') ++ def test_failover_loadbalancer_act_stdby(self, ++ mock_get_amps_for_failover, ++ mock_get_failover_lb_flow, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ FAKE_FLOW = 'FAKE_FLOW' ++ _flow_mock.reset_mock() ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.listeners = [_listener_mock] ++ load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY ++ load_balancer_mock.flavor_id = None ++ load_balancer_mock.vip = _vip_mock ++ mock_lb_repo_get.return_value = load_balancer_mock ++ mock_get_amps_for_failover.return_value = [_amphora_mock, ++ _amphora_mock] ++ mock_get_failover_lb_flow.return_value = FAKE_FLOW ++ ++ expected_flavor = {constants.LOADBALANCER_TOPOLOGY: ++ load_balancer_mock.topology} ++ expected_flow_store = {constants.LOADBALANCER: load_balancer_mock, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.LOADBALANCER_ID: ++ load_balancer_mock.id, ++ constants.SERVER_GROUP_ID: ++ load_balancer_mock.server_group_id, ++ constants.FLAVOR: expected_flavor} ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_loadbalancer(LB_ID) ++ ++ mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) ++ mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) ++ mock_get_failover_lb_flow.assert_called_once_with( ++ [_amphora_mock, _amphora_mock], load_balancer_mock) ++ mock_taskflow_load.assert_called_once_with(FAKE_FLOW, ++ store=expected_flow_store) ++ _flow_mock.run.assert_called_once_with() ++ + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') +- 
def test_failover_loadbalancer(self, +- mock_update, +- mock_perform, +- mock_api_get_session, +- mock_dyn_log_listener, +- mock_taskflow_load, +- mock_pool_repo_get, +- mock_member_repo_get, +- mock_l7rule_repo_get, +- mock_l7policy_repo_get, +- mock_listener_repo_get, +- mock_lb_repo_get, +- mock_health_mon_repo_get, +- mock_amp_repo_get): +- _amphora_mock2 = mock.MagicMock() +- _amphora_mock3 = mock.MagicMock() +- _amphora_mock3.status = constants.DELETED +- _load_balancer_mock.amphorae = [ +- _amphora_mock, _amphora_mock2, _amphora_mock3] +- cw = controller_worker.ControllerWorker() +- cw.failover_loadbalancer('123') +- mock_perform.assert_called_with( +- _amphora_mock2, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) +- mock_update.assert_called_with(_db_session, '123', +- provisioning_status=constants.ACTIVE) +- +- mock_perform.reset +- _load_balancer_mock.amphorae = [ +- _amphora_mock, _amphora_mock2, _amphora_mock3] +- _amphora_mock2.role = constants.ROLE_BACKUP +- cw.failover_loadbalancer('123') +- # because mock2 gets failed over earlier now _amphora_mock +- # is the last one +- mock_perform.assert_called_with( +- _amphora_mock, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) +- mock_update.assert_called_with(_db_session, '123', +- provisioning_status=constants.ACTIVE) +- +- mock_perform.reset +- mock_perform.side_effect = OverflowError() +- self.assertRaises(OverflowError, cw.failover_loadbalancer, 123) +- mock_update.assert_called_with(_db_session, 123, +- provisioning_status=constants.ERROR) ++ def test_failover_loadbalancer_no_lb(self, ++ mock_lb_repo_update, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ mock_lb_repo_get.return_value = None ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_loadbalancer(LB_ID) ++ ++ mock_lb_repo_update.assert_called_once_with( ++ _db_session, LB_ID, provisioning_status=constants.ERROR) ++ ++ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') ++ @mock.patch('octavia.controller.worker.v1.controller_worker.' ++ 'ControllerWorker._get_amphorae_for_failover') ++ def test_failover_loadbalancer_with_bogus_topology( ++ self, mock_get_amps_for_failover, mock_lb_repo_update, ++ mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, ++ mock_pool_repo_get, mock_member_repo_get, mock_l7rule_repo_get, ++ mock_l7policy_repo_get, mock_listener_repo_get, mock_lb_repo_get, ++ mock_health_mon_repo_get, mock_amp_repo_get): ++ _flow_mock.reset_mock() ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.topology = 'bogus' ++ mock_lb_repo_get.return_value = load_balancer_mock ++ mock_get_amps_for_failover.return_value = [_amphora_mock] ++ ++ cw = controller_worker.ControllerWorker() ++ result = cw.failover_loadbalancer(LB_ID) ++ ++ self.assertIsNone(result) ++ mock_lb_repo_update.assert_called_once_with( ++ _db_session, LB_ID, provisioning_status=constants.ERROR) ++ mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) ++ mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) ++ ++ @mock.patch('octavia.db.repositories.FlavorRepository.' ++ 'get_flavor_metadata_dict', return_value={'taste': 'spicy'}) ++ @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 
++ 'LoadBalancerFlows.get_failover_LB_flow') ++ @mock.patch('octavia.controller.worker.v1.controller_worker.' ++ 'ControllerWorker._get_amphorae_for_failover') ++ def test_failover_loadbalancer_with_flavor(self, ++ mock_get_amps_for_failover, ++ mock_get_failover_lb_flow, ++ mock_get_flavor_meta, ++ mock_api_get_session, ++ mock_dyn_log_listener, ++ mock_taskflow_load, ++ mock_pool_repo_get, ++ mock_member_repo_get, ++ mock_l7rule_repo_get, ++ mock_l7policy_repo_get, ++ mock_listener_repo_get, ++ mock_lb_repo_get, ++ mock_health_mon_repo_get, ++ mock_amp_repo_get): ++ FAKE_FLOW = 'FAKE_FLOW' ++ _flow_mock.reset_mock() ++ load_balancer_mock = mock.MagicMock() ++ load_balancer_mock.listeners = [_listener_mock] ++ load_balancer_mock.topology = constants.TOPOLOGY_SINGLE ++ load_balancer_mock.flavor_id = uuidutils.generate_uuid() ++ load_balancer_mock.vip = _vip_mock ++ mock_lb_repo_get.return_value = load_balancer_mock ++ mock_get_amps_for_failover.return_value = [_amphora_mock, ++ _amphora_mock] ++ mock_get_failover_lb_flow.return_value = FAKE_FLOW ++ ++ expected_flavor = {'taste': 'spicy', constants.LOADBALANCER_TOPOLOGY: ++ load_balancer_mock.topology} ++ expected_flow_store = {constants.LOADBALANCER: load_balancer_mock, ++ constants.BUILD_TYPE_PRIORITY: ++ constants.LB_CREATE_FAILOVER_PRIORITY, ++ constants.LOADBALANCER_ID: ++ load_balancer_mock.id, ++ constants.FLAVOR: expected_flavor, ++ constants.SERVER_GROUP_ID: ++ load_balancer_mock.server_group_id} ++ ++ cw = controller_worker.ControllerWorker() ++ cw.failover_loadbalancer(LB_ID) ++ ++ mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) ++ mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) ++ mock_get_failover_lb_flow.assert_called_once_with( ++ [_amphora_mock, _amphora_mock], load_balancer_mock) ++ mock_taskflow_load.assert_called_once_with(FAKE_FLOW, ++ store=expected_flow_store) ++ _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + @mock.patch('octavia.controller.worker.v1.flows.' +- 'amphora_flows.AmphoraFlows.get_failover_flow', ++ 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', + return_value=_flow_mock) + @mock.patch( + 'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora', + return_value=_load_balancer_mock) +- @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amphora_anti_affinity(self, +- mock_update, + mock_get_lb_for_amphora, + mock_get_update_listener_flow, + mock_get_flavor_meta, +@@ -1418,18 +1831,18 @@ class TestControllerWorker(base.TestCase + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with( + _flow_mock, +- store={constants.FAILED_AMPHORA: _amphora_mock, +- constants.LOADBALANCER_ID: +- _amphora_mock.load_balancer_id, ++ store={constants.LOADBALANCER_ID: _load_balancer_mock.id, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, +- constants.SERVER_GROUP_ID: "123", +- constants.FLAVOR: {} ++ constants.FLAVOR: {'loadbalancer_topology': ++ _load_balancer_mock.topology}, ++ constants.LOADBALANCER: _load_balancer_mock, ++ constants.VIP: _load_balancer_mock.vip, ++ constants.SERVER_GROUP_ID: ++ _load_balancer_mock.server_group_id + })) + + _flow_mock.run.assert_called_once_with() +- mock_update.assert_called_with(_db_session, LB_ID, +- provisioning_status=constants.ACTIVE) + + @mock.patch('octavia.controller.worker.v1.flows.' 
+ 'amphora_flows.AmphoraFlows.cert_rotate_amphora_flow', +Index: octavia-5.0.1/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py ++++ octavia-5.0.1/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +@@ -499,8 +499,9 @@ class TestAmphoraDriverTasks(base.TestCa + amphora_update_vrrp_interface_obj = ( + amphora_driver_tasks.AmphoraUpdateVRRPInterface()) + amphora_update_vrrp_interface_obj.execute(_LB_mock) +- mock_driver.get_vrrp_interface.assert_called_once_with( +- _amphora_mock, timeout_dict=timeout_dict) ++ mock_driver.get_interface_from_ip.assert_called_once_with( ++ _amphora_mock, _amphora_mock.vrrp_ip, ++ timeout_dict=timeout_dict) + + # Test revert + mock_driver.reset_mock() +Index: octavia-5.0.1/octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py ++++ octavia-5.0.1/octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +@@ -342,15 +342,65 @@ class TestAllowedAddressPairsDriver(base + for amp in amps: + mock_plug_aap.assert_any_call(lb, lb.vip, amp, subnet) + +- def test_update_vip_sg(self): +- lb = dmh.generate_load_balancer_tree() +- list_security_groups = self.driver.neutron_client.list_security_groups +- list_security_groups.return_value = { +- 'security_groups': [ +- {'id': 'lb-sec-grp1'} +- ] +- } +- self.driver.update_vip_sg(lb, lb.vip) ++ @mock.patch('octavia.common.utils.get_vip_security_group_name') ++ def test_update_vip_sg(self, mock_get_sg_name): ++ LB_ID = uuidutils.generate_uuid() ++ SG_ID = uuidutils.generate_uuid() ++ VIP_PORT_ID = uuidutils.generate_uuid() ++ TEST_SG_NAME = 'test_SG_name' ++ lb_mock = mock.MagicMock() ++ lb_mock.id = LB_ID ++ vip_mock = mock.MagicMock() ++ vip_mock.port_id = VIP_PORT_ID ++ security_group_dict = {'id': SG_ID} ++ mock_get_sg_name.return_value = TEST_SG_NAME ++ ++ test_driver = allowed_address_pairs.AllowedAddressPairsDriver() ++ ++ test_driver._add_vip_security_group_to_port = mock.MagicMock() ++ test_driver._create_security_group = mock.MagicMock() ++ test_driver._get_lb_security_group = mock.MagicMock() ++ test_driver._update_security_group_rules = mock.MagicMock() ++ test_driver._get_lb_security_group.side_effect = [security_group_dict, ++ None] ++ test_driver._create_security_group.return_value = security_group_dict ++ ++ # Test security groups disabled ++ test_driver.sec_grp_enabled = False ++ ++ result = test_driver.update_vip_sg(lb_mock, vip_mock) ++ ++ self.assertIsNone(result) ++ test_driver._add_vip_security_group_to_port.assert_not_called() ++ test_driver._get_lb_security_group.assert_not_called() ++ test_driver._update_security_group_rules.assert_not_called() ++ ++ # Test by security group ID ++ test_driver.sec_grp_enabled = True ++ ++ result = test_driver.update_vip_sg(lb_mock, vip_mock) ++ ++ self.assertEqual(SG_ID, result) ++ test_driver._update_security_group_rules.assert_called_once_with( ++ lb_mock, SG_ID) ++ test_driver._add_vip_security_group_to_port.assert_called_once_with( ++ LB_ID, VIP_PORT_ID, SG_ID) ++ ++ # Test by security group name ++ test_driver._add_vip_security_group_to_port.reset_mock() ++ test_driver._get_lb_security_group.reset_mock() ++ 
test_driver._update_security_group_rules.reset_mock() ++ ++ result = test_driver.update_vip_sg(lb_mock, vip_mock) ++ ++ self.assertEqual(SG_ID, result) ++ mock_get_sg_name.assert_called_once_with(LB_ID) ++ test_driver._create_security_group.assert_called_once_with( ++ TEST_SG_NAME) ++ test_driver._update_security_group_rules.assert_called_once_with( ++ lb_mock, SG_ID) ++ test_driver._add_vip_security_group_to_port.assert_called_once_with( ++ LB_ID, VIP_PORT_ID, SG_ID) + + def test_plug_aap_port(self): + lb = dmh.generate_load_balancer_tree() +@@ -453,12 +503,38 @@ class TestAllowedAddressPairsDriver(base + t_constants.MOCK_VRRP_IP2]) + self.assertEqual(lb.vip.ip_address, amp.ha_ip) + ++ def test_validate_fixed_ip(self): ++ IP_ADDRESS = '203.0.113.61' ++ OTHER_IP_ADDRESS = '203.0.113.62' ++ SUBNET_ID = uuidutils.generate_uuid() ++ OTHER_SUBNET_ID = uuidutils.generate_uuid() ++ fixed_ip_mock = mock.MagicMock() ++ fixed_ip_mock.subnet_id = SUBNET_ID ++ fixed_ip_mock.ip_address = IP_ADDRESS ++ ++ # valid ++ result = self.driver._validate_fixed_ip([fixed_ip_mock], SUBNET_ID, ++ IP_ADDRESS) ++ self.assertTrue(result) ++ ++ # no subnet match ++ result = self.driver._validate_fixed_ip( ++ [fixed_ip_mock], OTHER_SUBNET_ID, IP_ADDRESS) ++ self.assertFalse(result) ++ ++ # no IP match ++ result = self.driver._validate_fixed_ip([fixed_ip_mock], SUBNET_ID, ++ OTHER_IP_ADDRESS) ++ self.assertFalse(result) ++ + def test_allocate_vip_when_port_already_provided(self): + show_port = self.driver.neutron_client.show_port + show_port.return_value = t_constants.MOCK_NEUTRON_PORT + fake_lb_vip = data_models.Vip( + port_id=t_constants.MOCK_PORT_ID, +- subnet_id=t_constants.MOCK_SUBNET_ID) ++ subnet_id=t_constants.MOCK_SUBNET_ID, ++ network_id=t_constants.MOCK_NETWORK_ID, ++ ip_address=t_constants.MOCK_IP_ADDRESS) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip) + vip = self.driver.allocate_vip(fake_lb) + self.assertIsInstance(vip, data_models.Vip) +@@ -467,6 +543,108 @@ class TestAllowedAddressPairsDriver(base + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
++ '_check_extension_enabled', return_value=True) ++ def test_allocate_vip_with_port_mismatch(self, mock_check_ext): ++ bad_existing_port = mock.MagicMock() ++ bad_existing_port.port_id = uuidutils.generate_uuid() ++ bad_existing_port.network_id = uuidutils.generate_uuid() ++ bad_existing_port.subnet_id = uuidutils.generate_uuid() ++ show_port = self.driver.neutron_client.show_port ++ show_port.return_value = bad_existing_port ++ port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT) ++ port_create_dict['port']['device_owner'] = ( ++ allowed_address_pairs.OCTAVIA_OWNER) ++ port_create_dict['port']['device_id'] = 'lb-1' ++ create_port = self.driver.neutron_client.create_port ++ create_port.return_value = port_create_dict ++ show_subnet = self.driver.neutron_client.show_subnet ++ show_subnet.return_value = {'subnet': { ++ 'id': t_constants.MOCK_SUBNET_ID, ++ 'network_id': t_constants.MOCK_NETWORK_ID ++ }} ++ fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, ++ network_id=t_constants.MOCK_NETWORK_ID, ++ port_id=t_constants.MOCK_PORT_ID, ++ octavia_owned=True) ++ fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, ++ project_id='test-project') ++ vip = self.driver.allocate_vip(fake_lb) ++ exp_create_port_call = { ++ 'port': { ++ 'name': 'octavia-lb-1', ++ 'network_id': t_constants.MOCK_NETWORK_ID, ++ 'device_id': 'lb-1', ++ 'device_owner': allowed_address_pairs.OCTAVIA_OWNER, ++ 'admin_state_up': False, ++ 'project_id': 'test-project', ++ 'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}] ++ } ++ } ++ self.driver.neutron_client.delete_port.assert_called_once_with( ++ t_constants.MOCK_PORT_ID) ++ create_port.assert_called_once_with(exp_create_port_call) ++ self.assertIsInstance(vip, data_models.Vip) ++ self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) ++ self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) ++ self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) ++ self.assertEqual(fake_lb.id, vip.load_balancer_id) ++ ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' ++ 'get_port', side_effect=network_base.PortNotFound) ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
++ '_check_extension_enabled', return_value=True) ++ def test_allocate_vip_when_port_not_found(self, mock_check_ext, ++ mock_get_port): ++ port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT) ++ port_create_dict['port']['device_owner'] = ( ++ allowed_address_pairs.OCTAVIA_OWNER) ++ port_create_dict['port']['device_id'] = 'lb-1' ++ create_port = self.driver.neutron_client.create_port ++ create_port.return_value = port_create_dict ++ show_subnet = self.driver.neutron_client.show_subnet ++ show_subnet.return_value = {'subnet': { ++ 'id': t_constants.MOCK_SUBNET_ID, ++ 'network_id': t_constants.MOCK_NETWORK_ID ++ }} ++ fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, ++ network_id=t_constants.MOCK_NETWORK_ID, ++ port_id=t_constants.MOCK_PORT_ID) ++ fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, ++ project_id='test-project') ++ vip = self.driver.allocate_vip(fake_lb) ++ exp_create_port_call = { ++ 'port': { ++ 'name': 'octavia-lb-1', ++ 'network_id': t_constants.MOCK_NETWORK_ID, ++ 'device_id': 'lb-1', ++ 'device_owner': allowed_address_pairs.OCTAVIA_OWNER, ++ 'admin_state_up': False, ++ 'project_id': 'test-project', ++ 'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}] ++ } ++ } ++ create_port.assert_called_once_with(exp_create_port_call) ++ self.assertIsInstance(vip, data_models.Vip) ++ self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) ++ self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) ++ self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) ++ self.assertEqual(fake_lb.id, vip.load_balancer_id) ++ ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' ++ 'get_port', side_effect=Exception('boom')) ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' ++ '_check_extension_enabled', return_value=True) ++ def test_allocate_vip_unkown_exception(self, mock_check_ext, ++ mock_get_port): ++ fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, ++ network_id=t_constants.MOCK_NETWORK_ID, ++ port_id=t_constants.MOCK_PORT_ID) ++ fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, ++ project_id='test-project') ++ self.assertRaises(network_base.AllocateVIPException, ++ self.driver.allocate_vip, fake_lb) ++ + def test_allocate_vip_when_port_creation_fails(self): + fake_lb_vip = data_models.Vip( + subnet_id=t_constants.MOCK_SUBNET_ID) +@@ -514,6 +692,77 @@ class TestAllowedAddressPairsDriver(base + self.assertEqual(fake_lb.id, vip.load_balancer_id) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
++ '_check_extension_enabled', return_value=True) ++ def test_allocate_vip_when_no_port_fixed_ip(self, mock_check_ext): ++ port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT) ++ port_create_dict['port']['device_owner'] = ( ++ allowed_address_pairs.OCTAVIA_OWNER) ++ port_create_dict['port']['device_id'] = 'lb-1' ++ create_port = self.driver.neutron_client.create_port ++ create_port.return_value = port_create_dict ++ show_subnet = self.driver.neutron_client.show_subnet ++ show_subnet.return_value = {'subnet': { ++ 'id': t_constants.MOCK_SUBNET_ID, ++ 'network_id': t_constants.MOCK_NETWORK_ID ++ }} ++ fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, ++ network_id=t_constants.MOCK_NETWORK_ID, ++ ip_address=t_constants.MOCK_IP_ADDRESS) ++ fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, ++ project_id='test-project') ++ vip = self.driver.allocate_vip(fake_lb) ++ exp_create_port_call = { ++ 'port': { ++ 'name': 'octavia-lb-1', ++ 'network_id': t_constants.MOCK_NETWORK_ID, ++ 'device_id': 'lb-1', ++ 'device_owner': allowed_address_pairs.OCTAVIA_OWNER, ++ 'admin_state_up': False, ++ 'project_id': 'test-project', ++ 'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID, ++ 'ip_address': t_constants.MOCK_IP_ADDRESS}] ++ } ++ } ++ create_port.assert_called_once_with(exp_create_port_call) ++ self.assertIsInstance(vip, data_models.Vip) ++ self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) ++ self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) ++ self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) ++ self.assertEqual(fake_lb.id, vip.load_balancer_id) ++ ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' ++ '_check_extension_enabled', return_value=True) ++ def test_allocate_vip_when_no_port_no_fixed_ip(self, mock_check_ext): ++ port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT) ++ port_create_dict['port']['device_owner'] = ( ++ allowed_address_pairs.OCTAVIA_OWNER) ++ port_create_dict['port']['device_id'] = 'lb-1' ++ create_port = self.driver.neutron_client.create_port ++ create_port.return_value = port_create_dict ++ show_subnet = self.driver.neutron_client.show_subnet ++ show_subnet.return_value = {'subnet': { ++ 'id': t_constants.MOCK_SUBNET_ID, ++ 'network_id': t_constants.MOCK_NETWORK_ID ++ }} ++ fake_lb_vip = data_models.Vip(network_id=t_constants.MOCK_NETWORK_ID) ++ fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, ++ project_id='test-project') ++ vip = self.driver.allocate_vip(fake_lb) ++ exp_create_port_call = { ++ 'port': { ++ 'name': 'octavia-lb-1', ++ 'network_id': t_constants.MOCK_NETWORK_ID, ++ 'device_id': 'lb-1', ++ 'device_owner': allowed_address_pairs.OCTAVIA_OWNER, ++ 'admin_state_up': False, ++ 'project_id': 'test-project'} ++ } ++ create_port.assert_called_once_with(exp_create_port_call) ++ self.assertIsInstance(vip, data_models.Vip) ++ self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) ++ self.assertEqual(fake_lb.id, vip.load_balancer_id) ++ ++ @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
+ '_check_extension_enabled', return_value=False) + def test_allocate_vip_when_no_port_provided_tenant(self, mock_check_ext): + port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT) +@@ -627,8 +876,8 @@ class TestAllowedAddressPairsDriver(base + def test_plug_network_when_compute_instance_cant_be_found(self): + net_id = t_constants.MOCK_NOVA_INTERFACE.net_id + network_attach = self.driver.compute.attach_network_or_port +- network_attach.side_effect = nova_exceptions.NotFound( +- 404, message='Instance not found') ++ network_attach.side_effect = exceptions.NotFound( ++ resource='Instance not found', id=1) + self.assertRaises(network_base.AmphoraNotFound, + self.driver.plug_network, + t_constants.MOCK_COMPUTE_ID, net_id) +@@ -963,20 +1212,20 @@ class TestAllowedAddressPairsDriver(base + port_id=self.PORT_ID) + + # NotFound cases +- network_attach.side_effect = nova_exceptions.NotFound( +- 1, message='Instance') ++ network_attach.side_effect = exceptions.NotFound( ++ resource='Instance', id=1) + self.assertRaises(network_base.AmphoraNotFound, + self.driver.plug_port, + amphora, + port) +- network_attach.side_effect = nova_exceptions.NotFound( +- 1, message='Network') ++ network_attach.side_effect = exceptions.NotFound( ++ resource='Network', id=1) + self.assertRaises(network_base.NetworkNotFound, + self.driver.plug_port, + amphora, + port) +- network_attach.side_effect = nova_exceptions.NotFound( +- 1, message='bogus') ++ network_attach.side_effect = exceptions.NotFound( ++ resource='bogus', id=1) + self.assertRaises(network_base.PlugNetworkException, + self.driver.plug_port, + amphora, +@@ -1101,3 +1350,157 @@ class TestAllowedAddressPairsDriver(base + self.assertRaises(network_base.TimeoutException, + self.driver.wait_for_port_detach, + amphora) ++ ++ def test_delete_port(self): ++ PORT_ID = uuidutils.generate_uuid() ++ ++ self.driver.neutron_client.delete_port.side_effect = [ ++ mock.DEFAULT, neutron_exceptions.NotFound, Exception('boom')] ++ ++ # Test successful delete ++ self.driver.delete_port(PORT_ID) ++ ++ self.driver.neutron_client.delete_port.assert_called_once_with(PORT_ID) ++ ++ # Test port NotFound (does not raise) ++ self.driver.delete_port(PORT_ID) ++ ++ # Test unknown exception ++ self.assertRaises(exceptions.NetworkServiceError, ++ self.driver.delete_port, PORT_ID) ++ ++ def test_set_port_admin_state_up(self): ++ PORT_ID = uuidutils.generate_uuid() ++ TEST_STATE = 'test state' ++ ++ self.driver.neutron_client.update_port.side_effect = [ ++ mock.DEFAULT, neutron_exceptions.NotFound, Exception('boom')] ++ ++ # Test successful state set ++ self.driver.set_port_admin_state_up(PORT_ID, TEST_STATE) ++ ++ self.driver.neutron_client.update_port.assert_called_once_with( ++ PORT_ID, {'port': {'admin_state_up': TEST_STATE}}) ++ ++ # Test port NotFound ++ self.assertRaises(network_base.PortNotFound, ++ self.driver.set_port_admin_state_up, ++ PORT_ID, {'port': {'admin_state_up': TEST_STATE}}) ++ ++ # Test unknown exception ++ self.assertRaises(exceptions.NetworkServiceError, ++ self.driver.set_port_admin_state_up, PORT_ID, ++ {'port': {'admin_state_up': TEST_STATE}}) ++ ++ def test_create_port(self): ++ ADMIN_STATE_UP = False ++ FAKE_NAME = 'fake_name' ++ IP_ADDRESS1 = '203.0.113.71' ++ IP_ADDRESS2 = '203.0.113.72' ++ IP_ADDRESS3 = '203.0.113.73' ++ NETWORK_ID = uuidutils.generate_uuid() ++ QOS_POLICY_ID = uuidutils.generate_uuid() ++ SECONDARY_IPS = [IP_ADDRESS2, IP_ADDRESS3] ++ SECURITY_GROUP_ID = uuidutils.generate_uuid() ++ SUBNET1_ID = uuidutils.generate_uuid() ++ FIXED_IPS 
= [{'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}] ++ ++ MOCK_NEUTRON_PORT = {'port': { ++ 'network_id': NETWORK_ID, 'device_id': t_constants.MOCK_DEVICE_ID, ++ 'device_owner': t_constants.MOCK_DEVICE_OWNER, ++ 'id': t_constants.MOCK_PORT_ID, 'name': FAKE_NAME, ++ 'tenant_id': t_constants.MOCK_PROJECT_ID, ++ 'admin_state_up': ADMIN_STATE_UP, ++ 'status': t_constants.MOCK_STATUS, ++ 'mac_address': t_constants.MOCK_MAC_ADDR, ++ 'fixed_ips': [{'ip_address': IP_ADDRESS1, ++ 'subnet_id': SUBNET1_ID}], ++ 'security_groups': [], ++ 'qos_policy_id': QOS_POLICY_ID}} ++ ++ reference_port_dict = {'admin_state_up': ADMIN_STATE_UP, ++ 'device_id': t_constants.MOCK_DEVICE_ID, ++ 'device_owner': t_constants.MOCK_DEVICE_OWNER, ++ 'fixed_ips': [], ++ 'id': t_constants.MOCK_PORT_ID, ++ 'mac_address': t_constants.MOCK_MAC_ADDR, ++ 'name': FAKE_NAME, ++ 'network': None, ++ 'network_id': NETWORK_ID, ++ 'project_id': t_constants.MOCK_PROJECT_ID, ++ 'qos_policy_id': QOS_POLICY_ID, ++ 'security_group_ids': [], ++ 'status': t_constants.MOCK_STATUS} ++ ++ self.driver.neutron_client.create_port.side_effect = [ ++ MOCK_NEUTRON_PORT, MOCK_NEUTRON_PORT, Exception('boom')] ++ ++ # Test successful path ++ result = self.driver.create_port( ++ NETWORK_ID, name=FAKE_NAME, fixed_ips=FIXED_IPS, ++ secondary_ips=SECONDARY_IPS, ++ security_group_ids=[SECURITY_GROUP_ID], admin_state_up=False, ++ qos_policy_id=QOS_POLICY_ID) ++ ++ self.assertEqual(reference_port_dict, result.to_dict()) ++ self.driver.neutron_client.create_port.assert_called_once_with( ++ {'port': { ++ 'network_id': NETWORK_ID, 'admin_state_up': ADMIN_STATE_UP, ++ 'device_owner': allowed_address_pairs.OCTAVIA_OWNER, ++ 'allowed_address_pairs': [ ++ {'ip_address': IP_ADDRESS2}, {'ip_address': IP_ADDRESS3}], ++ 'fixed_ips': [{ ++ 'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}], ++ 'name': FAKE_NAME, 'qos_policy_id': QOS_POLICY_ID, ++ 'security_groups': [SECURITY_GROUP_ID]}}) ++ ++ # Test minimal successful path ++ result = self.driver.create_port(NETWORK_ID) ++ ++ self.assertEqual(reference_port_dict, result.to_dict()) ++ ++ # Test exception ++ self.assertRaises(network_base.CreatePortException, ++ self.driver.create_port, NETWORK_ID, name=FAKE_NAME, ++ fixed_ips=FIXED_IPS, secondary_ips=SECONDARY_IPS, ++ security_group_ids=[SECURITY_GROUP_ID], ++ admin_state_up=False, qos_policy_id=QOS_POLICY_ID) ++ ++ def test_get_security_group(self): ++ ++ # Test the case of security groups disabled in neutron ++ FAKE_SG_NAME = 'Fake_SG_name' ++ FAKE_NEUTRON_SECURITY_GROUPS = {'security_groups': [ ++ t_constants.MOCK_SECURITY_GROUP]} ++ reference_sg_dict = {'id': t_constants.MOCK_SECURITY_GROUP_ID, ++ 'name': t_constants.MOCK_SECURITY_GROUP_NAME, ++ 'description': '', 'tags': [], ++ 'security_group_rule_ids': [], ++ 'stateful': None, ++ 'project_id': t_constants.MOCK_PROJECT_ID} ++ ++ self.driver.neutron_client.list_security_groups.side_effect = [ ++ FAKE_NEUTRON_SECURITY_GROUPS, None, Exception('boom')] ++ ++ self.driver.sec_grp_enabled = False ++ result = self.driver.get_security_group(FAKE_SG_NAME) ++ ++ self.assertIsNone(result) ++ self.driver.neutron_client.list_security_groups.assert_not_called() ++ ++ # Test successful get of the security group ++ self.driver.sec_grp_enabled = True ++ ++ result = self.driver.get_security_group(FAKE_SG_NAME) ++ ++ self.assertEqual(reference_sg_dict, result.to_dict()) ++ self.driver.neutron_client.list_security_groups.called_once_with( ++ name=FAKE_SG_NAME) ++ ++ # Test no security groups returned ++ 
self.assertRaises(network_base.SecurityGroupNotFound, ++ self.driver.get_security_group, FAKE_SG_NAME) ++ ++ # Test with an unknown exception ++ self.assertRaises(network_base.NetworkException, ++ self.driver.get_security_group, FAKE_SG_NAME) +Index: octavia-5.0.1/octavia/tests/unit/network/drivers/neutron/test_utils.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/network/drivers/neutron/test_utils.py ++++ octavia-5.0.1/octavia/tests/unit/network/drivers/neutron/test_utils.py +@@ -66,6 +66,7 @@ class TestNeutronUtils(base.TestCase): + project_id=t_constants.MOCK_PROJECT_ID, + admin_state_up=t_constants.MOCK_ADMIN_STATE_UP, + fixed_ips=[], ++ security_group_ids=[], + ) + self._compare_ignore_value_none(model_obj.to_dict(), assert_dict) + fixed_ips = t_constants.MOCK_NEUTRON_PORT['port']['fixed_ips'] +Index: octavia-5.0.1/octavia/tests/unit/network/drivers/test_network_noop_driver.py +=================================================================== +--- octavia-5.0.1.orig/octavia/tests/unit/network/drivers/test_network_noop_driver.py ++++ octavia-5.0.1/octavia/tests/unit/network/drivers/test_network_noop_driver.py +@@ -16,6 +16,7 @@ import mock + from oslo_utils import uuidutils + + from octavia.db import models ++from octavia.network import data_models as network_models + from octavia.network.drivers.noop_driver import driver + import octavia.tests.unit.base as base + +@@ -186,6 +187,14 @@ class TestNoopNetworkDriver(base.TestCas + self.device_id)] + ) + ++ def test_get_security_group(self): ++ FAKE_SG_NAME = 'fake_sg_name' ++ result = self.driver.get_security_group(FAKE_SG_NAME) ++ ++ self.assertEqual((FAKE_SG_NAME, 'get_security_group'), ++ self.driver.driver.networkconfigconfig[FAKE_SG_NAME]) ++ self.assertTrue(uuidutils.is_uuid_like(result.id)) ++ + def test_plug_port(self): + self.driver.plug_port(self.amphora1, self.port) + self.assertEqual( +@@ -237,3 +246,50 @@ class TestNoopNetworkDriver(base.TestCas + self.driver.driver.networkconfigconfig[self.amphora1.id, + self.vip.ip_address] + ) ++ ++ def test_delete_port(self): ++ PORT_ID = uuidutils.generate_uuid() ++ ++ self.driver.delete_port(PORT_ID) ++ ++ self.assertEqual((PORT_ID, 'delete_port'), ++ self.driver.driver.networkconfigconfig[PORT_ID]) ++ ++ def test_set_port_admin_state_up(self): ++ PORT_ID = uuidutils.generate_uuid() ++ ++ self.driver.set_port_admin_state_up(PORT_ID, False) ++ ++ self.assertEqual( ++ (PORT_ID, False, 'admin_down_port'), ++ self.driver.driver.networkconfigconfig[(PORT_ID, False)]) ++ ++ def test_create_port(self): ++ FAKE_NAME = 'fake_name' ++ IP_ADDRESS = '2001:db8::77' ++ NETWORK_ID = uuidutils.generate_uuid() ++ QOS_POLICY_ID = uuidutils.generate_uuid() ++ SUBNET_ID = uuidutils.generate_uuid() ++ FIXED_IPS = [{'ip_address': IP_ADDRESS, 'subnet_id': SUBNET_ID}, ++ {'subnet_id': SUBNET_ID}] ++ ++ # Test minimum ++ result = self.driver.create_port(NETWORK_ID) ++ ++ self.assertIsInstance(result, network_models.Port) ++ self.assertEqual(NETWORK_ID, result.network_id) ++ ++ # Test full parameters ++ result = self.driver.create_port( ++ NETWORK_ID, name=FAKE_NAME, fixed_ips=FIXED_IPS, ++ admin_state_up=False, qos_policy_id=QOS_POLICY_ID) ++ ++ self.assertIsInstance(result, network_models.Port) ++ self.assertEqual(NETWORK_ID, result.network_id) ++ self.assertEqual(FAKE_NAME, result.name) ++ self.assertEqual(IP_ADDRESS, result.fixed_ips[0].ip_address) ++ self.assertEqual(SUBNET_ID, result.fixed_ips[0].subnet_id) ++ 
self.assertEqual('198.51.100.56', result.fixed_ips[1].ip_address) ++ self.assertEqual(SUBNET_ID, result.fixed_ips[1].subnet_id) ++ self.assertEqual(QOS_POLICY_ID, result.qos_policy_id) ++ self.assertFalse(result.admin_state_up) +Index: octavia-5.0.1/releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml +=================================================================== +--- /dev/null ++++ octavia-5.0.1/releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml +@@ -0,0 +1,11 @@ ++--- ++upgrade: ++ - | ++ The failover improvements do not require an updated amphora image, ++ but updating existing amphora will minimize the failover ++ outage time for standalone amphora on subsequent failovers. ++fixes: ++ - | ++ Significantly improved the reliability and performance of amphora ++ and load balancer failovers. This is especially true when the ++ Nova service is experiencing failures. +Index: octavia-5.0.1/tools/create_flow_docs.py +=================================================================== +--- octavia-5.0.1.orig/tools/create_flow_docs.py ++++ octavia-5.0.1/tools/create_flow_docs.py +@@ -52,13 +52,12 @@ def generate(flow_list, output_directory + current_instance = current_class() + get_flow_method = getattr(current_instance, current_tuple[2]) + if (current_tuple[1] == 'AmphoraFlows' and +- current_tuple[2] == 'get_failover_flow'): ++ current_tuple[2] == 'get_failover_amphora_flow'): + amp1 = dmh.generate_amphora() + amp2 = dmh.generate_amphora() + lb = dmh.generate_load_balancer(amphorae=[amp1, amp2]) + current_engine = engines.load( +- get_flow_method(role=constants.ROLE_STANDALONE, +- load_balancer=lb)) ++ get_flow_method(amp1, 2)) + elif (current_tuple[1] == 'LoadBalancerFlows' and + current_tuple[2] == 'get_create_load_balancer_flow'): + current_engine = engines.load( +@@ -74,6 +73,15 @@ def generate(flow_list, output_directory + lb = dmh.generate_load_balancer() + delete_flow, store = get_flow_method(lb) + current_engine = engines.load(delete_flow) ++ elif (current_tuple[1] == 'LoadBalancerFlows' and ++ current_tuple[2] == 'get_failover_LB_flow'): ++ amp1 = dmh.generate_amphora() ++ amp2 = dmh.generate_amphora() ++ lb = dmh.generate_load_balancer( ++ amphorae=[amp1, amp2], ++ topology=constants.TOPOLOGY_ACTIVE_STANDBY) ++ current_engine = engines.load( ++ get_flow_method([amp1, amp2], lb)) + elif (current_tuple[1] == 'MemberFlows' and + current_tuple[2] == 'get_batch_update_members_flow'): + current_engine = engines.load( +Index: octavia-5.0.1/tools/flow-list.txt +=================================================================== +--- octavia-5.0.1.orig/tools/flow-list.txt ++++ octavia-5.0.1/tools/flow-list.txt +@@ -3,12 +3,13 @@ + # Format: + # module class flow + octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_create_amphora_flow +-octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_flow ++octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_amphora_flow + octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow + octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow + octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow + octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow + octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow 
++octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow + octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_listener_flow + octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_all_listeners_flow + octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_delete_listener_flow diff -Nru octavia-5.0.1/debian/patches/0004-Add-missing-reload-method-in-amphora-noop-driver.patch octavia-5.0.1/debian/patches/0004-Add-missing-reload-method-in-amphora-noop-driver.patch --- octavia-5.0.1/debian/patches/0004-Add-missing-reload-method-in-amphora-noop-driver.patch 1970-01-01 01:00:00.000000000 +0100 +++ octavia-5.0.1/debian/patches/0004-Add-missing-reload-method-in-amphora-noop-driver.patch 2020-10-15 20:15:24.000000000 +0100 @@ -0,0 +1,95 @@ +From d285f517d2842b0cc87de997f941214025c7e979 Mon Sep 17 00:00:00 2001 +From: Carlos Goncalves +Date: Tue, 23 Jun 2020 10:54:32 +0200 +Subject: [PATCH] Add missing reload method in amphora noop driver + +The reload method was also missing in the abstract class. + +Task: 40140 +Story: 2007847 + +Change-Id: I2328b3dc4d5b95c8771a305d3d4bb1dee6019117 +(cherry picked from commit 89123c0fc117e095c44ed21e360394974d3e15a5) +(cherry picked from commit 7e3d09ae76101ea4d49b2f9b3a906b2d9066f11f) +--- + octavia/amphorae/drivers/driver_base.py | 14 ++++++++++++++ + octavia/amphorae/drivers/noop_driver/driver.py | 11 +++++++++++ + .../test_noop_amphoraloadbalancer_driver.py | 8 ++++++++ + 3 files changed, 33 insertions(+) + +diff --git a/octavia/amphorae/drivers/driver_base.py b/octavia/amphorae/drivers/driver_base.py +index 725412b9..3f27dfa5 100644 +--- a/octavia/amphorae/drivers/driver_base.py ++++ b/octavia/amphorae/drivers/driver_base.py +@@ -68,6 +68,20 @@ class AmphoraLoadBalancerDriver(object): + add more function along with the development. + """ + ++ @abc.abstractmethod ++ def reload(self, loadbalancer, amphora): ++ """Reload the listeners on the amphora. ++ ++ :param loadbalancer: loadbalancer object to reload listeners ++ :type loadbalancer: octavia.db.models.LoadBalancer ++ :param amphora: Amphora to start. If None, reload on all amphora ++ :type amphora: octavia.db.models.Amphora ++ :returns: return a value list (listener, vip, status flag--enable) ++ ++ At this moment, we just build the basic structure for testing, will ++ add more function along with the development. ++ """ ++ + @abc.abstractmethod + def delete(self, listener): + """Delete the listener on the vip. 
+diff --git a/octavia/amphorae/drivers/noop_driver/driver.py b/octavia/amphorae/drivers/noop_driver/driver.py +index 60de1d92..fc512e63 100644 +--- a/octavia/amphorae/drivers/noop_driver/driver.py ++++ b/octavia/amphorae/drivers/noop_driver/driver.py +@@ -64,6 +64,13 @@ class NoopManager(object): + (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, + 'start') + ++ def reload(self, loadbalancer, amphora=None): ++ LOG.debug("Amphora %s no-op, reload listeners, lb %s, amp %s", ++ self.__class__.__name__, loadbalancer.id, amphora) ++ self.amphoraconfig[ ++ (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, ++ 'reload') ++ + def delete(self, listener): + LOG.debug("Amphora %s no-op, delete listener %s, vip %s", + self.__class__.__name__, +@@ -141,6 +148,10 @@ class NoopAmphoraLoadBalancerDriver( + + self.driver.start(loadbalancer, amphora) + ++ def reload(self, loadbalancer, amphora=None): ++ ++ self.driver.reload(loadbalancer, amphora) ++ + def delete(self, listener): + + self.driver.delete(listener) +diff --git a/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py b/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py +index 381d47c1..ac0e40d6 100644 +--- a/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py ++++ b/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py +@@ -95,6 +95,14 @@ class TestNoopAmphoraLoadBalancerDriver(base.TestCase): + self.driver.driver.amphoraconfig[( + self.load_balancer.id, '321')]) + ++ def test_reload(self): ++ mock_amphora = mock.MagicMock() ++ mock_amphora.id = '321' ++ self.driver.reload(self.load_balancer, amphora=mock_amphora) ++ self.assertEqual((self.load_balancer, mock_amphora, 'reload'), ++ self.driver.driver.amphoraconfig[( ++ self.load_balancer.id, '321')]) ++ + def test_delete(self): + self.driver.delete(self.listener) + self.assertEqual((self.listener, self.vip, 'delete'), +-- +2.17.1 + diff -Nru octavia-5.0.1/debian/patches/0005-Fix-missing-params-in-amphora-base-and-noop-driver.patch octavia-5.0.1/debian/patches/0005-Fix-missing-params-in-amphora-base-and-noop-driver.patch --- octavia-5.0.1/debian/patches/0005-Fix-missing-params-in-amphora-base-and-noop-driver.patch 1970-01-01 01:00:00.000000000 +0100 +++ octavia-5.0.1/debian/patches/0005-Fix-missing-params-in-amphora-base-and-noop-driver.patch 2020-10-15 20:15:24.000000000 +0100 @@ -0,0 +1,111 @@ +From dc47caa1a27e0c7876637715d62f2520d27f7ec7 Mon Sep 17 00:00:00 2001 +From: Carlos Goncalves +Date: Wed, 24 Jun 2020 11:55:11 +0200 +Subject: [PATCH] Fix missing params in amphora base and noop driver + +Running amphora failover against the amphora noop driver was raising a +TypeError (reload() takes from 2 to 3 positional arguments but 4 were +given). 
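A minimal sketch of the failure mode described above, using stand-in classes rather than the actual no-op driver (the timeout_dict keys are the ones documented in this patch):

# Stand-ins illustrating the signature mismatch; not the octavia no-op driver code.
class OldNoopManager(object):
    def reload(self, loadbalancer, amphora=None):                     # pre-patch signature
        pass

class FixedNoopManager(object):
    def reload(self, loadbalancer, amphora=None, timeout_dict=None):  # post-patch signature
        pass

# Keys taken from the timeout_dict documented in this patch.
timeouts = {'req_conn_timeout': 10, 'req_read_timeout': 10,
            'conn_max_retries': 3, 'conn_retry_interval': 5}

try:
    OldNoopManager().reload('lb', 'amp', timeouts)
except TypeError as exc:
    print(exc)  # reload() takes from 2 to 3 positional arguments but 4 were given

FixedNoopManager().reload('lb', 'amp', timeouts)  # accepted once timeout_dict exists
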
+ +Change-Id: I64172d6995959cf377364584ad9a2395f9ec0605 +(cherry picked from commit 41c628a084002017d2003926cf0e25ba3ffeee0c) +(cherry picked from commit 176791e675141b37ddf139a9d9f2ae6bb814fa20) +--- + octavia/amphorae/drivers/driver_base.py | 14 ++++++++++-- + .../amphorae/drivers/noop_driver/driver.py | 22 ++++++++++--------- + 2 files changed, 24 insertions(+), 12 deletions(-) + +diff --git a/octavia/amphorae/drivers/driver_base.py b/octavia/amphorae/drivers/driver_base.py +index 3f27dfa5..72781c53 100644 +--- a/octavia/amphorae/drivers/driver_base.py ++++ b/octavia/amphorae/drivers/driver_base.py +@@ -55,13 +55,18 @@ class AmphoraLoadBalancerDriver(object): + """ + + @abc.abstractmethod +- def start(self, loadbalancer, amphora): ++ def start(self, loadbalancer, amphora, timeout_dict=None): + """Start the listeners on the amphora. + + :param loadbalancer: loadbalancer object to start listeners + :type loadbalancer: octavia.db.models.LoadBalancer + :param amphora: Amphora to start. If None, start on all amphora + :type amphora: octavia.db.models.Amphora ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval ++ :type timeout_dict: dict + :returns: return a value list (listener, vip, status flag--enable) + + At this moment, we just build the basic structure for testing, will +@@ -69,13 +74,18 @@ class AmphoraLoadBalancerDriver(object): + """ + + @abc.abstractmethod +- def reload(self, loadbalancer, amphora): ++ def reload(self, loadbalancer, amphora, timeout_dict=None): + """Reload the listeners on the amphora. + + :param loadbalancer: loadbalancer object to reload listeners + :type loadbalancer: octavia.db.models.LoadBalancer + :param amphora: Amphora to start. If None, reload on all amphora + :type amphora: octavia.db.models.Amphora ++ :param timeout_dict: Dictionary of timeout values for calls to the ++ amphora. 
May contain: req_conn_timeout, ++ req_read_timeout, conn_max_retries, ++ conn_retry_interval ++ :type timeout_dict: dict + :returns: return a value list (listener, vip, status flag--enable) + + At this moment, we just build the basic structure for testing, will +diff --git a/octavia/amphorae/drivers/noop_driver/driver.py b/octavia/amphorae/drivers/noop_driver/driver.py +index fc512e63..8dfa79a3 100644 +--- a/octavia/amphorae/drivers/noop_driver/driver.py ++++ b/octavia/amphorae/drivers/noop_driver/driver.py +@@ -57,16 +57,18 @@ class NoopManager(object): + loadbalancer.vip, + 'active') + +- def start(self, loadbalancer, amphora=None): +- LOG.debug("Amphora %s no-op, start listeners, lb %s, amp %s", +- self.__class__.__name__, loadbalancer.id, amphora) ++ def start(self, loadbalancer, amphora=None, timeout_dict=None): ++ LOG.debug("Amphora %s no-op, start listeners, lb %s, amp %s" ++ "timeouts %s", self.__class__.__name__, loadbalancer.id, ++ amphora, timeout_dict) + self.amphoraconfig[ + (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, + 'start') + +- def reload(self, loadbalancer, amphora=None): +- LOG.debug("Amphora %s no-op, reload listeners, lb %s, amp %s", +- self.__class__.__name__, loadbalancer.id, amphora) ++ def reload(self, loadbalancer, amphora=None, timeout_dict=None): ++ LOG.debug("Amphora %s no-op, reload listeners, lb %s, amp %s, " ++ "timeouts %s", self.__class__.__name__, loadbalancer.id, ++ amphora, timeout_dict) + self.amphoraconfig[ + (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, + 'reload') +@@ -144,13 +146,13 @@ class NoopAmphoraLoadBalancerDriver( + + self.driver.update(loadbalancer) + +- def start(self, loadbalancer, amphora=None): ++ def start(self, loadbalancer, amphora=None, timeout_dict=None): + +- self.driver.start(loadbalancer, amphora) ++ self.driver.start(loadbalancer, amphora, timeout_dict) + +- def reload(self, loadbalancer, amphora=None): ++ def reload(self, loadbalancer, amphora=None, timeout_dict=None): + +- self.driver.reload(loadbalancer, amphora) ++ self.driver.reload(loadbalancer, amphora, timeout_dict) + + def delete(self, listener): + +-- +2.17.1 + diff -Nru octavia-5.0.1/debian/patches/series octavia-5.0.1/debian/patches/series --- octavia-5.0.1/debian/patches/series 2020-07-08 06:25:17.000000000 +0100 +++ octavia-5.0.1/debian/patches/series 2020-10-15 20:15:24.000000000 +0100 @@ -1,2 +1,8 @@ disable-sphinxcontrib.rsvgconverter.patch fix-batch-member-create-for-v1-amphora-driver.patch +0000-Use-retry-for-AmphoraComputeConnectivityWait.patch +0001-Validate-resource-access-when-creating-loadbalancer-.patch +0002-Workaround-peer-name-starting-with-hyphen.patch +0003-Refactor-the-failover-flows.patch +0004-Add-missing-reload-method-in-amphora-noop-driver.patch +0005-Fix-missing-params-in-amphora-base-and-noop-driver.patch
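For orientation on the controller-worker test changes earlier in this diff, the ordering asserted by the new test_get_amphorae_for_failover_* cases can be sketched as below. This is reconstructed from the test expectations only, not from the backported implementation, so the address probed via get_interface_from_ip and the exception keyword are assumptions:

# Reconstructed from the test expectations in this diff; not the actual
# octavia.controller.worker.v1.controller_worker helper.
from octavia.common import constants, exceptions, utils

def get_amphorae_for_failover(load_balancer):
    if load_balancer.topology == constants.TOPOLOGY_SINGLE:
        # DELETED amphorae are never failed over.
        return [amp for amp in load_balancer.amphorae
                if amp.status != constants.DELETED]
    if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
        amp_driver = utils.get_amphora_driver()
        vip_owners, others = [], []
        for amp in load_balancer.amphorae:
            if amp.status == constants.DELETED:
                continue
            # Which address is probed is an assumption; the tests only stub
            # the return value of get_interface_from_ip().
            interface = amp_driver.get_interface_from_ip(
                amp, load_balancer.vip.ip_address)
            (vip_owners if interface else others).append(amp)
        # The tests expect amphorae holding a VIP interface first and the
        # interface-less (standby) amphora last.
        return vip_owners + others
    # A 'bogus' topology raises InvalidTopology in the tests.
    raise exceptions.InvalidTopology(topology=load_balancer.topology)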