From e7a3a07bfcd5557384402c9b3faa35172c5856cc Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 27 Oct 2017 16:03:15 -0400 Subject: [PATCH] Validate new image via scheduler during rebuild During a rebuild we bypass the scheduler because we are always rebuilding the instance on the same host it's already on. However, we allow passing a new image during rebuild and that new image needs to be validated to work with the instance host by running it through the scheduler filters, like the ImagePropertiesFilter. Otherwise the new image could violate constraints placed on the host by the admin. This change checks to see if there is a new image provided and if so, modifies the request spec passed to the scheduler so that the new image is validated all while restricting the scheduler to still pick the same host that the instance is running on. If the image is not valid for the host, the scheduler will raise NoValidHost and the rebuild stops. A functional test is added to show the recreate of the bug and that we properly stop the rebuild now in conductor by calling the scheduler to validate the image. Co-Authored-By: Sylvain Bauza Closes-Bug: #1664931 Conflicts: nova/conductor/manager.py nova/tests/functional/integrated_helpers.py nova/tests/functional/test_servers.py NOTE(mriedem): There are a few changes needed for Ocata: 1. I6590f0eda4ec4996543ad40d8c2640b83fc3dd9d changed some of the conditional logic in the conductor rebuild_instance method. That refactor is not replayed here, just the necessary change for the fix. 2. The _wait_for_action_fail_completion method didn't exist in Ocata. 3. The PlacementFixture wasn't used in _IntegratedTestBase so it's done as part of the test setup. 4. The default scheduler filters were different in Ocata so the test just restricts to using ImagePropertiesFilter. 5. A few imports were needed in the test module. 6. 
The post_server_action API client method did not allow passing through check_response_status so api_post is used instead. Change-Id: I11746d1ea996a0f18b7c54b4c9c21df58cc4714b (cherry picked from commit ada062fedd13be156e35ec3e09453ec9c91f8c38) (cherry picked from commit f637d14a4c1a32083f7246f3222d4a0a88bf8045) --- nova/compute/api.py | 17 ++++++- nova/conductor/manager.py | 2 +- nova/tests/functional/api/client.py | 10 ++++ nova/tests/functional/integrated_helpers.py | 47 +++++++++++++++-- nova/tests/functional/test_servers.py | 78 +++++++++++++++++++++++++++++ nova/tests/unit/compute/test_compute_api.py | 12 ++++- 6 files changed, 159 insertions(+), 7 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 588a812..cc1507b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -3051,9 +3051,24 @@ class API(base.Base): # sure that all our instances are currently migrated to have an # attached RequestSpec object but let's consider that the operator only # half migrated all their instances in the meantime. + host = instance.host try: request_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) + # If a new image is provided on rebuild, we will need to run + # through the scheduler again, but we want the instance to be + # rebuilt on the same host it's already on. + if orig_image_ref != image_href: + request_spec.requested_destination = objects.Destination( + host=instance.host, + node=instance.node) + # We have to modify the request spec that goes to the scheduler + # to contain the new image. We persist this since we've already + # changed the instance.image_ref above so we're being + # consistent. + request_spec.image = objects.ImageMeta.from_dict(image) + request_spec.save() + host = None # This tells conductor to call the scheduler. 
except exception.RequestSpecNotFound: # Some old instances can still have no RequestSpec object attached # to them, we need to support the old way @@ -3063,7 +3078,7 @@ class API(base.Base): new_pass=admin_password, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, - preserve_ephemeral=preserve_ephemeral, host=instance.host, + preserve_ephemeral=preserve_ephemeral, host=host, request_spec=request_spec, kwargs=kwargs) diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index 9ef3eff..11e3be8 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -705,7 +705,7 @@ class ComputeTaskManager(base.Base): filter_properties = {'ignore_hosts': [instance.host]} request_spec = scheduler_utils.build_request_spec( context, image_ref, [instance]) - else: + elif recreate: # NOTE(sbauza): Augment the RequestSpec object by excluding # the source host for avoiding the scheduler to pick it request_spec.ignore_hosts = request_spec.ignore_hosts or [] diff --git a/nova/tests/functional/api/client.py b/nova/tests/functional/api/client.py index e0e9fe4..b7b68d4 100644 --- a/nova/tests/functional/api/client.py +++ b/nova/tests/functional/api/client.py @@ -295,6 +295,16 @@ class TestOpenStackClient(object): def delete_image(self, image_id): return self.api_delete('/images/%s' % image_id) + def put_image_meta_key(self, image_id, key, value): + """Creates or updates a given image metadata key/value pair.""" + req_body = { + 'meta': { + key: value + } + } + return self.api_put('/images/%s/metadata/%s' % (image_id, key), + req_body) + def get_flavor(self, flavor_id): return self.api_get('/flavors/%s' % flavor_id).body['flavor'] diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py index a6014ac..e1339bf 100644 --- a/nova/tests/functional/integrated_helpers.py +++ b/nova/tests/functional/integrated_helpers.py @@ -77,10 +77,11 @@ 
class _IntegratedTestBase(test.TestCase): self.flags(use_neutron=self.USE_NEUTRON) nova.tests.unit.image.fake.stub_out_image_service(self) - self._setup_services() self.useFixture(cast_as_call.CastAsCall(self.stubs)) + self._setup_services() + self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) def _setup_compute_service(self): @@ -99,8 +100,11 @@ class _IntegratedTestBase(test.TestCase): self.conductor = self.start_service('conductor') self.consoleauth = self.start_service('consoleauth') - self.network = self.start_service('network', - manager=CONF.network_manager) + if self.USE_NEUTRON: + self.useFixture(nova_fixtures.NeutronFixture(self)) + else: + self.network = self.start_service('network', + manager=CONF.network_manager) self.scheduler = self._setup_scheduler_service() self.compute = self._setup_compute_service() @@ -248,3 +252,40 @@ class InstanceHelperMixin(object): if networks is not None: server['networks'] = networks return server + + def _wait_for_action_fail_completion( + self, server, expected_action, event_name, api=None): + """Polls instance action events for the given instance, action and + action event name until it finds the action event with an error + result. + """ + if api is None: + api = self.api + completion_event = None + for attempt in range(10): + actions = api.get_instance_actions(server['id']) + # Look for the migrate action. + for action in actions: + if action['action'] == expected_action: + events = ( + api.api_get( + '/servers/%s/os-instance-actions/%s' % + (server['id'], action['request_id']) + ).body['instanceAction']['events']) + # Look for the action event being in error state. + for event in events: + if (event['event'] == event_name and + event['result'] is not None and + event['result'].lower() == 'error'): + completion_event = event + # Break out of the events loop. + break + if completion_event: + # Break out of the actions loop. + break + # We didn't find the completion event yet, so wait a bit. 
+ time.sleep(0.5) + + if completion_event is None: + self.fail('Timed out waiting for %s failure event. Current ' + 'instance actions: %s' % (event_name, actions)) diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py index 0f3df6b..729d7ad 100644 --- a/nova/tests/functional/test_servers.py +++ b/nova/tests/functional/test_servers.py @@ -23,17 +23,20 @@ from oslo_serialization import base64 from oslo_utils import timeutils from nova.compute import api as compute_api +from nova.compute import instance_actions from nova.compute import rpcapi from nova import context from nova import exception from nova import objects from nova.objects import block_device as block_device_obj from nova import test +from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client from nova.tests.functional import integrated_helpers from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_block_device from nova.tests.unit import fake_network +import nova.tests.unit.image.fake from nova import volume @@ -907,3 +910,78 @@ class ServerTestV220(ServersTestBase): self.assertTrue(mock_clean_vols.called) self._delete_server(server_id) + + +class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase, + integrated_helpers.InstanceHelperMixin): + api_major_version = 'v2.1' + # We have to cap the microversion at 2.38 because that's the max we + # can use to update image metadata via our compute images proxy API. + microversion = '2.38' + # We want to use neutron and we shouldn't require locking. + USE_NEUTRON = True + REQUIRES_LOCKING = False + + # We need the ImagePropertiesFilter so override the base class setup + # which configures to use the chance_scheduler. + def _setup_scheduler_service(self): + # Make sure we're using the PlacementFixture before starting the + # scheduler since the FilterScheduler relies on Placement. 
+ self.useFixture(nova_fixtures.PlacementFixture()) + self.flags(enabled_filters=['ImagePropertiesFilter'], + group='filter_scheduler') + return self.start_service('scheduler') + + def test_rebuild_with_image_novalidhost(self): + """Creates a server with an image that is valid for the single compute + that we have. Then rebuilds the server, passing in an image with + metadata that does not fit the single compute which should result in + a NoValidHost error. The ImagePropertiesFilter filter is enabled by + default so that should filter out the host based on the image meta. + """ + server_req_body = { + 'server': { + # We hard-code from a fake image since we can't get images + # via the compute /images proxy API with microversion > 2.35. + 'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture, + 'name': 'test_rebuild_with_image_novalidhost', + # We don't care about networking for this test. This requires + # microversion >= 2.37. + 'networks': 'none' + } + } + server = self.api.post_server(server_req_body) + self._wait_for_state_change(self.api, server, 'ACTIVE') + # Now update the image metadata to be something that won't work with + # the fake compute driver we're using since the fake driver has an + # "x86_64" architecture. + rebuild_image_ref = ( + nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID) + self.api.put_image_meta_key( + rebuild_image_ref, 'hw_architecture', 'unicore32') + # Now rebuild the server with that updated image and it should result + # in a NoValidHost failure from the scheduler. + rebuild_req_body = { + 'rebuild': { + 'imageRef': rebuild_image_ref + } + } + # Since we're using the CastAsCall fixture, the NoValidHost error + # should actually come back to the API and result in a 500 error. + # Normally the user would get a 202 response because nova-api RPC casts + # to nova-conductor which RPC calls the scheduler which raises the + # NoValidHost. 
We can mimic the end user way to figure out the failure + # by looking for the failed 'rebuild' instance action event. + self.api.api_post('/servers/%s/action' % server['id'], + rebuild_req_body, check_response_status=[500]) + # Look for the failed rebuild action. + self._wait_for_action_fail_completion( + server, instance_actions.REBUILD, 'rebuild_server', + # Before microversion 2.51 events are only returned for instance + # actions if you're an admin. + self.api_fixture.admin_api) + # Unfortunately the server's image_ref is updated to be the new image + # even though the rebuild should not work. + server = self.api.get_server(server['id']) + self.assertEqual(rebuild_image_ref, server['image']['id']) diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py index 227894b..2f48f9a 100644 --- a/nova/tests/unit/compute/test_compute_api.py +++ b/nova/tests/unit/compute/test_compute_api.py @@ -3182,6 +3182,7 @@ class _ComputeAPIUnitTestMixIn(object): None, image, flavor, {}, [], None) self.assertNotEqual(orig_system_metadata, instance.system_metadata) + @mock.patch.object(objects.RequestSpec, 'save') @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @@ -3193,7 +3194,7 @@ class _ComputeAPIUnitTestMixIn(object): def test_rebuild_change_image(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save, - req_spec_get_by_inst_uuid): + req_spec_get_by_inst_uuid, req_spec_save): orig_system_metadata = {} get_flavor.return_value = test_flavor.fake_flavor orig_image_href = 'orig_image' @@ -3240,8 +3241,15 @@ class _ComputeAPIUnitTestMixIn(object): injected_files=files_to_inject, image_ref=new_image_href, orig_image_ref=orig_image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, - preserve_ephemeral=False, host=instance.host, + 
preserve_ephemeral=False, host=None, request_spec=fake_spec, kwargs={}) + # assert the request spec was modified so the scheduler picks + # the existing instance host/node + req_spec_save.assert_called_once_with() + self.assertIn('requested_destination', fake_spec) + requested_destination = fake_spec.requested_destination + self.assertEqual(instance.host, requested_destination.host) + self.assertEqual(instance.node, requested_destination.node) _check_auto_disk_config.assert_called_once_with(image=new_image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, -- 2.7.4