Remove legacy request spec compat code from API

For operations that require a request spec, we now require that
the request spec exists when we look it up in the API; if it does
not, the operation fails. All instances created since Newton should
have a request spec, and instances created before Newton should have
been migrated to have one as part of an online data migration.
Failing to find a request spec will now result in a 500 internal
server error.
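
As a minimal sketch (condensed from the rebuild path in the diff
below, not verbatim nova code), this is the compat pattern being
removed versus what remains:

    # Old compat pattern: tolerate a missing request spec and fall
    # back to the legacy code path.
    try:
        request_spec = objects.RequestSpec.get_by_instance_uuid(
            context, instance.uuid)
    except exception.RequestSpecNotFound:
        # Some old instances can still have no RequestSpec attached.
        request_spec = None

    # New pattern: no except clause; RequestSpecNotFound propagates
    # out of the API method and surfaces to the caller as a 500.
    request_spec = objects.RequestSpec.get_by_instance_uuid(
        context, instance.uuid)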

As a result, the RequestSpec.get_by_instance_uuid query is moved
to before updating instance.task_state in the various API methods,
since we want to perform everything that can fail before we update
the instance.
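
For example, in the resize path the lookup now happens before the
task_state change, so a failed lookup leaves the instance record
untouched (sketch, condensed from the diff below):

    # Do the lookup that can now fail first...
    request_spec = objects.RequestSpec.get_by_instance_uuid(
        context, instance.uuid)
    request_spec.ignore_hosts = filter_properties['ignore_hosts']

    # ...and only then mutate the instance.
    instance.task_state = task_states.RESIZE_PREP
    instance.progress = 0
    instance.update(extra_instance_updates)
    instance.save(expected_task_state=[None])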

Related to blueprint request-spec-use-by-compute

Change-Id: I34ffaf285718059b55f90e812b57f1e11d566c6f
Matt Riedemann 2018-10-30 12:57:44 -04:00
parent ed4fe3ead6
commit 52d515ba7d
5 changed files with 162 additions and 182 deletions


@@ -3304,6 +3304,9 @@ class API(base.Base):
else:
orig_image_ref = instance.image_ref
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject, root_bdm)
@@ -3365,38 +3368,31 @@ class API(base.Base):
# attached RequestSpec object but let's consider that the operator only
# half migrated all their instances in the meantime.
host = instance.host
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
# If a new image is provided on rebuild, we will need to run
# through the scheduler again, but we want the instance to be
# rebuilt on the same host it's already on.
if orig_image_ref != image_href:
# We have to modify the request spec that goes to the scheduler
# to contain the new image. We persist this since we've already
# changed the instance.image_ref above so we're being
# consistent.
request_spec.image = objects.ImageMeta.from_dict(image)
request_spec.save()
if 'scheduler_hints' not in request_spec:
request_spec.scheduler_hints = {}
# Nuke the id on this so we can't accidentally save
# this hint hack later
del request_spec.id
# If a new image is provided on rebuild, we will need to run
# through the scheduler again, but we want the instance to be
# rebuilt on the same host it's already on.
if orig_image_ref != image_href:
# We have to modify the request spec that goes to the scheduler
# to contain the new image. We persist this since we've already
# changed the instance.image_ref above so we're being
# consistent.
request_spec.image = objects.ImageMeta.from_dict(image)
request_spec.save()
if 'scheduler_hints' not in request_spec:
request_spec.scheduler_hints = {}
# Nuke the id on this so we can't accidentally save
# this hint hack later
del request_spec.id
# NOTE(danms): Passing host=None tells conductor to
# call the scheduler. The _nova_check_type hint
# requires that the scheduler returns only the same
# host that we are currently on and only checks
# rebuild-related filters.
request_spec.scheduler_hints['_nova_check_type'] = ['rebuild']
request_spec.force_hosts = [instance.host]
request_spec.force_nodes = [instance.node]
host = None
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
# NOTE(danms): Passing host=None tells conductor to
# call the scheduler. The _nova_check_type hint
# requires that the scheduler returns only the same
# host that we are currently on and only checks
# rebuild-related filters.
request_spec.scheduler_hints['_nova_check_type'] = ['rebuild']
request_spec.force_hosts = [instance.host]
request_spec.force_nodes = [instance.node]
host = None
self.compute_task_api.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
@@ -3453,6 +3449,16 @@ class API(base.Base):
self._check_quota_for_upsize(context, instance, instance.flavor,
instance.old_flavor)
# Conductor updated the RequestSpec.flavor during the initial resize
# operation to point at the new flavor, so we need to update the
# RequestSpec to point back at the original flavor, otherwise
# subsequent move operations through the scheduler will be using the
# wrong flavor.
reqspec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
reqspec.flavor = instance.old_flavor
reqspec.save()
instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=[None])
@@ -3462,21 +3468,6 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
# Conductor updated the RequestSpec.flavor during the initial resize
# operation to point at the new flavor, so we need to update the
# RequestSpec to point back at the original flavor, otherwise
# subsequent move operations through the scheduler will be using the
# wrong flavor.
try:
reqspec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
reqspec.flavor = instance.old_flavor
reqspec.save()
except exception.RequestSpecNotFound:
# TODO(mriedem): Make this a failure in Stein when we drop
# compatibility for missing request specs.
pass
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
@@ -3601,16 +3592,20 @@ class API(base.Base):
current_instance_type,
new_instance_type)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance.host)
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
request_spec.ignore_hosts = filter_properties['ignore_hosts']
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
if self.cell_type == 'api':
# Create migration record.
self._resize_cells_support(context, instance,
@@ -3624,39 +3619,21 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.RESIZE)
# NOTE(sbauza): The migration script we provided in Newton should make
# sure that all our instances are currently migrated to have an
# attached RequestSpec object but let's consider that the operator only
# half migrated all their instances in the meantime.
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
request_spec.ignore_hosts = filter_properties['ignore_hosts']
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
if host_name is not None:
# If there is no request spec we cannot honor the request
# and we need to fail.
raise exception.CannotMigrateWithTargetHost()
request_spec = None
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
# by compute.
scheduler_hint = {'filter_properties': filter_properties}
if request_spec:
if host_name is None:
# If 'host_name' is not specified,
# clear the 'requested_destination' field of the RequestSpec.
request_spec.requested_destination = None
else:
# Set the host and the node so that the scheduler will
# validate them.
request_spec.requested_destination = objects.Destination(
host=node.host, node=node.hypervisor_hostname)
if host_name is None:
# If 'host_name' is not specified,
# clear the 'requested_destination' field of the RequestSpec.
request_spec.requested_destination = None
else:
# Set the host and the node so that the scheduler will
# validate them.
request_spec.requested_destination = objects.Destination(
host=node.host, node=node.hypervisor_hostname)
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
@@ -3707,18 +3684,14 @@ class API(base.Base):
vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
self.compute_task_api.unshelve_instance(context, instance,
request_spec)
@@ -4487,6 +4460,9 @@ class API(base.Base):
# state.
nodes = objects.ComputeNodeList.get_all_by_host(context, host_name)
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
@@ -4502,14 +4478,6 @@ class API(base.Base):
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
# NOTE(sbauza): Force is a boolean by the new related API version
if force is False and host_name:
# Unset the host to make sure we call the scheduler
@@ -4520,19 +4488,13 @@ class API(base.Base):
# compute per service but doesn't support live migrations,
# let's provide the first one.
target = nodes[0]
if request_spec:
# TODO(sbauza): Hydrate a fake spec for old instances not yet
# having a request spec attached to them (particularly true for
# cells v1). For the moment, let's keep the same behaviour for
# all the instances but provide the destination only if a spec
# is found.
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
# This is essentially a hint to the scheduler to only consider
# the specified host but still run it through the filters.
request_spec.requested_destination = destination
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
# This is essentially a hint to the scheduler to only consider
# the specified host but still run it through the filters.
request_spec.requested_destination = destination
try:
self.compute_task_api.live_migrate_instance(context, instance,
@@ -4657,6 +4619,9 @@ class API(base.Base):
'expected to be down, but it was up.', inst_host)
raise exception.ComputeServiceInUse(host=inst_host)
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.EVACUATE)
@@ -4680,14 +4645,6 @@ class API(base.Base):
action=fields_obj.NotificationAction.EVACUATE,
source=fields_obj.NotificationSource.API)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
# NOTE(sbauza): Force is a boolean by the new related API version
if force is False and host:
nodes = objects.ComputeNodeList.get_all_by_host(context, host)
@@ -4697,17 +4654,11 @@ class API(base.Base):
# compute per service but doesn't support evacuations,
# let's provide the first one.
target = nodes[0]
if request_spec:
# TODO(sbauza): Hydrate a fake spec for old instances not yet
# having a request spec attached to them (particularly true for
# cells v1). For the moment, let's keep the same behaviour for
# all the instances but provide the destination only if a spec
# is found.
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
request_spec.requested_destination = destination
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
request_spec.requested_destination = destination
return self.compute_task_api.rebuild_instance(context,
instance=instance,


@@ -83,6 +83,18 @@ class ServerActionsControllerTestV21(test.TestCase):
self.context = self.req.environ['nova.context']
self.image_api = image.API()
# Assume that anything that hits the compute API and looks for a
# RequestSpec doesn't care about it, since testing logic that deep
# should be done in nova.tests.unit.compute.test_compute_api.
mock_reqspec = mock.patch('nova.objects.RequestSpec')
mock_reqspec.start()
self.addCleanup(mock_reqspec.stop)
# Similarly we shouldn't care about anything hitting conductor from
# these tests.
mock_conductor = mock.patch.object(
self.controller.compute_api, 'compute_task_api')
mock_conductor.start()
self.addCleanup(mock_conductor.stop)
def _get_controller(self):
return self.servers.ServersController()
@@ -565,11 +577,20 @@ class ServerActionsControllerTestV21(test.TestCase):
def return_image_meta(*args, **kwargs):
image_meta_table = {
'1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
uuids.kernel_image_id: {
'id': uuids.kernel_image_id,
'status': 'active',
'container_format': 'aki'},
uuids.ramdisk_image_id: {
'id': uuids.ramdisk_image_id,
'status': 'active',
'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
{'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'status': 'active',
'container_format': 'raw',
'properties': {'kernel_id': uuids.kernel_image_id,
'ramdisk_id': uuids.ramdisk_image_id}},
}
image_id = args[2]
try:
@@ -589,8 +610,8 @@ class ServerActionsControllerTestV21(test.TestCase):
},
}
self.controller._action_rebuild(self.req, FAKE_UUID, body=body).obj
self.assertEqual(instance_meta['kernel_id'], '1')
self.assertEqual(instance_meta['ramdisk_id'], '2')
self.assertEqual(instance_meta['kernel_id'], uuids.kernel_image_id)
self.assertEqual(instance_meta['ramdisk_id'], uuids.ramdisk_image_id)
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):


@@ -239,6 +239,18 @@ class ControllerTest(test.TestCase):
policy.init()
self.addCleanup(policy.reset)
fake_network.stub_out_nw_api_get_instance_nw_info(self)
# Assume that anything that hits the compute API and looks for a
# RequestSpec doesn't care about it, since testing logic that deep
# should be done in nova.tests.unit.compute.test_compute_api.
mock_reqspec = mock.patch('nova.objects.RequestSpec')
mock_reqspec.start()
self.addCleanup(mock_reqspec.stop)
# Similarly we shouldn't care about anything hitting conductor from
# these tests.
mock_conductor = mock.patch.object(
self.controller.compute_api, 'compute_task_api')
mock_conductor.start()
self.addCleanup(mock_conductor.stop)
class ServersControllerTest(ControllerTest):


@@ -8882,7 +8882,8 @@ class ComputeAPITestCase(BaseTestCase):
scheduler_hints={'group':
'5b674f73-c8cf-40ef-9965-3b6fe4b304b1'})
def _test_rebuild(self, vm_state):
@mock.patch('nova.objects.RequestSpec')
def _test_rebuild(self, mock_reqspec, vm_state=None):
instance = self._create_fake_instance_obj()
instance_uuid = instance['uuid']
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
@@ -8913,7 +8914,7 @@ class ComputeAPITestCase(BaseTestCase):
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance', fake_rpc_rebuild):
image_ref = instance["image_ref"] + '-new_image_ref'
image_ref = uuids.new_image_ref
password = "new_password"
instance.vm_state = vm_state
@@ -8978,7 +8979,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertIn('Unable to find root block device mapping for '
'volume-backed instance', six.text_type(ex))
def test_rebuild_with_deleted_image(self):
@mock.patch('nova.objects.RequestSpec')
def test_rebuild_with_deleted_image(self, mock_reqspec):
# If we're given a deleted image by glance, we should not be able to
# rebuild from it
instance = self._create_fake_instance_obj(
@@ -8996,7 +8998,8 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.rebuild(self.context, instance,
self.fake_image['id'], 'new_password')
def test_rebuild_with_too_little_ram(self):
@mock.patch('nova.objects.RequestSpec')
def test_rebuild_with_too_little_ram(self, mock_reqspec):
instance = self._create_fake_instance_obj(
params={'image_ref': FAKE_IMAGE_REF})
instance.flavor.memory_mb = 64
@@ -9016,7 +9019,8 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
def test_rebuild_with_too_little_disk(self):
@mock.patch('nova.objects.RequestSpec')
def test_rebuild_with_too_little_disk(self, mock_reqspec):
instance = self._create_fake_instance_obj(
params={'image_ref': FAKE_IMAGE_REF})
@@ -9046,7 +9050,8 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
def test_rebuild_with_just_enough_ram_and_disk(self):
@mock.patch('nova.objects.RequestSpec')
def test_rebuild_with_just_enough_ram_and_disk(self, mock_reqspec):
instance = self._create_fake_instance_obj(
params={'image_ref': FAKE_IMAGE_REF})
@@ -9070,7 +9075,8 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
def test_rebuild_with_no_ram_and_disk_reqs(self):
@mock.patch('nova.objects.RequestSpec')
def test_rebuild_with_no_ram_and_disk_reqs(self, mock_reqspec):
instance = self._create_fake_instance_obj(
params={'image_ref': FAKE_IMAGE_REF})
@@ -9091,7 +9097,8 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
def test_rebuild_with_too_large_image(self):
@mock.patch('nova.objects.RequestSpec')
def test_rebuild_with_too_large_image(self, mock_reqspec):
instance = self._create_fake_instance_obj(
params={'image_ref': FAKE_IMAGE_REF})
@@ -12501,7 +12508,8 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
self.assertRaises(exception.FlavorNotFound,
self.compute_api.create, self.context, self.inst_type, None)
def test_can_resize_to_visible_instance_type(self):
@mock.patch('nova.objects.RequestSpec')
def test_can_resize_to_visible_instance_type(self, mock_reqspec):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id =\
flavors.get_flavor_by_flavor_id


@@ -1908,7 +1908,8 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Quotas.check_deltas')
@mock.patch('nova.objects.Migration.get_by_instance_and_status')
@mock.patch('nova.context.RequestContext.elevated')
def test_revert_resize_concurrent_fail(self, mock_elevated,
@mock.patch('nova.objects.RequestSpec')
def test_revert_resize_concurrent_fail(self, mock_reqspec, mock_elevated,
mock_get_migration, mock_check):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
@@ -2056,10 +2057,17 @@ class _ComputeAPIUnitTestMixIn(object):
host_name=host_name,
**extra_kwargs)
else:
self.compute_api.resize(self.context, fake_inst,
clean_shutdown=clean_shutdown,
host_name=host_name,
**extra_kwargs)
if request_spec:
self.compute_api.resize(self.context, fake_inst,
clean_shutdown=clean_shutdown,
host_name=host_name,
**extra_kwargs)
else:
self.assertRaises(exception.RequestSpecNotFound,
self.compute_api.resize,
self.context, fake_inst,
clean_shutdown=clean_shutdown,
host_name=host_name, **extra_kwargs)
if request_spec:
if allow_same_host:
@@ -2106,10 +2114,12 @@ class _ComputeAPIUnitTestMixIn(object):
mock_inst_save.assert_called_once_with(
expected_task_state=[None])
if self.cell_type == 'api':
if self.cell_type == 'api' and request_spec:
mock_migration.assert_called_once_with(context=self.context)
mock_elevated.assert_called_once_with()
mig.create.assert_called_once_with()
else:
mock_migration.assert_not_called()
mock_get_by_instance_uuid.assert_called_once_with(self.context,
fake_inst.uuid)
@@ -2118,15 +2128,21 @@ class _ComputeAPIUnitTestMixIn(object):
mock_record.assert_called_once_with(self.context, fake_inst,
'resize')
else:
mock_record.assert_called_once_with(self.context, fake_inst,
'migrate')
if request_spec:
mock_record.assert_called_once_with(
self.context, fake_inst, 'migrate')
else:
mock_record.assert_not_called()
mock_resize.assert_called_once_with(
self.context, fake_inst, extra_kwargs,
scheduler_hint=scheduler_hint,
flavor=test.MatchType(objects.Flavor),
clean_shutdown=clean_shutdown,
request_spec=fake_spec)
if request_spec:
mock_resize.assert_called_once_with(
self.context, fake_inst, extra_kwargs,
scheduler_hint=scheduler_hint,
flavor=test.MatchType(objects.Flavor),
clean_shutdown=clean_shutdown,
request_spec=fake_spec)
else:
mock_resize.assert_not_called()
def _test_migrate(self, *args, **kwargs):
self._test_resize(*args, flavor_id_passed=False, **kwargs)
@@ -2198,34 +2214,6 @@ class _ComputeAPIUnitTestMixIn(object):
def test_migrate_request_spec_not_found(self):
self._test_migrate(request_spec=False)
@mock.patch.object(objects.Migration, 'create')
@mock.patch.object(objects.InstanceAction, 'action_start')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host',
return_value=[objects.ComputeNode(
host='target_host',
hypervisor_hostname='hypervisor_host')])
def test_migrate_request_spec_not_found_with_target_host(
self, mock_get_all_by_host, mock_get_by_instance_uuid, mock_save,
mock_action_start, mock_migration_create):
fake_inst = self._create_instance_obj()
mock_get_by_instance_uuid.side_effect = (
exception.RequestSpecNotFound(instance_uuid=fake_inst.uuid))
self.assertRaises(exception.CannotMigrateWithTargetHost,
self.compute_api.resize, self.context,
fake_inst, host_name='target_host')
mock_get_all_by_host.assert_called_once_with(
self.context, 'target_host', True)
mock_get_by_instance_uuid.assert_called_once_with(self.context,
fake_inst.uuid)
mock_save.assert_called_once_with(expected_task_state=[None])
mock_action_start.assert_called_once_with(
self.context, fake_inst.uuid, instance_actions.MIGRATE,
want_result=False)
if self.cell_type == 'api':
mock_migration_create.assert_called_once_with()
def test_migrate_with_requested_destination(self):
# RequestSpec has requested_destination
self._test_migrate(requested_destination=True)