Add request_spec to server move RPC calls

To be able to fill the allocation key in the port binding:profile during
move operations, nova-compute needs to receive the RequestSpec object so
that it has access to the port–resource provider mapping calculated in
the conductor.

This patch bumps the compute RPC API version and adds a new request_spec
parameter to multiple calls. It also ensures that the request_spec is
passed by the sender.

Change-Id: If1f465112b8e9b0304b8b5b864b985f72168d839
blueprint: support-move-ops-with-qos-ports
This commit is contained in:
Balazs Gibizer 2019-04-25 17:00:48 +02:00 committed by Matt Riedemann
parent a413150b20
commit a0e60feb3e
11 changed files with 224 additions and 96 deletions

View File

@ -3456,7 +3456,8 @@ class API(base.Base):
# by compute.
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute)
migration.dest_compute,
reqspec)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])

View File

@ -519,7 +519,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='5.1')
target = messaging.Target(version='5.2')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@ -4148,7 +4148,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def revert_resize(self, context, instance, migration):
def revert_resize(self, context, instance, migration, request_spec=None):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
@ -4201,7 +4201,7 @@ class ComputeManager(manager.Manager):
# RPC cast back to the source host to finish the revert there.
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute)
migration, migration.source_compute, request_spec)
def _finish_revert_resize_network_migrate_finish(self, context, instance,
migration):
@ -4251,7 +4251,8 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_revert_resize(self, context, instance, migration):
def finish_revert_resize(
self, context, instance, migration, request_spec=None):
"""Finishes the second half of reverting a resize on the source host.
Bring the original source instance state back (active/shutoff) and
@ -4383,7 +4384,8 @@ class ComputeManager(manager.Manager):
return True
def _prep_resize(self, context, image, instance, instance_type,
filter_properties, node, migration, clean_shutdown=True):
filter_properties, node, migration, request_spec,
clean_shutdown=True):
if not filter_properties:
filter_properties = {}
@ -4424,7 +4426,7 @@ class ComputeManager(manager.Manager):
# RPC cast to the source host to start the actual resize/migration.
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, clean_shutdown)
instance_type, request_spec, clean_shutdown)
def _send_prep_resize_notifications(
self, context, instance, phase, flavor):
@ -4491,7 +4493,8 @@ class ComputeManager(manager.Manager):
try:
self._prep_resize(context, image, instance,
instance_type, filter_properties,
node, migration, clean_shutdown)
node, migration, request_spec,
clean_shutdown)
except Exception:
# Since we hit a failure, we're either rescheduling or dead
# and either way we need to cleanup any allocations created
@ -4583,7 +4586,8 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown):
migration, instance_type, clean_shutdown,
request_spec=None):
"""Starts the migration of a running instance to another host.
This is initiated from the destination host's ``prep_resize`` routine
@ -4591,13 +4595,14 @@ class ComputeManager(manager.Manager):
"""
try:
self._resize_instance(context, instance, image, migration,
instance_type, clean_shutdown)
instance_type, clean_shutdown, request_spec)
except Exception:
with excutils.save_and_reraise_exception():
self._revert_allocation(context, instance, migration)
def _resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown):
migration, instance_type, clean_shutdown,
request_spec):
with self._error_out_instance_on_exception(context, instance), \
errors_out_migration_ctxt(migration):
network_info = self.network_api.get_instance_nw_info(context,
@ -4643,7 +4648,8 @@ class ComputeManager(manager.Manager):
# RPC cast to the destination host to finish the resize/migration.
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info, migration.dest_compute)
migration, image, disk_info, migration.dest_compute,
request_spec)
self._send_resize_instance_notifications(
context, instance, bdms, network_info,
@ -4835,7 +4841,7 @@ class ComputeManager(manager.Manager):
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
migration):
migration, request_spec=None):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
@ -5353,7 +5359,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties, node):
filter_properties, node, request_spec=None):
"""Unshelve the instance.
:param context: request context
@ -5362,6 +5368,8 @@ class ComputeManager(manager.Manager):
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
:param request_spec: the RequestSpec object used to schedule the
instance
"""
if filter_properties is None:
filter_properties = {}

View File

@ -364,6 +364,9 @@ class ComputeAPI(object):
* 5.0 - Remove 4.x compatibility
* 5.1 - Make prep_resize() take a RequestSpec object rather than a
legacy dict.
* 5.2 - Add request_spec parameter for the following: resize_instance,
finish_resize, revert_resize, finish_revert_resize,
unshelve_instance
'''
VERSION_ALIASES = {
@ -596,22 +599,45 @@ class ComputeAPI(object):
instance=instance, volume_id=volume_id,
attachment_id=attachment_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info, host):
client = self.router.client(ctxt)
version = '5.0'
cctxt = client.prepare(
server=host, version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info)
def finish_resize(self, ctxt, instance, migration, image, disk_info, host,
request_spec):
msg_args = {
'instance': instance,
'migration': migration,
'image': image,
'disk_info': disk_info,
'request_spec': request_spec,
}
def finish_revert_resize(self, ctxt, instance, migration, host):
client = self.router.client(ctxt)
version = '5.0'
version = '5.2'
if not client.can_send_version(version):
msg_args.pop('request_spec')
version = '5.0'
cctxt = client.prepare(
server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration)
cctxt.cast(ctxt, 'finish_resize', **msg_args)
def finish_revert_resize(self, ctxt, instance, migration, host,
request_spec):
msg_args = {
'instance': instance,
'migration': migration,
'request_spec': request_spec,
}
client = self.router.client(ctxt)
version = '5.2'
if not client.can_send_version(version):
msg_args.pop('request_spec')
version = '5.0'
cctxt = client.prepare(
server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize', **msg_args)
def get_console_output(self, ctxt, instance, tail_length):
version = '5.0'
@ -882,14 +908,20 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
clean_shutdown=True):
request_spec, clean_shutdown=True):
msg_args = {'instance': instance, 'migration': migration,
'image': image,
'instance_type': instance_type,
'clean_shutdown': clean_shutdown,
'request_spec': request_spec,
}
version = '5.0'
version = '5.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
msg_args.pop('request_spec')
version = '5.0'
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
@ -900,13 +932,24 @@ class ComputeAPI(object):
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host):
def revert_resize(self, ctxt, instance, migration, host, request_spec):
msg_args = {
'instance': instance,
'migration': migration,
'request_spec': request_spec,
}
client = self.router.client(ctxt)
version = '5.0'
version = '5.2'
if not client.can_send_version(version):
msg_args.pop('request_spec')
version = '5.0'
cctxt = client.prepare(
server=_compute_host(host, instance), version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration)
cctxt.cast(ctxt, 'revert_resize', **msg_args)
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks,
@ -1052,16 +1095,24 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance,
clean_shutdown=clean_shutdown)
def unshelve_instance(self, ctxt, instance, host, image=None,
def unshelve_instance(self, ctxt, instance, host, request_spec, image=None,
filter_properties=None, node=None):
version = '5.0'
version = '5.2'
msg_kwargs = {
'instance': instance,
'image': image,
'filter_properties': filter_properties,
'node': node,
'request_spec': request_spec,
}
cctxt = self.router.client(ctxt).prepare(
client = self.router.client(ctxt)
if not client.can_send_version(version):
msg_kwargs.pop('request_spec')
version = '5.0'
cctxt = client.prepare(
server=host, version=version)
cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)

View File

@ -902,8 +902,8 @@ class ComputeTaskManager(base.Base):
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
context, instance, host, request_spec, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None

View File

@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 38
SERVICE_VERSION = 39
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -153,6 +153,9 @@ SERVICE_VERSION_HISTORY = (
{'compute_rpc': '5.1'},
# Version 38: set_host_enabled reflects COMPUTE_STATUS_DISABLED trait
{'compute_rpc': '5.1'},
# Version 39: resize_instance, finish_resize, revert_resize,
# finish_revert_resize, unshelve_instance takes a RequestSpec object
{'compute_rpc': '5.2'},
)

View File

@ -4645,7 +4645,8 @@ class ComputeTestCase(BaseTestCase,
'clean_shutdown': True}),
("unrescue_instance", task_states.UNRESCUING),
("revert_resize", task_states.RESIZE_REVERTING,
{'migration': migration}),
{'migration': migration,
'request_spec': {}}),
("prep_resize", task_states.RESIZE_PREP,
{'image': {},
'instance_type': instance_type,
@ -4659,7 +4660,8 @@ class ComputeTestCase(BaseTestCase,
{'migration': migration,
'image': {},
'instance_type': {},
'clean_shutdown': True}),
'clean_shutdown': True,
'request_spec': {}}),
("pause_instance", task_states.PAUSING),
("unpause_instance", task_states.UNPAUSING),
("suspend_instance", task_states.SUSPENDING),
@ -4858,7 +4860,8 @@ class ComputeTestCase(BaseTestCase,
self.compute.finish_resize(self.context,
migration=migration,
disk_info=disk_info, image=image,
instance=instance)
instance=instance,
request_spec=objects.RequestSpec())
mock_setup.assert_called_once_with(self.context, instance,
'fake-mini')
@ -4912,6 +4915,7 @@ class ComputeTestCase(BaseTestCase,
# create instance
instance = self._create_fake_instance_obj()
request_spec = objects.RequestSpec()
# create volume
volume = {'instance_uuid': None,
@ -4996,7 +5000,7 @@ class ComputeTestCase(BaseTestCase,
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, request_spec={},
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
host_list=[])
@ -5015,7 +5019,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(instance_type),
clean_shutdown=True)
clean_shutdown=True, request_spec=request_spec)
# assert bdm is unchanged
disk_info = db.block_device_mapping_get_all_by_instance(
@ -5050,7 +5054,8 @@ class ComputeTestCase(BaseTestCase,
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
disk_info={}, image={}, instance=instance,
request_spec=request_spec)
# assert volume attached correctly
disk_info = db.block_device_mapping_get_all_by_instance(
@ -5084,10 +5089,13 @@ class ComputeTestCase(BaseTestCase,
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
request_spec = objects.RequestSpec()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={},
request_spec={}, filter_properties={},
request_spec=request_spec,
filter_properties={},
node=None, migration=None,
clean_shutdown=True, host_list=[])
@ -5101,7 +5109,8 @@ class ComputeTestCase(BaseTestCase,
self.assertRaises(test.TestingException, self.compute.finish_resize,
self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
disk_info={}, image={}, instance=instance,
request_spec=request_spec)
instance.refresh()
self.assertEqual(vm_states.ERROR, instance.vm_state)
@ -5209,7 +5218,9 @@ class ComputeTestCase(BaseTestCase,
new_type = objects.Flavor.get_by_name(self.context, 'm1.small')
new_type_id = new_type['id']
flavor_id = new_type['flavorid']
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
instance.host = 'foo'
@ -5228,13 +5239,14 @@ class ComputeTestCase(BaseTestCase,
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, instance_type=new_type,
clean_shutdown=True)
clean_shutdown=True, request_spec=request_spec)
time_fixture.advance_time_delta(cur_time - old_time)
fake_notifier.NOTIFICATIONS = []
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
disk_info={}, image={}, instance=instance,
request_spec=request_spec)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
@ -5348,6 +5360,7 @@ class ComputeTestCase(BaseTestCase,
throw_up)
instance = self._create_fake_instance_obj()
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
@ -5355,7 +5368,7 @@ class ComputeTestCase(BaseTestCase,
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
request_spec={},
request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
host_list=[])
@ -5371,7 +5384,7 @@ class ComputeTestCase(BaseTestCase,
migration=migration, image={},
instance_type=jsonutils.to_primitive(
self.default_flavor),
clean_shutdown=True)
clean_shutdown=True, request_spec=request_spec)
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
@ -5388,13 +5401,15 @@ class ComputeTestCase(BaseTestCase,
throw_up)
instance = self._create_fake_instance_obj()
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
request_spec={},
request_spec=request_spec,
filter_properties={}, node=None,
migration=None, host_list=[],
clean_shutdown=True)
@ -5410,7 +5425,7 @@ class ComputeTestCase(BaseTestCase,
migration=migration, image={},
instance_type=jsonutils.to_primitive(
self.default_flavor),
clean_shutdown=True)
clean_shutdown=True, request_spec=request_spec)
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
@ -5422,13 +5437,15 @@ class ComputeTestCase(BaseTestCase,
# Ensure instance can be migrated/resized.
instance = self._create_fake_instance_obj()
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
request_spec={}, filter_properties={}, node=None,
request_spec=request_spec, filter_properties={}, node=None,
clean_shutdown=True, migration=None, host_list=[])
# verify 'old_vm_state' was set on system_metadata
@ -5465,7 +5482,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(self.default_flavor),
clean_shutdown=clean_shutdown)
clean_shutdown=clean_shutdown, request_spec=request_spec)
mock_notify_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='resize', phase='start', bdms='fake_bdms'),
@ -5512,6 +5529,8 @@ class ComputeTestCase(BaseTestCase,
params = {'vm_state': old_vm_state, 'power_state': p_state}
instance = self._create_fake_instance_obj(params)
request_spec = objects.RequestSpec()
self.flags(allow_resize_to_same_host=True)
self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)
self.stub_out('nova.virt.fake.FakeDriver.confirm_migration',
@ -5522,7 +5541,8 @@ class ComputeTestCase(BaseTestCase,
# Get initial memory usage
memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
# Confirm the instance size before the resize starts
@ -5544,7 +5564,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_ref,
image={}, request_spec={},
image={}, request_spec=request_spec,
filter_properties={}, node=None, clean_shutdown=True,
migration=None, host_list=None)
@ -5571,10 +5591,12 @@ class ComputeTestCase(BaseTestCase,
migration=migration,
image={},
instance_type=new_instance_type_ref,
clean_shutdown=True)
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
disk_info={}, image={}, instance=instance,
request_spec=request_spec)
# Memory usage shouldn't had changed
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
@ -5799,7 +5821,10 @@ class ComputeTestCase(BaseTestCase,
def test_revert_resize_with_pci(self):
self._test_resize_with_pci(
self.compute.revert_resize, '0000:0b:00.1')
lambda context, instance, migration:
self.compute.revert_resize(
context, instance, migration, objects.RequestSpec()),
'0000:0b:00.1')
@mock.patch.object(nova.compute.utils, 'notify_about_instance_action')
def _test_finish_revert_resize(self, mock_notify, power_on,
@ -5830,6 +5855,7 @@ class ComputeTestCase(BaseTestCase,
'info_cache': objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([]))}
instance = self._create_fake_instance_obj(params)
request_spec = objects.RequestSpec()
self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)
self.stub_out('nova.virt.fake.FakeDriver.finish_revert_migration',
@ -5840,7 +5866,8 @@ class ComputeTestCase(BaseTestCase,
# Get initial memory usage
memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
instance.refresh()
@ -5862,7 +5889,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_ref,
image={}, request_spec={},
image={}, request_spec=request_spec,
filter_properties={}, node=None,
migration=None, clean_shutdown=True, host_list=[])
@ -5888,10 +5915,12 @@ class ComputeTestCase(BaseTestCase,
migration=migration,
image={},
instance_type=new_instance_type_ref,
clean_shutdown=True)
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
disk_info={}, image={}, instance=instance,
request_spec=request_spec)
# Memory usage shouldn't had changed
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
@ -5909,7 +5938,8 @@ class ComputeTestCase(BaseTestCase,
instance.save()
self.compute.revert_resize(self.context,
migration=migration, instance=instance)
migration=migration, instance=instance,
request_spec=request_spec)
# Resources from the migration (based on initial flavor) should
# be freed now
@ -5928,7 +5958,8 @@ class ComputeTestCase(BaseTestCase,
self.compute.finish_revert_resize(self.context,
migration=migration,
instance=instance)
instance=instance,
request_spec=request_spec)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='resize_revert', phase='start',
@ -5980,6 +6011,7 @@ class ComputeTestCase(BaseTestCase,
params = {'info_cache': objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([]))}
instance = self._create_fake_instance_obj(params)
request_spec = objects.RequestSpec()
self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)
self.stub_out('nova.virt.fake.FakeDriver.finish_revert_migration',
@ -5987,14 +6019,15 @@ class ComputeTestCase(BaseTestCase,
self._stub_out_resize_network_methods()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_ref,
image={}, request_spec={},
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
host_list=[])
@ -6013,16 +6046,19 @@ class ComputeTestCase(BaseTestCase,
migration=migration,
image={},
instance_type=new_instance_type_ref,
clean_shutdown=True)
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
disk_info={}, image={}, instance=instance,
request_spec=request_spec)
instance.task_state = task_states.RESIZE_REVERTING
instance.save()
self.compute.revert_resize(self.context,
migration=migration, instance=instance)
migration=migration, instance=instance,
request_spec=request_spec)
# NOTE(hanrong): Prove that we pass the right value to the
# "self.network_api.migrate_instance_finish".
@ -6033,7 +6069,8 @@ class ComputeTestCase(BaseTestCase,
self.compute.finish_revert_resize(self.context,
migration=migration,
instance=instance)
instance=instance,
request_spec=request_spec)
self.assertEqual(instance.host, migration.source_compute)
self.assertNotEqual(migration.dest_compute, migration.source_compute)
@ -6053,6 +6090,8 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instance.host = 'foo'
@ -6060,7 +6099,8 @@ class ComputeTestCase(BaseTestCase,
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor,
image={},
request_spec={}, filter_properties={},
request_spec=request_spec,
filter_properties={},
node=None, clean_shutdown=True,
migration=None, host_list=[])
migration = objects.Migration.get_by_instance_and_status(
@ -6073,7 +6113,7 @@ class ComputeTestCase(BaseTestCase,
migration=migration, image={},
instance_type=jsonutils.to_primitive(
self.default_flavor),
clean_shutdown=True)
clean_shutdown=True, request_spec=request_spec)
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()

View File

@ -1691,7 +1691,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_record_action.assert_called_once_with(self.context, fake_inst,
'revertResize')
mock_revert_resize.assert_called_once_with(
self.context, fake_inst, fake_mig, 'compute-dest')
self.context, fake_inst, fake_mig, 'compute-dest',
mock_get_reqspec.return_value)
def test_revert_resize(self):
self._test_revert_resize()

View File

@ -7238,7 +7238,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.TestResizeError, self.compute.finish_resize,
context=self.context, disk_info=[], image=self.image,
instance=self.instance,
migration=self.migration
migration=self.migration,
request_spec=objects.RequestSpec()
)
# Assert that we set the migration to an error state
@ -7255,7 +7256,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.TestResizeError, self.compute.finish_resize,
context=self.context, disk_info=[], image=self.image,
instance=self.instance,
migration=self.migration
migration=self.migration,
request_spec=objects.RequestSpec()
)
# Assert that we set the migration to an error state
@ -7295,7 +7297,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.TestResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
migration=self.migration,
instance_type='type', clean_shutdown=True)
instance_type='type', clean_shutdown=True,
request_spec=objects.RequestSpec())
# Assert that we set the migration to an error state
self.assertEqual("error", self.migration.status)
@ -7314,7 +7317,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.TestResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
migration=self.migration,
instance_type='type', clean_shutdown=True)
instance_type='type', clean_shutdown=True,
request_spec=objects.RequestSpec())
# Assert that we did not set the migration to an error state
self.assertEqual('post-migrating', self.migration.status)
@ -7363,9 +7367,12 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# Inform compute that instance uses non-shared or shared storage
_is_instance_storage_shared.return_value = is_shared
request_spec = objects.RequestSpec()
self.compute.revert_resize(context=self.context,
migration=self.migration,
instance=self.instance)
instance=self.instance,
request_spec=request_spec)
_is_instance_storage_shared.assert_called_once_with(
self.context, self.instance,
@ -7377,7 +7384,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock.ANY, mock.ANY, not is_shared)
mock_finish_revert.assert_called_once_with(
self.context, self.instance, self.migration,
self.migration.source_compute)
self.migration.source_compute, request_spec)
do_test()
@ -7436,9 +7443,11 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.migration.uuid = uuids.migration
self.migration.source_compute = self.instance['host']
self.migration.source_node = self.instance['host']
request_spec = objects.RequestSpec()
self.compute.finish_revert_resize(context=self.context,
migration=self.migration,
instance=self.instance)
instance=self.instance,
request_spec=request_spec)
finish_revert_migration.assert_called_with(self.context,
self.instance, 'nw_info', self.migration, mock.ANY, mock.ANY)
# Make sure the migration.dest_compute is not still set to the
@ -7449,6 +7458,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
do_test()
def test_finish_revert_resize_migration_context(self):
request_spec = objects.RequestSpec()
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'drop_move_claim')
@mock.patch('nova.compute.rpcapi.ComputeAPI.finish_revert_resize')
@ -7486,7 +7497,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.compute.revert_resize(context=self.context,
migration=self.migration,
instance=self.instance)
instance=self.instance,
request_spec=request_spec)
mock_drop_move_claim.assert_called_once_with(self.context,
self.instance, self.instance.node)
self.assertIsNotNone(self.instance.migration_context)
@ -7545,7 +7557,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.migration.uuid = uuids.migration
self.compute.finish_revert_resize(context=self.context,
instance=self.instance,
migration=self.migration)
migration=self.migration,
request_spec=request_spec)
self.assertIsNone(self.instance.migration_context)
# We should only have one attachment_update/complete call for the
# volume BDM that had an attachment.
@ -9124,7 +9137,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
ex = self.assertRaises(
exception.InstanceFaultRollback, self.compute._prep_resize,
self.context, instance.image_meta, instance, instance.flavor,
filter_properties={}, node=instance.node, migration=migration)
filter_properties={}, node=instance.node, migration=migration,
request_spec=mock.sentinel)
self.assertIsInstance(
ex.inner_exception, exception.UnableToMigrateToSelf)

View File

@ -30,6 +30,7 @@ from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_request_spec
class ComputeRpcAPITestCase(test.NoDBTestCase):
@ -50,6 +51,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
{'source_type': 'volume', 'destination_type': 'volume',
'instance_uuid': self.fake_instance_obj.uuid,
'volume_id': 'fake-volume-id'}))
self.fake_request_spec_obj = fake_request_spec.fake_spec_obj()
# FIXME(melwitt): Temporary while things have no mappings
self.patcher1 = mock.patch('nova.objects.InstanceMapping.'
'get_by_instance_uuid')
@ -236,12 +238,14 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
image='image', disk_info='disk_info', host='host')
image='image', disk_info='disk_info', host='host',
request_spec=self.fake_request_spec_obj, version='5.2')
def test_finish_revert_resize(self):
self._test_compute_api('finish_revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host')
host='host', request_spec=self.fake_request_spec_obj,
version='5.2')
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
@ -451,7 +455,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
image='image', instance_type=self.fake_flavor_obj,
clean_shutdown=True, version='5.0')
clean_shutdown=True, request_spec=self.fake_request_spec_obj,
version='5.2')
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
@ -460,7 +465,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_revert_resize(self):
self._test_compute_api('revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host')
host='host', request_spec=self.fake_request_spec_obj,
version='5.2')
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',
@ -530,7 +536,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('unshelve_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
filter_properties={'fakeprop': 'fakeval'}, node='node',
version='5.0')
request_spec=self.fake_request_spec_obj,
version='5.2')
def test_volume_snapshot_create(self):
self._test_compute_api('volume_snapshot_create', 'cast',

View File

@ -347,7 +347,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(
self.context, instance, image=image,
filter_properties=filter_properties,
node=node)
node=node, request_spec=objects.RequestSpec())
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
@ -446,7 +446,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node)
filter_properties=filter_properties, node=node,
request_spec=objects.RequestSpec())
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
@ -540,7 +541,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertRaises(test.TestingException,
self.compute.unshelve_instance,
self.context, instance, image=None,
filter_properties=filter_properties, node=node)
filter_properties=filter_properties, node=node,
request_spec=objects.RequestSpec())
mock_notify_instance_action.assert_called_once_with(
self.context, instance, 'fake-mini', action='unshelve',

View File

@ -1303,7 +1303,8 @@ class _BaseTaskTestCase(object):
# NOTE(sbauza): Since the instance is dehydrated when passing
# through the RPC API, we can only assert mock.ANY for it
unshelve_instance.assert_called_once_with(
self.context, mock.ANY, host['host'], image=mock.ANY,
self.context, mock.ANY, host['host'],
test.MatchType(objects.RequestSpec), image=mock.ANY,
filter_properties=filter_properties, node=host['nodename']
)
@ -1397,7 +1398,7 @@ class _BaseTaskTestCase(object):
mock_schedule.assert_called_once_with(
self.context, fake_spec, [instance.uuid], return_alternates=False)
mock_unshelve.assert_called_once_with(
self.context, instance, 'fake_host', image='fake_image',
self.context, instance, 'fake_host', fake_spec, image='fake_image',
filter_properties=dict(
# populate_filter_properties adds limits={}
fake_spec.to_legacy_filter_properties_dict(), limits={}),
@ -1488,7 +1489,7 @@ class _BaseTaskTestCase(object):
mock_schedule.assert_called_once_with(
self.context, fake_spec, [instance.uuid], return_alternates=False)
mock_unshelve.assert_called_once_with(
self.context, instance, 'fake_host', image=None,
self.context, instance, 'fake_host', fake_spec, image=None,
filter_properties={'limits': {}}, node='fake_node')
def test_rebuild_instance(self):