Add finish_snapshot_based_resize_at_dest compute method

This adds a new method to the nova-compute service which will
be synchronously RPC called from (super)conductor on the
destination host in the target cell during a cross-cell resize.

The method is similar to how the existing finish_resize method
works, but parts of it are closer to unshelve_instance, i.e. the
use of _prep_block_device, driver.spawn and the deletion of the
snapshot image (if there is one) after the guest is spawned.

Part of blueprint cross-cell-resize

Change-Id: I46728c191997da3c7b4c6ed9d6d3f9e6fb73ca3f
Author: Matt Riedemann
Date: 2019-02-05 18:55:16 -05:00
Parent: b377f1c0cd
Commit: a2fba47bcf
5 changed files with 617 additions and 4 deletions
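
For orientation before the diff, here is a hedged, simplified sketch of the
flow the new compute manager method implements. The stub classes and the
dict-based instance/migration objects below are illustrative stand-ins only,
not nova APIs; see the actual diff for the real implementation.

    # Simplified, hypothetical sketch of finish_snapshot_based_resize_at_dest.
    class FakeDriver(object):
        """Stand-in for the virt driver; only spawn() is modeled."""
        def spawn(self, instance, image_meta, block_device_info, power_on):
            print('spawning %s (power_on=%s)' % (instance['uuid'], power_on))


    class FakeImageAPI(object):
        """Stand-in for the image (glance) API."""
        def get(self, image_id):
            return {'id': image_id}

        def delete(self, image_id):
            print('deleting temporary snapshot %s' % image_id)


    def finish_snapshot_based_resize_at_dest(instance, migration, snapshot_id,
                                             driver, image_api):
        # 1. Pick the image metadata: the temporary snapshot for an
        #    image-backed server, otherwise whatever is already on the
        #    volume-backed instance.
        image_meta = (image_api.get(snapshot_id) if snapshot_id
                      else instance['image_meta'])

        # 2. For a resize (as opposed to a cold migration) switch the instance
        #    to the new flavor; the real code also applies the migration
        #    context.
        if migration['migration_type'] == 'resize':
            instance['flavor'] = instance['new_flavor']

        # 3. Set up block devices (the real code also activates port bindings
        #    and builds network_info), then spawn the guest much like
        #    unshelve_instance does; a previously stopped server stays
        #    powered off.
        block_device_info = {}  # stands in for _prep_block_device()
        power_on = instance['old_vm_state'] == 'active'
        driver.spawn(instance, image_meta, block_device_info, power_on)

        # 4. Delete the temporary snapshot image once the guest is spawned.
        if snapshot_id:
            image_api.delete(snapshot_id)

        # 5. Record completion on the migration and the instance.
        migration['status'] = 'finished'
        instance['vm_state'] = 'resized'


    if __name__ == '__main__':
        inst = {'uuid': 'fake-uuid', 'image_meta': {}, 'flavor': 'old-flavor',
                'new_flavor': 'new-flavor', 'old_vm_state': 'active'}
        mig = {'migration_type': 'resize', 'status': 'post-migrating'}
        finish_snapshot_based_resize_at_dest(
            inst, mig, 'fake-snapshot-id', FakeDriver(), FakeImageAPI())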


@@ -521,7 +521,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
-    target = messaging.Target(version='5.6')
+    target = messaging.Target(version='5.7')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1917,9 +1917,10 @@ class ComputeManager(manager.Manager):
# useful details which the standard InvalidBDM error message lacks.
raise exception.InvalidBDM(six.text_type(ex))
-    def _update_instance_after_spawn(self, context, instance):
+    def _update_instance_after_spawn(self, context, instance,
+                                     vm_state=vm_states.ACTIVE):
instance.power_state = self._get_power_state(context, instance)
-        instance.vm_state = vm_states.ACTIVE
+        instance.vm_state = vm_state
instance.task_state = None
# NOTE(sean-k-mooney): configdrive.update_instance checks
# instance.launched_at to determine if it is the first or
@@ -5356,6 +5357,203 @@ class ComputeManager(manager.Manager):
action=fields.NotificationAction.RESIZE_FINISH, phase=phase,
bdms=bdms)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_snapshot_based_resize_at_dest(
self, ctxt, instance, migration, snapshot_id, request_spec):
"""Finishes the snapshot-based resize at the destination compute.
Sets up block devices and networking on the destination compute and
spawns the guest.
:param ctxt: nova auth request context targeted at the target cell DB
:param instance: The Instance object being resized with the
``migration_context`` field set. Upon successful completion of this
method the vm_state should be "resized", the task_state should be
None, and migration context, host/node and flavor-related fields
should be set on the instance.
:param migration: The Migration object for this resize operation. Upon
successful completion of this method the migration status should
be "finished".
:param snapshot_id: ID of the image snapshot created for a
non-volume-backed instance, else None.
:param request_spec: nova.objects.RequestSpec object for the operation
"""
LOG.info('Finishing snapshot based resize on destination host %s.',
self.host, instance=instance)
with self._error_out_instance_on_exception(ctxt, instance):
# Note that if anything fails here, the migration-based allocations
# created in conductor should be reverted by conductor as well,
# see MigrationTask.rollback.
self._finish_snapshot_based_resize_at_dest(
ctxt, instance, migration, snapshot_id)
def _finish_snapshot_based_resize_at_dest(
self, ctxt, instance, migration, snapshot_id):
"""Private variant of finish_snapshot_based_resize_at_dest so the
caller can handle reverting resource allocations on failure and perform
other generic error handling.
"""
# Figure out the image metadata to use when spawning the guest.
if snapshot_id:
image_meta = objects.ImageMeta.from_image_ref(
ctxt, self.image_api, snapshot_id)
else:
# Just use what is already on the volume-backed instance.
image_meta = instance.image_meta
resize = migration.migration_type == 'resize'
instance.old_flavor = instance.flavor
if resize:
flavor = instance.new_flavor
# If we are resizing to a new flavor we need to set the
# flavor-related fields on the instance.
# NOTE(mriedem): This is likely where storing old/new_flavor on
# the MigrationContext would make this cleaner.
self._set_instance_info(instance, flavor)
instance.apply_migration_context()
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
# This seems a bit late to be sending the start notification but
# it is what traditional resize has always done as well and it does
# contain the changes to the instance with the new_flavor and
# task_state.
bdms = instance.get_bdms()
network_info = instance.get_network_info()
self._send_finish_resize_notifications(
ctxt, instance, bdms, network_info,
fields.NotificationPhase.START)
# Setup volumes and networking and spawn the guest in the hypervisor.
self._finish_snapshot_based_resize_at_dest_spawn(
ctxt, instance, migration, image_meta, bdms)
# If we spawned from a temporary snapshot image we can delete that now,
# similar to how unshelve works.
if snapshot_id:
# FIXME(mriedem): Need to deal with bug 1653953 for libvirt with
# the rbd image backend. I think the cleanest thing we can do is
# from the driver check to see if instance.migration_context is not
# None and if so, get the Migration record for that context
# (instance.migration_context.migration_id) and from that check the
# Migration.cross_cell_move flag and if True, then flatten the
# image.
compute_utils.delete_image(
ctxt, instance, self.image_api, snapshot_id)
migration.status = 'finished'
migration.save()
self._update_instance_after_spawn(
ctxt, instance, vm_state=vm_states.RESIZED)
# Setting the host/node values will make the ResourceTracker continue
# to track usage for this instance on this host.
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.save(expected_task_state=task_states.RESIZE_FINISH)
# Broadcast to all schedulers that the instance is on this host.
self._update_scheduler_instance_info(ctxt, instance)
self._send_finish_resize_notifications(
ctxt, instance, bdms, network_info,
fields.NotificationPhase.END)
def _finish_snapshot_based_resize_at_dest_spawn(
self, ctxt, instance, migration, image_meta, bdms):
"""Sets up volumes and networking and spawns the guest on the dest host
If the instance was stopped when the resize was initiated the guest
will be created but remain in a shutdown power state.
If the spawn fails, port bindings are rolled back to the source host
and volume connections are terminated for this dest host.
:param ctxt: nova auth request context
:param instance: Instance object being migrated
:param migration: Migration object for the operation
:param image_meta: ImageMeta object used during driver.spawn
:param bdms: BlockDeviceMappingList of BDMs for the instance
"""
# Update the volume attachments using this host's connector.
# That will update the BlockDeviceMapping.connection_info which
# will be used to connect the volumes on this host during spawn().
block_device_info = self._prep_block_device(ctxt, instance, bdms)
allocations = self.reportclient.get_allocations_for_consumer(
ctxt, instance.uuid)
# We do not call self.network_api.setup_networks_on_host here because
# for neutron that sets up the port migration profile which is only
# used during live migration with DVR. Yes it is gross knowing what
# that method does internally. We could change this when bug 1814837
# is fixed if setup_networks_on_host is made smarter by passing the
# migration record and the method checks the migration_type.
# Activate the port bindings for this host.
# FIXME(mriedem): We're going to have the same issue as bug 1813789
# here because this will update the port bindings and send the
# network-vif-plugged event and that means when driver.spawn waits for
# it we might have already gotten the event and neutron won't send
# another one so we could timeout.
# TODO(mriedem): Calculate provider mappings when we support cross-cell
# resize/migrate with ports having resource requests.
self.network_api.migrate_instance_finish(
ctxt, instance, migration, provider_mappings=None)
network_info = self.network_api.get_instance_nw_info(ctxt, instance)
# If the original vm_state was STOPPED, we do not automatically
# power on the instance after it is migrated.
power_on = instance.system_metadata['old_vm_state'] == vm_states.ACTIVE
try:
# NOTE(mriedem): If this instance uses a config drive, it will get
# rebuilt here which means any personality files will be lost,
# similar to unshelve. If the instance is not using a config drive
# and getting metadata from the metadata API service, personality
# files would be lost regardless of the move operation.
self.driver.spawn(
ctxt, instance, image_meta, injected_files=[],
admin_password=None, allocations=allocations,
network_info=network_info, block_device_info=block_device_info,
power_on=power_on)
except Exception:
with excutils.save_and_reraise_exception(logger=LOG):
# Rollback port bindings to the source host.
try:
# This is gross but migrate_instance_start looks at the
# migration.dest_compute to determine where to activate the
# port bindings and we want the source compute port
# bindings to be re-activated. Remember at this point the
# instance.host is still pointing at the source compute.
# TODO(mriedem): Maybe we should be calling
# setup_instance_network_on_host here to deal with pci
# devices?
with utils.temporary_mutation(
migration, dest_compute=migration.source_compute):
self.network_api.migrate_instance_start(
ctxt, instance, migration)
except Exception:
LOG.exception(
'Failed to activate port bindings on the source '
'host: %s', migration.source_compute,
instance=instance)
# Rollback volume connections on this host.
for bdm in bdms:
if bdm.is_volume:
try:
self._remove_volume_connection(
ctxt, bdm, instance, delete_attachment=True)
except Exception:
LOG.exception('Failed to remove volume connection '
'on this host %s for volume %s.',
self.host, bdm.volume_id,
instance=instance)
@wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):


@@ -373,6 +373,7 @@ class ComputeAPI(object):
* 5.4 - Add cache_images() support
* 5.5 - Add prep_snapshot_based_resize_at_dest()
* 5.6 - Add prep_snapshot_based_resize_at_source()
* 5.7 - Add finish_snapshot_based_resize_at_dest()
'''
VERSION_ALIASES = {
@@ -655,6 +656,46 @@ class ComputeAPI(object):
server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize', **msg_args)
def finish_snapshot_based_resize_at_dest(
self, ctxt, instance, migration, snapshot_id, request_spec):
"""Finishes the snapshot-based resize at the destination compute.
Sets up block devices and networking on the destination compute and
spawns the guest.
This is a synchronous RPC call using the ``long_rpc_timeout``
configuration option.
:param ctxt: nova auth request context targeted at the target cell DB
:param instance: The Instance object being resized with the
``migration_context`` field set. Upon successful completion of this
method the vm_state should be "resized", the task_state should be
None, and migration context, host/node and flavor-related fields
should be set on the instance.
:param migration: The Migration object for this resize operation. Upon
successful completion of this method the migration status should
be "finished".
:param snapshot_id: ID of the image snapshot created for a
non-volume-backed instance, else None.
:param request_spec: nova.objects.RequestSpec object for the operation
:raises: nova.exception.MigrationError if the destination compute
service is too old for this method
:raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call
times out
"""
client = self.router.client(ctxt)
version = '5.7'
if not client.can_send_version(version):
raise exception.MigrationError(reason=_('Compute too old'))
cctxt = client.prepare(
server=migration.dest_compute, version=version,
call_monitor_timeout=CONF.rpc_response_timeout,
timeout=CONF.long_rpc_timeout)
return cctxt.call(
ctxt, 'finish_snapshot_based_resize_at_dest',
instance=instance, migration=migration, snapshot_id=snapshot_id,
request_spec=request_spec)
def get_console_output(self, ctxt, instance, tail_length):
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
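
Since this RPC method is what (super)conductor will call synchronously
during a cross-cell resize, the following is a hedged sketch of what that
call site could look like. The task-like object and its attribute names are
assumptions for illustration only; the conductor-side change is not part of
this commit.

    # Hypothetical conductor-side call site (not part of this diff). Assumes
    # a task-like object carrying the context, instance, migration,
    # snapshot_id, request_spec and a ComputeAPI (rpcapi) client.
    def finish_resize_at_dest(task):
        # Synchronous call: blocks for up to CONF.long_rpc_timeout while the
        # destination compute sets up volumes/networking and spawns the
        # guest. Raises MigrationError if the dest compute is older than
        # RPC 5.7.
        task.compute_rpcapi.finish_snapshot_based_resize_at_dest(
            task.context, task.instance, task.migration, task.snapshot_id,
            task.request_spec)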


@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 43
+SERVICE_VERSION = 44
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -167,6 +167,8 @@ SERVICE_VERSION_HISTORY = (
{'compute_rpc': '5.5'},
# Version 43: Compute RPC version 5.6: prep_snapshot_based_resize_at_source
{'compute_rpc': '5.6'},
# Version 44: Compute RPC version 5.7: finish_snapshot_based_resize_at_dest
{'compute_rpc': '5.7'},
)


@@ -10504,6 +10504,342 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertIsNone(self.instance.task_state)
instance_save.assert_called_once_with()
@mock.patch('nova.compute.utils.add_instance_fault_from_exc')
@mock.patch('nova.objects.Instance.save')
def test_finish_snapshot_based_resize_at_dest_outer_error(
self, instance_save, add_fault):
"""Tests the error handling on the finish_snapshot_based_resize_at_dest
method.
"""
request_spec = objects.RequestSpec()
self.instance.task_state = task_states.RESIZE_MIGRATED
with mock.patch.object(
self.compute, '_finish_snapshot_based_resize_at_dest',
side_effect=test.TestingException('oops')) as _finish:
ex = self.assertRaises(
test.TestingException,
self.compute.finish_snapshot_based_resize_at_dest,
self.context, self.instance, self.migration, uuids.snapshot_id,
request_spec)
# Assert the non-decorator mock calls.
_finish.assert_called_once_with(
self.context, self.instance, self.migration, uuids.snapshot_id)
# Assert _error_out_instance_on_exception is called.
self.assertEqual(vm_states.ERROR, self.instance.vm_state)
# Assert wrap_instance_fault is called.
add_fault.assert_called_once_with(
self.context, self.instance, ex, mock.ANY)
# Assert wrap_exception is called.
self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self.assertEqual(
'compute.%s' % fields.NotificationAction.EXCEPTION,
fake_notifier.VERSIONED_NOTIFICATIONS[0]['event_type'])
# Assert errors_out_migration is called.
self.assertEqual('error', self.migration.status)
self.migration.save.assert_called_once_with()
# Assert reverts_task_state is called.
self.assertIsNone(self.instance.task_state)
# Instance.save is called twice:
# 1. _error_out_instance_on_exception
# 2. reverts_task_state
self.assertEqual(2, instance_save.call_count)
@mock.patch('nova.objects.Instance.get_bdms')
@mock.patch('nova.objects.Instance.apply_migration_context')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.manager.ComputeManager.'
'_send_finish_resize_notifications')
@mock.patch('nova.compute.manager.ComputeManager.'
'_finish_snapshot_based_resize_at_dest_spawn')
@mock.patch('nova.objects.ImageMeta.from_image_ref')
@mock.patch('nova.compute.utils.delete_image')
def _test_finish_snapshot_based_resize_at_dest(
self, delete_image, from_image_ref, _finish_spawn, notify,
inst_save, apply_migration_context, get_bdms, snapshot_id=None):
"""Happy path test for finish_snapshot_based_resize_at_dest."""
# Setup the fake instance.
request_spec = objects.RequestSpec()
self.instance.task_state = task_states.RESIZE_MIGRATED
nwinfo = network_model.NetworkInfo([
network_model.VIF(id=uuids.port_id)])
self.instance.info_cache = objects.InstanceInfoCache(
network_info=nwinfo)
self.instance.new_flavor = fake_flavor.fake_flavor_obj(self.context)
old_flavor = self.instance.flavor
# Mock out ImageMeta.
if snapshot_id:
from_image_ref.return_value = objects.ImageMeta()
# Setup the fake migration.
self.migration.migration_type = 'resize'
self.migration.dest_compute = uuids.dest
self.migration.dest_node = uuids.dest
with mock.patch.object(self.compute, 'network_api') as network_api:
network_api.get_instance_nw_info.return_value = nwinfo
# Run that big beautiful code!
self.compute.finish_snapshot_based_resize_at_dest(
self.context, self.instance, self.migration, snapshot_id,
request_spec)
# Check the changes to the instance and migration object.
self.assertEqual(vm_states.RESIZED, self.instance.vm_state)
self.assertIsNone(self.instance.task_state)
self.assertIs(self.instance.flavor, self.instance.new_flavor)
self.assertIs(self.instance.old_flavor, old_flavor)
self.assertEqual(self.migration.dest_compute, self.instance.host)
self.assertEqual(self.migration.dest_node, self.instance.node)
self.assertEqual('finished', self.migration.status)
# Assert the mock calls.
if snapshot_id:
from_image_ref.assert_called_once_with(
self.context, self.compute.image_api, snapshot_id)
delete_image.assert_called_once_with(
self.context, self.instance, self.compute.image_api,
snapshot_id)
else:
from_image_ref.assert_not_called()
delete_image.assert_not_called()
# The instance migration context was applied and changes were saved
# to the instance twice.
apply_migration_context.assert_called_once_with()
inst_save.assert_has_calls([
mock.call(expected_task_state=task_states.RESIZE_MIGRATED),
mock.call(expected_task_state=task_states.RESIZE_FINISH)])
self.migration.save.assert_called_once_with()
# Start and end notifications were sent.
notify.assert_has_calls([
mock.call(self.context, self.instance, get_bdms.return_value,
nwinfo, fields.NotificationPhase.START),
mock.call(self.context, self.instance, get_bdms.return_value,
nwinfo, fields.NotificationPhase.END)])
# Volumes and networking were setup prior to calling driver spawn.
spawn_image_meta = from_image_ref.return_value \
if snapshot_id else test.MatchType(objects.ImageMeta)
_finish_spawn.assert_called_once_with(
self.context, self.instance, self.migration, spawn_image_meta,
get_bdms.return_value)
def test_finish_snapshot_based_resize_at_dest_image_backed(self):
"""Happy path test for finish_snapshot_based_resize_at_dest with
an image-backed server where snapshot_id is provided.
"""
self._test_finish_snapshot_based_resize_at_dest(
snapshot_id=uuids.snapshot_id)
def test_finish_snapshot_based_resize_at_dest_volume_backed(self):
"""Happy path test for finish_snapshot_based_resize_at_dest with
a volume-backed server where snapshot_id is None.
"""
self._test_finish_snapshot_based_resize_at_dest(snapshot_id=None)
@mock.patch('nova.compute.manager.ComputeManager._prep_block_device')
@mock.patch('nova.compute.manager.ComputeManager.'
'_remove_volume_connection')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocations_for_consumer')
def _test_finish_snapshot_based_resize_at_dest_spawn_fails(
self, get_allocs, remove_volume_connection, _prep_block_device,
volume_backed=False):
"""Tests _finish_snapshot_based_resize_at_dest_spawn where spawn fails.
"""
nwinfo = network_model.NetworkInfo([
network_model.VIF(id=uuids.port_id)])
self.instance.system_metadata['old_vm_state'] = vm_states.STOPPED
# Mock out BDMs.
if volume_backed:
# Single remote volume BDM.
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
source_type='volume', destination_type='volume',
volume_id=uuids.volume_id, boot_index=0)])
else:
# Single local image BDM.
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
source_type='image', destination_type='local',
image_id=uuids.image_id, boot_index=0)])
self.migration.migration_type = 'migration'
self.migration.dest_compute = uuids.dest
self.migration.source_compute = uuids.source
image_meta = self.instance.image_meta
# Stub out migrate_instance_start so we can assert how it is called.
def fake_migrate_instance_start(context, instance, migration):
# Make sure the migration.dest_compute was temporarily changed
# to the source_compute value.
self.assertEqual(uuids.source, migration.dest_compute)
with test.nested(
mock.patch.object(self.compute, 'network_api'),
mock.patch.object(self.compute.driver, 'spawn',
side_effect=test.TestingException('spawn fail')),
) as (
network_api, spawn,
):
network_api.get_instance_nw_info.return_value = nwinfo
network_api.migrate_instance_start.side_effect = \
fake_migrate_instance_start
# Run that big beautiful code!
self.assertRaises(
test.TestingException,
self.compute._finish_snapshot_based_resize_at_dest_spawn,
self.context, self.instance, self.migration, image_meta, bdms)
# Assert the mock calls.
# Volumes and networking were setup prior to calling driver spawn.
_prep_block_device.assert_called_once_with(
self.context, self.instance, bdms)
get_allocs.assert_called_once_with(self.context, self.instance.uuid)
network_api.migrate_instance_finish.assert_called_once_with(
self.context, self.instance, self.migration,
provider_mappings=None)
spawn.assert_called_once_with(
self.context, self.instance, image_meta,
injected_files=[], admin_password=None,
allocations=get_allocs.return_value, network_info=nwinfo,
block_device_info=_prep_block_device.return_value, power_on=False)
# Port bindings were rolled back to the source host.
network_api.migrate_instance_start.assert_called_once_with(
self.context, self.instance, self.migration)
if volume_backed:
# Volume connections were deleted.
remove_volume_connection.assert_called_once_with(
self.context, bdms[0], self.instance, delete_attachment=True)
else:
remove_volume_connection.assert_not_called()
def test_finish_snapshot_based_resize_at_dest_spawn_fails_image_back(self):
"""Tests _finish_snapshot_based_resize_at_dest_spawn failing with an
image-backed server.
"""
self._test_finish_snapshot_based_resize_at_dest_spawn_fails(
volume_backed=False)
def test_finish_snapshot_based_resize_at_dest_spawn_fails_vol_backed(self):
"""Tests _finish_snapshot_based_resize_at_dest_spawn failing with a
volume-backed server.
"""
self._test_finish_snapshot_based_resize_at_dest_spawn_fails(
volume_backed=True)
@mock.patch('nova.compute.manager.ComputeManager._prep_block_device')
@mock.patch('nova.compute.manager.ComputeManager.'
'_remove_volume_connection')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocations_for_consumer')
def test_finish_snapshot_based_resize_at_dest_spawn_fail_graceful_rollback(
self, get_allocs, remove_volume_connection, _prep_block_device):
"""Tests that the cleanup except block is graceful in that one
failure does not prevent trying to cleanup the other resources.
"""
nwinfo = network_model.NetworkInfo([
network_model.VIF(id=uuids.port_id)])
self.instance.system_metadata['old_vm_state'] = vm_states.STOPPED
# Three BDMs: two volume (one of which will fail rollback) and a local.
bdms = objects.BlockDeviceMappingList(objects=[
# First volume BDM which fails rollback.
objects.BlockDeviceMapping(
destination_type='volume', volume_id=uuids.bad_volume),
# Second volume BDM is rolled back.
objects.BlockDeviceMapping(
destination_type='volume', volume_id=uuids.good_volume),
# Third BDM is a local image BDM so we do not try to roll it back.
objects.BlockDeviceMapping(
destination_type='local', image_id=uuids.image_id)
])
self.migration.migration_type = 'migration'
self.migration.dest_compute = uuids.dest
self.migration.source_compute = uuids.source
image_meta = self.instance.image_meta
with test.nested(
mock.patch.object(self.compute, 'network_api'),
mock.patch.object(self.compute.driver, 'spawn',
side_effect=test.TestingException(
'spawn fail')),
) as (
network_api, spawn,
):
network_api.get_instance_nw_info.return_value = nwinfo
# Mock migrate_instance_start to fail on rollback.
network_api.migrate_instance_start.side_effect = \
exception.PortNotFound(port_id=uuids.port_id)
# Mock remove_volume_connection to fail on the first call.
remove_volume_connection.side_effect = [
exception.CinderConnectionFailed(reason='gremlins'), None]
# Run that big beautiful code!
self.assertRaises(
test.TestingException,
self.compute._finish_snapshot_based_resize_at_dest_spawn,
self.context, self.instance, self.migration, image_meta, bdms)
# Assert the mock calls.
# Volumes and networking were setup prior to calling driver spawn.
_prep_block_device.assert_called_once_with(
self.context, self.instance, bdms)
get_allocs.assert_called_once_with(self.context, self.instance.uuid)
network_api.migrate_instance_finish.assert_called_once_with(
self.context, self.instance, self.migration,
provider_mappings=None)
spawn.assert_called_once_with(
self.context, self.instance, image_meta,
injected_files=[], admin_password=None,
allocations=get_allocs.return_value, network_info=nwinfo,
block_device_info=_prep_block_device.return_value, power_on=False)
# Port bindings were rolled back to the source host.
network_api.migrate_instance_start.assert_called_once_with(
self.context, self.instance, self.migration)
# Volume connections were deleted.
remove_volume_connection.assert_has_calls([
mock.call(self.context, bdms[0], self.instance,
delete_attachment=True),
mock.call(self.context, bdms[1], self.instance,
delete_attachment=True)])
# Assert the expected errors to get logged.
self.assertIn('Failed to activate port bindings on the source',
self.stdlog.logger.output)
self.assertIn('Failed to remove volume connection',
self.stdlog.logger.output)
@mock.patch('nova.compute.manager.ComputeManager._prep_block_device')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocations_for_consumer')
def test_finish_snapshot_based_resize_at_dest_spawn(
self, get_allocs, _prep_block_device):
"""Happy path test for test_finish_snapshot_based_resize_at_dest_spawn.
"""
nwinfo = network_model.NetworkInfo([
network_model.VIF(id=uuids.port_id)])
self.instance.system_metadata['old_vm_state'] = vm_states.ACTIVE
self.migration.migration_type = 'migration'
self.migration.dest_compute = uuids.dest
self.migration.source_compute = uuids.source
image_meta = self.instance.image_meta
bdms = objects.BlockDeviceMappingList()
with test.nested(
mock.patch.object(self.compute, 'network_api'),
mock.patch.object(self.compute.driver, 'spawn')
) as (
network_api, spawn,
):
network_api.get_instance_nw_info.return_value = nwinfo
# Run that big beautiful code!
self.compute._finish_snapshot_based_resize_at_dest_spawn(
self.context, self.instance, self.migration, image_meta, bdms)
# Assert the mock calls.
_prep_block_device.assert_called_once_with(
self.context, self.instance, bdms)
get_allocs.assert_called_once_with(self.context, self.instance.uuid)
network_api.migrate_instance_finish.assert_called_once_with(
self.context, self.instance, self.migration,
provider_mappings=None)
spawn.assert_called_once_with(
self.context, self.instance, image_meta,
injected_files=[], admin_password=None,
allocations=get_allocs.return_value, network_info=nwinfo,
block_device_info=_prep_block_device.return_value, power_on=True)
class ComputeManagerInstanceUsageAuditTestCase(test.TestCase):
def setUp(self):


@@ -155,6 +155,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
host = kwargs['instances'][0]['host']
elif 'destination' in kwargs:
host = expected_kwargs.pop('destination')
elif 'prepare_server' in kwargs:
# This is the "server" kwarg to the prepare() method, so remove it
# from both kwargs dicts since it does not go to the actual RPC
# method call.
expected_kwargs.pop('prepare_server')
host = kwargs.pop('prepare_server')
else:
host = kwargs['instance']['host']
@@ -565,6 +570,37 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
snapshot_id=uuids.snapshot_id)
self.assertIn('Compute too old', six.text_type(ex))
def test_finish_snapshot_based_resize_at_dest(self):
"""Tests happy path for finish_snapshot_based_resize_at_dest."""
self.flags(long_rpc_timeout=1234)
self._test_compute_api(
'finish_snapshot_based_resize_at_dest', 'call',
# compute method kwargs
instance=self.fake_instance_obj,
migration=migration_obj.Migration(dest_compute='dest'),
snapshot_id=uuids.snapshot_id,
request_spec=objects.RequestSpec(),
# client.prepare kwargs
version='5.7', prepare_server='dest',
call_monitor_timeout=60, timeout=1234)
@mock.patch('nova.rpc.ClientRouter.client')
def test_finish_snapshot_based_resize_at_dest_old_compute(self, client):
"""Tests when the dest compute service is too old to call
finish_snapshot_based_resize_at_dest so MigrationError is raised.
"""
client.return_value.can_send_version.return_value = False
rpcapi = compute_rpcapi.ComputeAPI()
ex = self.assertRaises(
exception.MigrationError,
rpcapi.finish_snapshot_based_resize_at_dest,
self.context,
instance=self.fake_instance_obj,
migration=migration_obj.Migration(dest_compute='dest'),
snapshot_id=uuids.snapshot_id,
request_spec=objects.RequestSpec())
self.assertIn('Compute too old', six.text_type(ex))
def test_reboot_instance(self):
self.maxDiff = None
self._test_compute_api('reboot_instance', 'cast',