Merge "Make live migration hold resources with a migration allocation"
commit b5ea12183b
@@ -3716,8 +3716,8 @@ class ComputeManager(manager.Manager):
         cn_uuid = rt.get_node_uuid(nodename)
 
         if migration.source_node == nodename:
-            if migration.status == 'confirmed':
-                # NOTE(danms): We're confirming on the source node, so try to
+            if migration.status in ('confirmed', 'completed'):
+                # NOTE(danms): We're finishing on the source node, so try to
                 # delete the allocation based on the migration uuid
                 deleted = self.reportclient.delete_allocation_for_instance(
                     migration.uuid)
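The hunk above broadens the status check in _delete_allocation_after_move(): cold migration and resize end in 'confirmed', while live migration ends in 'completed', so both now cause the allocation held under the migration's own consumer UUID to be dropped from the source node. A minimal sketch of the predicate, for illustration only (the constant and function names are not Nova's):

    # Statuses in which the migration no longer needs to hold the
    # source-node resources; 'completed' is the live-migration case
    # added by this commit.
    _FINISHED_STATUSES = ('confirmed', 'completed')

    def migration_done_on_source(migration, nodename):
        # True when we are running on the source node and the migration
        # has reached a terminal state there.
        return (migration.source_node == nodename
                and migration.status in _FINISHED_STATUSES)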
@@ -5904,6 +5904,9 @@ class ComputeManager(manager.Manager):
                 LOG.exception('Pre live migration failed at %s',
                               dest, instance=instance)
                 self._set_migration_status(migration, 'error')
+                # Make sure we set this for _rollback_live_migration()
+                # so it can find it, as expected if it was called later
+                migrate_data.migration = migration
                 self._rollback_live_migration(context, instance, dest,
                                               migrate_data)
 
@@ -6167,10 +6170,6 @@ class ComputeManager(manager.Manager):
             # host even before next periodic task.
             self.update_available_resource(ctxt)
 
-            rt = self._get_resource_tracker()
-            rt.delete_allocation_for_migrated_instance(
-                instance, source_node)
-
             self._update_scheduler_instance_info(ctxt, instance)
             self._notify_about_instance_usage(ctxt, instance,
                                               "live_migration._post.end",
@@ -6182,6 +6181,34 @@ class ComputeManager(manager.Manager):
         if migrate_data and migrate_data.obj_attr_is_set('migration'):
             migrate_data.migration.status = 'completed'
             migrate_data.migration.save()
+            migration = migrate_data.migration
+            rc = self.scheduler_client.reportclient
+            # Check to see if our migration has its own allocations
+            allocs = rc.get_allocations_for_consumer(migration.uuid)
+        else:
+            # We didn't have data on a migration, which means we can't
+            # look up to see if we had new-style migration-based
+            # allocations. This should really only happen in cases of
+            # a buggy virt driver or some really old component in the
+            # system. Log a warning so we know it happened.
+            allocs = None
+            LOG.warning('Live migration ended with no migrate_data '
+                        'record. Unable to clean up migration-based '
+                        'allocations which is almost certainly not '
+                        'an expected situation.')
+
+        if allocs:
+            # We had a migration-based allocation that we need to handle
+            self._delete_allocation_after_move(instance,
+                                               migrate_data.migration,
+                                               instance.flavor,
+                                               source_node)
+        else:
+            # No migration-based allocations, so do the old thing and
+            # attempt to clean up any doubled per-instance allocation
+            rt = self._get_resource_tracker()
+            rt.delete_allocation_for_migrated_instance(
+                instance, source_node)
 
     def _consoles_enabled(self):
         """Returns whether a console is enable."""
@@ -6277,23 +6304,6 @@ class ComputeManager(manager.Manager):
             Contains the status we want to set for the migration object
 
         """
-        # Remove allocations created in Placement for the dest node.
-        # NOTE(mriedem): The migrate_data.migration object does not have the
-        # dest_node (or dest UUID) set, so we have to lookup the destination
-        # ComputeNode with only the hostname.
-        dest_node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
-            context, dest, use_slave=True)
-        reportclient = self.scheduler_client.reportclient
-        resources = scheduler_utils.resources_from_flavor(
-            instance, instance.flavor)
-        reportclient.remove_provider_from_instance_allocation(
-            instance.uuid, dest_node.uuid, instance.user_id,
-            instance.project_id, resources)
-
-        instance.task_state = None
-        instance.progress = 0
-        instance.save(expected_task_state=[task_states.MIGRATING])
-
         # TODO(tdurakov): remove dict to object conversion once RPC API version
         # is bumped to 5.x
         if isinstance(migrate_data, dict):
@@ -6307,6 +6317,20 @@ class ComputeManager(manager.Manager):
         else:
             migration = None
 
+        if migration:
+            # Remove allocations created in Placement for the dest node.
+            # If migration is None, we must be so old we don't have placement,
+            # so no need to do something else.
+            self._revert_allocation(context, instance, migration)
+        else:
+            LOG.error('Unable to revert allocations during live migration '
+                      'rollback; compute driver did not provide migrate_data',
+                      instance=instance)
+
+        instance.task_state = None
+        instance.progress = 0
+        instance.save(expected_task_state=[task_states.MIGRATING])
+
         # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
         self.network_api.setup_networks_on_host(context, instance, self.host)
 
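Taken together, the manager.py hunks teach _post_live_migration() and _rollback_live_migration() about migration-based allocations: the source-node resources are held under the migration's own consumer UUID, so on success the migration's allocation is deleted, and on rollback the allocation is moved back to the instance instead of surgically removing the destination provider from a doubled instance allocation. A condensed sketch of the new success-path decision (function and parameter names here are illustrative, not Nova's API):

    def cleanup_source_allocations(reportclient, rt, instance, migration,
                                   source_node, delete_after_move):
        # If the conductor created a migration-based allocation before
        # the live migration started, it is recorded against the
        # migration uuid as the consumer, not the instance uuid.
        allocs = None
        if migration is not None:
            allocs = reportclient.get_allocations_for_consumer(migration.uuid)
        if allocs:
            # New style: drop the allocation held by the migration record.
            delete_after_move(instance, migration, instance.flavor,
                              source_node)
        else:
            # Old style: clean up the doubled per-instance allocation
            # that older computes leave on the source node.
            rt.delete_allocation_for_migrated_instance(instance, source_node)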
@@ -16,6 +16,7 @@ import six
 
 from nova.compute import power_state
 from nova.conductor.tasks import base
+from nova.conductor.tasks import migrate
 import nova.conf
 from nova import exception
 from nova.i18n import _
@@ -27,6 +28,12 @@ LOG = logging.getLogger(__name__)
 CONF = nova.conf.CONF
 
 
+def should_do_migration_allocation(context):
+    minver = objects.Service.get_minimum_version_multi(context,
+                                                       ['nova-compute'])
+    return minver >= 25
+
+
 class LiveMigrationTask(base.TaskBase):
     def __init__(self, context, instance, destination,
                  block_migration, disk_over_commit, migration, compute_rpcapi,
@@ -43,11 +50,23 @@ class LiveMigrationTask(base.TaskBase):
         self.servicegroup_api = servicegroup_api
         self.scheduler_client = scheduler_client
         self.request_spec = request_spec
+        self._source_cn = None
+        self._held_allocations = None
 
     def _execute(self):
         self._check_instance_is_active()
         self._check_host_is_up(self.source)
 
+        if should_do_migration_allocation(self.context):
+            self._source_cn, self._held_allocations = (
+                # NOTE(danms): This may raise various exceptions, which will
+                # propagate to the API and cause a 500. This is what we
+                # want, as it would indicate internal data structure corruption
+                # (such as missing migrations, compute nodes, etc).
+                migrate.replace_allocation_with_migration(self.context,
+                                                          self.instance,
+                                                          self.migration))
+
         if not self.destination:
             # Either no host was specified in the API request and the user
             # wants the scheduler to pick a destination host, or a host was
@@ -73,7 +92,9 @@ class LiveMigrationTask(base.TaskBase):
         # ComputeTaskManager.
         scheduler_utils.claim_resources_on_destination(
             self.scheduler_client.reportclient, self.instance,
-            source_node, dest_node)
+            source_node, dest_node,
+            source_node_allocations=self._held_allocations)
 
         # dest_node is a ComputeNode object, so we need to get the actual
         # node name off it to set in the Migration object below.
         dest_node = dest_node.hypervisor_hostname
@@ -99,7 +120,11 @@ class LiveMigrationTask(base.TaskBase):
         # calls, since this class currently makes no state changes,
         # except to call the compute method, that has no matching
         # rollback call right now.
-        pass
+        if self._held_allocations:
+            migrate.revert_allocation_for_migration(self._source_cn,
+                                                    self.instance,
+                                                    self.migration,
+                                                    self._held_allocations)
 
     def _check_instance_is_active(self):
         if self.instance.power_state not in (power_state.RUNNING,
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
 
 
 # NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 24
+SERVICE_VERSION = 25
 
 
 # NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -117,6 +117,9 @@ SERVICE_VERSION_HISTORY = (
     {'compute_rpc': '4.18'},
     # Version 24: Add support for Cinder v3 attach/detach API.
     {'compute_rpc': '4.18'},
+    # Version 25: Compute hosts allow migration-based allocations
+    # for live migration.
+    {'compute_rpc': '4.18'},
 )
 
 
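Bumping SERVICE_VERSION and checking the deployment-wide minimum is the standard Nova pattern for rolling upgrades: a single not-yet-upgraded nova-compute keeps the new behavior disabled everywhere. The gate added in live_migrate.py follows that pattern; a generalized sketch (the helper name here is illustrative):

    from nova import objects

    def deployment_at_least(context, minimum):
        # get_minimum_version_multi() returns the lowest SERVICE_VERSION
        # reported by any matching service, so mixed-version clouds fall
        # back to the old code path until the last compute is upgraded.
        minver = objects.Service.get_minimum_version_multi(
            context, ['nova-compute'])
        return minver >= minimum

    # should_do_migration_allocation() above is exactly this check
    # with minimum=25.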
@@ -339,7 +339,8 @@ def resources_from_request_spec(spec_obj):
 # TODO(mriedem): Remove this when select_destinations() in the scheduler takes
 # some sort of skip_filters flag.
 def claim_resources_on_destination(
-        reportclient, instance, source_node, dest_node):
+        reportclient, instance, source_node, dest_node,
+        source_node_allocations=None):
     """Copies allocations from source node to dest node in Placement
 
     Normally the scheduler will allocate resources on a chosen destination
@@ -361,9 +362,10 @@ def claim_resources_on_destination(
         node fails.
     """
     # Get the current allocations for the source node and the instance.
-    source_node_allocations = (
-        reportclient.get_allocations_for_consumer_by_provider(
-            source_node.uuid, instance.uuid))
+    if not source_node_allocations:
+        source_node_allocations = (
+            reportclient.get_allocations_for_consumer_by_provider(
+                source_node.uuid, instance.uuid))
     if source_node_allocations:
         # Generate an allocation request for the destination node.
         alloc_request = {
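With the new optional parameter, a caller that already holds the source-node allocations (because it moved them onto the migration record) can hand them straight to claim_resources_on_destination() instead of having the function re-read Placement, where the instance consumer may no longer have an allocation on the source. A usage sketch (the held_allocations variable is illustrative):

    # Conductor path: the allocations were captured when they were
    # swapped onto the migration, so pass them through.
    scheduler_utils.claim_resources_on_destination(
        reportclient, instance, source_node, dest_node,
        source_node_allocations=held_allocations)

    # Legacy path: omit the argument and the function falls back to
    # fetching them itself via
    # get_allocations_for_consumer_by_provider(source_node.uuid,
    # instance.uuid).
    scheduler_utils.claim_resources_on_destination(
        reportclient, instance, source_node, dest_node)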
@@ -5968,9 +5968,7 @@ class ComputeTestCase(BaseTestCase,
     @mock.patch.object(fake.FakeDriver, 'get_instance_disk_info')
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
     @mock.patch.object(objects.ComputeNode,
-                       'get_first_node_by_host_for_old_compat')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'remove_provider_from_instance_allocation')
+                       'get_by_host_and_nodename')
     @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'remove_volume_connection')
     @mock.patch.object(compute_rpcapi.ComputeAPI,
@@ -5978,7 +5976,7 @@ class ComputeTestCase(BaseTestCase,
     @mock.patch('nova.objects.Migration.save')
     def test_live_migration_exception_rolls_back(self, mock_save,
                                                  mock_rollback, mock_remove,
-                                                 mock_get_uuid, mock_remove_allocs,
+                                                 mock_get_uuid,
                                                  mock_get_node, mock_pre, mock_get_disk):
         # Confirm exception when pre_live_migration fails.
         c = context.get_admin_context()
@@ -6014,14 +6012,31 @@ class ComputeTestCase(BaseTestCase,
         mock_get_uuid.return_value = fake_bdms
 
         # start test
-        migration = objects.Migration()
-        with mock.patch.object(self.compute.network_api,
-                               'setup_networks_on_host') as mock_setup:
+        migration = objects.Migration(uuid=uuids.migration)
+
+        @mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
+        @mock.patch.object(self.compute, 'reportclient')
+        def do_it(mock_client, mock_setup):
+            mock_client.get_allocations_for_consumer.return_value = {
+                mock.sentinel.source: {
+                    'resources': mock.sentinel.allocs,
+                }
+            }
             self.assertRaises(test.TestingException,
                               self.compute.live_migration,
                               c, dest=dest_host, block_migration=True,
                               instance=instance, migration=migration,
                               migrate_data=migrate_data)
+            mock_setup.assert_called_once_with(c, instance, self.compute.host)
+            mock_client.put_allocations.assert_called_once_with(
+                mock.sentinel.source, instance.uuid,
+                mock.sentinel.allocs,
+                instance.project_id, instance.user_id)
+            mock_client.delete_allocation_for_instance.assert_called_once_with(
+                migration.uuid)
+
+        do_it()
 
         instance.refresh()
 
         self.assertEqual('src_host', instance.host)
@@ -6032,10 +6047,7 @@ class ComputeTestCase(BaseTestCase,
                 block_device_info=block_device_info)
         mock_pre.assert_called_once_with(c,
                 instance, True, 'fake_disk', dest_host, migrate_data)
-        mock_remove_allocs.assert_called_once_with(
-            instance.uuid, dest_node.uuid, instance.user_id,
-            instance.project_id, test.MatchType(dict))
-        mock_setup.assert_called_once_with(c, instance, self.compute.host)
 
         mock_get_uuid.assert_called_with(c, instance.uuid)
         mock_remove.assert_has_calls([
             mock.call(c, instance, uuids.volume_id_1, dest_host),
@@ -6061,13 +6073,15 @@ class ComputeTestCase(BaseTestCase,
         instance.host = self.compute.host
         dest = 'desthost'
 
+        migration = objects.Migration(uuid=uuids.migration,
+                                      source_node=instance.node)
         migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
             is_shared_instance_path=False,
-            is_shared_block_storage=False)
+            is_shared_block_storage=False,
+            migration=migration)
         mock_pre.return_value = migrate_data
 
         # start test
-        migration = objects.Migration()
         with test.nested(
             mock.patch.object(
                 self.compute.network_api, 'migrate_instance_start'),
@@ -6172,19 +6186,24 @@ class ComputeTestCase(BaseTestCase,
                          'task_state': task_states.MIGRATING,
                          'power_state': power_state.PAUSED})
 
+        migration_obj = objects.Migration(uuid=uuids.migration,
+                                          source_node=instance.node,
+                                          status='completed')
         migration = {'source_compute': srchost, 'dest_compute': dest, }
         migrate_data = objects.LibvirtLiveMigrateData(
             is_shared_instance_path=False,
             is_shared_block_storage=False,
+            migration=migration_obj,
             block_migration=False)
 
         with test.nested(
             mock.patch.object(
                 self.compute.network_api, 'migrate_instance_start'),
             mock.patch.object(
-                self.compute.network_api, 'setup_networks_on_host')
+                self.compute.network_api, 'setup_networks_on_host'),
+            mock.patch.object(migration_obj, 'save'),
         ) as (
-            mock_migrate, mock_setup
+            mock_migrate, mock_setup, mock_mig_save
         ):
             self.compute._post_live_migration(c, instance, dest,
                                               migrate_data=migrate_data)
@@ -6213,7 +6232,9 @@ class ComputeTestCase(BaseTestCase,
                          'power_state': power_state.PAUSED})
         instance.save()
 
-        migration_obj = objects.Migration()
+        migration_obj = objects.Migration(uuid=uuids.migration,
+                                          source_node=instance.node,
+                                          status='completed')
         migrate_data = migrate_data_obj.LiveMigrateData(
             migration=migration_obj)
 
@@ -6271,11 +6292,13 @@ class ComputeTestCase(BaseTestCase,
         """
         dest = 'desthost'
         srchost = self.compute.host
+        srcnode = 'srcnode'
 
         # creating testdata
         c = context.get_admin_context()
         instance = self._create_fake_instance_obj({
                                 'host': srchost,
+                                'node': srcnode,
                                 'state_description': 'migrating',
                                 'state': power_state.PAUSED},
                                 context=c)
@@ -6284,7 +6307,9 @@ class ComputeTestCase(BaseTestCase,
                          'power_state': power_state.PAUSED})
         instance.save()
 
-        migration_obj = objects.Migration()
+        migration_obj = objects.Migration(source_node=srcnode,
+                                          uuid=uuids.migration,
+                                          status='completed')
         migrate_data = migrate_data_obj.LiveMigrateData(
             migration=migration_obj)
 
@@ -6305,12 +6330,14 @@ class ComputeTestCase(BaseTestCase,
                               'clear_events_for_instance'),
             mock.patch.object(self.compute, 'update_available_resource'),
             mock.patch.object(migration_obj, 'save'),
+            mock.patch.object(self.compute, '_get_resource_tracker'),
         ) as (
             post_live_migration, unfilter_instance,
             migrate_instance_start, post_live_migration_at_destination,
             post_live_migration_at_source, setup_networks_on_host,
-            clear_events, update_available_resource, mig_save
+            clear_events, update_available_resource, mig_save, getrt
         ):
+            getrt.return_value.get_node_uuid.return_value = srcnode
             self.compute._post_live_migration(c, instance, dest,
                                               migrate_data=migrate_data)
             update_available_resource.assert_has_calls([mock.call(c)])
@@ -6354,32 +6381,31 @@ class ComputeTestCase(BaseTestCase,
             mock.patch.object(objects.BlockDeviceMappingList,
                               'get_by_instance_uuid'),
             mock.patch.object(self.compute.driver, 'get_volume_connector'),
-            mock.patch.object(cinder.API, 'terminate_connection')
+            mock.patch.object(cinder.API, 'terminate_connection'),
+            mock.patch.object(self.compute, '_delete_allocation_after_move'),
         ) as (
             migrate_instance_start, post_live_migration_at_destination,
             setup_networks_on_host, clear_events_for_instance,
             get_instance_volume_block_device_info, get_by_instance_uuid,
-            get_volume_connector, terminate_connection
+            get_volume_connector, terminate_connection, delete_alloc,
         ):
             get_by_instance_uuid.return_value = bdms
             get_volume_connector.return_value = 'fake-connector'
 
-            self.compute._post_live_migration(c, instance, 'dest_host')
+            self.compute._post_live_migration(c, instance, 'dest_host',
+                                              migrate_data=mock.MagicMock())
 
             terminate_connection.assert_called_once_with(
                 c, uuids.volume_id, 'fake-connector')
 
     @mock.patch.object(objects.ComputeNode,
-                       'get_first_node_by_host_for_old_compat')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'remove_provider_from_instance_allocation')
+                       'get_by_host_and_nodename')
     @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
-    def test_rollback_live_migration(self, mock_bdms, mock_remove_allocs,
-                                     mock_get_node):
+    def test_rollback_live_migration(self, mock_bdms, mock_get_node):
         c = context.get_admin_context()
         instance = mock.MagicMock()
-        migration = mock.MagicMock()
-        migrate_data = {'migration': migration}
+        migration = objects.Migration(uuid=uuids.migration)
+        migrate_data = objects.LibvirtLiveMigrateData(migration=migration)
 
         dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
         mock_get_node.return_value = dest_node
@@ -6387,15 +6413,14 @@ class ComputeTestCase(BaseTestCase,
         mock_bdms.return_value = bdms
 
         @mock.patch('nova.compute.utils.notify_about_instance_action')
+        @mock.patch.object(migration, 'save')
+        @mock.patch.object(self.compute, '_revert_allocation')
         @mock.patch.object(self.compute, '_live_migration_cleanup_flags')
         @mock.patch.object(self.compute, 'network_api')
-        def _test(mock_nw_api, mock_lmcf, mock_notify):
+        def _test(mock_nw_api, mock_lmcf, mock_ra, mock_mig_save, mock_notify):
             mock_lmcf.return_value = False, False
             self.compute._rollback_live_migration(c, instance, 'foo',
                                                   migrate_data=migrate_data)
-            mock_remove_allocs.assert_called_once_with(
-                instance.uuid, dest_node.uuid, instance.user_id,
-                instance.project_id, test.MatchType(dict))
             mock_notify.assert_has_calls([
                 mock.call(c, instance, self.compute.host,
                           action='live_migration_rollback', phase='start',
@@ -6405,19 +6430,17 @@ class ComputeTestCase(BaseTestCase,
                           bdms=bdms)])
             mock_nw_api.setup_networks_on_host.assert_called_once_with(
                 c, instance, self.compute.host)
+            mock_ra.assert_called_once_with(mock.ANY, instance, migration)
+            mock_mig_save.assert_called_once_with()
         _test()
 
         self.assertEqual('error', migration.status)
         self.assertEqual(0, instance.progress)
-        migration.save.assert_called_once_with()
 
     @mock.patch.object(objects.ComputeNode,
-                       'get_first_node_by_host_for_old_compat')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'remove_provider_from_instance_allocation')
+                       'get_by_host_and_nodename')
     @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     def test_rollback_live_migration_set_migration_status(self, mock_bdms,
-                                                          mock_remove_allocs,
                                                           mock_get_node):
         c = context.get_admin_context()
         instance = mock.MagicMock()
@@ -6429,17 +6452,16 @@ class ComputeTestCase(BaseTestCase,
         bdms = objects.BlockDeviceMappingList()
         mock_bdms.return_value = bdms
 
+        @mock.patch.object(self.compute, '_revert_allocation')
         @mock.patch('nova.compute.utils.notify_about_instance_action')
        @mock.patch.object(self.compute, '_live_migration_cleanup_flags')
         @mock.patch.object(self.compute, 'network_api')
-        def _test(mock_nw_api, mock_lmcf, mock_notify):
+        def _test(mock_nw_api, mock_lmcf, mock_notify, mock_ra):
             mock_lmcf.return_value = False, False
             self.compute._rollback_live_migration(c, instance, 'foo',
                                                   migrate_data=migrate_data,
                                                   migration_status='fake')
-            mock_remove_allocs.assert_called_once_with(
-                instance.uuid, dest_node.uuid, instance.user_id,
-                instance.project_id, test.MatchType(dict))
+            mock_ra.assert_called_once_with(mock.ANY, instance, migration)
             mock_notify.assert_has_calls([
                 mock.call(c, instance, self.compute.host,
                           action='live_migration_rollback', phase='start',

@@ -6551,31 +6551,31 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
 
     @mock.patch.object(objects.ComputeNode,
                        'get_first_node_by_host_for_old_compat')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'remove_provider_from_instance_allocation')
-    def test_rollback_live_migration_handles_dict(self, mock_remove_allocs,
+    def test_rollback_live_migration_handles_dict(self,
                                                   mock_get_node):
         compute = manager.ComputeManager()
         dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
         mock_get_node.return_value = dest_node
 
+        @mock.patch.object(compute, '_revert_allocation')
         @mock.patch('nova.compute.utils.notify_about_instance_action')
         @mock.patch.object(compute.network_api, 'setup_networks_on_host')
         @mock.patch.object(compute, '_notify_about_instance_usage')
         @mock.patch.object(compute, '_live_migration_cleanup_flags')
         @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
         def _test(mock_bdm, mock_lmcf, mock_notify, mock_nwapi,
-                  mock_notify_about_instance_action):
+                  mock_notify_about_instance_action, mock_ra):
             bdms = objects.BlockDeviceMappingList()
             mock_bdm.return_value = bdms
             mock_lmcf.return_value = False, False
             mock_instance = mock.MagicMock()
+            mock_migration = mock.MagicMock()
             compute._rollback_live_migration(self.context,
                                              mock_instance,
-                                             'foo', {})
-            mock_remove_allocs.assert_called_once_with(
-                mock_instance.uuid, dest_node.uuid, mock_instance.user_id,
-                mock_instance.project_id, test.MatchType(dict))
+                                             'foo',
+                                             {'migration': mock_migration})
+            mock_ra.assert_called_once_with(self.context,
+                                            mock_instance, mock_migration)
             mock_notify_about_instance_action.assert_has_calls([
                 mock.call(self.context, mock_instance, compute.host,
                           action='live_migration_rollback', phase='start',
@@ -6741,6 +6741,91 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
 
         _do_test()
 
+    def _call_post_live_migration(self, *args, **kwargs):
+        @mock.patch.object(self.compute, 'update_available_resource')
+        @mock.patch.object(self.compute, 'compute_rpcapi')
+        @mock.patch.object(self.compute, '_notify_about_instance_usage')
+        @mock.patch.object(self.compute, 'network_api')
+        @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+        def _do_call(bdm, nwapi, notify, rpc, update):
+            return self.compute._post_live_migration(self.context,
+                                                     self.instance,
+                                                     'foo',
+                                                     *args, **kwargs)
+        return _do_call()
+
+    def test_post_live_migration_new_allocations(self):
+        # We have a migrate_data with a migration...
+        migration = objects.Migration(uuid=uuids.migration)
+        migration.save = mock.MagicMock()
+        md = objects.LibvirtLiveMigrateData(migration=migration,
+                                            is_shared_instance_path=False,
+                                            is_shared_block_storage=False)
+        with test.nested(
+            mock.patch.object(self.compute.scheduler_client,
+                              'reportclient'),
+            mock.patch.object(self.compute,
+                              '_delete_allocation_after_move'),
+        ) as (
+            mock_report, mock_delete,
+        ):
+            # ...and that migration has allocations...
+            mock_report.get_allocations_for_consumer.return_value = (
+                mock.sentinel.allocs)
+            self._call_post_live_migration(migrate_data=md)
+            # ...so we should have called the new style delete
+            mock_delete.assert_called_once_with(self.instance,
+                                                migration,
+                                                self.instance.flavor,
+                                                self.instance.node)
+
+    def test_post_live_migration_old_allocations(self):
+        # We have a migrate_data with a migration...
+        migration = objects.Migration(uuid=uuids.migration)
+        migration.save = mock.MagicMock()
+        md = objects.LibvirtLiveMigrateData(migration=migration,
+                                            is_shared_instance_path=False,
+                                            is_shared_block_storage=False)
+        with test.nested(
+            mock.patch.object(self.compute.scheduler_client,
+                              'reportclient'),
+            mock.patch.object(self.compute,
+                              '_delete_allocation_after_move'),
+            mock.patch.object(self.compute,
+                              '_get_resource_tracker'),
+        ) as (
+            mock_report, mock_delete, mock_rt,
+        ):
+            # ...and that migration does not have allocations...
+            mock_report.get_allocations_for_consumer.return_value = None
+            self._call_post_live_migration(migrate_data=md)
+            # ...so we should have called the old style delete
+            mock_delete.assert_not_called()
+            fn = mock_rt.return_value.delete_allocation_for_migrated_instance
+            fn.assert_called_once_with(self.instance, self.instance.node)
+
+    def test_post_live_migration_legacy(self):
+        # We have no migrate_data...
+        md = None
+        with test.nested(
+            mock.patch.object(self.compute.scheduler_client,
+                              'reportclient'),
+            mock.patch.object(self.compute,
+                              '_delete_allocation_after_move'),
+            mock.patch.object(self.compute,
+                              '_get_resource_tracker'),
+        ) as (
+            mock_report, mock_delete, mock_rt,
+        ):
+            self._call_post_live_migration(migrate_data=md)
+            # ...without migrate_data, no migration allocations check...
+            ga = mock_report.get_allocations_for_consumer
+            self.assertFalse(ga.called)
+            # ...so we should have called the old style delete
+            mock_delete.assert_not_called()
+            fn = mock_rt.return_value.delete_allocation_for_migrated_instance
+            fn.assert_called_once_with(self.instance, self.instance.node)
+
     def test_post_live_migration_cinder_v3_api(self):
         # Because live migration has succeeded, _post_live_migration
         # should call attachment_delete with the original/old attachment_id
@@ -6748,6 +6833,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
 
         dest_host = 'test_dest_host'
         instance = fake_instance.fake_instance_obj(self.context,
+                                                   node='dest',
                                                    uuid=uuids.instance)
         bdm_id = 1
         volume_id = uuids.volume
@@ -6768,9 +6854,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
         vol_bdm.attachment_id = uuids.attachment1
         orig_attachment_id = uuids.attachment2
         migrate_data = migrate_data_obj.LiveMigrateData()
+        migrate_data.migration = objects.Migration(uuid=uuids.migration,
+                                                   dest_node=instance.node,
+                                                   source_node='src')
         migrate_data.old_vol_attachment_ids = {volume_id: orig_attachment_id}
         image_bdm.attachment_id = uuids.attachment3
 
+        @mock.patch.object(migrate_data.migration, 'save',
+                           new=lambda: None)
+        @mock.patch.object(compute.reportclient,
+                           'get_allocations_for_consumer_by_provider')
         @mock.patch.object(compute, '_get_resource_tracker')
         @mock.patch.object(vol_bdm, 'save')
         @mock.patch.object(compute, 'update_available_resource')
@@ -6784,7 +6877,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                     'get_by_instance_uuid')
         def _test(mock_get_bdms, mock_net_api, mock_notify, mock_driver,
                   mock_rpc, mock_get_bdm_info, mock_attach_delete,
-                  mock_update_resource, mock_bdm_save, mock_rt):
+                  mock_update_resource, mock_bdm_save, mock_rt, mock_ga):
             mock_rt.return_value = mock.Mock()
             mock_get_bdms.return_value = [vol_bdm, image_bdm]
 

@@ -60,7 +60,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
             servicegroup.API(), scheduler_client.SchedulerClient(),
             self.fake_spec)
 
-    def test_execute_with_destination(self):
+    def test_execute_with_destination(self, new_mode=True):
         dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
         with test.nested(
             mock.patch.object(self.task, '_check_host_is_up'),
@@ -71,15 +71,27 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
                               'claim_resources_on_destination'),
             mock.patch.object(self.migration, 'save'),
             mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
-        ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig):
+            mock.patch('nova.conductor.tasks.migrate.'
+                       'replace_allocation_with_migration'),
+            mock.patch('nova.conductor.tasks.live_migrate.'
+                       'should_do_migration_allocation')
+        ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig,
+              m_alloc, mock_sda):
             mock_mig.return_value = "bob"
+            m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)
+            mock_sda.return_value = new_mode
 
             self.assertEqual("bob", self.task.execute())
             mock_check_up.assert_called_once_with(self.instance_host)
             mock_check_dest.assert_called_once_with()
+            if new_mode:
+                allocs = mock.sentinel.allocs
+            else:
+                allocs = None
             mock_claim.assert_called_once_with(
                 self.task.scheduler_client.reportclient, self.instance,
-                mock.sentinel.source_node, dest_node)
+                mock.sentinel.source_node, dest_node,
+                source_node_allocations=allocs)
             mock_mig.assert_called_once_with(
                 self.context,
                 host=self.instance_host,
@@ -95,6 +107,15 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
                          self.migration.dest_node)
         self.assertEqual(self.task.destination,
                          self.migration.dest_compute)
+        if new_mode:
+            m_alloc.assert_called_once_with(self.context,
+                                            self.instance,
+                                            self.migration)
+        else:
+            m_alloc.assert_not_called()
+
+    def test_execute_with_destination_old_school(self):
+        self.test_execute_with_destination(new_mode=False)
 
     def test_execute_without_destination(self):
         self.destination = None
@@ -105,10 +126,17 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
             mock.patch.object(self.task, '_check_host_is_up'),
             mock.patch.object(self.task, '_find_destination'),
             mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
-            mock.patch.object(self.migration, 'save')
-        ) as (mock_check, mock_find, mock_mig, mock_save):
+            mock.patch.object(self.migration, 'save'),
+            mock.patch('nova.conductor.tasks.migrate.'
+                       'replace_allocation_with_migration'),
+            mock.patch('nova.conductor.tasks.live_migrate.'
+                       'should_do_migration_allocation'),
+        ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc,
+              mock_sda):
             mock_find.return_value = ("found_host", "found_node")
             mock_mig.return_value = "bob"
+            mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock())
+            mock_sda.return_value = True
 
             self.assertEqual("bob", self.task.execute())
             mock_check.assert_called_once_with(self.instance_host)
@@ -124,6 +152,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
         self.assertEqual('found_host', self.migration.dest_compute)
         self.assertEqual('found_node', self.migration.dest_node)
         self.assertEqual(self.instance.node, self.migration.source_node)
+        self.assertTrue(mock_alloc.called)
 
     def test_check_instance_is_active_passes_when_paused(self):
         self.task.instance['power_state'] = power_state.PAUSED

@@ -499,6 +499,8 @@ class TestUtils(test.NoDBTestCase):
         @mock.patch.object(reportclient,
                            'claim_resources', return_value=False)
         def test(mock_claim, mock_get_allocs):
+            # NOTE(danms): Don't pass source_node_allocations here to test
+            # that they are fetched if needed.
             self.assertRaises(exception.NoValidHost,
                               utils.claim_resources_on_destination,
                               reportclient, instance, source_node, dest_node)
@@ -536,15 +538,14 @@ class TestUtils(test.NoDBTestCase):
         }
 
         @mock.patch.object(reportclient,
-                           'get_allocations_for_consumer_by_provider',
-                           return_value=source_res_allocs)
+                           'get_allocations_for_consumer_by_provider')
         @mock.patch.object(reportclient,
                            'claim_resources', return_value=True)
         def test(mock_claim, mock_get_allocs):
             utils.claim_resources_on_destination(
-                reportclient, instance, source_node, dest_node)
-            mock_get_allocs.assert_called_once_with(
-                uuids.source_node, instance.uuid)
+                reportclient, instance, source_node, dest_node,
+                source_res_allocs)
+            self.assertFalse(mock_get_allocs.called)
             mock_claim.assert_called_once_with(
                 instance.uuid, dest_alloc_request,
                 instance.project_id, instance.user_id)