Merge "Drop legacy cold migrate allocation compat code"
commit 238407ad00
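This merge removes compatibility paths that were only needed while some computes could still be running pre-Queens code. Everything below assumes migration-based allocations: during a resize or cold/live migration, placement holds the source-node usage against the migration record's UUID and the destination usage against the instance's UUID, so confirm and revert only have to delete or move a single consumer's allocation. A standalone sketch of that model (illustrative only, not nova code; the UUIDs and resource classes are made up):

# Sketch of migration-based allocation accounting (not nova code).
# Placement tracks allocations per consumer UUID; the source-node usage is
# held by the migration record, the destination usage by the instance.
allocations = {
    'instance-uuid': {'dest-node': {'VCPU': 4, 'MEMORY_MB': 8192}},
    'migration-uuid': {'source-node': {'VCPU': 2, 'MEMORY_MB': 4096}},
}


def confirm_resize(allocs, migration_uuid):
    # Confirming frees the source node by dropping the migration's allocation.
    allocs.pop(migration_uuid, None)


def revert_resize(allocs, instance_uuid, migration_uuid):
    # Reverting hands the source-node allocation back to the instance and
    # drops whatever the instance held on the destination.
    allocs[instance_uuid] = allocs.pop(migration_uuid)


if __name__ == '__main__':
    confirm_resize(allocations, 'migration-uuid')
    assert 'migration-uuid' not in allocations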
@@ -3889,10 +3889,7 @@ class ComputeManager(manager.Manager):
         rt = self._get_resource_tracker()
         rt.drop_move_claim(context, instance, migration.source_node,
                            old_instance_type, prefix='old_')
-        self._delete_allocation_after_move(context, instance,
-                                           migration,
-                                           old_instance_type,
-                                           migration.source_node)
+        self._delete_allocation_after_move(context, instance, migration)
         instance.drop_migration_context()
 
         # NOTE(mriedem): The old_vm_state could be STOPPED but the user
@@ -3923,26 +3920,16 @@ class ComputeManager(manager.Manager):
             self.host, action=fields.NotificationAction.RESIZE_CONFIRM,
             phase=fields.NotificationPhase.END)
 
-    def _delete_allocation_after_move(self, context, instance, migration,
-                                      flavor, nodename):
-        rt = self._get_resource_tracker()
-        cn_uuid = rt.get_node_uuid(nodename)
-
-        if migration.source_node == nodename:
-            if migration.status in ('confirmed', 'completed'):
-                try:
-                    # NOTE(danms): We're finishing on the source node, so try
-                    # to delete the allocation based on the migration uuid
-                    deleted = self.reportclient.delete_allocation_for_instance(
-                        context, migration.uuid)
-                    if deleted:
-                        LOG.info(_('Source node %(node)s confirmed migration '
-                                   '%(mig)s; deleted migration-based '
-                                   'allocation'),
-                                 {'node': nodename, 'mig': migration.uuid})
-                        # NOTE(danms): We succeeded, which means we do not
-                        # need to do the complex double allocation dance
-                        return
-                except exception.AllocationDeleteFailed:
-                    LOG.error('Deleting allocation in placement for migration '
-                              '%(migration_uuid)s failed. The instance '
+    def _delete_allocation_after_move(self, context, instance, migration):
+        """Deletes resource allocations held by the migration record against
+        the source compute node resource provider after a confirmed cold /
+        successful live migration.
+        """
+        try:
+            # NOTE(danms): We're finishing on the source node, so try
+            # to delete the allocation based on the migration uuid
+            self.reportclient.delete_allocation_for_instance(
+                context, migration.uuid)
+        except exception.AllocationDeleteFailed:
+            LOG.error('Deleting allocation in placement for migration '
+                      '%(migration_uuid)s failed. The instance '
@@ -3952,60 +3939,6 @@ class ComputeManager(manager.Manager):
                       {'instance_uuid': instance.uuid,
                        'migration_uuid': migration.uuid})
             raise
-            else:
-                # We're reverting (or failed) on the source, so we
-                # need to check if our migration holds a claim and if
-                # so, avoid doing the legacy behavior below.
-                # NOTE(gibi): We are only hitting this in case of reverting
-                # a resize-on-same-host operation
-                mig_allocs = (
-                    self.reportclient.get_allocations_for_consumer_by_provider(
-                        context, cn_uuid, migration.uuid))
-                if mig_allocs:
-                    LOG.info(_('Source node %(node)s reverted migration '
-                               '%(mig)s; not deleting migration-based '
-                               'allocation'),
-                             {'node': nodename, 'mig': migration.uuid})
-                    return
-        elif migration.dest_node == nodename:
-            # NOTE(danms): We're reverting on the destination node
-            # (and we must not be doing a same-host migration if we
-            # made it past the check above), so we need to check to
-            # see if the source did migration-based allocation
-            # accounting
-            allocs = self.reportclient.get_allocations_for_consumer(
-                context, migration.uuid)
-            if allocs:
-                # NOTE(danms): The source did migration-based allocation
-                # accounting, so we should let the source node rejigger
-                # the allocations in finish_resize_revert()
-                LOG.info(_('Destination node %(node)s reverted migration '
-                           '%(mig)s; not deleting migration-based '
-                           'allocation'),
-                         {'node': nodename, 'mig': migration.uuid})
-                return
-
-        # TODO(danms): Remove below this line when we remove compatibility
-        # for double-accounting migrations (likely rocky)
-        LOG.info(_('Doing legacy allocation math for migration %(mig)s after '
-                   'instance move'),
-                 {'mig': migration.uuid},
-                 instance=instance)
-
-        # NOTE(jaypipes): This sucks, but due to the fact that confirm_resize()
-        # only runs on the source host and revert_resize() runs on the
-        # destination host, we need to do this here. Basically, what we're
-        # doing here is grabbing the existing allocations for this instance
-        # from the placement API, dropping the resources in the doubled-up
-        # allocation set that refer to the source host UUID and calling PUT
-        # /allocations back to the placement API. The allocation that gets
-        # PUT'd back to placement will only include the destination host and
-        # any shared providers in the case of a confirm_resize operation and
-        # the source host and shared providers for a revert_resize operation..
-        if not scheduler_utils.remove_allocation_from_compute(
-                context, instance, cn_uuid, self.reportclient, flavor):
-            LOG.error("Failed to save manipulated allocation",
-                      instance=instance)
 
     @wrap_exception()
     @reverts_task_state
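The block removed above is the legacy "double accounting" cleanup: before migration-based allocations, the instance's own consumer held usage on both the source and destination providers, and confirm/revert had to subtract the stale provider's share and PUT the remainder back to placement (the scheduler_utils.remove_allocation_from_compute call). A toy illustration of that subtraction, assuming a plain dict keyed by provider UUID rather than the real helper:

# Toy sketch (not the real scheduler_utils helper) of the legacy "doubled"
# cleanup deleted above: drop one provider's share, keep the rest.
def remove_provider(doubled_allocation, provider_uuid):
    return {rp: res for rp, res in doubled_allocation.items()
            if rp != provider_uuid}


doubled = {
    'source-node': {'VCPU': 2, 'MEMORY_MB': 4096},
    'dest-node': {'VCPU': 4, 'MEMORY_MB': 8192},
}
assert remove_provider(doubled, 'source-node') == {
    'dest-node': {'VCPU': 4, 'MEMORY_MB': 8192},
}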
@@ -4064,9 +3997,6 @@ class ComputeManager(manager.Manager):
 
         rt = self._get_resource_tracker()
         rt.drop_move_claim(context, instance, instance.node)
-        self._delete_allocation_after_move(context, instance, migration,
-                                           instance.flavor,
-                                           instance.node)
 
         # RPC cast back to the source host to finish the revert there.
         self.compute_rpcapi.finish_revert_resize(context, instance,
@@ -4184,13 +4114,10 @@ class ComputeManager(manager.Manager):
         orig_alloc = self.reportclient.get_allocations_for_consumer(
             context, migration.uuid)
         if not orig_alloc:
-            # NOTE(danms): This migration did not do per-migration allocation
-            # accounting, so nothing to do here.
-            LOG.info('Old-style migration %(mig)s is being reverted; '
-                     'no migration claims found on original node '
-                     'to swap.',
-                     {'mig': migration.uuid},
-                     instance=instance)
+            LOG.error('Did not find resource allocations for migration '
+                      '%s on source node %s. Unable to revert source node '
+                      'allocations back to the instance.',
+                      migration.uuid, migration.source_node, instance=instance)
             return False
 
         if len(orig_alloc) > 1:
@@ -4198,7 +4125,7 @@ class ComputeManager(manager.Manager):
             # against other providers that need to be held by the migration
             # as well. Perhaps something like shared storage resources that
             # will actually be duplicated during a resize type operation.
-            LOG.error('New-style migration %(mig)s has allocations against '
+            LOG.error('Migration %(mig)s has allocations against '
                       'more than one provider %(rps)s. This should not be '
                       'possible, but reverting it anyway.',
                       {'mig': migration.uuid,
@@ -4299,18 +4226,7 @@ class ComputeManager(manager.Manager):
             # Since we hit a failure, we're either rescheduling or dead
             # and either way we need to cleanup any allocations created
             # by the scheduler for the destination node.
-            if migration and not self._revert_allocation(
-                    context, instance, migration):
-                # We did not do a migration-based
-                # allocation. Note that for a resize to the
-                # same host, the scheduler will merge the
-                # flavors, so here we'd be subtracting the new
-                # flavor from the allocated resources on this
-                # node.
-                # FIXME(danms): Remove this in Rocky
-                rt = self._get_resource_tracker()
-                rt.delete_allocation_for_failed_resize(
-                    context, instance, node, instance_type)
+            self._revert_allocation(context, instance, migration)
             # try to re-schedule the resize elsewhere:
             exc_info = sys.exc_info()
             self._reschedule_resize_or_reraise(context, image, instance,
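With migration-based allocations guaranteed, the resize error path above no longer needs the resource-tracker fallback for old-style claims: it can always hand the allocation back through _revert_allocation before rescheduling. A minimal sketch of that control flow, using hypothetical callables rather than nova's real methods:

# Sketch (assumed names, not nova code) of the simplified failure handling:
# on error, unconditionally revert the migration-held allocation, then retry.
def run_resize(do_resize, revert_allocation, reschedule):
    try:
        do_resize()
    except Exception:
        revert_allocation()
        reschedule()


def failing_resize():
    raise RuntimeError('boom')


calls = []
run_resize(failing_resize, lambda: calls.append('revert'),
           lambda: calls.append('reschedule'))
assert calls == ['revert', 'reschedule']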
@@ -6751,9 +6667,7 @@ class ComputeManager(manager.Manager):
             # We had a migration-based allocation that we need to handle
             self._delete_allocation_after_move(ctxt,
                                                instance,
-                                               migrate_data.migration,
-                                               instance.flavor,
-                                               source_node)
+                                               migrate_data.migration)
         else:
             # No migration-based allocations, so do the old thing and
             # attempt to clean up any doubled per-instance allocation
@@ -99,12 +99,6 @@ def revert_allocation_for_migration(context, source_cn, instance, migration):
              {'inst': instance.uuid, 'rp': source_cn.uuid})
 
 
-def should_do_migration_allocation(context):
-    minver = objects.Service.get_minimum_version_multi(context,
-                                                       ['nova-compute'])
-    return minver >= 23
-
-
 class MigrationTask(base.TaskBase):
     def __init__(self, context, instance, flavor,
                  request_spec, clean_shutdown, compute_rpcapi,
@@ -126,11 +120,6 @@ class MigrationTask(base.TaskBase):
         self._source_cn = None
 
     def _preallocate_migration(self):
-        if not should_do_migration_allocation(self.context):
-            # NOTE(danms): We can't pre-create the migration since we have
-            # old computes. Let the compute do it (legacy behavior).
-            return None
-
         # If this is a rescheduled migration, don't create a new record.
         migration_type = ("resize" if self.instance.flavor.id != self.flavor.id
                           else "migration")
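The two hunks above appear to be the conductor's migrate task: should_do_migration_allocation() checked the minimum nova-compute service version before the conductor would pre-create the migration record (and with it the migration-held allocation), and _preallocate_migration() bailed out when old computes were still around. With the compat code dropped, the new path always runs. A sketch of the gating idea, with hypothetical names and the threshold of 23 taken from the removed code:

# Sketch (hypothetical helper, not the removed nova code verbatim) of the
# service-version gate: pre-creating the migration record was only safe once
# every nova-compute was new enough to understand migration-based allocations.
MIGRATION_ALLOCATION_MIN_COMPUTE_VERSION = 23  # assumed constant for the sketch


def preallocate_migration(min_compute_version, create_migration_record):
    if min_compute_version < MIGRATION_ALLOCATION_MIN_COMPUTE_VERSION:
        # Old computes in the fleet: let the compute create the record later
        # (legacy behavior, now dropped).
        return None
    return create_migration_record()


assert preallocate_migration(22, lambda: 'migration') is None
assert preallocate_migration(23, lambda: 'migration') == 'migration'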
@@ -4685,6 +4685,7 @@ class ComputeTestCase(BaseTestCase,
         for operation in actions:
             if 'revert_resize' in operation:
                 migration.source_compute = 'fake-mini'
+                migration.source_node = 'fake-mini'
 
         def fake_migration_save(*args, **kwargs):
             raise test.TestingException()
@@ -5349,7 +5350,7 @@ class ComputeTestCase(BaseTestCase,
                                  instance_type=instance_type, image={},
                                  request_spec={},
                                  filter_properties={}, node=None,
-                                 clean_shutdown=True, migration=None,
+                                 clean_shutdown=True, migration=mock.Mock(),
                                  host_list=[])
         self.compute.terminate_instance(self.context, instance, [])
 
@@ -12571,25 +12572,24 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
         self.instance_type = flavors.get_flavor_by_name(
             "m1.tiny")
 
-    @mock.patch.object(db, 'migration_create')
+    @mock.patch('nova.compute.manager.ComputeManager._prep_resize',
+                side_effect=test.TestingException)
     @mock.patch.object(compute_manager.ComputeManager,
                        '_reschedule_resize_or_reraise')
-    def test_reschedule_resize_or_reraise_called(self, mock_res, mock_mig):
+    def test_reschedule_resize_or_reraise_called(self, mock_res, mock_prep):
         """Verify the rescheduling logic gets called when there is an error
         during prep_resize.
         """
         inst_obj = self._create_fake_instance_obj()
-        mock_mig.side_effect = test.TestingException("Original")
 
         self.compute.prep_resize(self.context, image=None,
                                  instance=inst_obj,
                                  instance_type=self.instance_type,
                                  request_spec={},
-                                 filter_properties={}, migration=None,
+                                 filter_properties={}, migration=mock.Mock(),
                                  node=None,
                                  clean_shutdown=True, host_list=None)
 
-        mock_mig.assert_called_once_with(mock.ANY, mock.ANY)
         mock_res.assert_called_once_with(mock.ANY, None, inst_obj, mock.ANY,
                                          self.instance_type, {}, {}, None)
 
@@ -6664,6 +6664,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
             dest_compute=None,
             dest_node=None,
             dest_host=None,
+            source_node='source_node',
             status='migrating')
         self.migration.save = mock.MagicMock()
         self.useFixture(fixtures.SpawnIsSynchronousFixture())
@@ -6903,7 +6904,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
 
         @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                     'drop_move_claim')
-        @mock.patch.object(self.compute, '_delete_allocation_after_move')
         @mock.patch('nova.compute.rpcapi.ComputeAPI.finish_revert_resize')
         @mock.patch.object(self.instance, 'revert_migration_context')
         @mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@@ -6932,7 +6932,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                        mock_get_instance_nw_info,
                        mock_revert_migration_context,
                        mock_finish_revert,
-                       mock_delete_allocation,
                        mock_drop_move_claim):
 
             self.instance.migration_context = objects.MigrationContext()
@@ -6944,9 +6943,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                 instance=self.instance)
             mock_drop_move_claim.assert_called_once_with(self.context,
                 self.instance, self.instance.node)
-            mock_delete_allocation.assert_called_once_with(
-                self.context, self.instance, self.migration,
-                self.instance.flavor, self.instance.node)
             self.assertIsNotNone(self.instance.migration_context)
 
         # Three fake BDMs:
@@ -7034,144 +7030,28 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
             self.compute._confirm_resize(self.context, self.instance,
                                          self.migration)
             mock_delete.assert_called_once_with(self.context, self.instance,
-                                                self.migration,
-                                                self.instance.old_flavor,
-                                                self.migration.source_node)
+                                                self.migration)
             mock_save.assert_called_with(expected_task_state=
                                          [None, task_states.DELETING,
                                           task_states.SOFT_DELETING])
 
         do_confirm_resize()
 
-    @mock.patch('nova.scheduler.utils.resources_from_flavor')
-    def test_delete_allocation_after_move_legacy(self, mock_resources):
-        @mock.patch.object(self.compute, '_get_resource_tracker')
-        @mock.patch.object(self.compute, 'reportclient')
-        def do_it(mock_rc, mock_grt):
-            instance = mock.MagicMock()
-            migration = mock.MagicMock()
-            self.compute._delete_allocation_after_move(self.context,
-                                                       instance,
-                                                       migration,
-                                                       mock.sentinel.flavor,
-                                                       mock.sentinel.node)
-            mock_resources.assert_called_once_with(instance,
-                                                   mock.sentinel.flavor)
-            rt = mock_grt.return_value
-            rt.get_node_uuid.assert_called_once_with(mock.sentinel.node)
-            remove = mock_rc.remove_provider_from_instance_allocation
-            remove.assert_called_once_with(
-                self.context, instance.uuid, rt.get_node_uuid.return_value,
-                instance.user_id, instance.project_id,
-                mock_resources.return_value)
-        do_it()
-
-    @mock.patch('nova.scheduler.utils.resources_from_flavor')
-    def test_delete_allocation_after_move_confirm_by_migration(self, mock_rff):
-        mock_rff.return_value = {}
-
-        @mock.patch.object(self.compute, '_get_resource_tracker')
-        @mock.patch.object(self.compute, 'reportclient')
-        def doit(new_rules, mock_report, mock_rt):
-            mock_report.delete_allocation_for_instance.return_value = new_rules
-            self.migration.source_node = 'src'
-            self.migration.uuid = uuids.migration
-            self.migration.status = 'confirmed'
-            self.compute._delete_allocation_after_move(self.context,
-                                                       self.instance,
-                                                       self.migration,
-                                                       mock.sentinel.flavor,
-                                                       'src')
-            mock_report.delete_allocation_for_instance.assert_called_once_with(
-                self.context, self.migration.uuid)
-
-            old = mock_report.remove_provider_from_instance_allocation
-            if new_rules:
-                self.assertFalse(old.called)
-            else:
-                self.assertTrue(old.called)
-
-        # Allocations by migration, no legacy cleanup
-        doit(True)
-
-        # No allocations by migration, legacy cleanup
-        doit(False)
-
-    @mock.patch('nova.scheduler.utils.resources_from_flavor')
-    def test_delete_allocation_after_move_fail_by_migration(self, mock_rff):
-        mock_rff.return_value = {}
-
-        @mock.patch.object(self.compute, '_get_resource_tracker')
-        @mock.patch.object(self.compute, 'reportclient')
-        def doit(new_rules, mock_report, mock_rt):
-            ga = mock_report.get_allocations_for_consumer_by_provider
-            ga.return_value = new_rules
-            self.migration.source_node = 'src'
-            self.migration.uuid = uuids.migration
-            self.migration.status = 'failed'
-            self.compute._delete_allocation_after_move(self.context,
-                                                       self.instance,
-                                                       self.migration,
-                                                       mock.sentinel.flavor,
-                                                       'src')
-            self.assertFalse(mock_report.delete_allocation_for_instance.called)
-            ga.assert_called_once_with(
-                self.context, mock_rt().get_node_uuid.return_value,
-                self.migration.uuid)
-
-            old = mock_report.remove_provider_from_instance_allocation
-            if new_rules:
-                self.assertFalse(old.called)
-            else:
-                self.assertTrue(old.called)
-
-        # Allocations by migration, no legacy cleanup
-        doit(True)
-
-        # No allocations by migration, legacy cleanup
-        doit(False)
-
-    @mock.patch('nova.scheduler.utils.resources_from_flavor')
-    def test_delete_allocation_after_move_revert_by_migration(self, mock_rff):
-        mock_rff.return_value = {}
-
-        @mock.patch.object(self.compute, '_get_resource_tracker')
-        @mock.patch.object(self.compute, 'reportclient')
-        def doit(new_rules, mock_report, mock_rt):
-            a = new_rules and {'fake'} or {}
-            ga = mock_report.get_allocations_for_consumer
-            ga.return_value = a
-            self.migration.source_node = 'src'
-            self.migration.dest_node = 'dst'
-            self.migration.uuid = uuids.migration
-            self.compute._delete_allocation_after_move(self.context,
-                                                       self.instance,
-                                                       self.migration,
-                                                       mock.sentinel.flavor,
-                                                       'dst')
-            self.assertFalse(mock_report.delete_allocation_for_instance.called)
-            ga.assert_called_once_with(self.context, self.migration.uuid)
-
-            old = mock_report.remove_provider_from_instance_allocation
-            if new_rules:
-                self.assertFalse(old.called)
-            else:
-                self.assertTrue(old.called)
-
-        # Allocations by migration, no legacy cleanup
-        doit(True)
-
-        # No allocations by migration, legacy cleanup
-        doit(False)
+    def test_delete_allocation_after_move_confirm_by_migration(self):
+        with mock.patch.object(self.compute, 'reportclient') as mock_report:
+            mock_report.delete_allocation_for_instance.return_value = True
+            self.compute._delete_allocation_after_move(self.context,
+                                                       self.instance,
+                                                       self.migration)
+            mock_report.delete_allocation_for_instance.assert_called_once_with(
+                self.context, self.migration.uuid)
 
     def test_revert_allocation(self):
         """New-style migration-based allocation revert."""
 
-        @mock.patch.object(self.compute, '_get_resource_tracker')
         @mock.patch.object(self.compute, 'reportclient')
-        def doit(mock_report, mock_rt):
+        def doit(mock_report):
             cu = uuids.node
-            mock_rt.return_value.compute_nodes[self.instance.node].uuid = cu
             a = {cu: {'resources': {'DISK_GB': 1}}}
             mock_report.get_allocations_for_consumer.return_value = a
             self.migration.uuid = uuids.migration
@@ -8018,9 +7898,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
         # ...so we should have called the new style delete
         mock_delete.assert_called_once_with(self.context,
                                             self.instance,
-                                            migration,
-                                            self.instance.flavor,
-                                            self.instance.node)
+                                            migration)
 
     def test_post_live_migration_old_allocations(self):
         # We have a migrate_data with a migration...
@@ -8552,22 +8430,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
         # Make sure we set migration status to error
         self.assertEqual(migration.status, 'error')
 
-        # Run it again with migration=None and make sure we don't choke
-        self.assertRaises(test.TestingException,
-                          self.compute.prep_resize,
-                          self.context, mock.sentinel.image,
-                          instance, flavor,
-                          mock.sentinel.request_spec,
-                          {}, 'node', False,
-                          None, [])
-
-        # Make sure we only called save once (kinda obviously must be true)
         migration.save.assert_called_once_with()
         mock_notify_resize.assert_has_calls([
-            mock.call(self.context, instance, 'fake-mini',
-                      'start', flavor),
-            mock.call(self.context, instance, 'fake-mini',
-                      'end', flavor),
             mock.call(self.context, instance, 'fake-mini',
                       'start', flavor),
             mock.call(self.context, instance, 'fake-mini',
@@ -67,35 +67,6 @@ class MigrationTaskTestCase(test.NoDBTestCase):
             scheduler_client.SchedulerClient(),
             host_list=None)
 
-    @mock.patch('nova.objects.Service.get_minimum_version_multi')
-    @mock.patch('nova.availability_zones.get_host_availability_zone')
-    @mock.patch.object(scheduler_utils, 'setup_instance_group')
-    @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
-    @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
-    def test_execute_legacy_no_pre_create_migration(self, prep_resize_mock,
-                                                    sel_dest_mock, sig_mock,
-                                                    az_mock, gmv_mock):
-        sel_dest_mock.return_value = self.host_lists
-        az_mock.return_value = 'myaz'
-        task = self._generate_task()
-        legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
-        gmv_mock.return_value = 22
-        task.execute()
-
-        sig_mock.assert_called_once_with(self.context, self.request_spec)
-        task.scheduler_client.select_destinations.assert_called_once_with(
-            self.context, self.request_spec, [self.instance.uuid],
-            return_objects=True, return_alternates=True)
-        selection = self.host_lists[0][0]
-        prep_resize_mock.assert_called_once_with(
-            self.context, self.instance, legacy_request_spec['image'],
-            self.flavor, selection.service_host, None,
-            request_spec=legacy_request_spec,
-            filter_properties=self.filter_properties, node=selection.nodename,
-            clean_shutdown=self.clean_shutdown, host_list=[])
-        az_mock.assert_called_once_with(self.context, 'host1')
-        self.assertIsNone(task._migration)
-
     @mock.patch.object(objects.MigrationList, 'get_by_filters')
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@@ -2293,9 +2293,10 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch.object(conductor_manager.ComputeTaskManager,
                        '_set_vm_state_and_notify')
     @mock.patch.object(migrate.MigrationTask, 'rollback')
+    @mock.patch.object(migrate.MigrationTask, '_preallocate_migration')
     def test_cold_migrate_no_valid_host_back_in_active_state(
-            self, rollback_mock, notify_mock, select_dest_mock,
-            metadata_mock, sig_mock, spec_fc_mock, im_mock):
+            self, _preallocate_migration, rollback_mock, notify_mock,
+            select_dest_mock, metadata_mock, sig_mock, spec_fc_mock, im_mock):
         flavor = flavors.get_flavor_by_name('m1.tiny')
         inst_obj = objects.Instance(
             image_ref='fake-image_ref',
@@ -2343,9 +2344,10 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch.object(conductor_manager.ComputeTaskManager,
                        '_set_vm_state_and_notify')
     @mock.patch.object(migrate.MigrationTask, 'rollback')
+    @mock.patch.object(migrate.MigrationTask, '_preallocate_migration')
     def test_cold_migrate_no_valid_host_back_in_stopped_state(
-            self, rollback_mock, notify_mock, select_dest_mock,
-            metadata_mock, spec_fc_mock, sig_mock, im_mock):
+            self, _preallocate_migration, rollback_mock, notify_mock,
+            select_dest_mock, metadata_mock, spec_fc_mock, sig_mock, im_mock):
         flavor = flavors.get_flavor_by_name('m1.tiny')
         inst_obj = objects.Instance(
             image_ref='fake-image_ref',
@@ -2469,9 +2471,10 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
                        '_set_vm_state_and_notify')
     @mock.patch.object(migrate.MigrationTask, 'rollback')
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
+    @mock.patch.object(migrate.MigrationTask, '_preallocate_migration')
    def test_cold_migrate_exception_host_in_error_state_and_raise(
-            self, prep_resize_mock, rollback_mock, notify_mock,
-            select_dest_mock, metadata_mock, spec_fc_mock,
+            self, _preallocate_migration, prep_resize_mock, rollback_mock,
+            notify_mock, select_dest_mock, metadata_mock, spec_fc_mock,
             sig_mock, im_mock):
         flavor = flavors.get_flavor_by_name('m1.tiny')
         inst_obj = objects.Instance(
@@ -2520,7 +2523,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
             [inst_obj.uuid], return_objects=True, return_alternates=True)
         prep_resize_mock.assert_called_once_with(
             self.context, inst_obj, legacy_request_spec['image'],
-            flavor, hosts[0]['host'], None,
+            flavor, hosts[0]['host'], _preallocate_migration.return_value,
             request_spec=legacy_request_spec,
             filter_properties=legacy_filter_props,
             node=hosts[0]['nodename'], clean_shutdown=True, host_list=[])
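These conductor tests now patch MigrationTask._preallocate_migration, so whatever the mock returns is what the task passes down to prep_resize, and the assertion can simply reference _preallocate_migration.return_value. A minimal, standalone illustration of that mock pattern (hypothetical Task class, not the nova test itself):

# Standalone illustration of asserting against a patched method's return_value.
from unittest import mock


class Task(object):
    def __init__(self, rpcapi):
        self.rpcapi = rpcapi

    def _preallocate_migration(self):
        raise NotImplementedError('talks to the database in real life')

    def execute(self):
        # The (mocked) pre-created migration flows straight into the RPC call.
        migration = self._preallocate_migration()
        self.rpcapi.prep_resize(migration=migration)


rpcapi = mock.Mock()
with mock.patch.object(Task, '_preallocate_migration') as prealloc:
    Task(rpcapi).execute()
rpcapi.prep_resize.assert_called_once_with(migration=prealloc.return_value)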