Drop legacy live migrate allocation compat code

This completes the legacy migration-based allocation compat
cleanup started in I0851e2d54a1fdc82fe3291fb7e286e790f121e92.
That change handled the cold migrate flows; this one performs
the related cleanup for the live migrate flows.

The compat being removed here was added in change
I9068a5a5b47cef565802a6d58f37777464644100 in Queens.

Change-Id: I8e47cac8bab50a086b98f37c2f9f659b10009cf1
Matt Riedemann 2018-10-19 18:01:52 -04:00
parent a0563e754c
commit 355b6da96a
5 changed files with 29 additions and 128 deletions
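
For context, the compat pattern being dropped is the Queens-era
service-version gate visible in the conductor hunk below: migration-based
allocations were only created once every nova-compute service reported a
minimum version of at least 25. A minimal standalone sketch of that gate
follows; the constant name and the simplified integer argument are
illustrative only (the real helper takes a request context and looks up
the minimum nova-compute service version itself).

# Minimal sketch of the Queens-era compat gate this change removes.
# Illustrative only: the constant name and plain-integer argument are
# simplifications of the real conductor helper, which takes a request
# context and queries the minimum nova-compute service version.

MIN_COMPUTE_VERSION_FOR_MIGRATION_ALLOCS = 25


def should_do_migration_allocation(minimum_compute_service_version):
    """Return True once every nova-compute understands migration-based
    allocations held by the migration record itself.
    """
    return (minimum_compute_service_version >=
            MIN_COMPUTE_VERSION_FOR_MIGRATION_ALLOCS)


if __name__ == '__main__':
    # Every supported upgrade path is now well past service version 25, so
    # the gate is always True in practice and can be dropped along with the
    # "old-style" allocation cleanup branches it guarded.
    assert should_do_migration_allocation(25)
    assert not should_do_migration_allocation(24)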


@@ -6647,33 +6647,19 @@ class ComputeManager(manager.Manager):
if migrate_data and migrate_data.obj_attr_is_set('migration'):
migrate_data.migration.status = 'completed'
migrate_data.migration.save()
migration = migrate_data.migration
rc = self.scheduler_client.reportclient
# Check to see if our migration has its own allocations
allocs = rc.get_allocations_for_consumer(ctxt, migration.uuid)
else:
# We didn't have data on a migration, which means we can't
# look up to see if we had new-style migration-based
# allocations. This should really only happen in cases of
# a buggy virt driver or some really old component in the
# system. Log a warning so we know it happened.
allocs = None
LOG.warning('Live migration ended with no migrate_data '
'record. Unable to clean up migration-based '
'allocations which is almost certainly not '
'an expected situation.')
if allocs:
# We had a migration-based allocation that we need to handle
self._delete_allocation_after_move(ctxt,
instance,
migrate_data.migration)
else:
# No migration-based allocations, so do the old thing and
# attempt to clean up any doubled per-instance allocation
rt = self._get_resource_tracker()
rt.delete_allocation_for_migrated_instance(
ctxt, instance, source_node)
# We didn't have data on a migration, which means we can't
# look up to see if we had new-style migration-based
# allocations. This should really only happen in cases of
# a buggy virt driver. Log a warning so we know it happened.
LOG.warning('Live migration ended with no migrate_data '
'record. Unable to clean up migration-based '
'allocations for node %s which is almost certainly '
'not an expected situation.', source_node,
instance=instance)
def _consoles_enabled(self):
"""Returns whether a console is enable."""
@@ -6805,8 +6791,8 @@ class ComputeManager(manager.Manager):
if migration:
# Remove allocations created in Placement for the dest node.
# If migration is None, we must be so old we don't have placement,
# so no need to do something else.
# If migration is None, the virt driver didn't pass it which is
# a bug.
self._revert_allocation(context, instance, migration)
else:
LOG.error('Unable to revert allocations during live migration '


@@ -30,12 +30,6 @@ LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def should_do_migration_allocation(context):
minver = objects.Service.get_minimum_version_multi(context,
['nova-compute'])
return minver >= 25
def supports_extended_port_binding(context, host):
"""Checks if the compute host service is new enough to support the neutron
port binding-extended details.
@@ -73,15 +67,14 @@ class LiveMigrationTask(base.TaskBase):
self._check_instance_is_active()
self._check_host_is_up(self.source)
if should_do_migration_allocation(self.context):
self._source_cn, self._held_allocations = (
# NOTE(danms): This may raise various exceptions, which will
# propagate to the API and cause a 500. This is what we
# want, as it would indicate internal data structure corruption
# (such as missing migrations, compute nodes, etc).
migrate.replace_allocation_with_migration(self.context,
self.instance,
self.migration))
self._source_cn, self._held_allocations = (
# NOTE(danms): This may raise various exceptions, which will
# propagate to the API and cause a 500. This is what we
# want, as it would indicate internal data structure corruption
# (such as missing migrations, compute nodes, etc).
migrate.replace_allocation_with_migration(self.context,
self.instance,
self.migration))
if not self.destination:
# Either no host was specified in the API request and the user


@@ -6256,8 +6256,6 @@ class ComputeTestCase(BaseTestCase,
migrate_data=test.MatchType(
migrate_data_obj.XenapiLiveMigrateData))
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(compute_rpcapi.ComputeAPI,
'post_live_migration_at_destination')
@@ -6266,7 +6264,7 @@ class ComputeTestCase(BaseTestCase,
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch('nova.objects.Migration.save')
def test_live_migration_works_correctly(self, mock_save, mock_event,
mock_clear, mock_post, mock_pre, mock_remove_allocs):
mock_clear, mock_post, mock_pre):
# Confirm live_migration() works as expected correctly.
# creating instance testdata
c = context.get_admin_context()
@@ -6313,8 +6311,6 @@ class ComputeTestCase(BaseTestCase,
'host'], 'dest_compute': dest})
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
mock_remove_allocs.assert_called_once_with(
c, instance.uuid, self.rt.compute_nodes[NODENAME].uuid)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(compute_rpcapi.ComputeAPI,
@@ -6361,15 +6357,13 @@ class ComputeTestCase(BaseTestCase,
# cleanup
instance.destroy()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@mock.patch.object(fake.FakeDriver, 'unfilter_instance')
@mock.patch.object(compute_rpcapi.ComputeAPI,
'post_live_migration_at_destination')
@mock.patch.object(compute_manager.InstanceEvents,
'clear_events_for_instance')
def test_post_live_migration_no_shared_storage_working_correctly(self,
mock_clear, mock_post, mock_unfilter, mock_remove_allocs):
mock_clear, mock_post, mock_unfilter):
"""Confirm post_live_migration() works correctly as expected
for non shared storage migration.
"""
@@ -6423,14 +6417,9 @@ class ComputeTestCase(BaseTestCase,
mock_migrate.assert_called_once_with(c, instance, migration)
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
mock_remove_allocs.assert_called_once_with(
c, instance.uuid, self.rt.compute_nodes[NODENAME].uuid)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def test_post_live_migration_working_correctly(self, mock_notify,
mock_remove_allocs):
def test_post_live_migration_working_correctly(self, mock_notify):
# Confirm post_live_migration() works as expected correctly.
dest = 'desthost'
srchost = self.compute.host
@@ -6505,8 +6494,6 @@ class ComputeTestCase(BaseTestCase,
self.assertIn(
'Migrating instance to desthost finished successfully.',
self.stdlog.logger.output)
mock_remove_allocs.assert_called_once_with(
c, instance.uuid, self.rt.compute_nodes[NODENAME].uuid)
def test_post_live_migration_exc_on_dest_works_correctly(self):
"""Confirm that post_live_migration() completes successfully


@@ -7896,55 +7896,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
self.instance,
migration)
def test_post_live_migration_old_allocations(self):
# We have a migrate_data with a migration...
migration = objects.Migration(uuid=uuids.migration)
migration.save = mock.MagicMock()
md = objects.LibvirtLiveMigrateData(migration=migration,
is_shared_instance_path=False,
is_shared_block_storage=False)
with test.nested(
mock.patch.object(self.compute.scheduler_client,
'reportclient'),
mock.patch.object(self.compute,
'_delete_allocation_after_move'),
mock.patch.object(self.compute,
'_get_resource_tracker'),
) as (
mock_report, mock_delete, mock_rt,
):
# ...and that migration does not have allocations...
mock_report.get_allocations_for_consumer.return_value = None
self._call_post_live_migration(migrate_data=md)
# ...so we should have called the old style delete
mock_delete.assert_not_called()
fn = mock_rt.return_value.delete_allocation_for_migrated_instance
fn.assert_called_once_with(self.context, self.instance,
self.instance.node)
def test_post_live_migration_legacy(self):
# We have no migrate_data...
md = None
with test.nested(
mock.patch.object(self.compute.scheduler_client,
'reportclient'),
mock.patch.object(self.compute,
'_delete_allocation_after_move'),
mock.patch.object(self.compute,
'_get_resource_tracker'),
) as (
mock_report, mock_delete, mock_rt,
):
self._call_post_live_migration(migrate_data=md)
# ...without migrate_data, no migration allocations check...
ga = mock_report.get_allocations_for_consumer
self.assertFalse(ga.called)
# ...so we should have called the old style delete
mock_delete.assert_not_called()
fn = mock_rt.return_value.delete_allocation_for_migrated_instance
fn.assert_called_once_with(self.context, self.instance,
self.instance.node)
def test_post_live_migration_cinder_v3_api(self):
# Because live migration has succeeded, _post_live_migration
# should call attachment_delete with the original/old attachment_id


@@ -74,7 +74,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
servicegroup.API(), scheduler_client.SchedulerClient(),
self.fake_spec)
def test_execute_with_destination(self, new_mode=True):
def test_execute_with_destination(self):
dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
with test.nested(
mock.patch.object(self.task, '_check_host_is_up'),
@@ -87,21 +87,15 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
mock.patch('nova.conductor.tasks.migrate.'
'replace_allocation_with_migration'),
mock.patch('nova.conductor.tasks.live_migrate.'
'should_do_migration_allocation')
) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig,
m_alloc, mock_sda):
m_alloc):
mock_mig.return_value = "bob"
m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)
mock_sda.return_value = new_mode
self.assertEqual("bob", self.task.execute())
mock_check_up.assert_called_once_with(self.instance_host)
mock_check_dest.assert_called_once_with()
if new_mode:
allocs = mock.sentinel.allocs
else:
allocs = None
allocs = mock.sentinel.allocs
mock_claim.assert_called_once_with(
self.context, self.task.scheduler_client.reportclient,
self.instance, mock.sentinel.source_node, dest_node,
@@ -121,12 +115,9 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.migration.dest_node)
self.assertEqual(self.task.destination,
self.migration.dest_compute)
if new_mode:
m_alloc.assert_called_once_with(self.context,
self.instance,
self.migration)
else:
m_alloc.assert_not_called()
m_alloc.assert_called_once_with(self.context,
self.instance,
self.migration)
# When the task is executed with a destination it means the host is
# being forced and we don't call the scheduler, so we don't need to
# heal the request spec.
@@ -137,9 +128,6 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
# modify the request spec
self.ensure_network_metadata_mock.assert_not_called()
def test_execute_with_destination_old_school(self):
self.test_execute_with_destination(new_mode=False)
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
@@ -152,14 +140,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
mock.patch.object(self.migration, 'save'),
mock.patch('nova.conductor.tasks.migrate.'
'replace_allocation_with_migration'),
mock.patch('nova.conductor.tasks.live_migrate.'
'should_do_migration_allocation'),
) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc,
mock_sda):
) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc):
mock_find.return_value = ("found_host", "found_node")
mock_mig.return_value = "bob"
mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock())
mock_sda.return_value = True
self.assertEqual("bob", self.task.execute())
mock_check.assert_called_once_with(self.instance_host)