Merge "partial support for live migration with specific resources"

Zuul 2020-04-08 16:56:59 +00:00 committed by Gerrit Code Review
commit 2fe5d90436
11 changed files with 140 additions and 156 deletions

View File

@@ -3,7 +3,8 @@
"payload":{
"$ref":"common_payloads/InstanceActionPayload.json#",
"nova_object.data":{
"action_initiator_user": "admin"
"action_initiator_user": "admin",
"task_state": "migrating"
}
},
"priority":"INFO",

View File

@@ -3,7 +3,8 @@
"payload": {
"$ref": "common_payloads/InstanceActionPayload.json#",
"nova_object.data": {
"action_initiator_user": "admin"
"action_initiator_user": "admin",
"task_state": "migrating"
}
},
"priority": "INFO",

View File

@@ -3,7 +3,8 @@
"payload": {
"$ref": "common_payloads/InstanceActionPayload.json#",
"nova_object.data": {
"action_initiator_user": "admin"
"action_initiator_user": "admin",
"task_state": "migrating"
}
},
"priority": "INFO",

View File

@@ -7663,12 +7663,14 @@ class ComputeManager(manager.Manager):
migration)
LOG.debug('destination check data is %s', dest_check_data)
try:
allocs = self.reportclient.get_allocations_for_consumer(
ctxt, instance.uuid)
migrate_data = self.compute_rpcapi.check_can_live_migrate_source(
ctxt, instance, dest_check_data)
if ('src_supports_numa_live_migration' in migrate_data and
migrate_data.src_supports_numa_live_migration):
migrate_data = self._live_migration_claim(
ctxt, instance, migrate_data, migration, limits)
ctxt, instance, migrate_data, migration, limits, allocs)
elif 'dst_supports_numa_live_migration' in dest_check_data:
LOG.info('Destination was ready for NUMA live migration, '
'but source is either too old, or is set to an '
@@ -7688,7 +7690,7 @@ class ComputeManager(manager.Manager):
return migrate_data
def _live_migration_claim(self, ctxt, instance, migrate_data,
migration, limits):
migration, limits, allocs):
"""Runs on the destination and does a resources claim, if necessary.
Currently, only NUMA live migrations require it.
@@ -7707,7 +7709,7 @@ class ComputeManager(manager.Manager):
# migration.dest_node here and must use self._get_nodename().
claim = self.rt.live_migration_claim(
ctxt, instance, self._get_nodename(instance), migration,
limits)
limits, allocs)
LOG.debug('Created live migration claim.', instance=instance)
except exception.ComputeResourcesUnavailable as e:
raise exception.MigrationPreCheckError(
@@ -8424,6 +8426,17 @@ class ComputeManager(manager.Manager):
# destination, which will update it
source_node = instance.node
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
if do_cleanup:
LOG.debug('Calling driver.cleanup from _post_live_migration',
instance=instance)
self.driver.cleanup(ctxt, instance, unplug_nw_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
post_at_dest_success = True
@@ -8438,26 +8451,6 @@ class ComputeManager(manager.Manager):
LOG.exception("Post live migration at destination %s failed",
dest, instance=instance, error=error)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
if do_cleanup:
# NOTE(artom) By this time post_live_migration_at_destination()
# will have applied the migration context and saved the instance,
# writing a new instance NUMA topology in the process (if the
# instance has one). Here on the source, some drivers will call
# instance.save() in their cleanup() method, which would clobber
# the new instance NUMA topology saved by the destination with the
# old fields in our instance object. To prevent this, refresh our
# instance.
instance.refresh()
LOG.debug('Calling driver.cleanup from _post_live_migration',
instance=instance)
self.driver.cleanup(ctxt, instance, unplug_nw_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
self.instance_events.clear_events_for_instance(instance)
# NOTE(timello): make sure we update available resources on source
@@ -8705,28 +8698,6 @@ class ComputeManager(manager.Manager):
'rollback; compute driver did not provide migrate_data',
instance=instance)
# TODO(artom) drop_move_claim_at_destination() is new in RPC 5.3, only
# call it if we performed a NUMA-aware live migration (which implies us
# being able to send RPC 5.3). To check this, we can use the
# src_supports_numa_live_migration flag, as it will be set if and only
# if:
# - dst_supports_numa_live_migration made its way to the source
# (meaning both dest and source are new and conductor can speak
# RPC 5.3)
# - src_supports_numa_live_migration was set by the source driver and
# passed the send-RPC-5.3 check.
# This check can be removed in RPC 6.0.
if ('src_supports_numa_live_migration' in migrate_data and
migrate_data.src_supports_numa_live_migration):
LOG.debug('Calling destination to drop move claim.',
instance=instance)
self.compute_rpcapi.drop_move_claim_at_destination(context,
instance, dest)
instance.task_state = None
instance.progress = 0
instance.drop_migration_context()
instance.save(expected_task_state=[task_states.MIGRATING])
# NOTE(tr3buchet): setup networks on source host (really it's re-setup
# for nova-network)
# NOTE(mriedem): This is a no-op for neutron.
@@ -8785,6 +8756,34 @@ class ComputeManager(manager.Manager):
'during live migration rollback.',
instance=instance)
# NOTE(luyao): We drop move_claim and migration_context after cleanup
# is complete, to ensure the specific resources claimed on destination
# are released safely.
# TODO(artom) drop_move_claim_at_destination() is new in RPC 5.3, only
# call it if we performed a NUMA-aware live migration (which implies us
# being able to send RPC 5.3). To check this, we can use the
# src_supports_numa_live_migration flag, as it will be set if and only
# if:
# - dst_supports_numa_live_migration made its way to the source
# (meaning both dest and source are new and conductor can speak
# RPC 5.3)
# - src_supports_numa_live_migration was set by the source driver and
# passed the send-RPC-5.3 check.
# This check can be removed in RPC 6.0.
if ('src_supports_numa_live_migration' in migrate_data and
migrate_data.src_supports_numa_live_migration):
LOG.debug('Calling destination to drop move claim.',
instance=instance)
self.compute_rpcapi.drop_move_claim_at_destination(context,
instance, dest)
# NOTE(luyao): We only update instance info after rollback operations
# are complete
instance.task_state = None
instance.progress = 0
instance.drop_migration_context()
instance.save(expected_task_state=[task_states.MIGRATING])
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
compute_utils.notify_about_instance_action(context, instance,
@@ -8793,6 +8792,9 @@ class ComputeManager(manager.Manager):
phase=fields.NotificationPhase.END,
bdms=bdms)
# TODO(luyao): set migration status to 'failed' but not 'error',
# which means rollback_live_migration is done: we have successfully
# cleaned up and returned the instance back to normal status.
self._set_migration_status(migration, migration_status)
@wrap_exception()
@@ -8865,9 +8867,10 @@ class ComputeManager(manager.Manager):
# check_can_live_migrate_destination()
self.rt.free_pci_device_claims_for_instance(context, instance)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
with instance.mutated_migration_context():
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
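
Taken together, the compute manager hunks above make three related changes: the destination pre-check now fetches the instance's placement allocations and passes them down through _live_migration_claim() into the resource tracker, source-side driver cleanup now runs before post_live_migration_at_destination() (so the instance.refresh() workaround is no longer needed), and rollback drops the destination move claim and migration context only after cleanup has finished. The standalone Python sketch below illustrates the new claim ordering; the method names mirror the diff, but the stub classes and simplified signatures are invented for illustration and are not Nova's real API.

# Illustrative sketch only: stub objects stand in for Nova's placement
# report client and resource tracker; method names follow the diff above.

class StubReportClient:
    """Stands in for the placement report client."""

    def get_allocations_for_consumer(self, ctxt, consumer_uuid):
        # Placement allocations are keyed by resource provider UUID.
        return {'rp-uuid-1': {'resources': {'VCPU': 2, 'MEMORY_MB': 2048}}}


class StubResourceTracker:
    """Stands in for the compute node's resource tracker."""

    def live_migration_claim(self, ctxt, instance, nodename, migration,
                             limits, allocs):
        # The claim is now built against the instance's real placement
        # allocations instead of the previous hard-coded None.
        return {'node': nodename, 'allocations': allocs}


def destination_precheck(ctxt, instance, migration, limits,
                         reportclient, rt, src_supports_numa=True):
    """Simplified view of the reworked destination pre-check flow."""
    # 1. Fetch the instance's current allocations from placement first.
    allocs = reportclient.get_allocations_for_consumer(
        ctxt, instance['uuid'])
    # 2. Only NUMA-aware live migrations need a destination claim; when
    #    the source supports one, the allocations are passed into it.
    if src_supports_numa:
        return rt.live_migration_claim(
            ctxt, instance, 'dest-node', migration, limits, allocs)
    return None


if __name__ == '__main__':
    claim = destination_precheck(
        ctxt=None, instance={'uuid': 'fake-instance-uuid'}, migration=None,
        limits=None, reportclient=StubReportClient(),
        rt=StubResourceTracker())
    print(claim)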

View File

@@ -209,7 +209,7 @@ class ResourceTracker(object):
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def live_migration_claim(self, context, instance, nodename, migration,
limits):
limits, allocs):
"""Builds a MoveClaim for a live migration.
:param context: The request context.
@@ -219,15 +219,14 @@ class ResourceTracker(object):
migration.
:param limits: A SchedulerLimits object from when the scheduler
selected the destination host.
:param allocs: The placement allocation records for the instance.
:returns: A MoveClaim for this live migration.
"""
# Flavor and image cannot change during a live migration.
instance_type = instance.flavor
image_meta = instance.image_meta
# TODO(Luyao) will pass allocations to live_migration_claim after the
# live migration change is done, now just set it None to _move_claim
return self._move_claim(context, instance, instance_type, nodename,
migration, None, move_type='live-migration',
migration, allocs, move_type='live-migration',
image_meta=image_meta, limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
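
For reference, the allocs argument that live_migration_claim() now forwards to _move_claim() is the instance's placement allocation record, as returned by get_allocations_for_consumer() in the manager hunk above. The snippet below shows a representative shape for such a record; the provider UUIDs, resource classes and amounts are invented for illustration only.

# Representative (invented) shape of the placement allocations that are
# now threaded through live_migration_claim() into _move_claim().
ALLOCATIONS = {
    # One entry per resource provider the instance consumes from.
    'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': {
        'resources': {'VCPU': 4, 'MEMORY_MB': 4096, 'DISK_GB': 20},
    },
    # Specific resources (for example a vGPU) may be reported by a
    # nested provider and show up as a separate entry.
    'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': {
        'resources': {'VGPU': 1},
    },
}


def total_by_resource_class(allocations):
    """Sum the requested amounts across all providers."""
    totals = {}
    for alloc in allocations.values():
        for rc, amount in alloc['resources'].items():
            totals[rc] = totals.get(rc, 0) + amount
    return totals


if __name__ == '__main__':
    print(total_by_resource_class(ALLOCATIONS))
    # {'VCPU': 4, 'MEMORY_MB': 4096, 'DISK_GB': 20, 'VGPU': 1}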

View File

@@ -12,6 +12,7 @@
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
import six
from nova import availability_zones
@@ -93,12 +94,12 @@ class LiveMigrationTask(base.TaskBase):
# live migrating with a specific destination host so the scheduler
# is bypassed. There are still some minimal checks performed here
# though.
source_node, dest_node = self._check_requested_destination()
# Now that we're semi-confident in the force specified host, we
# need to copy the source compute node allocations in Placement
# to the destination compute node. Normally select_destinations()
# in the scheduler would do this for us, but when forcing the
# target host we don't call the scheduler.
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
source_node, dest_node = (
self._check_compatible_with_source_hypervisor(
self.destination))
# TODO(mriedem): Call select_destinations() with a
# skip_filters=True flag so the scheduler does the work of claiming
# resources on the destination in Placement but still bypass the
@@ -111,11 +112,20 @@ class LiveMigrationTask(base.TaskBase):
# this assumption fails then placement will return consumer
# generation conflict and this call raises an AllocationUpdateFailed
# exception. We let that propagate here to abort the migration.
# NOTE(luyao): When forcing the target host we don't call the
# scheduler, which means we need to get allocations from placement
# first, then claim resources in the resource tracker on the
# destination host based on these allocations.
scheduler_utils.claim_resources_on_destination(
self.context, self.report_client,
self.instance, source_node, dest_node,
source_allocations=self._held_allocations,
consumer_generation=None)
try:
self._check_requested_destination()
except Exception:
with excutils.save_and_reraise_exception():
self._remove_host_allocations(dest_node.uuid)
# dest_node is a ComputeNode object, so we need to get the actual
# node name off it to set in the Migration object below.
@@ -264,15 +274,7 @@ class LiveMigrationTask(base.TaskBase):
raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
"""Performs basic pre-live migration checks for the forced host.
:returns: tuple of (source ComputeNode, destination ComputeNode)
"""
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
source_node, dest_node = self._check_compatible_with_source_hypervisor(
self.destination)
"""Performs basic pre-live migration checks for the forced host."""
# NOTE(gibi): This code path is used when the live migration is forced
# to a target host and skipping the scheduler. Such operation is
# rejected for servers with nested resource allocations since
@@ -289,7 +291,6 @@ class LiveMigrationTask(base.TaskBase):
raise exception.MigrationPreCheckError(
reason=(_('Unable to force live migrate instance %s '
'across cells.') % self.instance.uuid))
return source_node, dest_node
def _check_destination_is_not_source(self):
if self.destination == self.source:
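
The conductor hunks above reorder the forced-destination path: the cheap local checks (destination is not the source, host is up, enough memory, compatible hypervisor) now run first, the source allocations are then copied to the destination in placement, and only after that does _check_requested_destination() call check_can_live_migrate_destination() over RPC, with the destination allocations released if that pre-check fails. A minimal standalone sketch of that ordering, using invented stub callables rather than the real task methods, might look like this:

# Illustrative sketch of the reordered forced-host flow; the stubs below
# are invented stand-ins, not LiveMigrationTask's real collaborators.

class ForcedHostFlow:
    def __init__(self, checks, claim_on_dest, rpc_precheck, remove_allocs):
        self.checks = checks                # cheap local checks
        self.claim_on_dest = claim_on_dest  # copy allocations in placement
        self.rpc_precheck = rpc_precheck    # destination RPC pre-check
        self.remove_allocs = remove_allocs  # cleanup on failure

    def execute(self):
        # 1. Run the cheap checks before touching placement.
        for check in self.checks:
            check()
        # 2. Copy the source allocations to the destination in placement,
        #    since a forced host bypasses the scheduler.
        self.claim_on_dest()
        # 3. Only then run the RPC pre-check against the destination; if
        #    it fails, release the destination allocations again.
        try:
            return self.rpc_precheck()
        except Exception:
            self.remove_allocs()
            raise


if __name__ == '__main__':
    flow = ForcedHostFlow(
        checks=[lambda: print('local checks passed')],
        claim_on_dest=lambda: print('claimed on destination'),
        rpc_precheck=lambda: 'migrate_data',
        remove_allocs=lambda: print('released destination allocations'),
    )
    print(flow.execute())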

View File

@@ -73,6 +73,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
@mock.patch.object(fakelibvirt.Domain, 'undefine')
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_volume_connector')
@mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
@mock.patch.object(fakelibvirt.Domain, 'migrateToURI3')
@@ -100,7 +101,8 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
mock_host_get_connection,
mock_migrate_to_uri,
mock_get_job_info,
mock_get_volume_connector):
mock_get_volume_connector,
mock_undefine):
"""Regression test for bug #1595962.
If the graphical consoles VNC and SPICE are disabled, the
@@ -120,6 +122,12 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
version=fakelibvirt.FAKE_LIBVIRT_VERSION,
hv_version=fakelibvirt.FAKE_QEMU_VERSION)
mock_host_get_connection.return_value = fake_connection
# We invoke cleanup on the source host first, which currently calls the
# undefine method. Since the functional test links all compute services
# to the same connection, we need to mock the undefine method to avoid
# triggering a 'Domain not found' error in the subsequent rpc call
# post_live_migration_at_destination.
mock_undefine.return_value = True
server_attr = dict(name='server1',
imageRef=self.image_id,

View File

@@ -3058,10 +3058,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
post_claim_md,
self.compute._live_migration_claim(
self.context, instance, md, migration,
mock.sentinel.limits))
mock.sentinel.limits, None))
mock_lm_claim.assert_called_once_with(
self.context, instance, 'fake-dest-node', migration,
mock.sentinel.limits)
mock.sentinel.limits, None)
mock_post_claim_migrate_data.assert_called_once_with(
self.context, instance, md, mock_claim)
@@ -3086,10 +3086,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
exception.MigrationPreCheckError,
self.compute._live_migration_claim,
self.context, instance, objects.LibvirtLiveMigrateData(),
migration, mock.sentinel.limits)
migration, mock.sentinel.limits, None)
mock_lm_claim.assert_called_once_with(
self.context, instance, 'fake-dest-node', migration,
mock.sentinel.limits)
mock.sentinel.limits, None)
mock_get_nodename.assert_called_once_with(instance)
mock_post_claim_migrate_data.assert_not_called()
@@ -3230,7 +3230,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'Destination was ready for NUMA live migration'))
else:
mock_lm_claim.assert_called_once_with(
self.context, instance, mig_data, migration, limits)
self.context, instance, mig_data, migration, limits, None)
self.assertEqual(post_claim_md, result)
mock_check_clean.assert_called_once_with(self.context,
dest_check_data)
@@ -9581,13 +9581,11 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute, 'compute_rpcapi')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(objects.Instance, 'refresh')
def _do_call(refresh, nwapi, notify, rpc, update):
def _do_call(nwapi, notify, rpc, update):
bdms = objects.BlockDeviceMappingList(objects=[])
result = self.compute._post_live_migration(
self.context, self.instance, 'foo', *args, source_bdms=bdms,
**kwargs)
refresh.assert_called_once_with()
return result
mock_rt = self._mock_rt()
@@ -9742,8 +9740,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute, 'update_available_resource')
@mock.patch.object(self.compute, '_update_scheduler_instance_info')
@mock.patch.object(self.compute, '_clean_instance_console_tokens')
@mock.patch.object(objects.Instance, 'refresh')
def _test(_refresh, _clean_instance_console_tokens,
def _test(_clean_instance_console_tokens,
_update_scheduler_instance_info, update_available_resource,
driver_cleanup, _live_migration_cleanup_flags,
post_live_migration_at_destination,
@@ -9757,7 +9754,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
post_live_migration_at_source.assert_called_once_with(
self.context, self.instance,
test.MatchType(network_model.NetworkInfo))
_refresh.assert_called_once_with()
driver_cleanup.assert_called_once_with(
self.context, self.instance,
test.MatchType(network_model.NetworkInfo), destroy_disks=False,

View File

@@ -3032,7 +3032,8 @@ class TestLiveMigration(BaseTestCase):
) as (mock_from_instance, mock_migration_save, mock_instance_save,
mock_update, mock_pci_claim_instance, mock_update_usage):
claim = self.rt.live_migration_claim(ctxt, instance, _NODENAME,
migration, limits=None)
migration, limits=None,
allocs=None)
self.assertEqual(42, claim.migration.id)
# Check that we didn't set the status to 'pre-migrating', like we
# do for cold migrations, but which doesn't exist for live

View File

@@ -97,23 +97,32 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
with test.nested(
mock.patch.object(self.task, '_check_host_is_up'),
mock.patch.object(self.task, '_check_requested_destination',
return_value=(mock.sentinel.source_node,
dest_node)),
mock.patch.object(self.task, '_check_requested_destination'),
mock.patch.object(scheduler_utils,
'claim_resources_on_destination'),
mock.patch.object(self.migration, 'save'),
mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
mock.patch('nova.conductor.tasks.migrate.'
'replace_allocation_with_migration'),
mock.patch.object(self.task, '_check_destination_is_not_source'),
mock.patch.object(self.task,
'_check_destination_has_enough_memory'),
mock.patch.object(self.task,
'_check_compatible_with_source_hypervisor',
return_value=(mock.sentinel.source_node,
dest_node)),
) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig,
m_alloc):
m_alloc, m_check_diff, m_check_enough_mem, m_check_compatible):
mock_mig.return_value = "bob"
m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)
self.assertEqual("bob", self.task.execute())
mock_check_up.assert_called_once_with(self.instance_host)
mock_check_up.assert_has_calls([
mock.call(self.instance_host), mock.call(self.destination)])
mock_check_dest.assert_called_once_with()
m_check_diff.assert_called_once()
m_check_enough_mem.assert_called_once()
m_check_compatible.assert_called_once()
allocs = mock.sentinel.allocs
mock_claim.assert_called_once_with(
self.context, self.task.report_client,
@@ -283,61 +292,16 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.assertRaises(exception.ComputeHostNotFound,
self.task._check_host_is_up, "host")
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
@mock.patch.object(servicegroup.API, 'service_is_up')
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check, mock_is_up,
mock_get_info, mock_get_host):
mock_get_host.return_value = "service"
mock_is_up.return_value = True
hypervisor_details = objects.ComputeNode(
hypervisor_type="a",
hypervisor_version=6.1,
free_ram_mb=513,
memory_mb=512,
ram_allocation_ratio=1.0)
mock_get_info.return_value = hypervisor_details
mock_check.return_value = "migrate_data"
self.task.limits = fake_limits1
with test.nested(
mock.patch.object(self.task.network_api,
'supports_port_binding_extension',
return_value=False),
mock.patch.object(self.task, '_check_can_migrate_pci')):
self.assertEqual((hypervisor_details, hypervisor_details),
self.task._check_requested_destination())
self.assertEqual("migrate_data", self.task.migrate_data)
mock_get_host.assert_called_once_with(self.context, self.destination)
mock_is_up.assert_called_once_with("service")
self.assertEqual([mock.call(self.destination),
mock.call(self.instance_host),
mock.call(self.destination)],
mock_get_info.call_args_list)
mock_check.assert_called_once_with(self.context, self.instance,
self.destination, self.block_migration, self.disk_over_commit,
self.task.migration, fake_limits1)
def test_check_requested_destination_fails_with_same_dest(self):
def test_check_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
self.task._check_destination_is_not_source)
@mock.patch.object(objects.Service, 'get_by_compute_host',
side_effect=exception.ComputeHostNotFound(host='host'))
def test_check_requested_destination_fails_when_destination_is_up(self,
mock):
self.assertRaises(exception.ComputeHostNotFound,
self.task._check_requested_destination)
@mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
@mock.patch.object(objects.ComputeNode,
'get_first_node_by_host_for_old_compat')
def test_check_requested_destination_fails_with_not_enough_memory(
self, mock_get_first, mock_is_up):
def test_check_destination_fails_with_not_enough_memory(
self, mock_get_first):
mock_get_first.return_value = (
objects.ComputeNode(free_ram_mb=513,
memory_mb=1024,
@@ -347,47 +311,55 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
# ratio reduces the total available RAM to 410MB
# (1024 * 0.9 - (1024 - 513))
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
mock_is_up.assert_called_once_with(self.destination)
self.task._check_destination_has_enough_memory)
mock_get_first.assert_called_once_with(self.context, self.destination)
@mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_check_destination_has_enough_memory')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
def test_check_requested_destination_fails_with_hypervisor_diff(
self, mock_get_info, mock_check, mock_is_up):
def test_check_compatible_fails_with_hypervisor_diff(
self, mock_get_info):
mock_get_info.side_effect = [
objects.ComputeNode(hypervisor_type='b'),
objects.ComputeNode(hypervisor_type='a')]
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
mock_is_up.assert_called_once_with(self.destination)
mock_check.assert_called_once_with()
self.task._check_compatible_with_source_hypervisor,
self.destination)
self.assertEqual([mock.call(self.instance_host),
mock.call(self.destination)],
mock_get_info.call_args_list)
@mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_check_destination_has_enough_memory')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
def test_check_requested_destination_fails_with_hypervisor_too_old(
self, mock_get_info, mock_check, mock_is_up):
def test_check_compatible_fails_with_hypervisor_too_old(
self, mock_get_info):
host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
mock_get_info.side_effect = [objects.ComputeNode(**host1),
objects.ComputeNode(**host2)]
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
mock_is_up.assert_called_once_with(self.destination)
mock_check.assert_called_once_with()
self.task._check_compatible_with_source_hypervisor,
self.destination)
self.assertEqual([mock.call(self.instance_host),
mock.call(self.destination)],
mock_get_info.call_args_list)
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check):
mock_check.return_value = "migrate_data"
self.task.limits = fake_limits1
with test.nested(
mock.patch.object(self.task.network_api,
'supports_port_binding_extension',
return_value=False),
mock.patch.object(self.task, '_check_can_migrate_pci')):
self.assertIsNone(self.task._check_requested_destination())
self.assertEqual("migrate_data", self.task.migrate_data)
mock_check.assert_called_once_with(self.context, self.instance,
self.destination, self.block_migration, self.disk_over_commit,
self.task.migration, fake_limits1)
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
@mock.patch.object(servicegroup.API, 'service_is_up')

View File

@@ -140,6 +140,7 @@ def fake_instance_obj(context, obj_instance_class=None, **updates):
inst.old_flavor = None
inst.new_flavor = None
inst.resources = None
inst.migration_context = None
inst.obj_reset_changes()
return inst