Fix regression with live migration on shared storage

Commit c1ccc1a316 introduced a regression when a NUMA live migration was performed on shared storage.

Live migration support for the power management feature means driver.cleanup() must be called for all NUMA instances so that pCPUs that are no longer used can be offlined once the instance has been migrated away. However, this change exposed an issue in the disk cleanup logic: Nova must never delete the instance directory when that directory lives on shared storage (e.g. when the nova instances path is backed by NFS).

This patch fixes that behavior so that live migration functions correctly on shared storage.

Closes-Bug: #2080436
Change-Id: Ia2bbb5b4ac728563a8aabd857ed0503449991df1
(cherry picked from commit 035b8404fc)
Matthew N Heler, 2024-09-11 12:28:15 -05:00 (committed by Matthew Heler)
commit 57e037b507, parent cd4e58173a
5 changed files with 49 additions and 9 deletions
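For context, a minimal standalone sketch of the corrected flag logic described above (the function name and signature here are illustrative, not Nova's actual _live_migration_cleanup_flags() implementation):

def live_migration_cleanup_flags(is_shared_instance_path,
                                 is_shared_block_storage,
                                 has_vpmem=False, has_mdevs=False,
                                 power_management_possible=False):
    # Cleanup may still be needed on shared storage, e.g. to offline
    # pCPUs for power management or to clean up vpmem/mdevs ...
    do_cleanup = (not is_shared_instance_path or has_vpmem or has_mdevs or
                  power_management_possible)
    # ... but disks and the instance directory must never be destroyed
    # when either the block storage or the instances path is shared.
    destroy_disks = not (is_shared_block_storage or is_shared_instance_path)
    return do_cleanup, destroy_disks

# NFS-backed instances path with power management enabled: cleanup runs,
# but nothing on the shared storage is deleted.
assert live_migration_cleanup_flags(
    is_shared_instance_path=True, is_shared_block_storage=False,
    power_management_possible=True) == (True, False)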


@@ -9196,7 +9196,9 @@ class ComputeManager(manager.Manager):
             # vpmem must be cleaned
             do_cleanup = (not migrate_data.is_shared_instance_path or
                           has_vpmem or has_mdevs or power_management_possible)
-            destroy_disks = not migrate_data.is_shared_block_storage
+            destroy_disks = not (
+                migrate_data.is_shared_block_storage or
+                migrate_data.is_shared_instance_path)
         elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
             # NOTE(claudiub): We need to cleanup any zombie Planned VM.
             do_cleanup = True


@@ -11363,7 +11363,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
             migrate_data, migr_ctxt)
         self.assertTrue(do_cleanup)
-        self.assertTrue(destroy_disks)
+        self.assertFalse(destroy_disks)

     def test_live_migration_cleanup_flags_block_migrate_libvirt(self):
         migrate_data = objects.LibvirtLiveMigrateData(
@@ -11390,7 +11390,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
             migrate_data)
         self.assertFalse(do_cleanup)
-        self.assertTrue(destroy_disks)
+        self.assertFalse(destroy_disks)

     def test_live_migration_cleanup_flags_shared_libvirt(self):
         migrate_data = objects.LibvirtLiveMigrateData(
@@ -11401,6 +11401,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         self.assertFalse(do_cleanup)
         self.assertFalse(destroy_disks)

+    def test_live_migration_cleanup_flags_shared_path_libvirt_mdev(self):
+        migrate_data = objects.LibvirtLiveMigrateData(
+            is_shared_block_storage=False,
+            is_shared_instance_path=True,
+            target_mdevs={})
+        do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
+            migrate_data)
+        self.assertTrue(do_cleanup)
+        self.assertFalse(destroy_disks)
+
     def test_live_migration_cleanup_flags_live_migrate(self):
         do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
             {})


@@ -20859,7 +20859,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         # is_shared_block_storage=True and destroy_disks=False.
         instance = objects.Instance(self.context, **self.test_instance)
         migrate_data = objects.LibvirtLiveMigrateData(
-            is_shared_block_storage=True)
+            is_shared_block_storage=True,
+            is_shared_instance_path=False)
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
         drvr.cleanup(
             self.context, instance, network_info={}, destroy_disks=False,
@@ -20869,6 +20870,25 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.assertTrue(instance.cleaned)
         save.assert_called_once_with()

+    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
+                       return_value=True)
+    @mock.patch.object(objects.Instance, 'save')
+    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
+    def test_cleanup_migrate_data_block_storage_and_share_instance_dir(
+        self, _undefine_domain, save, delete_instance_files
+    ):
+        # Test the case when the instance directory is on shared storage
+        # (e.g. NFS) and the instance is booted from volume.
+        instance = objects.Instance(self.context, **self.test_instance)
+        migrate_data = objects.LibvirtLiveMigrateData(
+            is_shared_block_storage=True,
+            is_shared_instance_path=True)
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+        drvr.cleanup(
+            self.context, instance, network_info={}, destroy_disks=False,
+            migrate_data=migrate_data, destroy_vifs=False)
+        delete_instance_files.assert_not_called()
+
     @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
                        return_value=True)
     @mock.patch.object(objects.Instance, 'save')


@@ -1648,12 +1648,12 @@ class LibvirtDriver(driver.ComputeDriver):
             cleanup_instance_dir = True
             cleanup_instance_disks = True
         else:
-            # NOTE(mdbooth): I think the theory here was that if this is a
-            # migration with shared block storage then we need to delete the
-            # instance directory because that's not shared. I'm pretty sure
-            # this is wrong.
+            # NOTE(mheler): For shared block storage we only need to clean up
+            # the instance directory when it's not on a shared path.
             if migrate_data and 'is_shared_block_storage' in migrate_data:
-                cleanup_instance_dir = migrate_data.is_shared_block_storage
+                cleanup_instance_dir = (
+                    migrate_data.is_shared_block_storage and
+                    not migrate_data.is_shared_instance_path)

             # NOTE(lyarwood): The following workaround allows operators to
             # ensure that non-shared instance directories are removed after an
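As an illustration of the corrected condition in driver.cleanup(), a small sketch (the helper below is illustrative only, not part of Nova's API) that prints when the local instance directory would now be removed:

def should_cleanup_instance_dir(is_shared_block_storage,
                                is_shared_instance_path):
    # Mirror the fixed condition: only delete the local instance directory
    # for shared block storage when the instances path itself is not a
    # shared mount (e.g. not NFS).
    return is_shared_block_storage and not is_shared_instance_path

for shared_block, shared_path in [(True, False), (True, True),
                                  (False, True), (False, False)]:
    print(f"shared_block={shared_block} shared_path={shared_path} "
          f"-> delete instance dir: "
          f"{should_cleanup_instance_dir(shared_block, shared_path)}")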


@@ -0,0 +1,8 @@
+---
+fixes:
+  - |
+    Fixes a regression for live migration on shared storage that
+    was removing the backing disk and instance folder during the
+    cleanup of a virtual machine post live migration.
+    See `bug 2080436
+    <https://bugs.launchpad.net/nova/+bug/2080436>`__ for details.