Changes required for Cold Migration

- "block_device_info" required to delete instance that migrated form
  one host to another
- Skip check for RMC state during migration
- Skipping check for reducing disk size in case of cold migration.

Change-Id: I8ad1fc2852e0a281c8a48b207ea4fd2f2beac467
Author: rajat-0
Date: 2019-08-23 04:22:03 -04:00
Committed by: rajas113
Commit: 0545663d8a (parent: 6ea1819963)
4 changed files with 89 additions and 15 deletions
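Taken together, the three changes gate two existing pre-checks on the kind of migration being performed: the root-disk shrink check now applies only to a same-host resize, and the RMC readiness check only to a running (live-migrating) partition. A minimal, self-contained sketch of that gating follows; the helper names are illustrative, not nova-powervm APIs, and only the conditions mirror the diffs below.

RUNNING = 1   # value of nova.compute.power_state.RUNNING
SHUTDOWN = 4  # value of nova.compute.power_state.SHUTDOWN


def should_check_disk_shrink(dest_ip, local_ip):
    # 'Cannot reduce disk size' is only enforced for a same-host
    # resize; a cold migration to another host skips the check.
    return dest_ip == local_ip


def should_check_rmc(power_state_value):
    # The RMC readiness check matters only while the partition runs,
    # i.e. for live migration; a powered-off (cold) migration skips it.
    return power_state_value == RUNNING


print(should_check_disk_shrink('9.0.0.1', '9.0.0.1'))  # True: resize
print(should_check_rmc(SHUTDOWN))  # False: cold migration, skipped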

nova_powervm/tests/virt/powervm/test_driver.py

@@ -1378,13 +1378,17 @@ class TestPowerVMDriver(test.NoDBTestCase):
         # BDMs
         mock_bdms = self._fake_bdms()
-        # Catch root disk resize smaller.
-        small_root = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=9)
-        self.assertRaises(
-            exc.InstanceFaultRollback, self.drv.migrate_disk_and_power_off,
-            'context', self.inst, 'dest', small_root, 'network_info',
-            mock_bdms)
+        # IBM cherry-pick start
+        dest_host = host + '1'
+        # Catch root disk resize smaller
+        if dest_host == host:
+            small_root = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=9)
+            self.assertRaises(
+                exc.InstanceFaultRollback, self.drv.migrate_disk_and_power_off,
+                'context', self.inst, 'dest', small_root, 'network_info',
+                mock_bdms)
+        # IBM cherry-pick end
         # Boot disk resize
         boot_flav = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=12)
         # Tasks expected to be added for migrate
@@ -1397,7 +1401,6 @@ class TestPowerVMDriver(test.NoDBTestCase):
             'fake',
             'rename_vm_migrate_instance-00000001',
         ]
-        dest_host = host + '1'
         with fx.DriverTaskFlow() as taskflow_fix:
             self.drv.migrate_disk_and_power_off(
                 'context', self.inst, dest_host, boot_flav, 'network_info',
@@ -2068,13 +2071,19 @@ class TestPowerVMDriver(test.NoDBTestCase):
         mock_power_off.assert_called_once_with(
             self.drv.adapter, self.inst, force_immediate=False, timeout=500)
 
+    @mock.patch('nova_powervm.virt.powervm.driver.'
+                'PowerVMDriver._get_block_device_info')
     @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._destroy')
-    def test_confirm_migration_diff_host(self, mock_destroy):
+    def test_confirm_migration_diff_host(self, mock_destroy,
+                                         mock_block_device_info):
         mock_mig = mock.Mock(source_compute='host1', dest_compute='host2')
         self.drv.confirm_migration('context', mock_mig, self.lpm_inst,
                                    'network_info')
+        mock_block_device_info.assert_called_once_with('context',
+                                                       self.lpm_inst)
         mock_destroy.assert_called_once_with(
-            'context', self.lpm_inst, block_device_info=None,
+            'context', self.lpm_inst,
+            block_device_info=mock_block_device_info.return_value,
             destroy_disks=False, shutdown=False)
 
     @mock.patch('nova_powervm.virt.powervm.vm.rename', autospec=True)
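A note on the rewritten test signature above: with stacked @mock.patch decorators, the bottom-most patch is applied first and its mock becomes the first argument after self, which is why mock_destroy still precedes the new mock_block_device_info. A standalone demonstration; the patched os.path targets are arbitrary stand-ins chosen for the demo.

from unittest import mock


@mock.patch('os.path.exists')  # outermost patch -> second mock argument
@mock.patch('os.path.isdir')   # innermost patch -> first mock argument
def demo(mock_isdir, mock_exists):
    mock_isdir.return_value = True
    mock_exists.return_value = False
    return mock_isdir('x'), mock_exists('x')


print(demo())  # (True, False)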

nova_powervm/tests/virt/powervm/test_lpm.py

@@ -129,6 +129,65 @@ class TestLPM(test.NoDBTestCase):
         mock_vterm_close.assert_called_once_with(
             self.apt, mock_wrap.uuid)
+
+    @mock.patch('pypowervm.tasks.storage.ScrubOrphanStorageForLpar',
+                autospec=True)
+    @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM',
+                autospec=True)
+    @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',
+                autospec=True)
+    @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True)
+    def test_lpm_source_for_cold_migration(self, mock_vterm_close,
+                                           mock_get_wrap,
+                                           mock_cd, mock_scrub):
+        self.host_mig_data['active_migrations_supported'] = 4
+        self.host_mig_data['active_migrations_in_progress'] = 2
+
+        # Test the bad path first, then patch in values to make succeed
+        mock_wrap = mock.Mock(id=123)
+        mock_get_wrap.return_value = mock_wrap
+
+        self.assertRaises(exception.MigrationPreCheckError,
+                          self.lpmsrc.check_source, 'context',
+                          'block_device_info', [])
+
+        # Patch the proc compat fields, to get further
+        pm = mock.PropertyMock(return_value='b')
+        type(mock_wrap).proc_compat_mode = pm
+        self.assertRaises(exception.MigrationPreCheckError,
+                          self.lpmsrc.check_source, 'context',
+                          'block_device_info', [])
+
+        pm = mock.PropertyMock(return_value='Not_Migrating')
+        type(mock_wrap).migration_state = pm
+
+        # Get a volume driver.
+        mock_vol_drv = mock.MagicMock()
+
+        # Finally, good path.
+        self.lpmsrc.check_source('context', 'block_device_info',
+                                 [mock_vol_drv])
+
+        # Ensure we built a scrubber.
+        mock_scrub.assert_called_with(mock.ANY, 123)
+
+        # Ensure we added the subtasks to remove the vopts.
+        mock_cd.return_value.dlt_vopt.assert_called_once_with(
+            mock.ANY, stg_ftsk=mock_scrub.return_value,
+            remove_mappings=False)
+
+        # And ensure the scrubber was executed
+        mock_scrub.return_value.execute.assert_called_once_with()
+        mock_vol_drv.pre_live_migration_on_source.assert_called_once_with(
+            {})
+
+        # Ensure migration counts are validated
+        self.host_mig_data['active_migrations_in_progress'] = 4
+        self.assertRaises(exception.MigrationPreCheckError,
+                          self.lpmsrc.check_source, 'context',
+                          'block_device_info', [])
+
+        # Ensure the vterm was closed
+        mock_vterm_close.assert_called_once_with(
+            self.apt, mock_wrap.uuid)
 
     def test_lpm_dest(self):
         src_compute_info = {'stats': {'memory_region_size': 1}}
         dst_compute_info = {'stats': {'memory_region_size': 1}}
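The new test drives check_source through its failure modes by attaching PropertyMock objects to the type of the wrapper mock; assigning to the mock instance itself would not intercept plain attribute reads. A minimal standalone example of the same technique:

from unittest import mock

wrapper = mock.Mock()
# A PropertyMock must be set on the type, not the instance, for
# ordinary attribute access to go through it.
type(wrapper).migration_state = mock.PropertyMock(
    return_value='Not_Migrating')

print(wrapper.migration_state)  # 'Not_Migrating'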

nova_powervm/virt/powervm/driver.py

@@ -1190,12 +1190,14 @@ class PowerVMDriver(driver.ComputeDriver):
         disk_info = {}
-        if flavor and flavor.root_gb < instance.root_gb:
-            raise exception.InstanceFaultRollback(
-                exception.ResizeError(reason=_('Cannot reduce disk size.')))
         same_host = dest == self.get_host_ip_addr()
         if same_host:
+            # IBM cherry-pick start
+            if flavor and flavor.root_gb < instance.root_gb:
+                raise exception.InstanceFaultRollback(
+                    exception.ResizeError(
+                        reason=_('Cannot reduce disk size.')))
+            # IBM cherry-pick end
             self._log_operation('resize', instance)
         else:
             self._log_operation('migration', instance)
@@ -1412,7 +1414,8 @@ class PowerVMDriver(driver.ComputeDriver):
         # Destroy the old VM.
         destroy_disks = not self.disk_dvr.capabilities['shared_storage']
-        self._destroy(context, instance, block_device_info=None,
+        block_device_info = self._get_block_device_info(context, instance)
+        self._destroy(context, instance, block_device_info=block_device_info,
                       destroy_disks=destroy_disks, shutdown=False)
 
     def finish_revert_migration(self, context, instance, network_info,
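The confirm-migration change above matters because destroying the source VM after a cross-host cold migration has to clean up volume connections, which needs the real block device mapping rather than None. A hedged sketch of the new call order, assuming a stand-in FakeDriver and an illustrative dict shape:

class FakeDriver:
    """Stand-in for PowerVMDriver; only the call order is the point."""

    def _get_block_device_info(self, context, instance):
        # Illustrative shape; nova derives this from the BDM records.
        return {'block_device_mapping': []}

    def _destroy(self, context, instance, block_device_info,
                 destroy_disks, shutdown):
        print('destroy: bdi=%s, disks=%s' % (block_device_info,
                                             destroy_disks))

    def confirm_migration(self, context, instance, shared_storage):
        # Look up the block device info first, then pass it through so
        # attached volumes can be detached on the source host.
        bdi = self._get_block_device_info(context, instance)
        self._destroy(context, instance, block_device_info=bdi,
                      destroy_disks=not shared_storage, shutdown=False)


FakeDriver().confirm_migration('ctx', 'inst', shared_storage=True)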

nova_powervm/virt/powervm/live_migration.py

@@ -18,6 +18,7 @@
 import abc
 import six
 
+from nova.compute import power_state
 from nova import exception
 from nova.objects import migrate_data as mig_obj
 from oslo_log import log as logging
@@ -291,7 +292,9 @@ class LiveMigrationSrc(LiveMigration):
             raise exception.MigrationPreCheckError(reason=msg)
 
         # Check if VM is ready for migration
-        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)
+        # Skip in case of cold migration
+        if self.instance.power_state == power_state.RUNNING:
+            self._check_migration_ready(lpar_w, self.drvr.host_wrapper)
 
         if lpar_w.migration_state != 'Not_Migrating':
             msg = (_("Live migration of instance '%(name)s' failed because "