Changes required for Cold Migration

- "block_device_info" required to delete instance that migrated form
  one host to another

- Skip the RMC state check during cold migration (second sketch below)

- Skip the check for reducing disk size in the case of cold migration
  (third sketch below)

- Fix NVRAM download error during cold migration (fourth sketch below)

  On NovaLink hosts, if a VM is deployed using a custom compute
  template with "Remote restart enabled" set to "False", its NVRAM
  data is never uploaded to the Swift repository. Cold migration of
  such a VM then fails with an NVRAM-not-found exception. This fix
  handles that case and continues the cold migration instead of
  failing it.
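
A condensed sketch of the confirm_migration change (all names are
taken from the driver diff below; surrounding code is elided):

    # Resolve the volume mappings before destroying the source VM, so
    # attached volumes are cleaned up after a cross-host migration.
    destroy_disks = not self.disk_dvr.capabilities['shared_storage']
    block_device_info = self._get_block_device_info(context, instance)
    self._destroy(context, instance, block_device_info=block_device_info,
                  destroy_disks=destroy_disks, shutdown=False)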
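
A sketch of the source-side pre-check change (power_state comes from
nova.compute, as in the live migration diff below):

    # A cold-migrated VM is powered off and has no RMC connection, so
    # only gate running VMs on migration readiness.
    if self.instance.power_state == power_state.RUNNING:
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)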
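
A sketch of the resize pre-check change in migrate_disk_and_power_off
(taken from the driver diff below):

    # Shrinking the root disk only matters for a same-host resize; a
    # cold migration to another host leaves the disk size unchanged.
    same_host = dest == self.get_host_ip_addr()
    if same_host:
        if flavor and flavor.root_gb < instance.root_gb:
            raise exception.InstanceFaultRollback(
                exception.ResizeError(
                    reason=_('Cannot reduce disk size.')))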
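
A condensed sketch of the NVRAM handling in the Create task (the
api.NVRAMDownloadException type is from the tasks diff below):

    try:
        if self.nvram_mgr is not None:
            data = self.nvram_mgr.fetch(self.instance)
    except api.NVRAMDownloadException:
        # No NVRAM was ever uploaded for a VM deployed with remote
        # restart disabled; continue the cold migration without it.
        LOG.warning('NVRAM data could not be fetched for instance.',
                    instance=self.instance)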

Change-Id: I8ad1fc2852e0a281c8a48b207ea4fd2f2beac467
rajat-0 2019-08-23 04:22:03 -04:00 committed by rajas113
parent eb186af938
commit aa130c44dc
5 changed files with 133 additions and 20 deletions


@@ -184,6 +184,15 @@ class TestPowerVMDriver(test.NoDBTestCase):
self.inst = objects.Instance(**powervm.TEST_INST_SPAWNING)
self.inst_ibmi = objects.Instance(**powervm.TEST_INST_SPAWNING)
self.inst_ibmi.system_metadata = {'image_os_distro': 'ibmi'}
self.migration = objects.Migration(
id=1,
new_instance_type_id=7,
dest_compute='dest_compute',
dest_node=None,
dest_host=None,
source_compute='source_compute',
source_node='source_node',
status='migrating')
def test_get_available_nodes(self):
self.flags(host='hostname')
@@ -1366,6 +1375,32 @@ class TestPowerVMDriver(test.NoDBTestCase):
# Validate the rollbacks were called.
self.assertEqual(2, self.vol_drv.connect_volume.call_count)
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.'
'_gen_resize_name', autospec=True)
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.'
'finish_migration', autospec=True)
def test_cold_migration(self, mock_finish_migration, mock_rename):
self.migration.dest_compute = 'dst'
self.migration.dest_node = 'dstnode'
self.migration.source_compute = 'src'
self.migration.source_node = 'srcnode'
self.migration.root_gb = 12
# BDMs
mock_bdms = self._fake_bdms()
self.drv.migrate_disk_and_power_off(
'context', self.inst, self.migration.dest_host,
self.migration, 'network_info', mock_bdms)
mock_rename.assert_called_once_with(self.inst, same_host=False)
disk_info = {}
self.drv.finish_migration(
'context', self.migration, self.inst, disk_info, 'network_info',
powervm.IMAGE1, 'resize_instance', block_device_info=mock_bdms)
mock_finish_migration.assert_called_once_with(
'context', self.migration, self.inst, disk_info, 'network_info',
powervm.IMAGE1, 'resize_instance', block_device_info=mock_bdms)
@mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True)
def test_migrate_disk_and_power_off(self, mock_bld_slot_mgr):
"""Validates the PowerVM driver migrate / resize operation."""
@@ -1378,12 +1413,14 @@ class TestPowerVMDriver(test.NoDBTestCase):
# BDMs
mock_bdms = self._fake_bdms()
- # Catch root disk resize smaller.
- small_root = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=9)
- self.assertRaises(
-     exc.InstanceFaultRollback, self.drv.migrate_disk_and_power_off,
-     'context', self.inst, 'dest', small_root, 'network_info',
-     mock_bdms)
+ dest_host = host + '1'
+ # Catch root disk resize smaller
+ if dest_host == host:
+     small_root = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=9)
+     self.assertRaises(
+         exc.InstanceFaultRollback, self.drv.migrate_disk_and_power_off,
+         'context', self.inst, 'dest', small_root, 'network_info',
+         mock_bdms)
# Boot disk resize
boot_flav = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=12)
@@ -1397,7 +1434,6 @@ class TestPowerVMDriver(test.NoDBTestCase):
'fake',
'rename_vm_migrate_instance-00000001',
]
- dest_host = host + '1'
with fx.DriverTaskFlow() as taskflow_fix:
self.drv.migrate_disk_and_power_off(
'context', self.inst, dest_host, boot_flav, 'network_info',
@@ -2068,13 +2104,19 @@ class TestPowerVMDriver(test.NoDBTestCase):
mock_power_off.assert_called_once_with(
self.drv.adapter, self.inst, force_immediate=False, timeout=500)
+ @mock.patch('nova_powervm.virt.powervm.driver.'
+             'PowerVMDriver._get_block_device_info')
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._destroy')
- def test_confirm_migration_diff_host(self, mock_destroy):
+ def test_confirm_migration_diff_host(self, mock_destroy,
+                                      mock_block_device_info):
mock_mig = mock.Mock(source_compute='host1', dest_compute='host2')
self.drv.confirm_migration('context', mock_mig, self.lpm_inst,
'network_info')
+ mock_block_device_info.assert_called_once_with('context',
+                                                self.lpm_inst)
mock_destroy.assert_called_once_with(
- 'context', self.lpm_inst, block_device_info=None,
+ 'context', self.lpm_inst,
+ block_device_info=mock_block_device_info.return_value,
destroy_disks=False, shutdown=False)
@mock.patch('nova_powervm.virt.powervm.vm.rename', autospec=True)


@@ -129,6 +129,65 @@ class TestLPM(test.NoDBTestCase):
mock_vterm_close.assert_called_once_with(
self.apt, mock_wrap.uuid)
@mock.patch('pypowervm.tasks.storage.ScrubOrphanStorageForLpar',
autospec=True)
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM',
autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',
autospec=True)
@mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True)
def test_lpm_source_for_cold_migration(self, mock_vterm_close,
mock_get_wrap,
mock_cd, mock_scrub):
self.host_mig_data['active_migrations_supported'] = 4
self.host_mig_data['active_migrations_in_progress'] = 2
# Test the bad path first, then patch in values to make it succeed
mock_wrap = mock.Mock(id=123)
mock_get_wrap.return_value = mock_wrap
self.assertRaises(exception.MigrationPreCheckError,
self.lpmsrc.check_source, 'context',
'block_device_info', [])
# Patch the proc compat fields, to get further
pm = mock.PropertyMock(return_value='b')
type(mock_wrap).proc_compat_mode = pm
self.assertRaises(exception.MigrationPreCheckError,
self.lpmsrc.check_source, 'context',
'block_device_info', [])
pm = mock.PropertyMock(return_value='Not_Migrating')
type(mock_wrap).migration_state = pm
# Get a volume driver.
mock_vol_drv = mock.MagicMock()
# Finally, good path.
self.lpmsrc.check_source('context', 'block_device_info',
[mock_vol_drv])
# Ensure we built a scrubber.
mock_scrub.assert_called_with(mock.ANY, 123)
# Ensure we added the subtasks to remove the vopts.
mock_cd.return_value.dlt_vopt.assert_called_once_with(
mock.ANY, stg_ftsk=mock_scrub.return_value,
remove_mappings=False)
# And ensure the scrubber was executed
mock_scrub.return_value.execute.assert_called_once_with()
mock_vol_drv.pre_live_migration_on_source.assert_called_once_with(
{})
# Ensure migration counts are validated
self.host_mig_data['active_migrations_in_progress'] = 4
self.assertRaises(exception.MigrationPreCheckError,
self.lpmsrc.check_source, 'context',
'block_device_info', [])
# Ensure the vterm was closed
mock_vterm_close.assert_called_once_with(
self.apt, mock_wrap.uuid)
def test_lpm_dest(self):
src_compute_info = {'stats': {'memory_region_size': 1}}
dst_compute_info = {'stats': {'memory_region_size': 1}}


@@ -1190,12 +1190,12 @@ class PowerVMDriver(driver.ComputeDriver):
disk_info = {}
- if flavor and flavor.root_gb < instance.root_gb:
-     raise exception.InstanceFaultRollback(
-         exception.ResizeError(reason=_('Cannot reduce disk size.')))
same_host = dest == self.get_host_ip_addr()
if same_host:
+     if flavor and flavor.root_gb < instance.root_gb:
+         raise exception.InstanceFaultRollback(
+             exception.ResizeError(
+                 reason=_('Cannot reduce disk size.')))
self._log_operation('resize', instance)
else:
self._log_operation('migration', instance)
@@ -1412,7 +1412,8 @@ class PowerVMDriver(driver.ComputeDriver):
# Destroy the old VM.
destroy_disks = not self.disk_dvr.capabilities['shared_storage']
- self._destroy(context, instance, block_device_info=None,
+ block_device_info = self._get_block_device_info(context, instance)
+ self._destroy(context, instance, block_device_info=block_device_info,
destroy_disks=destroy_disks, shutdown=False)
def finish_revert_migration(self, context, instance, network_info,


@@ -18,6 +18,7 @@
import abc
import six
from nova.compute import power_state
from nova import exception
from nova.objects import migrate_data as mig_obj
from oslo_log import log as logging
@@ -291,7 +292,9 @@ class LiveMigrationSrc(LiveMigration):
raise exception.MigrationPreCheckError(reason=msg)
# Check if VM is ready for migration
- self._check_migration_ready(lpar_w, self.drvr.host_wrapper)
+ # Skip in case of cold migration
+ if self.instance.power_state == power_state.RUNNING:
+     self._check_migration_ready(lpar_w, self.drvr.host_wrapper)
if lpar_w.migration_state != 'Not_Migrating':
msg = (_("Live migration of instance '%(name)s' failed because "


@@ -1,4 +1,4 @@
- # Copyright 2015, 2018 IBM Corp.
+ # Copyright 2015, 2019 IBM Corp.
#
# All Rights Reserved.
#
@@ -21,6 +21,7 @@ from pypowervm.tasks import storage as pvm_stg
from taskflow import task
from taskflow.types import failure as task_fail
from nova_powervm.virt.powervm.nvram import api
from nova_powervm.virt.powervm import vm
from nova.compute import task_states
@@ -97,10 +98,17 @@ class Create(task.Task):
def execute(self):
data = None
- if self.nvram_mgr is not None:
-     LOG.info('Fetching NVRAM.', instance=self.instance)
-     data = self.nvram_mgr.fetch(self.instance)
-     LOG.debug('NVRAM data is: %s', data, instance=self.instance)
+ try:
+     if self.nvram_mgr is not None:
+         LOG.info('Fetching NVRAM.', instance=self.instance)
+         data = self.nvram_mgr.fetch(self.instance)
+         LOG.debug('NVRAM data is: %s', data, instance=self.instance)
+ except api.NVRAMDownloadException:
+     # Fetching NVRAM data for the instance can fail if the instance
+     # does not have the remote restart attribute set. In that case
+     # we don't want to fail the operation.
+     LOG.warning('NVRAM data could not be fetched for instance.',
+                 instance=self.instance)
wrap = vm.create_lpar(self.adapter, self.host_wrapper, self.instance,
nvram=data, slot_mgr=self.slot_mgr)