iSCSI volume detach with no UDID

Fixes an issue where the UDID information was not preserved in the
BDM during live migration.

Closes-Bug: 1778939
Change-Id: I81aee924093ebcbf1e43ec8c7e330b7d4a973420
This commit is contained in:
Chhavi Agarwal 2018-06-18 01:47:03 -04:00 committed by chhagarw
parent bfb8962fcc
commit 44671813c2
6 changed files with 104 additions and 18 deletions

View File

@ -1975,11 +1975,27 @@ class TestPowerVMDriver(test.NoDBTestCase):
mock_rename.assert_called_once_with(
self.drv.adapter, self.lpm_inst, self.lpm_inst.name)
def test_post_live_mig_dest(self):
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._vol_drv_iter',
autospec=True)
def test_post_live_mig_dest(self, mock_vdi):
vals = [(None, None)]
mock_vdi.return_value = vals
self.drv.post_live_migration_at_destination(
'context', self.lpm_inst, 'network_info')
self.lpm.post_live_migration_at_destination.assert_called_once_with(
'network_info', [])
'network_info', vals)
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._vol_drv_iter',
autospec=True)
def test_post_live_mig_dest_vol_drv(self, mock_vdi):
# Ensure post_live_migration_at_destination forwards the
# (bdm, vol_drv) pairs from _vol_drv_iter unchanged to the
# LiveMigrationDest task, so the task can save the BDM per volume.
bdm1 = mock.Mock()
fake_bdi = {'block_device_mapping': [bdm1]}
vals = [(bdm1, self.iscsi_vol_drv)]
mock_vdi.return_value = vals
self.drv.post_live_migration_at_destination(
'context', self.lpm_inst, 'network_info', fake_bdi)
self.lpm.post_live_migration_at_destination.assert_called_once_with(
'network_info', vals)
@mock.patch('pypowervm.tasks.memory.calculate_memory_overhead_on_host',
autospec=True)

View File

@ -318,3 +318,18 @@ class TestLPM(test.NoDBTestCase):
for network_info in network_infos:
mock_vif_post_lpm_at_source.assert_any_call(mock.ANY, mock.ANY,
mock.ANY, network_info)
@mock.patch('nova_powervm.virt.powervm.tasks.storage.SaveBDM.execute',
autospec=True)
def test_post_live_migration_at_dest(self, mock_save_bdm):
bdm1, bdm2, vol_drv1, vol_drv2 = [mock.Mock()] * 4
vals = [(bdm1, vol_drv1), (bdm2, vol_drv2)]
self.lpmdst.pre_live_vol_data = {'vscsi-vol-id': 'fake_udid',
'vscsi-vol-id2': 'fake_udid2'}
self.lpmdst.post_live_migration_at_destination('network_infos', vals)
# Assertions
for bdm, vol_drv in vals:
vol_drv.post_live_migration_at_destination.assert_called_with(
mock.ANY)
self.assertEqual(len(vals), mock_save_bdm.call_count)

View File

@ -379,6 +379,45 @@ class TestISCSIAdapter(test_vol.TestVolumeAdapter):
self.multi_vol_drv.disconnect_volume(self.slot_mgr)
mock_remove_iscsi.assert_has_calls(multi_calls, any_order=True)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid',
autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_id', autospec=True)
@mock.patch('pypowervm.tasks.hdisk.remove_iscsi', autospec=True)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
def test_disconnect_volume_no_devname(
self, mock_remove_maps, mock_remove_iscsi, mock_get_vm_id,
mock_hdisk_from_uuid):
# Ensures that if device_name not found, then mappings are not
# removed and disconnect return False.
self.vol_drv._set_udid("vstor_uuid")
mock_hdisk_from_uuid.return_value = None
mock_get_vm_id.return_value = '2'
# Run the method
status = self.vol_drv.disconnect_volume(self.slot_mgr)
# In this case no disconnect should happen
# mock_remove_maps.assert_not_called()
self.assertEqual(0, mock_remove_maps.call_count)
self.assertEqual(0, mock_remove_iscsi.call_count)
mock_hdisk_from_uuid.assert_called_with(mock.ANY, 'vstor_uuid')
self.assertFalse(status)
# Ensures that if UDID not found, then mappings are not
# removed and disconnect return False.
self.vol_drv._set_udid(None)
mock_hdisk_from_uuid.reset_mock()
# Run the method
status = self.vol_drv.disconnect_volume(self.slot_mgr)
# In this case no disconnect should happen
self.assertEqual(0, mock_remove_maps.call_count)
self.assertEqual(0, mock_remove_iscsi.call_count)
self.assertEqual(0, mock_hdisk_from_uuid.call_count)
self.assertFalse(status)
def test_min_xags(self):
xags = self.vol_drv.min_xags()
self.assertEqual(1, len(xags))
@ -541,6 +580,14 @@ class TestISCSIAdapter(test_vol.TestVolumeAdapter):
self.vol_drv.pre_live_migration_on_destination, {})
mock_discover.assert_not_called()
@mock.patch('nova_powervm.virt.powervm.volume.volume.VscsiVolumeAdapter'
'._set_udid', autospec=True)
def test_post_live_migration_at_destination(self, mock_set_udid):
volume_key = 'vscsi-' + self.serial
mig_vol_stor = {volume_key: 'udid'}
self.vol_drv.post_live_migration_at_destination(mig_vol_stor)
mock_set_udid.assert_called_with(mock.ANY, 'udid')
@mock.patch('nova_powervm.virt.powervm.volume.driver.PowerVMVolumeAdapter.'
'vios_uuids', new_callable=mock.PropertyMock)
def test_is_volume_on_vios(self, mock_vios_uuids):

View File

@ -1647,18 +1647,19 @@ class PowerVMDriver(driver.ComputeDriver):
:param instance: instance object that is migrated
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
:param block_device_info: instance block device information.
"""
LOG.info("Post live migration processing on destination host.",
instance=instance)
mig = self.live_migrations[instance.uuid]
mig.instance = instance
# Build the volume drivers
vol_drvs = self._build_vol_drivers(context, instance,
block_device_info)
bdms = self._extract_bdm(block_device_info)
# Get a volume driver iterator for volume and BDM mapping
vol_drv_iter = self._vol_drv_iter(instance, bdms)
# Run post live migration
mig.post_live_migration_at_destination(network_info, vol_drvs)
mig.post_live_migration_at_destination(network_info, vol_drv_iter)
del self.live_migrations[instance.uuid]
def _vol_drv_iter(self, instance, bdms, stg_ftsk=None):

View File

@ -32,6 +32,7 @@ from pypowervm import util
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm import media
from nova_powervm.virt.powervm.tasks import storage as tf_stg
from nova_powervm.virt.powervm import vif
from nova_powervm.virt.powervm import vm
@ -183,26 +184,26 @@ class LiveMigrationDest(LiveMigration):
self.pre_live_vol_data = migrate_data.vol_data
return migrate_data
def post_live_migration_at_destination(self, network_infos, vol_drvs):
def post_live_migration_at_destination(self, network_infos, vol_drv_iter):
"""Do post migration cleanup on destination host.
:param network_infos: instance network information
:param vol_drvs: volume drivers for the attached volumes
:param vol_drv_iter: volume driver iterator for the attached volumes
and BDM information.
"""
# The LPAR should be on this host now.
LOG.debug("Post live migration at destination.",
instance=self.instance)
# An unbounded dictionary that each volume adapter can use to persist
# data from one call to the next.
mig_vol_stor = {}
# For each volume, make sure it completes the migration
for vol_drv in vol_drvs:
for bdm, vol_drv in vol_drv_iter:
LOG.info('Performing post migration for volume %(volume)s',
dict(volume=vol_drv.volume_id), instance=self.instance)
try:
vol_drv.post_live_migration_at_destination(mig_vol_stor)
vol_drv.post_live_migration_at_destination(
self.pre_live_vol_data)
# Save the BDM for the updated connection info.
tf_stg.SaveBDM(bdm, self.instance).execute()
except Exception:
LOG.exception("PowerVM error cleaning up destination host "
"after migration.", instance=self.instance)

View File

@ -165,6 +165,13 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
instance_name=self.instance.name)
raise p_exc.VolumePreMigrationFailed(**ex_args)
def post_live_migration_at_destination(self, mig_data):
"""This method will update the connection info with the volume udid."""
volume_key = 'vscsi-' + self.volume_id
if volume_key in mig_data:
self._set_udid(mig_data[volume_key])
def is_volume_on_vios(self, vios_w):
"""Returns whether or not the volume is on a VIOS.
@ -346,12 +353,11 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
if udid:
# Get the device name using UniqueDeviceID Identifier.
device_name = vios_w.hdisk_from_uuid(udid)
if not device_name:
# We lost our bdm data.
if not udid or not device_name:
# If we have no device name, at this point
# we should not continue. Subsequent scrub code on future
# deploys will clean this up.
# we should not continue. Subsequent scrub code on
# future deploys will clean this up.
LOG.warning(
"Disconnect Volume: The backing hdisk for volume "
"%(volume_id)s on Virtual I/O Server %(vios)s is "