Add pre-LPM method in the volume driver

The current volume driver does not have a separate pre live migration
method. During live migration, VSCSI attached volumes must be discovered
on the target host VIOSes, while the SCSI connections themselves are moved
over automatically. This patch separates the discovery and connect logic so
the discovery half can be used by live migration.

This change only addresses VSCSI needs; it does not address NPIV attached
volumes.

Change-Id: I56f10b5f41c3fdef7afd723eac3f6cbe3256ccba
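For orientation, the sketch below condenses the destination-host flow this
patch introduces: the compute driver builds one volume driver per attached
volume and hands them to the live migration task, which asks each of them to
discover its hdisk on the target VIOSes before the migration runs. It is an
illustrative sketch only, not part of the patch: pre_live_migration_on_destination
and VolumePreMigrationFailed are names the patch introduces, while
FakeVscsiVolumeAdapter, fake_discover and the VIOS names are invented here.

# Illustrative sketch only -- not part of the patch.  FakeVscsiVolumeAdapter,
# fake_discover and the data below are stand-ins for the real
# VscsiVolumeAdapter and the pypowervm hdisk discovery used in the diff.


class VolumePreMigrationFailed(Exception):
    """Raised when a volume cannot be discovered on any destination VIOS."""


class FakeVscsiVolumeAdapter(object):
    def __init__(self, volume_id, vios_list, discover_on_vios):
        self.volume_id = volume_id
        self.vios_list = vios_list
        self._discover = discover_on_vios

    def pre_live_migration_on_destination(self):
        # Discovery only: the SCSI mappings themselves are moved by the
        # migration, so no connect/mapping work happens here.
        found = False
        for vios in self.vios_list:
            status, device_name = self._discover(vios, self.volume_id)
            found = found or (device_name is not None and status == 'ok')
        if not found:
            raise VolumePreMigrationFailed(self.volume_id)


def pre_live_migration(vol_drvs):
    # Mirrors the destination-side loop added to the live migration task:
    # every attached volume must be discoverable before the migration runs.
    for vol_drv in vol_drvs:
        vol_drv.pre_live_migration_on_destination()


if __name__ == '__main__':
    def fake_discover(vios, vol_id):
        # Pretend the hdisk is only visible on vios2.
        return ('ok', 'hdisk4') if vios == 'vios2' else ('not_found', None)

    adapters = [FakeVscsiVolumeAdapter('vol-1', ['vios1', 'vios2'],
                                       fake_discover)]
    pre_live_migration(adapters)  # succeeds: vol-1 was discovered on vios2

In the real patch below, the mapping work stays in _connect_volume (queued on
the transaction FeedTask); the new pre-LPM hook performs discovery only.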
@@ -72,7 +72,8 @@ class VolumeAdapter(fixtures.Fixture):
     def setUp(self):
         super(VolumeAdapter, self).setUp()
         self._std_vol_adpt = mock.patch('nova_powervm.virt.powervm.volume.'
-                                        'vscsi.VscsiVolumeAdapter')
+                                        'vscsi.VscsiVolumeAdapter',
+                                        __name__='MockVSCSI')
         self.std_vol_adpt = self._std_vol_adpt.start()
         # We want to mock out the connection_info individually so it gives
         # back a new mock on every call. That's because the vol id is
@@ -132,7 +132,7 @@ class TestPowerVMDriver(test.TestCase):
     @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
     def test_get_volume_connector(self, mock_getuuid):
         mock_getuuid.return_value = '1234'
-        vol_connector = self.drv.get_volume_connector(None)
+        vol_connector = self.drv.get_volume_connector(mock.Mock())
         self.assertIsNotNone(vol_connector['wwpns'])
         self.assertIsNotNone(vol_connector['host'])
 
@@ -1073,8 +1073,9 @@ class TestPowerVMDriver(test.TestCase):
         self.assertEqual('src_data', src_data)
 
     def test_pre_live_migr(self):
+        block_device_info = self._fake_bdms()
         self.drv.pre_live_migration(
-            'context', self.lpm_inst, 'block_device_info', 'network_info',
+            'context', self.lpm_inst, block_device_info, 'network_info',
             'disk_info', migrate_data='migrate_data')
 
     def test_live_migration(self):
@@ -114,7 +114,7 @@ class TestLPM(test.TestCase):
     def test_pre_live_mig(self):
         self.lpmdst.pre_live_migration('context', 'block_device_info',
                                        'network_info', 'disk_info',
-                                       {})
+                                       {}, [])
 
     @mock.patch('pypowervm.tasks.migration.migrate_lpar')
     def test_live_migration(self, mock_migr):
@@ -84,6 +84,23 @@ class TestVSCSIAdapter(test.TestCase):
         self.udid = (
             '01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDA1Rg==')
 
+    @mock.patch('pypowervm.tasks.hdisk.lua_recovery')
+    def test_pre_live_migration(self, mock_discover):
+        # The mock return values
+        mock_discover.return_value = (
+            hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')
+
+        # Run the method
+        self.vol_drv.pre_live_migration_on_destination()
+
+        # Test exception path
+        mock_discover.return_value = (
+            hdisk.LUAStatus.DEVICE_IN_USE, 'devname', 'udid')
+
+        # Run the method
+        self.assertRaises(p_exc.VolumePreMigrationFailed,
+                          self.vol_drv.pre_live_migration_on_destination)
+
     @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
     @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
     @mock.patch('pypowervm.tasks.hdisk.lua_recovery')
@@ -982,8 +982,17 @@ class PowerVMDriver(driver.ComputeDriver):
         LOG.info(_LI("Pre live migration processing."),
                  instance=instance)
         mig = self.live_migrations[instance.uuid]
+
+        # Get a volume driver for each volume
+        vol_drvs = []
+        bdms = self._extract_bdm(block_device_info)
+        for bdm in bdms or []:
+            vol_drvs.append(
+                self._get_inst_vol_adpt(
+                    context, instance, conn_info=bdm.get('connection_info')))
+
         mig.pre_live_migration(context, block_device_info, network_info,
-                               disk_info, migrate_data)
+                               disk_info, migrate_data, vol_drvs)
 
     def live_migration(self, context, instance, dest,
                        post_method, recover_method, block_migration=False,
@@ -1233,7 +1242,7 @@ class PowerVMDriver(driver.ComputeDriver):
         LOG.debug('Volume Adapter returned for connection_info=%s' %
                   conn_info)
         LOG.debug('Volume Adapter class %(cls)s for instance %(inst)s' %
-                  {'cls': vol_cls, 'inst': instance})
+                  {'cls': vol_cls.__name__, 'inst': instance.name})
         return vol_cls(self.adapter, self.host_uuid,
                        instance, conn_info, tx_mgr=tx_mgr)
 
@@ -109,3 +109,8 @@ class VolumeAttachFailed(nex.NovaException):
 class VolumeDetachFailed(nex.NovaException):
     msg_fmt = _("Unable to detach volume (id: %(volume_id)s) from virtual "
                 "machine %(instance_name)s. %(reason)s")
+
+
+class VolumePreMigrationFailed(nex.NovaException):
+    msg_fmt = _("Unable to perform pre live migration steps on volume (id: "
+                "%(volume_id)s) from virtual machine %(instance_name)s.")
@@ -17,7 +17,7 @@
 
 import abc
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from pypowervm.tasks import management_console as mgmt_task
 from pypowervm.tasks import migration as mig
 
@@ -67,6 +67,11 @@ class LiveMigrationCapacity(exception.NovaException):
                 "migrations are currently running.")
 
 
+class LiveMigrationVolume(exception.NovaException):
+    msg_fmt = _("Cannot migrate %(name)s because the volume %(volume)s "
+                "cannot be attached on the destination host %(host)s.")
+
+
 def _verify_migration_capacity(host_w, instance):
     """Check that the counts are valid for in progress and supported."""
     mig_stats = host_w.migration_data
@@ -140,7 +145,7 @@ class LiveMigrationDest(LiveMigration):
         return self.dest_data
 
     def pre_live_migration(self, context, block_device_info, network_info,
-                           disk_info, migrate_data):
+                           disk_info, migrate_data, vol_drvs):
         """Prepare an instance for live migration
 
@@ -149,7 +154,8 @@
         :param block_device_info: instance block device information
         :param network_info: instance network information
         :param disk_info: instance disk information
-        :param migrate_data: implementation specific data dict.
+        :param migrate_data: implementation specific data dict
+        :param vol_drvs: volume drivers for the attached volumes
         """
         LOG.debug('Running pre live migration on destination.',
                   instance=self.instance)
@@ -161,6 +167,18 @@
         if pub_key is not None:
             mgmt_task.add_authorized_key(self.drvr.adapter, pub_key)
 
+        # For each volume, make sure it's ready to migrate
+        for vol_drv in vol_drvs:
+            LOG.info(_LI('Performing pre migration for volume %(volume)s'),
+                     dict(volume=vol_drv.volume_id))
+            try:
+                vol_drv.pre_live_migration_on_destination()
+            except Exception:
+                # It failed.
+                raise LiveMigrationVolume(
+                    host=self.drvr.host_wrapper.system_name,
+                    name=self.instance.name, volume=vol_drv.volume_id)
+
     def post_live_migration_at_destination(self, network_info):
         """Do post migration cleanup on destination host.
 
@@ -65,11 +65,24 @@ class PowerVMVolumeAdapter(object):
 
     @property
     def vm_id(self):
-        """Return the short ID (not UUID) of the LPAR for our instance."""
+        """Return the short ID (not UUID) of the LPAR for our instance.
+
+        This method is unavailable during a pre live migration call since
+        there is no instance of the VM on the destination host at the time.
+        """
         if self._vm_id is None:
             self._vm_id = vm.get_vm_id(self.adapter, self.vm_uuid)
         return self._vm_id
 
+    @property
+    def volume_id(self):
+        """Method to return the volume id.
+
+        Every driver must implement this method if the default impl will
+        not work for their data.
+        """
+        return self.connection_info['data']['volume_id']
+
     def reset_tx_mgr(self, tx_mgr=None):
         """Resets the pypowervm transaction FeedTask to a new value.
 
@@ -94,6 +107,14 @@ class PowerVMVolumeAdapter(object):
         """List of pypowervm XAGs needed to support this adapter."""
         raise NotImplementedError()
 
+    def pre_live_migration_on_destination(self):
+        """Perform pre live migration steps for the volume on the target host.
+
+        This method performs any pre live migration that is needed.
+
+        """
+        raise NotImplementedError()
+
     def connect_volume(self):
         """Connects the volume."""
         self._connect_volume()
@@ -76,6 +76,72 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # SCSI mapping is for the connections between VIOS and client VM
         return [pvm_vios.VIOS.xags.SCSI_MAPPING]
 
+    def pre_live_migration_on_destination(self):
+        """Perform pre live migration steps for the volume on the target host.
+
+        This method performs any pre live migration that is needed.
+
+        """
+        volume_id = self.volume_id
+        found = False
+        # Iterate through host vios list to find valid hdisks.
+        for vios_w in self.tx_mgr.feed:
+            status, device_name, udid = self._discover_volume_on_vios(
+                vios_w, volume_id, migr=True)
+            # If we found one, no need to check the others.
+            found = found or self._good_discovery(status, device_name, udid)
+
+        if not found:
+            ex_args = dict(volume_id=volume_id,
+                           instance_name=self.instance.name)
+            raise p_exc.VolumePreMigrationFailed(**ex_args)
+
+    def _good_discovery(self, status, device_name, udid):
+        """Checks the hdisk discovery results for a good discovery."""
+        return device_name is not None and status in [
+            hdisk.LUAStatus.DEVICE_AVAILABLE,
+            hdisk.LUAStatus.FOUND_ITL_ERR]
+
+    def _discover_volume_on_vios(self, vios_w, volume_id, migr=False):
+        """Discovers an hdisk on a single vios for the volume.
+
+        :param vios_w: VIOS wrapper to process
+        :param volume_id: Volume to discover
+        :param migr: Specifies whether this call is for a migration on the
+                     destination host
+        :returns: Status of the volume or None
+        :returns: Device name or None
+        :returns: LUN or None
+        """
+        # Get the initiatior WWPNs, targets and Lun for the given VIOS.
+        vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)
+
+        # Build the ITL map and discover the hdisks on the Virtual I/O
+        # Server (if any).
+        itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
+        if len(itls) == 0:
+            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.'
+                      % {'vios': vios_w.name, 'volume_id': volume_id})
+            return None, None, None
+
+        status, device_name, udid = (
+            hdisk.discover_hdisk(self.adapter, vios_w.uuid, itls, self.vm_id)
+            if not migr else hdisk.lua_recovery(
+                self.adapter, vios_w.uuid, itls))
+
+        if self._good_discovery(status, device_name, udid):
+            LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
+                         'volume %(volume_id)s. Status code: %(status)s.'),
+                     {'hdisk': device_name, 'vios': vios_w.name,
+                      'volume_id': volume_id, 'status': str(status)})
+        elif status == hdisk.LUAStatus.DEVICE_IN_USE:
+            LOG.warn(_LW('Discovered device %(dev)s for volume %(volume)s '
+                         'on %(vios)s is in use. Error code: %(status)s.'),
+                     {'dev': device_name, 'volume': volume_id,
+                      'vios': vios_w.name, 'status': str(status)})
+
+        return status, device_name, udid
+
     def _connect_volume(self):
         """Connects the volume."""
         # Get the initiators
@@ -120,27 +186,13 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                      (could be the Virtual I/O Server does not have connectivity
                      to the hdisk).
             """
+            status, device_name, udid = self._discover_volume_on_vios(
+                vios_w, volume_id)
-            # Get the initiatior WWPNs, targets and Lun for the given VIOS.
-            vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)
 
-            # Build the ITL map and discover the hdisks on the Virtual I/O
-            # Server (if any).
-            itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
-            if len(itls) == 0:
-                LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.'
-                          % {'vios': vios_w.name, 'volume_id': volume_id})
-                return False
 
-            status, device_name, udid = hdisk.discover_hdisk(
-                self.adapter, vios_w.uuid, itls, self.vm_id)
-            if device_name is not None and status in [
-                    hdisk.LUAStatus.DEVICE_AVAILABLE,
-                    hdisk.LUAStatus.FOUND_ITL_ERR]:
-                LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
-                             'volume %(volume_id)s. Status code: %(status)s.'),
-                         {'hdisk': device_name, 'vios': vios_w.name,
-                          'volume_id': volume_id, 'status': str(status)})
 
+            if self._good_discovery(status, device_name, udid):
                 # Found a hdisk on this Virtual I/O Server. Add the action to
                 # map it to the VM when the tx_mgr is executed.
                 self._add_append_mapping(vios_w.uuid, device_name)
@@ -152,11 +204,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 
                 # Valid attachment
                 return True
-            elif status == hdisk.LUAStatus.DEVICE_IN_USE:
-                LOG.warn(_LW('Discovered device %(dev)s for volume %(volume)s '
-                             'on %(vios)s is in use. Error code: %(status)s.'),
-                         {'dev': device_name, 'volume': volume_id,
-                          'vios': vios_w.name, 'status': str(status)})
 
             return False
 
     def _disconnect_volume(self):
@@ -325,8 +373,8 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 
        A PowerVM system may have multiple Virtual I/O Servers to virtualize
        the I/O to the virtual machines. Each Virtual I/O server may have their
-       own set of initiator WWPNs, target WWPNs and Lun on which hdisk is
-       mapped.It will determine and return the ITLs for the given VIOS.
+       own set of initiator WWPNs, target WWPNs and Lun on which hdisk is
+       mapped. It will determine and return the ITLs for the given VIOS.
 
        :param vios_w: A virtual I/O Server wrapper.
        :return: List of the i_wwpns that are part of the vios_w,
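A side note on the _discover_volume_on_vios helper introduced above: the migr
flag exists because the vm_id property cannot be resolved on the destination
host (the LPAR does not exist there yet), so the destination path uses
hdisk.lua_recovery rather than the VM-scoped hdisk.discover_hdisk. The snippet
below is a condensed, self-contained restatement of that choice; the two
pypowervm calls are passed in as assumed plain callables purely so the sketch
runs on its own, and it is not part of the patch.

# Condensed restatement of the discovery split shown in the diff above.
def discover_for_vios(adapter, vios_uuid, itls, vm_id, migr,
                      discover_hdisk, lua_recovery):
    # On the migration destination there is no LPAR yet, so the short LPAR
    # id (vm_id) cannot be resolved; fall back to a plain LUA recovery scan
    # of the ITLs instead of the VM-scoped hdisk discovery.
    if migr:
        return lua_recovery(adapter, vios_uuid, itls)
    return discover_hdisk(adapter, vios_uuid, itls, vm_id)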