Merge "VMAX Driver Queens - FC live migration failure" into stable/queens

This commit is contained in:
Zuul 2020-05-27 15:25:45 +00:00 committed by Gerrit Code Review
commit 6de15e346b
6 changed files with 149 additions and 13 deletions

View File

@ -3751,6 +3751,51 @@ class VMAXCommonTest(test.TestCase):
self.common._remove_members.assert_called_once_with( self.common._remove_members.assert_called_once_with(
array, volume, device_id, extra_specs, None, async_grp=None) array, volume, device_id, extra_specs, None, async_grp=None)
@mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
                   return_value=({'maskingview': u'OS-HostX-F-OS_PG-MV',
                                  'array': '000123456789',
                                  'hostlunid': 1,
                                  'device_id': u'00001'}, True,
                                 ['OS-HostX-F-OS_PG-NONFAST',
                                  'OS-HostY-SRP_1-OptimdNONE-OS_PG']))
def test_unmap_lun_live_migration(self, mock_dev):
    """Unmap during live migration removes vol from the source NONFAST SG.

    find_host_lun_id is mocked to report a live migration where the
    masking view matches HostX, so _unmap_lun must clean the volume out
    of the HostX NONFAST storage group.
    """
    extra_specs = deepcopy(self.data.extra_specs_intervals_set)
    extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
    volume = self.data.test_volume
    with mock.patch.object(
            self.masking, 'remove_volume_from_sg'), mock.patch.object(
            self.utils, 'get_volumetype_extra_specs',
            return_value=extra_specs):
        self.common._unmap_lun(volume, self.data.connector)
        self.masking.remove_volume_from_sg.assert_called_once_with(
            self.data.array, self.data.device_id, volume.name,
            'OS-HostX-F-OS_PG-NONFAST', extra_specs)
@mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
                   return_value=({'maskingview': u'OS-HostY-F-OS_PG-MV',
                                  'array': '000123456789',
                                  'hostlunid': 1,
                                  'device_id': u'00001'}, True,
                                 ['OS-HostY-F-OS_PG-NONFAST',
                                  'OS-HostX-SRP_1-OptimdNONE-OS_PG']))
def test_unmap_lun_live_migration_clean_up_failed_case(self, mock_dev):
    """Unmap cleans up a stale source SG left by a failed live migration.

    Here the NONFAST group belongs to HostY (the target), so the
    leftover HostX source group is the one the volume is removed from.
    """
    extra_specs = deepcopy(self.data.extra_specs_intervals_set)
    extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
    volume = self.data.test_volume
    with mock.patch.object(
            self.masking, 'remove_volume_from_sg'), mock.patch.object(
            self.utils, 'get_volumetype_extra_specs',
            return_value=extra_specs):
        self.common._unmap_lun(volume, self.data.connector)
        self.masking.remove_volume_from_sg.assert_called_once_with(
            self.data.array, self.data.device_id, volume.name,
            'OS-HostX-SRP_1-OptimdNONE-OS_PG', extra_specs)
def test_initialize_connection_already_mapped(self): def test_initialize_connection_already_mapped(self):
volume = self.data.test_volume volume = self.data.test_volume
connector = self.data.connector connector = self.data.connector
@ -4036,6 +4081,27 @@ class VMAXCommonTest(test.TestCase):
volume, None, extra_specs) volume, None, extra_specs)
self.assertEqual(ref_masked, maskedvols) self.assertEqual(ref_masked, maskedvols)
@mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume',
                   return_value=['OS-HostY-F-OS_PG-NONFAST',
                                 'OS-HostX-SRP_1-OptimdNONE-OS_PG'])
@mock.patch.object(common.VMAXCommon, '_get_masking_views_from_volume',
                   return_value=['OS-HostX-F-OS-fibre-PG-MV',
                                 'OS-HostY-F-OS-fibre-PG-MV'])
def test_find_host_lun_id_live_migration(self, mock_mvs, mock_sgs):
    """find_host_lun_id flags live migration on a NONFAST storage group.

    The device is masked to two views; the HostX match should be
    returned and is_live_migration should be True because a NONFAST
    group is present in the storage group list.
    """
    lun_address = (self.data.maskingview[0]['maskingViewConnection'][0]
                   ['host_lun_address'])
    expected = {
        'hostlunid': int(lun_address, 16),
        'maskingview': self.data.masking_view_name_f,
        'array': self.data.array,
        'device_id': self.data.device_id,
    }
    device_info, is_live_migration, __ = self.common.find_host_lun_id(
        self.data.test_volume, 'HostX', self.data.extra_specs)
    self.assertEqual(expected, device_info)
    self.assertTrue(is_live_migration)
def test_register_config_file_from_config_group_exists(self): def test_register_config_file_from_config_group_exists(self):
config_group_name = 'CommonTests' config_group_name = 'CommonTests'
config_file = self.common._register_config_file_from_config_group( config_file = self.common._register_config_file_from_config_group(

View File

@ -507,6 +507,9 @@ class VMAXCommon(object):
{'volume': volume_name}) {'volume': volume_name})
if connector is not None: if connector is not None:
host = self.utils.get_host_short_name(connector['host']) host = self.utils.get_host_short_name(connector['host'])
LOG.debug("The host in the detach operation is "
"%(host)s.",
{'host': host})
else: else:
LOG.warning("Cannot get host name from connector object - " LOG.warning("Cannot get host name from connector object - "
"assuming force-detach.") "assuming force-detach.")
@ -514,6 +517,12 @@ class VMAXCommon(object):
device_info, is_live_migration, source_storage_group_list = ( device_info, is_live_migration, source_storage_group_list = (
self.find_host_lun_id(volume, host, extra_specs)) self.find_host_lun_id(volume, host, extra_specs))
if is_live_migration:
LOG.debug("Live migration is true. The device info is "
"%(device_info)s. The source storage group list is "
"%(sgl)s.",
{'device_info': device_info,
'sgl': source_storage_group_list})
if 'hostlunid' not in device_info: if 'hostlunid' not in device_info:
LOG.info("Volume %s is not mapped. No volume to unmap.", LOG.info("Volume %s is not mapped. No volume to unmap.",
volume_name) volume_name)
@ -523,6 +532,7 @@ class VMAXCommon(object):
volume_name) volume_name)
return return
source_nf_sg = None source_nf_sg = None
target_nf_sg = None
array = extra_specs[utils.ARRAY] array = extra_specs[utils.ARRAY]
if self.utils.does_vol_need_rdf_management_group(extra_specs): if self.utils.does_vol_need_rdf_management_group(extra_specs):
async_grp = self.utils.get_async_rdf_managed_grp_name( async_grp = self.utils.get_async_rdf_managed_grp_name(
@ -530,13 +540,37 @@ class VMAXCommon(object):
if len(source_storage_group_list) > 1: if len(source_storage_group_list) > 1:
for storage_group in source_storage_group_list: for storage_group in source_storage_group_list:
if 'NONFAST' in storage_group: if 'NONFAST' in storage_group:
source_nf_sg = storage_group if host and host.lower() in storage_group.lower():
break source_nf_sg = storage_group
LOG.debug("The source NONFAST storage group is "
"%(source_nf_sg)s. Attempting a cleanup of "
"source host in Live Migration process.",
{'source_nf_sg': source_nf_sg})
break
else:
target_nf_sg = storage_group
if source_nf_sg: if source_nf_sg:
# Remove volume from non fast storage group # Remove volume from non fast storage group
self.masking.remove_volume_from_sg( self.masking.remove_volume_from_sg(
array, device_info['device_id'], volume_name, source_nf_sg, array, device_info['device_id'], volume_name, source_nf_sg,
extra_specs) extra_specs)
LOG.debug("Removed %(dev_id)s from source NONFAST storage "
"group %(sg)s in the live migration process.",
{'dev_id': device_info['device_id'],
'sg': source_nf_sg}),
elif target_nf_sg:
for storage_group in source_storage_group_list:
if storage_group.lower == target_nf_sg:
LOG.debug("The target NONFAST storage group is "
"%(target_nf_sg)s. The volume will remain "
"here until the next Live Migration.",
{'target_nf_sg': target_nf_sg})
elif storage_group.lower != target_nf_sg:
if host and host.lower() in storage_group.lower():
self.masking.remove_volume_from_sg(
array, device_info['device_id'], volume_name,
storage_group, extra_specs)
else: else:
self._remove_members(array, volume, device_info['device_id'], self._remove_members(array, volume, device_info['device_id'],
extra_specs, connector, async_grp=async_grp) extra_specs, connector, async_grp=async_grp)
@ -1088,7 +1122,9 @@ class VMAXCommon(object):
:param rep_extra_specs: rep extra specs, passed in if metro device :param rep_extra_specs: rep extra specs, passed in if metro device
:returns: dict -- the data dict :returns: dict -- the data dict
""" """
masked_vol_list = list()
maskedvols = {} maskedvols = {}
hoststr = None
is_live_migration = False is_live_migration = False
volume_name = volume.name volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs) device_id = self._find_device_on_array(volume, extra_specs)
@ -1097,6 +1133,8 @@ class VMAXCommon(object):
extra_specs[utils.ARRAY], volume, device_id)[0] extra_specs[utils.ARRAY], volume, device_id)[0]
extra_specs = rep_extra_specs extra_specs = rep_extra_specs
host_name = self.utils.get_host_short_name(host) if host else None host_name = self.utils.get_host_short_name(host) if host else None
if host_name:
hoststr = ("-%(host)s-" % {'host': host_name})
if device_id: if device_id:
array = extra_specs[utils.ARRAY] array = extra_specs[utils.ARRAY]
source_storage_group_list = ( source_storage_group_list = (
@ -1113,7 +1151,28 @@ class VMAXCommon(object):
'maskingview': maskingview, 'maskingview': maskingview,
'array': array, 'array': array,
'device_id': device_id} 'device_id': device_id}
maskedvols = devicedict masked_vol_list.append(devicedict)
if len(masked_vol_list) == 1:
maskedvols = masked_vol_list[0]
LOG.debug("Device id %(device_id)s is masked to "
"one masking view %(mvl)s.",
{'device_id': device_id,
'mvl': maskedvols})
elif len(masked_vol_list) > 1:
LOG.debug("Device id %(device_id)s is masked to more "
"than one masking view %(mvl)s.",
{'device_id': device_id,
'mvl': masked_vol_list})
for masked_vol in masked_vol_list:
if hoststr and hoststr.lower() in (
masked_vol['maskingview'].lower()):
LOG.debug("Device id %(device_id)s is masked "
"to %(mv)s. Match found for host %(host)s.",
{'device_id': device_id,
'mv': masked_vol['maskingview'],
'host': host_name})
maskedvols = masked_vol
if not maskedvols: if not maskedvols:
LOG.debug( LOG.debug(
"Host lun id not found for volume: %(volume_name)s " "Host lun id not found for volume: %(volume_name)s "
@ -1123,9 +1182,7 @@ class VMAXCommon(object):
else: else:
LOG.debug("Device info: %(maskedvols)s.", LOG.debug("Device info: %(maskedvols)s.",
{'maskedvols': maskedvols}) {'maskedvols': maskedvols})
if host: if hoststr:
hoststr = ("-%(host)s-" % {'host': host_name})
if (hoststr.lower() if (hoststr.lower()
not in maskedvols['maskingview'].lower()): not in maskedvols['maskingview'].lower()):
LOG.debug("Volume is masked but not to host %(host)s " LOG.debug("Volume is masked but not to host %(host)s "
@ -1135,6 +1192,11 @@ class VMAXCommon(object):
else: else:
for storage_group in source_storage_group_list: for storage_group in source_storage_group_list:
if 'NONFAST' in storage_group: if 'NONFAST' in storage_group:
LOG.debug("Setting live migration as "
"device id %(device_id)s is part "
"of storage group %(sg)s.",
{'device_id': device_id,
'sg': storage_group})
is_live_migration = True is_live_migration = True
break break
else: else:
@ -1171,7 +1233,7 @@ class VMAXCommon(object):
:param storage_group_list: the storage group list to use :param storage_group_list: the storage group list to use
:returns: masking view list :returns: masking view list
""" """
LOG.debug("Getting masking views from volume") LOG.debug("Getting masking views from volume.")
maskingview_list = [] maskingview_list = []
host_compare = False host_compare = False
if not storage_group_list: if not storage_group_list:

View File

@ -102,9 +102,10 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
(bugs #1783855 #1783867) (bugs #1783855 #1783867)
- Fix for HyperMax OS Upgrade Bug (bug #1790141) - Fix for HyperMax OS Upgrade Bug (bug #1790141)
3.1.2 - Legacy volume not found fix (#1867163) 3.1.2 - Legacy volume not found fix (#1867163)
3.1.3 - FC Live Migration failure (bug #1874829)
""" """
VERSION = "3.1.2" VERSION = "3.1.3"
# ThirdPartySystems wiki # ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI" CI_WIKI_NAME = "EMC_VMAX_CI"

View File

@ -107,9 +107,10 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
(bugs #1783855 #1783867) (bugs #1783855 #1783867)
- Fix for HyperMax OS Upgrade Bug (bug #1790141) - Fix for HyperMax OS Upgrade Bug (bug #1790141)
3.1.2 - Legacy volume not found fix (#1867163) 3.1.2 - Legacy volume not found fix (#1867163)
3.1.3 - FC Live Migration failure (bug #1874829)
""" """
VERSION = "3.1.2" VERSION = "3.1.3"
# ThirdPartySystems wiki # ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI" CI_WIKI_NAME = "EMC_VMAX_CI"

View File

@ -1611,9 +1611,15 @@ class VMAXMasking(object):
self.add_child_sg_to_parent_sg( self.add_child_sg_to_parent_sg(
device_info_dict['array'], source_nf_sg, source_parent_sg, device_info_dict['array'], source_nf_sg, source_parent_sg,
extra_specs, default_version=False) extra_specs, default_version=False)
self.move_volume_between_storage_groups( if source_sg.lower() != source_nf_sg.lower():
device_info_dict['array'], device_info_dict['device_id'], self.move_volume_between_storage_groups(
source_sg, source_nf_sg, extra_specs) device_info_dict['array'], device_info_dict['device_id'],
source_sg, source_nf_sg, extra_specs)
else:
LOG.debug("The source sg %(source_sg)s is the same "
"as the source NONFAST storage group. No move operation "
"necessary.",
{'source_sg': source_sg})
def post_live_migration(self, device_info_dict, extra_specs): def post_live_migration(self, device_info_dict, extra_specs):
"""Run after every live migration operation. """Run after every live migration operation.

View File

@ -752,7 +752,7 @@ class VMAXRest(object):
if vol_details: if vol_details:
vol_identifier = vol_details.get('volume_identifier', None) vol_identifier = vol_details.get('volume_identifier', None)
LOG.debug('Element name = %(en)s, Vol identifier = %(vi)s, ' LOG.debug('Element name = %(en)s, Vol identifier = %(vi)s, '
'Device id = %(di)s, vol details = %(vd)s', 'Device id = %(di)s.',
{'en': element_name, 'vi': vol_identifier, {'en': element_name, 'vi': vol_identifier,
'di': device_id}) 'di': device_id})
if vol_identifier: if vol_identifier: