PowerMax Driver - Safeguarding retype to some in-use replicated modes

Host-assisted migration is a better option when retyping from a non-replicated
mode to a replicated (Asynchronous or Metro) mode.

1. Block in-use retype from non-replicated to replicated (Metro and
   Asynchronous) modes (see the sketch below)
2. Remove any unused code
3. Differentiate between the production and the management storage groups
4. Remove the rollback code in cleanup_lun; it is not necessary on delete
   and will not always work
5. Allow for a target device not to exist on the target array
6. Fix a minor metadata issue on retype

Change-Id: I6c7c94528ee94183c1df21c25388505b28f1781a
Helen Walsh 2020-02-07 15:01:48 +00:00 committed by Jay Bryant
parent e1ace50684
commit 92aeec3ad9
11 changed files with 386 additions and 101 deletions
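For orientation, a minimal sketch of the guard behind item 1, mirroring the common.py hunk further down; the constant names follow the driver's utils module, while the function name and values here are illustrative only.

    # Illustrative sketch only -- not the driver code. REP_ASYNC and REP_METRO
    # stand in for the constants of the same name in the driver's utils module
    # (string values assumed here).
    REP_ASYNC = 'Asynchronous'
    REP_METRO = 'Metro'


    def allow_in_use_retype(was_rep_enabled, is_rep_enabled, rep_mode,
                            device_id):
        """Refuse an in-use retype from a non-replicated type to a Metro or
        Asynchronous replicated type; host assisted migration should be used
        instead."""
        if not was_rep_enabled and is_rep_enabled and rep_mode in (
                REP_ASYNC, REP_METRO):
            print("Volume %s cannot be retyped to %s in an attached state; "
                  "detach first or use host assisted migration." % (
                      device_id, rep_mode))
            return False
        return True


    print(allow_in_use_retype(False, True, REP_METRO, '00001'))  # False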


@ -251,7 +251,7 @@ class PowerMaxData(object):
test_volume_attachment = volume_attachment.VolumeAttachment(
id='2b06255d-f5f0-4520-a953-b029196add6b', volume_id=test_volume.id,
connector=connector)
connector=connector, attached_host='HostX')
location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS',
'storage_protocol': 'FC'}
@ -637,6 +637,17 @@ class PowerMaxData(object):
'snapvx_source': 'false',
'storageGroupId': []}
volume_details_attached_async = (
{'cap_gb': 2,
'num_of_storage_groups': 1,
'volumeId': device_id,
'volume_identifier': 'OS-%s' % test_volume.id,
'wwn': volume_wwn,
'snapvx_target': 'false',
'snapvx_source': 'false',
'storageGroupId': [
rdf_managed_async_grp, storagegroup_name_f + '-RA']})
volume_list = [
{'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
'count': 2,
@ -1236,3 +1247,18 @@ class PowerMaxData(object):
volume_metadata = {
'DeviceID': device_id, 'ArrayID': array, 'ArrayModel': array_model}
# retype metadata dict
retype_metadata_dict = {
'device_id': device_id,
'rdf_group_no': '10',
'remote_array': remote_array,
'target_device_id': device_id,
'rep_mode': 'Asynchronous',
'replication_status': 'enabled',
'target_array_model': array_model}
retype_metadata_dict2 = {
'default_sg_name': 'default-sg',
'service_level': 'Diamond'
}


@ -1569,6 +1569,8 @@ class PowerMaxCommonTest(test.TestCase):
new_type, host)
mock_retype.assert_called_once()
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_attached_hostname',
return_value='HostX')
@mock.patch.object(
rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details_attached)
@ -1585,13 +1587,16 @@ class PowerMaxCommonTest(test.TestCase):
def test_retype_inuse_volume_tgt_sg_exist(self, mck_vol_in_sg, mck_sg_move,
mck_child_sg_in_sg,
mck_get_sg_name,
mck_get_sg, mck_get_vol):
mck_get_sg, mck_get_vol,
mock_host):
array = self.data.array
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
device_id = self.data.device_id
volume = self.data.test_attached_volume
attached_volume = deepcopy(self.data.test_volume)
attached_volume.volume_attachment.objects = [
self.data.test_volume_attachment]
rep_mode = 'Synchronous'
src_extra_specs = self.data.extra_specs_migrate
interval = src_extra_specs['interval']
@ -1601,12 +1606,14 @@ class PowerMaxCommonTest(test.TestCase):
'interval': interval, 'retries': retries, 'rep_mode': rep_mode}
success = self.common._retype_inuse_volume(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)[0]
array, srp, attached_volume, device_id, src_extra_specs, slo,
workload, tgt_extra_specs, False)[0]
self.assertTrue(success)
mck_sg_move.assert_called()
mck_vol_in_sg.assert_called()
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_attached_hostname',
return_value='HostX')
@mock.patch.object(
rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details_attached)
@ -1623,7 +1630,7 @@ class PowerMaxCommonTest(test.TestCase):
def test_retype_inuse_volume_no_tgt_sg(self, mck_vol_in_sg, mck_move_vol,
mck_sg_in_sg, mck_add_sg_to_sg,
mck_create_sg, mck_get_csg_name,
mck_get_vol):
mck_get_vol, mock_host):
array = self.data.array
srp = self.data.srp
slo = self.data.slo
@ -1900,17 +1907,8 @@ class PowerMaxCommonTest(test.TestCase):
mck_setup_specs = src_extra_specs
mck_setup_specs[utils.METROBIAS] = self.common.rep_config[
'metro_use_bias']
mck_setup.assert_called_once_with(
self.data.array, volume, device_id, mck_setup_specs)
mck_retype.assert_called_once_with(
array, srp, volume, device_id, src_extra_specs, slo,
workload, tgt_extra_specs, False)
mck_add_vol.assert_called_once()
mck_get_sg.assert_called_once()
mck_get_rdf_name.assert_called_once()
mck_cleanup.assert_not_called()
mck_remote_retype.assert_not_called()
self.assertTrue(success)
mck_setup.assert_not_called()
self.assertFalse(success)
_reset_mocks()
# Scenario 4: rep => rep


@ -117,7 +117,7 @@ class PowerMaxProvisionTest(test.TestCase):
snap_name, extra_specs, create_snap=True)
mock_modify.assert_called_once_with(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
extra_specs, link=True, copy_mode=False)
mock_create_snapvx.assert_called_once_with(
array, source_device_id, snap_name, extra_specs, ttl=ttl)
@ -133,10 +133,10 @@ class PowerMaxProvisionTest(test.TestCase):
self.provision.rest, 'modify_volume_snap') as mock_modify:
self.provision.create_volume_replica(
array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=False)
snap_name, extra_specs, create_snap=False, copy_mode=True)
mock_modify.assert_called_once_with(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
extra_specs, link=True, copy_mode=True)
mock_create_snapvx.assert_not_called()
def test_break_replication_relationship(self):


@ -503,24 +503,24 @@ class PowerMaxReplicationTest(test.TestCase):
self.extra_specs)
mock_clean.assert_not_called()
@mock.patch.object(
common.PowerMaxCommon, 'get_remote_target_device',
return_value=(tpd.PowerMaxData.device_id2, '', '', '', ''))
@mock.patch.object(common.PowerMaxCommon,
'_add_volume_to_async_rdf_managed_grp')
def test_cleanup_lun_replication_exception(self, mock_add, mock_tgt):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.cleanup_lun_replication,
self.data.test_volume, '1', self.data.device_id,
self.extra_specs)
# is metro or async volume
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_METRO
self.assertRaises(exception.VolumeBackendAPIException,
self.common.cleanup_lun_replication,
self.data.test_volume, '1', self.data.device_id,
extra_specs)
mock_add.assert_called_once()
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=('VMAX250F', False))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_remote_target')
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_managed_storage_group',
return_value=(
tpd.PowerMaxData.rdf_managed_async_grp, {}))
@mock.patch.object(rest.PowerMaxRest, 'remove_vol_from_sg')
def test_cleanup_lun_replication_async(
self, mock_rm_sg, mock_get_rdf_sg, mock_clean, mock_model):
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_extra_specs['target_array_model'] = 'VMAX250F'
self.common.cleanup_lun_replication(
self.data.test_volume, '1', self.data.device_id,
self.extra_specs)
mock_rm_sg.assert_called_once_with(
self.data.array, self.data.rdf_managed_async_grp,
self.data.device_id, self.extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_cleanup_metro_target')
@mock.patch.object(masking.PowerMaxMasking,
@ -652,6 +652,21 @@ class PowerMaxReplicationTest(test.TestCase):
self.data.device_id))
self.assertIsNone(target_device4)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(tpd.PowerMaxData.rdf_group_name,
tpd.PowerMaxData.remote_array))
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
side_effect=exception.VolumeBackendAPIException(
data=''))
def test_get_remote_target_device_no_target(
self, mock_get_vol, mock_get_rdf):
target_device, remote_array, rdf_group, local_vol_state, pair_state = (
self.common.get_remote_target_device(
self.data.array, self.data.test_volume, self.data.device_id))
self.assertIsNone(target_device)
self.assertEqual('', local_vol_state)
self.assertEqual('', pair_state)
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=('VMAX250F', False))
@mock.patch.object(common.PowerMaxCommon,


@ -511,14 +511,13 @@ class PowerMaxUtilsTest(test.TestCase):
self.utils.is_snapshot_manageable(volume))
def test_get_volume_attached_hostname(self):
device_info_pass = self.data.volume_details_attached
attached_volume = deepcopy(self.data.test_volume)
attached_volume.volume_attachment.objects = [
self.data.test_volume_attachment]
# Success
hostname = self.utils.get_volume_attached_hostname(device_info_pass)
hostname = self.utils.get_volume_attached_hostname(attached_volume)
self.assertEqual('HostX', hostname)
# Fail
device_info_fail = self.data.volume_details_no_sg
hostname = self.utils.get_volume_attached_hostname(device_info_fail)
self.assertIsNone(hostname)
def test_validate_qos_input_exception(self):
qos_extra_spec = {'total_iops_sec': 90, 'DistributionType': 'Wrong',
@ -1016,3 +1015,127 @@ class PowerMaxUtilsTest(test.TestCase):
port_name_out = self.utils.get_port_name_label(
port_name_in, port_group_template)
self.assertEqual('p_name3b02c', port_name_out)
def test_get_rdf_managed_storage_group(self):
rdf_component_dict = ('OS-23_24_007-Asynchronous-rdf-sg',
{'prefix': 'OS',
'rdf_label': '23_24_007',
'sync_mode': 'Asynchronous',
'after_mode': 'rdf-sg'})
async_rdf_details = (
self.utils.get_rdf_managed_storage_group(
self.data.volume_details_attached_async))
self.assertEqual(rdf_component_dict, async_rdf_details)
def test_get_storage_group_component_dict_no_slo(self):
"""Test for get_storage_group_component_dict.
REST and no SLO.
"""
sg_no_slo = 'OS-myhost-No_SLO-os-iscsi-pg'
component_dict = self.utils.get_storage_group_component_dict(
sg_no_slo)
self.assertEqual('myhost', component_dict['host'])
self.assertEqual('OS', component_dict['prefix'])
self.assertEqual('No_SLO', component_dict['no_slo'])
self.assertEqual('os-iscsi-pg', component_dict['portgroup'])
self.assertIsNone(component_dict['sloworkload'])
self.assertIsNone(component_dict['srp'])
def test_get_storage_group_component_dict_slo_workload_2(self):
"""Test for get_storage_group_component_dict.
SLO, workload and test 2.
"""
sg_slo_workload = 'OS-myhost-SRP_1-DiamodOLTP-os-iscsi-pg-RE'
component_dict = self.utils.get_storage_group_component_dict(
sg_slo_workload)
self.assertEqual('OS', component_dict['prefix'])
self.assertEqual('myhost', component_dict['host'])
self.assertEqual('SRP_1', component_dict['srp'])
self.assertEqual('os-iscsi-pg', component_dict['portgroup'])
self.assertEqual('DiamodOLTP', component_dict['sloworkload'])
self.assertIsNone(component_dict['no_slo'])
def test_get_storage_group_component_dict_compression_disabled(self):
"""Test for get_storage_group_component_dict.
Compression disabled.
"""
sg_compression_disabled = 'OS-myhost-SRP_1-DiamodNONE-os-iscsi-pg-CD'
component_dict = self.utils.get_storage_group_component_dict(
sg_compression_disabled)
self.assertEqual('OS', component_dict['prefix'])
self.assertEqual('myhost', component_dict['host'])
self.assertEqual('SRP_1', component_dict['srp'])
self.assertEqual('os-iscsi-pg', component_dict['portgroup'])
self.assertEqual('DiamodNONE', component_dict['sloworkload'])
self.assertEqual('-CD', component_dict['after_pg'])
self.assertIsNone(component_dict['no_slo'])
def test_get_storage_group_component_dict_replication_enabled(self):
"""Test for get_storage_group_component_dict.
Replication enabled.
"""
sg_slo_workload_rep = 'OS-myhost-SRP_1-DiamodOLTP-os-iscsi-pg-RE'
component_dict = self.utils.get_storage_group_component_dict(
sg_slo_workload_rep)
self.assertEqual('OS', component_dict['prefix'])
self.assertEqual('myhost', component_dict['host'])
self.assertEqual('SRP_1', component_dict['srp'])
self.assertEqual('os-iscsi-pg', component_dict['portgroup'])
self.assertEqual('DiamodOLTP', component_dict['sloworkload'])
self.assertEqual('-RE', component_dict['after_pg'])
self.assertIsNone(component_dict['no_slo'])
def test_get_storage_group_component_dict_slo_no_workload(self):
"""Test for get_storage_group_component_dict.
SLO and no workload.
"""
sg_slo_no_workload = 'OS-myhost-SRP_1-DiamodNONE-os-iscsi-pg'
component_dict = self.utils.get_storage_group_component_dict(
sg_slo_no_workload)
self.assertEqual('OS', component_dict['prefix'])
self.assertEqual('myhost', component_dict['host'])
self.assertEqual('SRP_1', component_dict['srp'])
self.assertEqual('os-iscsi-pg', component_dict['portgroup'])
self.assertEqual('DiamodNONE', component_dict['sloworkload'])
self.assertIsNone(component_dict['no_slo'])
def test_get_storage_group_component_dict_dashes(self):
"""Test for get_storage_group_component_dict, dashes."""
sg_host_with_dashes = (
'OS-host-with-dashes-SRP_1-DiamodOLTP-myportgroup-RE')
component_dict = self.utils.get_storage_group_component_dict(
sg_host_with_dashes)
self.assertEqual('host-with-dashes', component_dict['host'])
self.assertEqual('OS', component_dict['prefix'])
self.assertEqual('SRP_1', component_dict['srp'])
self.assertEqual('DiamodOLTP', component_dict['sloworkload'])
self.assertEqual('myportgroup', component_dict['portgroup'])
self.assertEqual('-RE', component_dict['after_pg'])
def test_delete_values_from_dict(self):
"""Test delete_values_from_dict"""
delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model',
'service_level', 'remote_array', 'target_device_id',
'replication_status', 'rdf_group_label']
data_dict = self.utils.delete_values_from_dict(
self.data.retype_metadata_dict, delete_list)
self.assertEqual({'device_id': self.data.device_id}, data_dict)
def test_update_values_in_dict(self):
"""Test delete_values_from_dict"""
update_list = [('default_sg_name', 'source_sg_name'),
('service_level', 'source_service_level')]
update_dict = {'default_sg_name': 'default-sg',
'service_level': 'Diamond'}
ret_dict = {'source_sg_name': 'default-sg',
'source_service_level': 'Diamond'}
data_dict = self.utils.update_values_in_dict(
update_dict, update_list)
self.assertEqual(ret_dict, data_dict)


@ -1510,7 +1510,7 @@ class PowerMaxCommon(object):
is_multiattach = False
volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs)
if rep_extra_specs is not None:
if rep_extra_specs:
device_id = self.get_remote_target_device(
extra_specs[utils.ARRAY], volume, device_id)[0]
extra_specs = rep_extra_specs
@ -2352,7 +2352,7 @@ class PowerMaxCommon(object):
create_snap = True
self.provision.create_volume_replica(
array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap)
snap_name, extra_specs, create_snap, copy_mode=True)
except Exception as e:
if target_device_id:
LOG.warning("Create replica failed. Cleaning up the target "
@ -2513,7 +2513,7 @@ class PowerMaxCommon(object):
array, device_id, tgt_only=True)
source_device_id = tgt_session['source_vol_id']
LOG.debug("Target %(tgt)s source device %(src)s",
{'target': device_id, 'src': source_device_id})
{'tgt': device_id, 'src': source_device_id})
return source_device_id
@ -3314,9 +3314,15 @@ class PowerMaxCommon(object):
# Scenario: Rep was not enabled, target VT has rep enabled, need to
# enable replication
elif not was_rep_enabled and is_rep_enabled:
metro_bias = utils.METROBIAS
if metro_bias in self.rep_config:
extra_specs[metro_bias] = self.rep_config[metro_bias]
if self.rep_config['mode'] is utils.REP_METRO or (
self.rep_config['mode'] is utils.REP_ASYNC):
LOG.warning("Volume %(device_id)s cannot be retyped to "
"%(mode)s in an attached state. Please "
"detach first or use On Demand Migration "
"Policy to run host assisted migration.",
{'device_id': device_id,
'mode': self.rep_config['mode']})
return False, model_update
rep_status, rep_driver_data, rep_info_dict = (
self.setup_inuse_volume_replication(
array, volume, device_id, extra_specs))
@ -3330,21 +3336,6 @@ class PowerMaxCommon(object):
target_slo, target_workload, target_extra_specs,
is_compression_disabled)
# Ensure that storage groups for metro volumes stay consistent
if not was_rep_enabled and is_rep_enabled and (
self.rep_config['mode'] is utils.REP_METRO):
async_sg = self.utils.get_async_rdf_managed_grp_name(
self.rep_config)
sg_exists = self.rest.get_storage_group(array, async_sg)
if not sg_exists:
self.rest.create_storage_group(
array, async_sg, extra_specs['srp'],
extra_specs['slo'], extra_specs['workload'],
extra_specs)
self.masking.add_volume_to_storage_group(
array, device_id, async_sg, volume_name, extra_specs,
True)
# If the volume was replication enabled both before and after
# retype, the volume needs to be retyped on the remote array also
if was_rep_enabled and is_rep_enabled:
@ -3479,12 +3470,17 @@ class PowerMaxCommon(object):
"""
success = False
device_info = self.rest.get_volume(array, device_id)
source_sg_name = device_info['storageGroupId'][0]
if len(device_info.get('storageGroupId')) > 1:
LOG.warning('Device id %(dev)s is in more than 1 storage group.',
{'dev': device_id})
# Get the source group
source_sg_name, __ = self.utils.get_production_storage_group(
device_info)
source_sg = self.rest.get_storage_group(array, source_sg_name)
target_extra_specs[utils.PORTGROUPNAME] = extra_specs[
utils.PORTGROUPNAME]
attached_host = self.utils.get_volume_attached_hostname(device_info)
attached_host = self.utils.get_volume_attached_hostname(volume)
if not attached_host:
LOG.error(
"There was an issue retrieving attached host from volume "
@ -3878,7 +3874,7 @@ class PowerMaxCommon(object):
local_vol_state, pair_state) = (
self.get_remote_target_device(array, volume, device_id))
if target_device is not None:
if target_device:
# Clean-up target
self._cleanup_remote_target(
array, volume, remote_array, device_id, target_device,
@ -3886,22 +3882,24 @@ class PowerMaxCommon(object):
LOG.info('Successfully destroyed replication for '
'volume: %(volume)s',
{'volume': volume_name})
# Remove the source volume from the rdf management storage
# group if it exists there
device_info = self.rest.get_volume(array, device_id)
rdf_sg, __ = self.utils.get_rdf_managed_storage_group(
device_info)
if rdf_sg:
self.rest.remove_vol_from_sg(
array, rdf_sg, device_id, extra_specs)
LOG.info('Removed device %(dev)s from storage '
'group %(sg)s.',
{'dev': device_id,
'sg': rdf_sg})
else:
LOG.warning('Replication target not found for '
'replication-enabled volume: %(volume)s',
{'volume': volume_name})
except Exception as e:
if extra_specs.get(utils.REP_MODE, None) in [
utils.REP_ASYNC, utils.REP_METRO]:
(target_device, remote_array, rdf_group_no,
local_vol_state, pair_state) = (
self.get_remote_target_device(
extra_specs[utils.ARRAY], volume, device_id))
if target_device is not None:
# Return devices to their async rdf management groups
self._add_volume_to_async_rdf_managed_grp(
extra_specs[utils.ARRAY], device_id, volume_name,
remote_array, target_device, extra_specs)
exception_message = (
_('Cannot get necessary information to cleanup '
'replication target for volume: %(volume)s. '
@ -4216,15 +4214,22 @@ class PowerMaxCommon(object):
replication_keybindings = ast.literal_eval(rep_target_data)
remote_array = replication_keybindings['array']
remote_device = replication_keybindings['device_id']
target_device_info = self.rest.get_volume(
remote_array, remote_device)
if target_device_info is not None:
try:
target_device_info = self.rest.get_volume(
remote_array, remote_device)
except exception.VolumeBackendAPIException:
target_device_info = None
if target_device_info:
target_device = remote_device
are_vols_paired, local_vol_state, pair_state = (
self.rest.are_vols_rdf_paired(
array, remote_array, device_id, target_device))
if not are_vols_paired:
target_device = None
else:
LOG.warning('Unable to find device %(dev)s on remote '
'array %(array)s.',
{'dev': remote_device, 'array': remote_array})
except (KeyError, ValueError):
target_device = None
return (target_device, remote_array, rdf_group,


@ -653,6 +653,15 @@ class PowerMaxVolumeMetadata(object):
is_rep_enabled=('yes' if is_rep_enabled else 'no'),
rep_mode=rep_mode, is_compression_disabled=(
True if is_compression_disabled else False))
if not is_rep_enabled:
delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model',
'service_level', 'remote_array', 'target_device_id',
'replication_status', 'rdf_group_label']
self.utils.delete_values_from_dict(datadict, delete_list)
update_list = [('default_sg_name', 'source_sg_name'),
('service_level', 'source_service_level')]
self.utils.update_values_in_dict(datadict, update_list)
volume_metadata = self.update_volume_info_metadata(
datadict, self.version_dict)
self.print_pretty_table(volume_metadata)


@ -145,7 +145,7 @@ class PowerMaxProvision(object):
def create_volume_replica(
self, array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=False):
snap_name, extra_specs, create_snap=False, copy_mode=False):
"""Create a snap vx of a source and copy to a target.
:param array: the array serial number
@ -166,7 +166,7 @@ class PowerMaxProvision(object):
def do_modify_volume_snap(src_device_id):
self.rest.modify_volume_snap(
array, src_device_id, target_device_id, snap_name,
extra_specs, link=True)
extra_specs, link=True, copy_mode=copy_mode)
do_modify_volume_snap(source_device_id)


@ -1850,7 +1850,8 @@ class PowerMaxRest(object):
def modify_volume_snap(self, array, source_id, target_id, snap_name,
extra_specs, link=False, unlink=False,
rename=False, new_snap_name=None, restore=False,
list_volume_pairs=None, generation=0):
list_volume_pairs=None, generation=0,
copy_mode=False):
"""Modify a snapvx snapshot
:param array: the array serial number
@ -1876,6 +1877,8 @@ class PowerMaxRest(object):
elif restore:
action = "Restore"
copy = 'true' if copy_mode else 'false'
payload = {}
if action == "Restore":
operation = 'Restore snapVx snapshot'
@ -1895,7 +1898,7 @@ class PowerMaxRest(object):
tgt_list.append({'name': target_id})
payload = {"deviceNameListSource": src_list,
"deviceNameListTarget": tgt_list,
"copy": 'false', "action": action,
"copy": copy, "action": action,
"star": 'false', "force": 'false',
"exact": 'false', "remote": 'false',
"symforce": 'false', "generation": generation}


@ -925,18 +925,66 @@ class PowerMaxUtils(object):
return True
@staticmethod
def get_volume_attached_hostname(device_info):
"""Parse a hostname from a storage group ID.
def get_volume_attached_hostname(self, volume):
"""Get the host name from the attached volume
:param volume: the volume object
:returns: str -- the attached hostname
"""
host_name_set = set()
attachment_list = volume.volume_attachment
LOG.debug("Volume attachment list: %(atl)s. "
"Attachment type: %(at)s",
{'atl': attachment_list, 'at': type(attachment_list)})
try:
att_list = attachment_list.objects
except AttributeError:
att_list = attachment_list
for att in att_list:
host_name_set.add(att.attached_host)
if host_name_set:
if len(host_name_set) > 1:
LOG.warning("Volume is attached to multiple instances "
"on more than one compute node.")
else:
return host_name_set.pop()
return None
def get_rdf_managed_storage_group(self, device_info):
"""Get the RDF managed storage group
:param device_info: the device info dict
:returns: str -- the storage group id
dict -- storage group details
"""
try:
sg_id = device_info.get("storageGroupId")[0]
return sg_id.split('-')[1]
sg_list = device_info.get("storageGroupId")
for sg_id in sg_list:
sg_details = self.get_rdf_group_component_dict(sg_id)
if sg_details:
return sg_id, sg_details
except IndexError:
return None
return None, None
return None, None
def get_production_storage_group(self, device_info):
"""Get the production storage group
:param device_info: the device info dict
:return: str -- the storage group id
dict -- storage group details
"""
try:
sg_list = device_info.get("storageGroupId")
for sg_id in sg_list:
sg_details = self.get_storage_group_component_dict(sg_id)
if sg_details:
return sg_id, sg_details
except IndexError:
return None, None
return None, None
@staticmethod
def validate_qos_input(input_key, sg_value, qos_extra_spec, property_dict):
@ -1373,17 +1421,30 @@ class PowerMaxUtils(object):
port_group_template, port_name_in))
return port_name_out
@staticmethod
def get_object_components(regex_str, input_str):
"""Get components from input string.
def get_storage_group_component_dict(self, storage_group_name):
"""Parse the storage group string.
:param regex_str: the regex -- str
:param input_str: the input string -- str
:returns: dict
:param storage_group_name: the storage group name -- str
:returns: object components -- dict
"""
full_str = re.compile(regex_str)
match = full_str.match(input_str)
return match.groupdict() if match else None
regex_str = (r'^(?P<prefix>OS)-(?P<host>.+?)'
r'((?P<no_slo>No_SLO)|((?P<srp>SRP.+?)-'
r'(?P<sloworkload>.+?)))-(?P<portgroup>.+?)'
r'(?P<after_pg>$|-CD|-RE|-RA|-RM)')
return self.get_object_components_and_correct_host(
regex_str, storage_group_name)
def get_rdf_group_component_dict(self, storage_group_name):
"""Parse the storage group string.
:param storage_group_name: the storage group name -- str
:returns: object components -- dict
"""
regex_str = (r'^(?P<prefix>OS)-(?P<rdf_label>.+?)-'
r'(?P<sync_mode>Asynchronous|Metro)-'
r'(?P<after_mode>rdf-sg)$')
return self.get_object_components(
regex_str, storage_group_name)
def get_object_components_and_correct_host(self, regex_str, input_str):
"""Get components from input string.
@ -1398,6 +1459,18 @@ class PowerMaxUtils(object):
object_dict['host'] = object_dict['host'][:-1]
return object_dict
@staticmethod
def get_object_components(regex_str, input_str):
"""Get components from input string.
:param regex_str: the regex -- str
:param input_str: the input string -- str
:returns: dict
"""
full_str = re.compile(regex_str)
match = full_str.match(input_str)
return match.groupdict() if match else None
def get_possible_initiator_name(self, host_label, protocol):
"""Get possible initiator name based on the host
@ -1409,3 +1482,30 @@ class PowerMaxUtils(object):
return ("OS-%(shortHostName)s-%(protocol)s-IG"
% {'shortHostName': host_label,
'protocol': protocol})
@staticmethod
def delete_values_from_dict(datadict, key_list):
"""Delete values from a dict
:param datadict: dictionary
:param key_list: list of keys
:returns: dict
"""
for key in key_list:
if datadict.get(key):
del datadict[key]
return datadict
@staticmethod
def update_values_in_dict(datadict, tuple_list):
"""Delete values from a dict
:param datadict: dictionary
:param tuple_list: list of tuples
:returns: dict
"""
for tuple in tuple_list:
if datadict.get(tuple[0]):
datadict.update({tuple[1]: datadict.get(tuple[0])})
del datadict[tuple[0]]
return datadict
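The two regexes added above can be exercised on their own; a self-contained sketch follows, with sample storage group names taken from the unit tests in this change and the trailing-dash host correction folded in.

    import re

    # Standalone exercise of the two regexes added above; sample names come
    # from the unit tests in this change.
    SG_REGEX = (r'^(?P<prefix>OS)-(?P<host>.+?)'
                r'((?P<no_slo>No_SLO)|((?P<srp>SRP.+?)-'
                r'(?P<sloworkload>.+?)))-(?P<portgroup>.+?)'
                r'(?P<after_pg>$|-CD|-RE|-RA|-RM)')
    RDF_SG_REGEX = (r'^(?P<prefix>OS)-(?P<rdf_label>.+?)-'
                    r'(?P<sync_mode>Asynchronous|Metro)-'
                    r'(?P<after_mode>rdf-sg)$')


    def components(regex_str, input_str):
        match = re.compile(regex_str).match(input_str)
        return match.groupdict() if match else None


    sg = components(SG_REGEX, 'OS-myhost-SRP_1-DiamodOLTP-os-iscsi-pg-RE')
    if sg and sg['host'].endswith('-'):
        # The driver strips the dash the host group captures before the SRP
        # part (get_object_components_and_correct_host).
        sg['host'] = sg['host'][:-1]
    print(sg['host'], sg['srp'], sg['sloworkload'], sg['portgroup'],
          sg['after_pg'])
    # myhost SRP_1 DiamodOLTP os-iscsi-pg -RE

    print(components(RDF_SG_REGEX, 'OS-23_24_007-Asynchronous-rdf-sg'))
    # {'prefix': 'OS', 'rdf_label': '23_24_007', 'sync_mode': 'Asynchronous',
    #  'after_mode': 'rdf-sg'}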


@ -0,0 +1,6 @@
---
issues:
- |
PowerMax driver - Disabling in-use storage-assisted migration to a Metro
or Asynchronous replicated volume type, as this operation will
not facilitate FC scanning or iSCSI login of the target array.