VMAX driver - Enable multiattach support
Support multiattach capability with Dell EMC VMAX arrays.

Change-Id: I7313c704ac45ea3e813e4816815d1d6a8ea39a9b
Implements: blueprint vmax-allow-multi-attach
parent ae4f3eab3d
commit 106cf3cbf0
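For context on what "multiattach" means at the volume-type level: Cinder encodes boolean extra specs as strings such as '<is> True', and the new change_multiattach helper exercised in test_change_multiattach below returns True only when the source and target types disagree on that spec. The following is a minimal illustrative sketch consistent with the test's assertions — the helper names and the string comparison here are assumptions for illustration, not the driver's exact implementation (which lives in the driver's utils module):

    # Illustrative sketch of the multiattach retype check, mirroring the
    # assertions in test_change_multiattach. Names are hypothetical.
    def _is_multiattach(extra_specs):
        # Cinder encodes boolean extra specs as strings such as "<is> True".
        return extra_specs.get('multiattach') == '<is> True'

    def change_multiattach(current_specs, new_specs):
        # Retype only needs multiattach handling when the setting changes.
        return _is_multiattach(current_specs) != _is_multiattach(new_specs)

    assert change_multiattach({'multiattach': '<is> True'},
                              {'multiattach': '<is> False'})
    assert not change_multiattach({'multiattach': '<is> True'},
                                  {'multiattach': '<is> True'})
    assert not change_multiattach({'multiattach': '<is> False'},
                                  {'multiattach': '<is> False'})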
@@ -30,6 +30,7 @@ from cinder import objects
 from cinder.objects import fields
 from cinder.objects import group
 from cinder.objects import group_snapshot
+from cinder.objects import volume_attachment
 from cinder.objects import volume_type
 from cinder import test
 from cinder.tests.unit import fake_group
@@ -94,6 +95,7 @@ class VMAXCommonData(object):
     storagegroup_name_with_id = 'GrpId_group_name'
     rdf_managed_async_grp = "OS-%s-Asynchronous-rdf-sg" % rdf_group_name
     volume_id = '2b06255d-f5f0-4520-a953-b029196add6a'
+    no_slo_sg_name = 'OS-HostX-No_SLO-OS-fibre-PG'

     # connector info
     wwpn1 = "123456789012345"
@@ -248,6 +250,10 @@ class VMAXCommonData(object):
         host=fake_host, volume=test_volume_snap_manage,
         display_name='my_snap')

+    test_volume_attachment = volume_attachment.VolumeAttachment(
+        id='2b06255d-f5f0-4520-a953-b029196add6b', volume_id=test_volume.id,
+        connector=connector)
+
     location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS',
                      'storage_protocol': 'FC'}
     test_host = {'capabilities': location_info,
@@ -357,7 +363,7 @@ class VMAXCommonData(object):
         'connector': connector,
         'device_id': device_id,
         'init_group_name': initiatorgroup_name_f,
-        'initiator_check': False,
+        'initiator_check': None,
         'maskingview_name': masking_view_name_f,
         'parent_sg_name': parent_sg_f,
         'srp': srp,
@@ -372,7 +378,7 @@ class VMAXCommonData(object):
     masking_view_dict_no_slo = deepcopy(masking_view_dict)
     masking_view_dict_no_slo.update(
         {'slo': None, 'workload': None,
-         'storagegroup_name': 'OS-HostX-No_SLO-OS-fibre-PG'})
+         'storagegroup_name': no_slo_sg_name})

     masking_view_dict_compression_disabled = deepcopy(masking_view_dict)
     masking_view_dict_compression_disabled.update(
@@ -384,6 +390,12 @@ class VMAXCommonData(object):
         {'replication_enabled': True,
          'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'})

+    masking_view_dict_multiattach = deepcopy(masking_view_dict)
+    masking_view_dict_multiattach.update(
+        {utils.EXTRA_SPECS: extra_specs, utils.IS_MULTIATTACH: True,
+         utils.OTHER_PARENT_SG: parent_sg_i, utils.FAST_SG:
+             storagegroup_name_i, utils.NO_SLO_SG: no_slo_sg_name})
+
     # vmax data
     # sloprovisioning
     compression_info = {"symmetrixId": ["000197800128"]}
@@ -479,7 +491,15 @@ class VMAXCommonData(object):
          "storageGroupId": parent_sg_i,
          "num_of_child_sgs": 1,
          "child_storage_group": [storagegroup_name_i],
-         "maskingview": [masking_view_name_i], }
+         "maskingview": [masking_view_name_i], },
+        {"srp": srp,
+         "num_of_vols": 2,
+         "cap_gb": 2,
+         "storageGroupId": no_slo_sg_name,
+         "slo": None,
+         "workload": None,
+         "maskingview": [masking_view_name_i],
+         "parent_storage_group": [parent_sg_i]}
         ]

     sg_details_rep = [{"childNames": [],
@@ -1489,6 +1509,40 @@ class VMAXUtilsTest(test.TestCase):
         self.assertFalse(self.utils.change_replication(True, new_type))
         self.assertTrue(self.utils.change_replication(False, new_type))

+    def test_get_child_sg_name(self):
+        host_name = 'HostX'
+        # Slo and rep enabled
+        extra_specs1 = self.data.extra_specs_rep_enabled
+        extra_specs1[utils.PORTGROUPNAME] = self.data.port_group_name_f
+        child_sg_name, do_disable_compression, rep_enabled, pg_name = (
+            self.utils.get_child_sg_name(host_name, extra_specs1))
+        re_name = self.data.storagegroup_name_f + '-RE'
+        self.assertEqual(re_name, child_sg_name)
+        # Disable compression
+        extra_specs2 = self.data.extra_specs_disable_compression
+        extra_specs2[utils.PORTGROUPNAME] = self.data.port_group_name_f
+        child_sg_name, do_disable_compression, rep_enabled, pg_name = (
+            self.utils.get_child_sg_name(host_name, extra_specs2))
+        cd_name = self.data.storagegroup_name_f + '-CD'
+        self.assertEqual(cd_name, child_sg_name)
+        # No slo
+        extra_specs3 = deepcopy(self.data.extra_specs)
+        extra_specs3[utils.SLO] = None
+        extra_specs3[utils.PORTGROUPNAME] = self.data.port_group_name_f
+        child_sg_name, do_disable_compression, rep_enabled, pg_name = (
+            self.utils.get_child_sg_name(host_name, extra_specs3))
+        self.assertEqual(self.data.no_slo_sg_name, child_sg_name)
+
+    def test_change_multiattach(self):
+        extra_specs_ma_true = {'multiattach': '<is> True'}
+        extra_specs_ma_false = {'multiattach': '<is> False'}
+        self.assertTrue(self.utils.change_multiattach(
+            extra_specs_ma_true, extra_specs_ma_false))
+        self.assertFalse(self.utils.change_multiattach(
+            extra_specs_ma_true, extra_specs_ma_true))
+        self.assertFalse(self.utils.change_multiattach(
+            extra_specs_ma_false, extra_specs_ma_false))
+

 class VMAXRestTest(test.TestCase):
     def setUp(self):
@@ -1710,6 +1764,10 @@ class VMAXRestTest(test.TestCase):
             self.data.array, self.data.defaultstoragegroup_name)
         self.assertEqual(ref_details, sg_details)

+    def test_get_storage_group_list(self):
+        sg_list = self.rest.get_storage_group_list(self.data.array)
+        self.assertEqual(self.data.sg_list, sg_list)
+
     def test_create_storage_group(self):
         with mock.patch.object(self.rest, 'create_resource'):
             payload = {'someKey': 'someValue'}
@@ -1867,16 +1925,15 @@ class VMAXRestTest(test.TestCase):

     def test_add_child_sg_to_parent_sg(self):
         payload = {"editStorageGroupActionParam": {
-            "expandStorageGroupParam": {
-                "addExistingStorageGroupParam": {
-                    "storageGroupId": [self.data.storagegroup_name_f]}}}}
+            "addExistingStorageGroupParam": {
+                "storageGroupId": [self.data.storagegroup_name_f]}}}
         with mock.patch.object(self.rest, 'modify_storage_group',
                                return_value=(202, self.data.job_list[0])):
             self.rest.add_child_sg_to_parent_sg(
                 self.data.array, self.data.storagegroup_name_f,
                 self.data.parent_sg_f, self.data.extra_specs)
             self.rest.modify_storage_group.assert_called_once_with(
-                self.data.array, self.data.parent_sg_f, payload)
+                self.data.array, self.data.parent_sg_f, payload, version='83')

     def test_remove_child_sg_from_parent_sg(self):
         payload = {"editStorageGroupActionParam": {
@@ -3491,19 +3548,36 @@ class VMAXCommonTest(test.TestCase):
                 self.data.test_legacy_vol)
             mock_del.assert_called_once_with(self.data.test_legacy_snapshot)

-    def test_remove_members(self):
+    @mock.patch.object(masking.VMAXMasking,
+                       'return_volume_to_fast_managed_group')
+    @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
+    def test_remove_members(self, mock_rm, mock_return):
         array = self.data.array
         device_id = self.data.device_id
         volume = self.data.test_volume
         volume_name = self.data.test_volume.name
         extra_specs = self.data.extra_specs
-        with mock.patch.object(self.masking,
-                               'remove_and_reset_members') as mock_rm:
-            self.common._remove_members(array, volume, device_id,
-                                        extra_specs, self.data.connector)
-            mock_rm.assert_called_once_with(
-                array, volume, device_id, volume_name,
-                extra_specs, True, self.data.connector, async_grp=None)
+        self.common._remove_members(
+            array, volume, device_id, extra_specs, self.data.connector, False)
+        mock_rm.assert_called_once_with(
+            array, volume, device_id, volume_name,
+            extra_specs, True, self.data.connector, async_grp=None)
+
+    @mock.patch.object(masking.VMAXMasking,
+                       'return_volume_to_fast_managed_group')
+    @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
+    def test_remove_members_multiattach_case(self, mock_rm, mock_return):
+        array = self.data.array
+        device_id = self.data.device_id
+        volume = self.data.test_volume
+        volume_name = self.data.test_volume.name
+        extra_specs = self.data.extra_specs
+        self.common._remove_members(
+            array, volume, device_id, extra_specs, self.data.connector, True)
+        mock_rm.assert_called_once_with(
+            array, volume, device_id, volume_name,
+            extra_specs, False, self.data.connector, async_grp=None)
+        mock_return.assert_called_once()

     def test_unmap_lun(self):
         array = self.data.array
@@ -3516,7 +3590,21 @@ class VMAXCommonTest(test.TestCase):
                 self.common._unmap_lun(volume, connector)
                 self.common._remove_members.assert_called_once_with(
                     array, volume, device_id, extra_specs,
-                    connector, async_grp=None)
+                    connector, False, async_grp=None)
+
+    @mock.patch.object(common.VMAXCommon, '_remove_members')
+    def test_unmap_lun_attachments(self, mock_rm):
+        volume1 = deepcopy(self.data.test_volume)
+        volume1.volume_attachment.objects = [self.data.test_volume_attachment]
+        connector = self.data.connector
+        self.common._unmap_lun(volume1, connector)
+        mock_rm.assert_called_once()
+        mock_rm.reset_mock()
+        volume2 = deepcopy(volume1)
+        volume2.volume_attachment.objects.append(
+            self.data.test_volume_attachment)
+        self.common._unmap_lun(volume2, connector)
+        mock_rm.assert_not_called()

     def test_unmap_lun_qos(self):
         array = self.data.array
@@ -3533,13 +3621,13 @@ class VMAXCommonTest(test.TestCase):
                 self.common._unmap_lun(volume, connector)
                 self.common._remove_members.assert_called_once_with(
                     array, volume, device_id, extra_specs,
-                    connector, async_grp=None)
+                    connector, False, async_grp=None)

     def test_unmap_lun_not_mapped(self):
         volume = self.data.test_volume
         connector = self.data.connector
         with mock.patch.object(self.common, 'find_host_lun_id',
-                               return_value=({}, False, [])):
+                               return_value=({}, False)):
             with mock.patch.object(self.common, '_remove_members'):
                 self.common._unmap_lun(volume, connector)
                 self.common._remove_members.assert_not_called()
@@ -3554,7 +3642,8 @@ class VMAXCommonTest(test.TestCase):
             with mock.patch.object(self.common, '_remove_members'):
                 self.common._unmap_lun(volume, None)
                 self.common._remove_members.assert_called_once_with(
-                    array, volume, device_id, extra_specs, None, async_grp=None)
+                    array, volume, device_id, extra_specs, None,
+                    False, async_grp=None)

     def test_initialize_connection_already_mapped(self):
         volume = self.data.test_volume
@@ -3568,23 +3657,38 @@ class VMAXCommonTest(test.TestCase):
         device_info_dict = self.common.initialize_connection(volume, connector)
         self.assertEqual(ref_dict, device_info_dict)

-    def test_initialize_connection_not_mapped(self):
+    @mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
+                       return_value=({}, False))
+    @mock.patch.object(common.VMAXCommon, '_attach_volume',
+                       return_value=({}, VMAXCommonData.port_group_name_f))
+    def test_initialize_connection_not_mapped(self, mock_attach, mock_id):
         volume = self.data.test_volume
         connector = self.data.connector
         extra_specs = deepcopy(self.data.extra_specs_intervals_set)
         extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
         masking_view_dict = self.common._populate_masking_dict(
             volume, connector, extra_specs)
-        with mock.patch.object(self.common, 'find_host_lun_id',
-                               return_value=({}, False, [])):
-            with mock.patch.object(
-                    self.common, '_attach_volume', return_value=(
-                        {}, self.data.port_group_name_f)):
-                device_info_dict = self.common.initialize_connection(volume,
-                                                                     connector)
-                self.assertEqual({}, device_info_dict)
-                self.common._attach_volume.assert_called_once_with(
-                    volume, connector, extra_specs, masking_view_dict, False)
+        masking_view_dict[utils.IS_MULTIATTACH] = False
+        device_info_dict = self.common.initialize_connection(
+            volume, connector)
+        self.assertEqual({}, device_info_dict)
+        mock_attach.assert_called_once_with(
+            volume, connector, extra_specs, masking_view_dict)
+
+    @mock.patch.object(
+        masking.VMAXMasking, 'pre_multiattach',
+        return_value=VMAXCommonData.masking_view_dict_multiattach)
+    @mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
+                       return_value=({}, True))
+    @mock.patch.object(common.VMAXCommon, '_attach_volume',
+                       return_value=({}, VMAXCommonData.port_group_name_f))
+    def test_initialize_connection_multiattach_case(
+            self, mock_attach, mock_id, mock_pre):
+        volume = self.data.test_volume
+        connector = self.data.connector
+        self.common.initialize_connection(volume, connector)
+        mock_attach.assert_called_once()
+        mock_pre.assert_called_once()

     def test_attach_volume_success(self):
         volume = self.data.test_volume
@@ -3612,7 +3716,7 @@ class VMAXCommonTest(test.TestCase):
     @mock.patch.object(masking.VMAXMasking, 'setup_masking_view',
                        return_value={})
     @mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
-                       return_value=({}, False, []))
+                       return_value=({}, False))
     def test_attach_volume_failed(self, mock_lun, mock_setup, mock_rollback):
         volume = self.data.test_volume
         connector = self.data.connector
@@ -3740,7 +3844,7 @@ class VMAXCommonTest(test.TestCase):
                           'maskingview': self.data.masking_view_name_f,
                           'array': self.data.array,
                           'device_id': self.data.device_id}
-            maskedvols, __, __ = self.common.find_host_lun_id(
+            maskedvols, __ = self.common.find_host_lun_id(
                 volume, host, extra_specs)
             self.assertEqual(ref_masked, maskedvols)

@@ -3750,10 +3854,20 @@ class VMAXCommonTest(test.TestCase):
         host = 'HostX'
         with mock.patch.object(self.rest, 'find_mv_connections_for_vol',
                                return_value=None):
-            maskedvols, __, __ = self.common.find_host_lun_id(
+            maskedvols, __ = self.common.find_host_lun_id(
                 volume, host, extra_specs)
             self.assertEqual({}, maskedvols)

+    @mock.patch.object(
+        common.VMAXCommon, '_get_masking_views_from_volume',
+        return_value=([], [VMAXCommonData.masking_view_name_f]))
+    def test_find_host_lun_id_multiattach(self, mock_mask):
+        volume = self.data.test_volume
+        extra_specs = self.data.extra_specs
+        __, is_multiattach = self.common.find_host_lun_id(
+            volume, 'HostX', extra_specs)
+        self.assertTrue(is_multiattach)
+
     @mock.patch.object(common.VMAXCommon, 'get_remote_target_device',
                        return_value=VMAXCommonData.device_id2)
     def test_find_host_lun_id_rep_extra_specs(self, mock_tgt):
@@ -3794,7 +3908,7 @@ class VMAXCommonTest(test.TestCase):
                           'maskingview': self.data.masking_view_name_f,
                           'array': self.data.array,
                           'device_id': self.data.device_id}
-            maskedvols, __, __ = self.common.find_host_lun_id(
+            maskedvols, __ = self.common.find_host_lun_id(
                 volume, None, extra_specs)
             self.assertEqual(ref_masked, maskedvols)

@@ -4144,7 +4258,7 @@ class VMAXCommonTest(test.TestCase):

     def test_get_target_wwns_from_masking_view_no_mv(self):
         with mock.patch.object(self.common, '_get_masking_views_from_volume',
-                               return_value=None):
+                               return_value=([], None)):
            target_wwns = self.common._get_target_wwns_from_masking_view(
                self.data.device_id, self.data.connector['host'],
                self.data.extra_specs)
@@ -5658,6 +5772,7 @@ class VMAXMaskingTest(test.TestCase):
         self.maskingviewdict = self.driver._populate_masking_dict(
             self.data.test_volume, self.data.connector, self.extra_specs)
         self.maskingviewdict['extra_specs'] = self.extra_specs
+        self.maskingviewdict[utils.IS_MULTIATTACH] = False
         self.device_id = self.data.device_id
         self.volume_name = self.data.volume_details[0]['volume_identifier']

@@ -5743,6 +5858,20 @@ class VMAXMaskingTest(test.TestCase):
             self.data.storagegroup_name_i, self.extra_specs)
         self.assertIsNotNone(msg)

+    @mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
+    @mock.patch.object(masking.VMAXMasking, 'get_parent_sg_from_child',
+                       side_effect=[None, VMAXCommonData.parent_sg_f])
+    @mock.patch.object(
+        rest.VMAXRest, 'get_num_vols_in_sg', side_effect=[2, 1, 1])
+    def test_move_volume_between_storage_groups(
+            self, mock_num, mock_parent, mock_rm):
+        for x in range(0, 3):
+            self.driver.masking.move_volume_between_storage_groups(
+                self.data.array, self.data.device_id,
+                self.data.storagegroup_name_i, self.data.storagegroup_name_f,
+                self.data.extra_specs)
+        mock_rm.assert_called_once()
+
     @mock.patch.object(
         rest.VMAXRest,
         'get_masking_view',
@@ -6063,9 +6192,11 @@ class VMAXMaskingTest(test.TestCase):
             self.data.initiatorgroup_name_i, self.extra_specs)
         self.assertIsNotNone(error_message)

+    @mock.patch.object(masking.VMAXMasking,
+                       '_return_volume_to_fast_managed_group')
     @mock.patch.object(masking.VMAXMasking, '_check_ig_rollback')
-    def test_check_if_rollback_action_for_masking_required(self,
-                                                           mock_check_ig):
+    def test_check_if_rollback_action_for_masking_required(
+            self, mock_check_ig, mock_return):
         with mock.patch.object(rest.VMAXRest,
                                'get_storage_groups_from_volume',
                                side_effect=[
@@ -6082,11 +6213,14 @@ class VMAXMaskingTest(test.TestCase):
                     'remove_and_reset_members'):
                 self.maskingviewdict[
                     'default_sg_name'] = self.data.defaultstoragegroup_name
-                error_message = (
-                    self.mask.check_if_rollback_action_for_masking_required(
-                        self.data.array, self.data.test_volume,
-                        self.device_id, self.maskingviewdict))
-                self.assertIsNone(error_message)
+                self.mask.check_if_rollback_action_for_masking_required(
+                    self.data.array, self.data.test_volume,
+                    self.device_id, self.maskingviewdict)
+                # Multiattach case
+                self.mask.check_if_rollback_action_for_masking_required(
+                    self.data.array, self.data.test_volume,
+                    self.device_id, self.data.masking_view_dict_multiattach)
+                mock_return.assert_called_once()

     @mock.patch.object(rest.VMAXRest, 'delete_masking_view')
     @mock.patch.object(rest.VMAXRest, 'delete_initiator_group')
@@ -6498,60 +6632,6 @@ class VMAXMaskingTest(test.TestCase):
             self.assertEqual(1, mock_delete.call_count)
             mock_add.assert_called_once()

-    @mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg')
-    @mock.patch.object(masking.VMAXMasking,
-                       'move_volume_between_storage_groups')
-    @mock.patch.object(provision.VMAXProvision, 'create_storage_group')
-    def test_pre_live_migration(self, mock_create_sg, mock_move, mock_add):
-        with mock.patch.object(
-                rest.VMAXRest, 'get_storage_group',
-                side_effect=[None, self.data.sg_details[1]["storageGroupId"]]
-        ):
-            source_sg = self.data.sg_details[2]["storageGroupId"]
-            source_parent_sg = self.data.sg_details[4]["storageGroupId"]
-            source_nf_sg = source_parent_sg[:-2] + 'NONFAST'
-            self.data.iscsi_device_info['device_id'] = self.data.device_id
-            self.mask.pre_live_migration(
-                source_nf_sg, source_sg, source_parent_sg, False,
-                self.data.iscsi_device_info, None)
-            mock_create_sg.assert_called_once()
-
-    @mock.patch.object(rest.VMAXRest, 'delete_storage_group')
-    @mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
-    def test_post_live_migration(self, mock_remove_child_sg, mock_delete_sg):
-        self.data.iscsi_device_info['source_sg'] = self.data.sg_details[2][
-            "storageGroupId"]
-        self.data.iscsi_device_info['source_parent_sg'] = self.data.sg_details[
-            4]["storageGroupId"]
-        with mock.patch.object(
-                rest.VMAXRest, 'get_num_vols_in_sg', side_effect=[0, 1]):
-            self.mask.post_live_migration(self.data.iscsi_device_info, None)
-            mock_remove_child_sg.assert_called_once()
-            mock_delete_sg.assert_called_once()
-
-    @mock.patch.object(masking.VMAXMasking,
-                       'move_volume_between_storage_groups')
-    @mock.patch.object(rest.VMAXRest, 'delete_storage_group')
-    @mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
-    @mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg')
-    def test_failed_live_migration(
-            self, mock_remove_volume, mock_remove_child_sg, mock_delete_sg,
-            mock_move):
-        device_dict = self.data.iscsi_device_info
-        device_dict['device_id'] = self.data.device_id
-        device_dict['source_sg'] = self.data.sg_details[2]["storageGroupId"]
-        device_dict['source_parent_sg'] = self.data.sg_details[4][
-            "storageGroupId"]
-        device_dict['source_nf_sg'] = (
-            self.data.sg_details[4]["storageGroupId"][:-2] + 'NONFAST')
-        sg_list = [device_dict['source_nf_sg']]
-        with mock.patch.object(
-                rest.VMAXRest, 'is_child_sg_in_parent_sg',
-                side_effect=[True, False]):
-            self.mask.failed_live_migration(device_dict, sg_list, None)
-            mock_remove_volume.assert_not_called()
-            mock_remove_child_sg.assert_called_once()
-
     @mock.patch.object(masking.VMAXMasking,
                        'add_volumes_to_storage_group')
     def test_add_remote_vols_to_volume_group(self, mock_add):
@@ -6578,6 +6658,90 @@ class VMAXMaskingTest(test.TestCase):
             self.data.test_volume.name, self.data.extra_specs)
         mock_add.assert_called_once()

+    @mock.patch.object(masking.VMAXMasking,
+                       '_return_volume_to_fast_managed_group')
+    def test_pre_multiattach(self, mock_return):
+        mv_dict = self.mask.pre_multiattach(
+            self.data.array, self.data.device_id,
+            self.data.masking_view_dict_multiattach, self.data.extra_specs)
+        mock_return.assert_not_called()
+        self.assertEqual(self.data.storagegroup_name_f,
+                         mv_dict[utils.FAST_SG])
+        with mock.patch.object(
+                self.mask, 'move_volume_between_storage_groups',
+                side_effect=exception.CinderException):
+            self.assertRaises(
+                exception.VolumeBackendAPIException,
+                self.mask.pre_multiattach, self.data.array,
+                self.data.device_id, self.data.masking_view_dict_multiattach,
+                self.data.extra_specs)
+            mock_return.assert_called_once()
+
+    @mock.patch.object(rest.VMAXRest, 'get_storage_group_list',
+                       side_effect=[{'storageGroupId': [
+                           VMAXCommonData.no_slo_sg_name]}, {}])
+    @mock.patch.object(masking.VMAXMasking,
+                       '_return_volume_to_fast_managed_group')
+    def test_check_return_volume_to_fast_managed_group(
+            self, mock_return, mock_sg):
+        for x in range(0, 2):
+            self.mask.return_volume_to_fast_managed_group(
+                self.data.array, self.data.device_id, self.data.extra_specs)
+        no_slo_specs = deepcopy(self.data.extra_specs)
+        no_slo_specs[utils.SLO] = None
+        self.mask.return_volume_to_fast_managed_group(
+            self.data.array, self.data.device_id, no_slo_specs)
+        mock_return.assert_called_once()
+
+    @mock.patch.object(masking.VMAXMasking, '_move_vol_from_default_sg')
+    @mock.patch.object(masking.VMAXMasking, '_clean_up_child_storage_group')
+    @mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg')
+    @mock.patch.object(masking.VMAXMasking, '_get_or_create_storage_group')
+    @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume',
+                       side_effect=[[VMAXCommonData.no_slo_sg_name],
+                                    [VMAXCommonData.storagegroup_name_f]])
+    def test_return_volume_to_fast_managed_group(
+            self, mock_sg, mock_get, mock_add, mock_clean, mock_move):
+        for x in range(0, 2):
+            self.mask._return_volume_to_fast_managed_group(
+                self.data.array, self.data.device_id,
+                self.data.parent_sg_f, self.data.storagegroup_name_f,
+                self.data.no_slo_sg_name, self.data.extra_specs)
+        mock_get.assert_called_once()
+        mock_clean.assert_called_once()
+
+    @mock.patch.object(rest.VMAXRest, 'delete_storage_group')
+    @mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
+    @mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg',
+                       side_effect=[False, True])
+    @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg',
+                       side_effect=[2, 0, 0])
+    @mock.patch.object(rest.VMAXRest, 'get_storage_group', side_effect=[
+        None, 'child_sg', 'child_sg', 'child_sg'])
+    def test_clean_up_child_storage_group(
+            self, mock_sg, mock_num, mock_child, mock_rm, mock_del):
+        # Storage group not found
+        self.mask._clean_up_child_storage_group(
+            self.data.array, self.data.storagegroup_name_f,
+            self.data.parent_sg_f, self.data.extra_specs)
+        mock_num.assert_not_called()
+        # Storage group not empty
+        self.mask._clean_up_child_storage_group(
+            self.data.array, self.data.storagegroup_name_f,
+            self.data.parent_sg_f, self.data.extra_specs)
+        mock_child.assert_not_called()
+        # Storage group not child
+        self.mask._clean_up_child_storage_group(
+            self.data.array, self.data.storagegroup_name_f,
+            self.data.parent_sg_f, self.data.extra_specs)
+        mock_rm.assert_not_called()
+        # Storage group is child, and empty
+        self.mask._clean_up_child_storage_group(
+            self.data.array, self.data.storagegroup_name_f,
+            self.data.parent_sg_f, self.data.extra_specs)
+        mock_rm.assert_called_once()
+        self.assertEqual(2, mock_del.call_count)
+

 class VMAXCommonReplicationTest(test.TestCase):
     def setUp(self):
@@ -6808,13 +6972,21 @@ class VMAXCommonReplicationTest(test.TestCase):
                 self.data.test_volume, self.data.connector)
             self.assertIsNone(info_dict)

-    def test_attach_metro_volume(self):
+    @mock.patch.object(
+        masking.VMAXMasking, 'pre_multiattach',
+        return_value=VMAXCommonData.masking_view_dict_multiattach)
+    def test_attach_metro_volume(self, mock_pre):
         rep_extra_specs = deepcopy(VMAXCommonData.rep_extra_specs)
         rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
         hostlunid, remote_port_group = self.common._attach_metro_volume(
-            self.data.test_volume, self.data.connector,
+            self.data.test_volume, self.data.connector, False,
             self.data.extra_specs, rep_extra_specs)
         self.assertEqual(self.data.port_group_name_f, remote_port_group)
+        # Multiattach case
+        self.common._attach_metro_volume(
+            self.data.test_volume, self.data.connector, True,
+            self.data.extra_specs, rep_extra_specs)
+        mock_pre.assert_called_once()

     @mock.patch.object(rest.VMAXRest, 'is_vol_in_rep_session',
                        return_value=(False, False, None))

@@ -443,7 +443,8 @@ class VMAXCommon(object):
                   {'ssname': snap_name})

     def _remove_members(self, array, volume, device_id,
-                        extra_specs, connector, async_grp=None):
+                        extra_specs, connector, is_multiattach,
+                        async_grp=None):
         """This method unmaps a volume from a host.

         Removes volume from the storage group that belongs to a masking view.
@@ -452,13 +453,18 @@ class VMAXCommon(object):
         :param device_id: the VMAX volume device id
         :param extra_specs: extra specifications
         :param connector: the connector object
+        :param is_multiattach: flag to indicate if this is a multiattach case
         :param async_grp: the name of the async group, if applicable
         """
         volume_name = volume.name
         LOG.debug("Detaching volume %s.", volume_name)
-        return self.masking.remove_and_reset_members(
+        reset = False if is_multiattach else True
+        self.masking.remove_and_reset_members(
             array, volume, device_id, volume_name,
-            extra_specs, True, connector, async_grp=async_grp)
+            extra_specs, reset, connector, async_grp=async_grp)
+        if is_multiattach:
+            self.masking.return_volume_to_fast_managed_group(
+                array, device_id, extra_specs)

     def _unmap_lun(self, volume, connector):
         """Unmaps a volume from the host.
@@ -475,52 +481,53 @@ class VMAXCommon(object):
             extra_specs = rep_extra_specs
         volume_name = volume.name
         async_grp = None
-        LOG.info("Unmap volume: %(volume)s.",
-                 {'volume': volume_name})
+        LOG.info("Unmap volume: %(volume)s.", {'volume': volume})
         if connector is not None:
             host = connector['host']
+            attachment_list = volume.volume_attachment
+            LOG.debug("Volume attachment list: %(atl)s. "
+                      "Attachment type: %(at)s",
+                      {'atl': attachment_list, 'at': type(attachment_list)})
+            try:
+                att_list = attachment_list.objects
+            except AttributeError:
+                att_list = attachment_list
+            if att_list is not None:
+                host_list = [att.connector['host'] for att in att_list if
+                             att is not None and att.connector is not None]
+                current_host_occurances = host_list.count(host)
+                if current_host_occurances > 1:
+                    LOG.info("Volume is attached to multiple instances on "
+                             "this host. Not removing the volume from the "
+                             "masking view.")
+                    return
         else:
             LOG.warning("Cannot get host name from connector object - "
                         "assuming force-detach.")
             host = None

-        device_info, is_live_migration, source_storage_group_list = (
+        device_info, is_multiattach = (
             self.find_host_lun_id(volume, host, extra_specs))
         if 'hostlunid' not in device_info:
             LOG.info("Volume %s is not mapped. No volume to unmap.",
                      volume_name)
             return
-        if is_live_migration and len(source_storage_group_list) == 1:
-            LOG.info("Volume %s is mapped. Failed live migration case",
-                     volume_name)
-            return
-        source_nf_sg = None
         array = extra_specs[utils.ARRAY]
         if self.utils.does_vol_need_rdf_management_group(extra_specs):
             async_grp = self.utils.get_async_rdf_managed_grp_name(
                 self.rep_config)
-        if len(source_storage_group_list) > 1:
-            for storage_group in source_storage_group_list:
-                if 'NONFAST' in storage_group:
-                    source_nf_sg = storage_group
-                    break
-        if source_nf_sg:
-            # Remove volume from non fast storage group
-            self.masking.remove_volume_from_sg(
-                array, device_info['device_id'], volume_name, source_nf_sg,
-                extra_specs)
-        else:
-            self._remove_members(array, volume, device_info['device_id'],
-                                 extra_specs, connector, async_grp=async_grp)
+        self._remove_members(array, volume, device_info['device_id'],
+                             extra_specs, connector, is_multiattach,
+                             async_grp=async_grp)
         if self.utils.is_metro_device(self.rep_config, extra_specs):
             # Need to remove from remote masking view
-            device_info, __, __ = (self.find_host_lun_id(
+            device_info, __ = (self.find_host_lun_id(
                 volume, host, extra_specs, rep_extra_specs))
             if 'hostlunid' in device_info:
                 self._remove_members(
                     rep_extra_specs[utils.ARRAY], volume,
-                    device_info['device_id'],
-                    rep_extra_specs, connector, async_grp=async_grp)
+                    device_info['device_id'], rep_extra_specs, connector,
+                    is_multiattach, async_grp=async_grp)
             else:
                 # Make an attempt to clean up initiator group
                 self.masking.attempt_ig_cleanup(
@@ -571,33 +578,33 @@ class VMAXCommon(object):

         if self.utils.is_volume_failed_over(volume):
             extra_specs = rep_extra_specs
-        device_info_dict, is_live_migration, source_storage_group_list = (
+        device_info_dict, is_multiattach = (
             self.find_host_lun_id(volume, connector['host'], extra_specs))
         masking_view_dict = self._populate_masking_dict(
             volume, connector, extra_specs)
+        masking_view_dict[utils.IS_MULTIATTACH] = is_multiattach

         if ('hostlunid' in device_info_dict and
-                device_info_dict['hostlunid'] is not None and
-                is_live_migration is False) or (
-                    is_live_migration and len(source_storage_group_list) > 1):
+                device_info_dict['hostlunid'] is not None):
             hostlunid = device_info_dict['hostlunid']
-            LOG.info("Volume %(volume)s is already mapped. "
+            LOG.info("Volume %(volume)s is already mapped to host %(host)s. "
                      "The hostlunid is %(hostlunid)s.",
-                     {'volume': volume_name,
+                     {'volume': volume_name, 'host': connector['host'],
                       'hostlunid': hostlunid})
             port_group_name = (
                 self.get_port_group_from_masking_view(
                     extra_specs[utils.ARRAY],
                     device_info_dict['maskingview']))
             if self.utils.is_metro_device(self.rep_config, extra_specs):
-                remote_info_dict, __, __ = (
+                remote_info_dict, is_multiattach = (
                     self.find_host_lun_id(volume, connector['host'],
                                           extra_specs, rep_extra_specs))
                 if remote_info_dict.get('hostlunid') is None:
                     # Need to attach on remote side
                     metro_host_lun, remote_port_group = (
                         self._attach_metro_volume(
-                            volume, connector, extra_specs, rep_extra_specs))
+                            volume, connector, is_multiattach, extra_specs,
+                            rep_extra_specs))
                 else:
                     metro_host_lun = remote_info_dict['hostlunid']
                     remote_port_group = self.get_port_group_from_masking_view(
@@ -606,44 +613,22 @@ class VMAXCommon(object):
             device_info_dict['metro_hostlunid'] = metro_host_lun

         else:
-            if is_live_migration:
-                source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg = (
-                    self._setup_for_live_migration(
-                        device_info_dict, source_storage_group_list))
-                masking_view_dict['source_nf_sg'] = source_nf_sg
-                masking_view_dict['source_sg'] = source_sg
-                masking_view_dict['source_parent_sg'] = source_parent_sg
-                try:
-                    self.masking.pre_live_migration(
-                        source_nf_sg, source_sg, source_parent_sg,
-                        is_source_nf_sg, device_info_dict, extra_specs)
-                except Exception:
-                    # Move it back to original storage group
-                    source_storage_group_list = (
-                        self.rest.get_storage_groups_from_volume(
-                            device_info_dict['array'],
-                            device_info_dict['device_id']))
-                    self.masking.failed_live_migration(
-                        masking_view_dict, source_storage_group_list,
-                        extra_specs)
-                    exception_message = (_(
-                        "Unable to setup live migration because of the "
-                        "following error: %(errorMessage)s.")
-                        % {'errorMessage': sys.exc_info()[1]})
-                    raise exception.VolumeBackendAPIException(
-                        data=exception_message)
+            if is_multiattach and extra_specs[utils.SLO]:
+                # Need to move volume to a non-fast managed storagegroup
+                # before attach on subsequent host(s)
+                masking_view_dict = self.masking.pre_multiattach(
+                    extra_specs[utils.ARRAY],
+                    masking_view_dict[utils.DEVICE_ID], masking_view_dict,
+                    extra_specs)
             device_info_dict, port_group_name = (
                 self._attach_volume(
-                    volume, connector, extra_specs, masking_view_dict,
-                    is_live_migration))
+                    volume, connector, extra_specs, masking_view_dict))
             if self.utils.is_metro_device(self.rep_config, extra_specs):
                 # Need to attach on remote side
                 metro_host_lun, remote_port_group = self._attach_metro_volume(
-                    volume, connector, extra_specs, rep_extra_specs)
+                    volume, connector, is_multiattach, extra_specs,
+                    rep_extra_specs)
                 device_info_dict['metro_hostlunid'] = metro_host_lun
-            if is_live_migration:
-                self.masking.post_live_migration(
-                    masking_view_dict, extra_specs)
             if self.protocol.lower() == 'iscsi':
                 device_info_dict['ip_and_iqn'] = (
                     self._find_ip_and_iqns(
@@ -655,7 +640,7 @@ class VMAXCommon(object):
         device_info_dict['is_multipath'] = is_multipath
         return device_info_dict

-    def _attach_metro_volume(self, volume, connector,
+    def _attach_metro_volume(self, volume, connector, is_multiattach,
                              extra_specs, rep_extra_specs):
         """Helper method to attach a metro volume.

@@ -664,11 +649,21 @@ class VMAXCommon(object):
         masks the remote device to the host.
         :param volume: the volume object
         :param connector: the connector dict
+        :param is_multiattach: flag to indicate if this is a multiattach case
         :param extra_specs: the extra specifications
         :param rep_extra_specs: replication extra specifications
         :return: hostlunid, remote_port_group
         """
         remote_mv_dict = self._populate_masking_dict(
             volume, connector, extra_specs, rep_extra_specs)
+        remote_mv_dict[utils.IS_MULTIATTACH] = (
+            True if is_multiattach else False)
+        if is_multiattach and rep_extra_specs[utils.SLO]:
+            # Need to move volume to a non-fast managed sg
+            # before attach on subsequent host(s)
+            remote_mv_dict = self.masking.pre_multiattach(
+                rep_extra_specs[utils.ARRAY], remote_mv_dict[utils.DEVICE_ID],
+                remote_mv_dict, rep_extra_specs)
         remote_info_dict, remote_port_group = (
             self._attach_volume(
                 volume, connector, extra_specs, remote_mv_dict,
@@ -678,25 +673,18 @@ class VMAXCommon(object):
         return remote_info_dict['hostlunid'], remote_port_group

     def _attach_volume(self, volume, connector, extra_specs,
-                       masking_view_dict, is_live_migration=False,
-                       rep_extra_specs=None):
+                       masking_view_dict, rep_extra_specs=None):
         """Attach a volume to a host.

         :param volume: the volume object
         :param connector: the connector object
         :param extra_specs: extra specifications
         :param masking_view_dict: masking view information
-        :param is_live_migration: flag to indicate live migration
         :param rep_extra_specs: rep extra specs are passed if metro device
         :returns: dict -- device_info_dict
                   String -- port group name
         :raises: VolumeBackendAPIException
         """
-        volume_name = volume.name
-        if is_live_migration:
-            masking_view_dict['isLiveMigration'] = True
-        else:
-            masking_view_dict['isLiveMigration'] = False
         m_specs = extra_specs if rep_extra_specs is None else rep_extra_specs
         rollback_dict = self.masking.setup_masking_view(
             masking_view_dict[utils.ARRAY], volume,
@@ -704,22 +692,18 @@ class VMAXCommon(object):

         # Find host lun id again after the volume is exported to the host.

-        device_info_dict, __, __ = self.find_host_lun_id(
+        device_info_dict, __ = self.find_host_lun_id(
             volume, connector['host'], extra_specs, rep_extra_specs)
         if 'hostlunid' not in device_info_dict:
-            # Did not successfully attach to host,
-            # so a rollback for FAST is required.
-            LOG.error("Error Attaching volume %(vol)s. "
-                      "Cannot retrieve hostlunid. ",
-                      {'vol': volume_name})
+            # Did not successfully attach to host, so a rollback is required.
+            error_message = (_("Error Attaching volume %(vol)s. Cannot "
+                               "retrieve hostlunid.") % {'vol': volume.id})
+            LOG.error(error_message)
             self.masking.check_if_rollback_action_for_masking_required(
                 masking_view_dict[utils.ARRAY], volume,
-                masking_view_dict[utils.DEVICE_ID],
-                rollback_dict)
-            exception_message = (_("Error Attaching volume %(vol)s.")
-                                 % {'vol': volume_name})
+                masking_view_dict[utils.DEVICE_ID], rollback_dict)
             raise exception.VolumeBackendAPIException(
-                data=exception_message)
+                data=error_message)

         return device_info_dict, rollback_dict[utils.PORTGROUPNAME]

@@ -852,7 +836,8 @@ class VMAXCommon(object):
                     'max_over_subscription_ratio':
                         max_oversubscription_ratio,
                     'reserved_percentage': reserved_percentage,
-                    'replication_enabled': self.replication_enabled}
+                    'replication_enabled': self.replication_enabled,
+                    'multiattach': True}
                 if arrays[array_info['SerialNumber']][3]:
                     if reserved_percentage:
                         if (arrays[array_info['SerialNumber']][3] >
@@ -1033,7 +1018,7 @@ class VMAXCommon(object):
         :returns: dict -- the data dict
         """
         maskedvols = {}
-        is_live_migration = False
+        is_multiattach = False
         volume_name = volume.name
         device_id = self._find_device_on_array(volume, extra_specs)
         if rep_extra_specs is not None:
@@ -1043,13 +1028,12 @@ class VMAXCommon(object):
         host_name = self.utils.get_host_short_name(host) if host else None
         if device_id:
             array = extra_specs[utils.ARRAY]
-            source_storage_group_list = (
-                self.rest.get_storage_groups_from_volume(array, device_id))
-            # return only masking views for this host
-            maskingviews = self._get_masking_views_from_volume(
-                array, device_id, host_name, source_storage_group_list)
+            # Return only masking views for this host
+            host_maskingviews, all_masking_view_list = (
+                self._get_masking_views_from_volume(
+                    array, device_id, host_name))

-            for maskingview in maskingviews:
+            for maskingview in host_maskingviews:
                 host_lun_id = self.rest.find_mv_connections_for_vol(
                     array, maskingview, device_id)
                 if host_lun_id is not None:
@@ -1061,33 +1045,33 @@ class VMAXCommon(object):
             if not maskedvols:
                 LOG.debug(
                     "Host lun id not found for volume: %(volume_name)s "
-                    "with the device id: %(device_id)s.",
+                    "with the device id: %(device_id)s on host: %(host)s.",
                     {'volume_name': volume_name,
-                     'device_id': device_id})
-            else:
-                LOG.debug("Device info: %(maskedvols)s.",
-                          {'maskedvols': maskedvols})
-                if host:
-                    hoststr = ("-%(host)s-" % {'host': host_name})
-                    if (hoststr.lower()
-                            not in maskedvols['maskingview'].lower()):
-                        LOG.debug("Volume is masked but not to host %(host)s "
-                                  "as is expected. Assuming live migration.",
-                                  {'host': host})
-                        is_live_migration = True
-                    else:
-                        for storage_group in source_storage_group_list:
-                            if 'NONFAST' in storage_group:
-                                is_live_migration = True
-                                break
+                     'device_id': device_id, 'host': host_name})
+                if len(all_masking_view_list) > len(host_maskingviews):
+                    other_maskedvols = []
+                    for maskingview in all_masking_view_list:
+                        host_lun_id = self.rest.find_mv_connections_for_vol(
+                            array, maskingview, device_id)
+                        if host_lun_id is not None:
+                            devicedict = {'hostlunid': host_lun_id,
+                                          'maskingview': maskingview,
+                                          'array': array,
+                                          'device_id': device_id}
+                            other_maskedvols.append(devicedict)
+                    if len(other_maskedvols) > 0:
+                        LOG.debug("Volume is masked to a different host "
+                                  "than %(host)s - multiattach case.",
+                                  {'host': host})
+                        is_multiattach = True
         else:
             exception_message = (_("Cannot retrieve volume %(vol)s "
                                    "from the array.") % {'vol': volume_name})
             LOG.exception(exception_message)
             raise exception.VolumeBackendAPIException(exception_message)

-        return maskedvols, is_live_migration, source_storage_group_list
+        return maskedvols, is_multiattach

     def get_masking_views_from_volume(self, array, volume, device_id, host):
         """Get all masking views from a volume.
@@ -1100,38 +1084,36 @@ class VMAXCommon(object):
         """
         is_metro = False
         extra_specs = self._initial_setup(volume)
-        mv_list = self._get_masking_views_from_volume(array, device_id, host)
+        mv_list, __ = self._get_masking_views_from_volume(array, device_id,
+                                                          host)
         if self.utils.is_metro_device(self.rep_config, extra_specs):
             is_metro = True
         return mv_list, is_metro

-    def _get_masking_views_from_volume(self, array, device_id, host,
-                                       storage_group_list=None):
+    def _get_masking_views_from_volume(self, array, device_id, host):
         """Helper function to retrieve masking view list for a volume.

         :param array: array serial number
         :param device_id: the volume device id
         :param host: the host
-        :param storage_group_list: the storage group list to use
-        :returns: masking view list
+        :returns: masking view list, all masking view list
         """
         LOG.debug("Getting masking views from volume")
-        maskingview_list = []
-        host_compare = False
-        if not storage_group_list:
-            storage_group_list = self.rest.get_storage_groups_from_volume(
-                array, device_id)
-            host_compare = True if host else False
+        host_maskingview_list, all_masking_view_list = [], []
+        storage_group_list = self.rest.get_storage_groups_from_volume(
+            array, device_id)
+        host_compare = True if host else False
         for sg in storage_group_list:
             mvs = self.rest.get_masking_views_from_storage_group(
                 array, sg)
             for mv in mvs:
+                all_masking_view_list.append(mv)
                 if host_compare:
                     if host.lower() in mv.lower():
-                        maskingview_list.append(mv)
-                else:
-                    maskingview_list.append(mv)
-        return maskingview_list
+                        host_maskingview_list.append(mv)
+        maskingview_list = (host_maskingview_list if host_compare else
+                            all_masking_view_list)
+        return maskingview_list, all_masking_view_list

     def _register_config_file_from_config_group(self, config_group_name):
         """Given the config group name register the file.
@@ -1238,51 +1220,21 @@ class VMAXCommon(object):
             LOG.exception(exception_message)
             raise exception.VolumeBackendAPIException(exception_message)

-        host_name = connector['host']
-        unique_name = self.utils.truncate_string(extra_specs[utils.SRP], 12)
         protocol = self.utils.get_short_protocol_type(self.protocol)
-        short_host_name = self.utils.get_host_short_name(host_name)
-        masking_view_dict[utils.DISABLECOMPRESSION] = False
-        masking_view_dict['replication_enabled'] = False
-        slo = extra_specs[utils.SLO]
-        workload = extra_specs[utils.WORKLOAD]
-        rep_enabled = self.utils.is_replication_enabled(extra_specs)
-        short_pg_name = self.utils.get_pg_short_name(
-            extra_specs[utils.PORTGROUPNAME])
-        masking_view_dict[utils.SLO] = slo
-        masking_view_dict[utils.WORKLOAD] = workload
-        masking_view_dict[utils.SRP] = unique_name
+        short_host_name = self.utils.get_host_short_name(connector['host'])
+        masking_view_dict[utils.SLO] = extra_specs[utils.SLO]
+        masking_view_dict[utils.WORKLOAD] = extra_specs[utils.WORKLOAD]
         masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY]
+        masking_view_dict[utils.SRP] = extra_specs[utils.SRP]
         masking_view_dict[utils.PORTGROUPNAME] = (
             extra_specs[utils.PORTGROUPNAME])
-        if self._get_initiator_check_flag():
-            masking_view_dict[utils.INITIATOR_CHECK] = True
-        else:
-            masking_view_dict[utils.INITIATOR_CHECK] = False
+        masking_view_dict[utils.INITIATOR_CHECK] = (
+            self._get_initiator_check_flag())

-        if slo:
-            slo_wl_combo = self.utils.truncate_string(slo + workload, 10)
-            child_sg_name = (
-                "OS-%(shortHostName)s-%(srpName)s-%(combo)s-%(pg)s"
-                % {'shortHostName': short_host_name,
-                   'srpName': unique_name,
-                   'combo': slo_wl_combo,
-                   'pg': short_pg_name})
-            do_disable_compression = self.utils.is_compression_disabled(
-                extra_specs)
-            if do_disable_compression:
-                child_sg_name = ("%(child_sg_name)s-CD"
-                                 % {'child_sg_name': child_sg_name})
-                masking_view_dict[utils.DISABLECOMPRESSION] = True
-        else:
-            child_sg_name = (
-                "OS-%(shortHostName)s-No_SLO-%(pg)s"
-                % {'shortHostName': short_host_name,
-                   'pg': short_pg_name})
-        if rep_enabled:
-            rep_mode = extra_specs.get(utils.REP_MODE, None)
-            child_sg_name += self.utils.get_replication_prefix(rep_mode)
-            masking_view_dict['replication_enabled'] = True
+        child_sg_name, do_disable_compression, rep_enabled, short_pg_name = (
+            self.utils.get_child_sg_name(short_host_name, extra_specs))
+        masking_view_dict[utils.DISABLECOMPRESSION] = do_disable_compression
+        masking_view_dict[utils.IS_RE] = rep_enabled
         mv_prefix = (
             "OS-%(shortHostName)s-%(protocol)s-%(pg)s"
             % {'shortHostName': short_host_name,
@@ -1705,9 +1657,9 @@ class VMAXCommon(object):
         """
         target_wwns = []
         array = extra_specs[utils.ARRAY]
-        masking_view_list = self._get_masking_views_from_volume(
+        masking_view_list, __ = self._get_masking_views_from_volume(
             array, device_id, short_host_name)
-        if masking_view_list is not None:
+        if masking_view_list:
             portgroup = self.get_port_group_from_masking_view(
                 array, masking_view_list[0])
             target_wwns = self.rest.get_target_wwns(array, portgroup)
@@ -2253,17 +2205,23 @@ class VMAXCommon(object):
                 do_change_compression, do_change_replication))

         if not is_valid:
-            LOG.error(
-                "Volume %(name)s is not suitable for storage "
-                "assisted migration using retype.",
-                {'name': volume_name})
-            return False
+            # Check if this is multiattach retype case
+            do_change_multiattach = self.utils.change_multiattach(
+                extra_specs, new_type['extra_specs'])
+            if do_change_multiattach:
+                return True
+            else:
+                LOG.error(
+                    "Volume %(name)s is not suitable for storage "
+                    "assisted migration using retype.",
+                    {'name': volume_name})
+                return False
         if (volume.host != host['host'] or do_change_compression
                 or do_change_replication):
             LOG.debug(
                 "Retype Volume %(name)s from source host %(sourceHost)s "
                 "to target host %(targetHost)s. Compression change is %(cc)r. "
-                "Replication change is %(rc)s",
+                "Replication change is %(rc)s.",
                 {'name': volume_name, 'sourceHost': volume.host,
                  'targetHost': host['host'],
                  'cc': do_change_compression, 'rc': do_change_replication})
@@ -3225,35 +3183,6 @@ class VMAXCommon(object):
             secondary_info['srpName'] = rep_config['srp']
         return secondary_info

-    def _setup_for_live_migration(self, device_info_dict,
-                                  source_storage_group_list):
-        """Function to set attributes for live migration.
-
-        :param device_info_dict: the data dict
-        :param source_storage_group_list:
-        :returns: source_nf_sg: The non fast storage group
-        :returns: source_sg: The source storage group
-        :returns: source_parent_sg: The parent storage group
-        :returns: is_source_nf_sg:if the non fast storage group already exists
-        """
-        array = device_info_dict['array']
-        source_sg = None
-        is_source_nf_sg = False
-        # Get parent storage group
-        source_parent_sg = self.rest.get_element_from_masking_view(
-            array, device_info_dict['maskingview'], storagegroup=True)
-        source_nf_sg = source_parent_sg[:-2] + 'NONFAST'
-        for sg in source_storage_group_list:
-            is_descendant = self.rest.is_child_sg_in_parent_sg(
-                array, sg, source_parent_sg)
-            if is_descendant:
-                source_sg = sg
-        is_descendant = self.rest.is_child_sg_in_parent_sg(
-            array, source_nf_sg, source_parent_sg)
-        if is_descendant:
-            is_source_nf_sg = True
-        return source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg
-
     def create_group(self, context, group):
         """Creates a generic volume group.

@@ -92,6 +92,7 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
               - Support for revert to volume snapshot
         3.2.0 - Support for retyping replicated volumes (bp
                 vmax-retype-replicated-volumes)
+              - Support for multiattach volumes (bp vmax-allow-multi-attach)
     """

     VERSION = "3.2.0"

@@ -97,6 +97,7 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
              - Support for revert to volume snapshot
        3.2.0 - Support for retyping replicated volumes (bp
                vmax-retype-replicated-volumes)
+             - Support for multiattach volumes (bp vmax-allow-multi-attach)
    """

    VERSION = "3.2.0"

@@ -14,6 +14,8 @@
 # under the License.

+import ast
+from copy import deepcopy
 import sys
 import time

 from oslo_log import log as logging
@@ -102,8 +104,6 @@ class VMAXMasking(object):
                 {'maskingview_name': masking_view_dict[utils.MV_NAME]})
             error_message = six.text_type(e)

-        if 'source_nf_sg' in masking_view_dict:
-            default_sg_name = masking_view_dict['source_nf_sg']
-            rollback_dict['default_sg_name'] = default_sg_name
-
         if error_message:
@@ -111,15 +111,8 @@ class VMAXMasking(object):
             # successfully then we must roll back by adding the volume back to
             # the default storage group for that slo/workload combination.

-            if rollback_dict['slo'] is not None:
-                self.check_if_rollback_action_for_masking_required(
-                    serial_number, volume, device_id, masking_view_dict)
-
-            else:
-                self._check_adding_volume_to_storage_group(
-                    serial_number, device_id, rollback_dict['default_sg_name'],
-                    masking_view_dict[utils.VOL_NAME],
-                    masking_view_dict[utils.EXTRA_SPECS])
+            self.check_if_rollback_action_for_masking_required(
+                serial_number, volume, device_id, rollback_dict)

             exception_message = (_(
                 "Failed to get, create or add volume %(volumeName)s "
@@ -150,24 +143,10 @@ class VMAXMasking(object):
         check_vol = self.rest.is_volume_in_storagegroup(
             serial_number, device_id, default_sg_name)
         if check_vol:
-            @coordination.synchronized("emc-sg-{sg_name}")
-            def do_move_vol_from_def_sg(sg_name):
-                num_vol_in_sg = self.rest.get_num_vols_in_sg(
-                    serial_number, default_sg_name)
-                LOG.debug("There are %(num_vol)d volumes in the "
-                          "storage group %(sg_name)s.",
-                          {'num_vol': num_vol_in_sg,
-                           'sg_name': default_sg_name})
-                self.rest.move_volume_between_storage_groups(
+            try:
+                self.move_volume_between_storage_groups(
                     serial_number, device_id, default_sg_name,
                     dest_storagegroup, extra_specs)
-                if num_vol_in_sg == 1:
-                    # Last volume in the storage group - delete sg.
-                    self.rest.delete_storage_group(
-                        serial_number, default_sg_name)
-
-            try:
-                do_move_vol_from_def_sg(default_sg_name)
             except Exception as e:
                 msg = ("Exception while moving volume from the default "
                        "storage group to %(sg)s. Exception received was "
@@ -336,12 +315,9 @@ class VMAXMasking(object):
         return msg

     def add_child_sg_to_parent_sg(
-            self, serial_number, child_sg_name, parent_sg_name, extra_specs,
-            default_version=True
-    ):
+            self, serial_number, child_sg_name, parent_sg_name, extra_specs):
         """Add a child storage group to a parent storage group.

-        :param default_version: the default uv4 version
         :param serial_number: the array serial number
         :param child_sg_name: the name of the child storage group
         :param parent_sg_name: the name of the parent storage group
serial_number, child_sg_name, parent_sg_name):
pass
else:
if default_version:
self.rest.add_child_sg_to_parent_sg(
serial_number, child_sg, parent_sg, extra_specs)
else:
self.rest.add_empty_child_sg_to_parent_sg(
serial_number, child_sg, parent_sg, extra_specs)
self.rest.add_child_sg_to_parent_sg(
serial_number, child_sg, parent_sg, extra_specs)

do_add_sg_to_sg(child_sg_name, parent_sg_name)

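The layout this helper maintains is a two-level cascade: the masking view points at a parent storage group, and per-SLO child groups hang off that parent. A hedged usage sketch (self is a VMAXMasking instance; group names are illustrative, not taken from a live array):

    # Idempotent: if the child is already in the parent, the inner
    # do_add_sg_to_sg falls through without issuing a REST call.
    self.add_child_sg_to_parent_sg(
        serial_number, 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG',
        'OS-HostX-F-OS-fibre-PG', extra_specs)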
@ -488,9 +460,26 @@ class VMAXMasking(object):
:param target_storagegroup_name: the target sg
:param extra_specs: the extra specifications
"""
num_vol_in_sg = self.rest.get_num_vols_in_sg(
serial_number, source_storagegroup_name)
LOG.debug("There are %(num_vol)d volumes in the "
"storage group %(sg_name)s.",
{'num_vol': num_vol_in_sg,
'sg_name': source_storagegroup_name})
self.rest.move_volume_between_storage_groups(
serial_number, device_id, source_storagegroup_name,
target_storagegroup_name, extra_specs)
if num_vol_in_sg == 1:
# Check if storage group is a child sg
parent_sg_name = self.get_parent_sg_from_child(
serial_number, source_storagegroup_name)
if parent_sg_name:
self.rest.remove_child_sg_from_parent_sg(
serial_number, source_storagegroup_name, parent_sg_name,
extra_specs)
# Last volume in the storage group - delete sg.
self.rest.delete_storage_group(
serial_number, source_storagegroup_name)

def _check_port_group(self, serial_number, portgroup_name):
"""Check that you can get a port group.
@ -836,68 +825,38 @@ class VMAXMasking(object):
self, serial_number, volume, device_id, rollback_dict):
"""Rollback action for volumes with an associated service level.

We need to be able to return the volume to the default storage group
if anything has gone wrong. The volume can also potentially belong to
a storage group that is not the default depending on where
the exception occurred. We also may need to clean up any unused
We need to be able to return the volume to its previous storage group
if anything has gone wrong. We also may need to clean up any unused
initiator groups.
:param serial_number: the array serial number
:param volume: the volume object
:param device_id: the device id
:param rollback_dict: the rollback dict
:returns: error message -- string, or None
:raises: VolumeBackendAPIException
"""
message = None
reset = False if rollback_dict[utils.IS_MULTIATTACH] else True
# Check if ig has been created. If so, check for other
# masking views associated with the ig. If none, delete the ig.
self._check_ig_rollback(
serial_number, rollback_dict['init_group_name'],
rollback_dict['connector'])
serial_number, rollback_dict[utils.IG_NAME],
rollback_dict[utils.CONNECTOR])
try:
found_sg_name_list = (
self.rest.get_storage_groups_from_volume(
serial_number, rollback_dict['device_id']))
# Volume is not associated with any storage group so add
# it back to the default.
if not found_sg_name_list:
error_message = self._check_adding_volume_to_storage_group(
serial_number, device_id,
rollback_dict['default_sg_name'],
rollback_dict[utils.VOL_NAME],
rollback_dict[utils.EXTRA_SPECS])
if error_message:
LOG.error(error_message)
message = (_("Rollback"))
elif 'isLiveMigration' in rollback_dict and (
rollback_dict['isLiveMigration'] is True):
# Live migration case.
# Remove from nonfast storage group to fast sg
self.failed_live_migration(rollback_dict, found_sg_name_list,
rollback_dict[utils.EXTRA_SPECS])
else:
LOG.info("Volume %(vol_id)s is in %(list_size)d storage "
"groups. The storage groups are %(found_sg_list)s.",
{'vol_id': volume.id,
'list_size': len(found_sg_name_list),
'found_sg_list': found_sg_name_list})

# Check the name, see if it is the default storage group
# or another.
sg_found = False
for found_sg_name in found_sg_name_list:
if found_sg_name == rollback_dict['default_sg_name']:
sg_found = True
if not sg_found:
# Remove it from its current storage group and return it
# to its default storage group if slo is defined.
self.remove_and_reset_members(
serial_number, volume, device_id,
rollback_dict['volume_name'],
rollback_dict['extra_specs'], True,
rollback_dict['connector'])
message = (_("Rollback - Volume in another storage "
"group besides default storage group."))
# Remove it from the storage group associated with the connector,
# if any. If not multiattach case, return to the default sg.
self.remove_and_reset_members(
serial_number, volume, device_id,
rollback_dict[utils.VOL_NAME],
rollback_dict[utils.EXTRA_SPECS], reset,
rollback_dict[utils.CONNECTOR])
if rollback_dict[utils.IS_MULTIATTACH]:
# Move from the nonfast storage group to the fast sg
if rollback_dict[utils.SLO] is not None:
self._return_volume_to_fast_managed_group(
serial_number, device_id,
rollback_dict[utils.OTHER_PARENT_SG],
rollback_dict[utils.FAST_SG],
rollback_dict[utils.NO_SLO_SG],
rollback_dict[utils.EXTRA_SPECS])
except Exception as e:
error_message = (_(
"Rollback for Volume: %(volume_name)s has failed. "
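The crux of the reworked rollback is the reset flag computed at the top of the method: the volume is always detached from the connector's storage group, but only non-multiattach volumes are returned to the default storage group; multiattach volumes are instead moved back to their FAST-managed group. Distilled into a standalone sketch (not driver code):

    def rollback_reset_flag(is_multiattach):
        # A multiattach volume may still be attached to other hosts,
        # so it must not be yanked back to the default storage group.
        return not is_multiattach

    assert rollback_reset_flag(False) is True
    assert rollback_reset_flag(True) is False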
@ -908,7 +867,6 @@ class VMAXMasking(object):
'e': six.text_type(e)})
LOG.exception(error_message)
raise exception.VolumeBackendAPIException(data=error_message)
return message

def _verify_initiator_group_from_masking_view(
self, serial_number, maskingview_name, maskingview_dict,
@ -1061,6 +1019,7 @@ class VMAXMasking(object):
:param volume_name: the volume name
:param extra_specs: the extra specifications
:param connector: the connector object
:param reset: flag to indicate if reset is required -- bool
:param async_grp: the async rep group
"""
move = False
@ -1074,10 +1033,10 @@ class VMAXMasking(object):
storagegroup_names.pop(index)
if len(storagegroup_names) == 1 and reset is True:
move = True
elif connector is not None and reset is True:
elif connector is not None:
short_host_name = self.utils.get_host_short_name(
connector['host'])
move = True
move = reset
if short_host_name:
for sg_name in storagegroup_names:
if short_host_name in sg_name:
@ -1638,78 +1597,171 @@ class VMAXMasking(object):
"initiator group %(ig_name)s will not be deleted.",
{'ig_name': initiatorgroup_name})

def pre_live_migration(self, source_nf_sg, source_sg, source_parent_sg,
is_source_nf_sg, device_info_dict, extra_specs):
"""Run before any live migration operation.
def pre_multiattach(self, serial_number, device_id, mv_dict, extra_specs):
"""Run before attaching a device to multiple hosts.

:param source_nf_sg: The non fast storage group
:param source_sg: The source storage group
:param source_parent_sg: The parent storage group
:param is_source_nf_sg: if the non fast storage group already exists
:param device_info_dict: the data dict
:param serial_number: the array serial number
:param device_id: the device id
:param mv_dict: the masking view dict
:param extra_specs: extra specifications
:returns: masking view dict
"""
if is_source_nf_sg is False:
storage_group = self.rest.get_storage_group(
device_info_dict['array'], source_nf_sg)
if storage_group is None:
no_slo_sg_name, fast_source_sg_name, parent_sg_name = None, None, None
sg_list = self.rest.get_storage_group_list(
serial_number, params={
'child': 'true', 'volumeId': device_id})
slo_wl_combo = self.utils.truncate_string(
extra_specs[utils.SLO] + extra_specs[utils.WORKLOAD], 10)
for sg in sg_list.get('storageGroupId', []):
if slo_wl_combo in sg:
fast_source_sg_name = sg
masking_view_name = (
self.rest.get_masking_views_from_storage_group(
serial_number, fast_source_sg_name))[0]
port_group_name = self.rest.get_element_from_masking_view(
serial_number, masking_view_name, portgroup=True)
short_pg_name = self.utils.get_pg_short_name(port_group_name)
short_host_name = masking_view_name.lstrip('OS-').rstrip(
'-%s-MV' % short_pg_name)[:-2]
extra_specs[utils.PORTGROUPNAME] = short_pg_name
no_slo_extra_specs = deepcopy(extra_specs)
no_slo_extra_specs[utils.SLO] = None
no_slo_sg_name, __, __, __ = self.utils.get_child_sg_name(
short_host_name, no_slo_extra_specs)
source_sg_details = self.rest.get_storage_group(
serial_number, fast_source_sg_name)
parent_sg_name = source_sg_details[
'parent_storage_group'][0]
mv_dict[utils.OTHER_PARENT_SG] = parent_sg_name
mv_dict[utils.FAST_SG] = fast_source_sg_name
mv_dict[utils.NO_SLO_SG] = no_slo_sg_name
try:
no_slo_sg = self.rest.get_storage_group(
serial_number, no_slo_sg_name)
if no_slo_sg is None:
self.provision.create_storage_group(
device_info_dict['array'], source_nf_sg, None, None, None,
extra_specs)
self.add_child_sg_to_parent_sg(
device_info_dict['array'], source_nf_sg, source_parent_sg,
extra_specs, default_version=False)
self.move_volume_between_storage_groups(
device_info_dict['array'], device_info_dict['device_id'],
source_sg, source_nf_sg, extra_specs)

def post_live_migration(self, device_info_dict, extra_specs):
"""Run after every live migration operation.

:param device_info_dict: the data dict
:param extra_specs: extra specifications
"""
array = device_info_dict['array']
source_sg = device_info_dict['source_sg']
# Delete fast storage group
num_vol_in_sg = self.rest.get_num_vols_in_sg(
array, source_sg)
if num_vol_in_sg == 0:
self.rest.remove_child_sg_from_parent_sg(
array, source_sg, device_info_dict['source_parent_sg'],
extra_specs)
self.rest.delete_storage_group(array, source_sg)

def failed_live_migration(self, device_info_dict,
source_storage_group_list, extra_specs):
"""This is run in the event of a failed live migration operation.

:param device_info_dict: the data dict
:param source_storage_group_list: list of storage groups associated
with the device
:param extra_specs: extra specifications
"""
array = device_info_dict['array']
source_nf_sg = device_info_dict['source_nf_sg']
source_sg = device_info_dict['source_sg']
source_parent_sg = device_info_dict['source_parent_sg']
device_id = device_info_dict['device_id']
for sg in source_storage_group_list:
if sg not in [source_sg, source_nf_sg]:
self.remove_volume_from_sg(
array, device_id, device_info_dict['volume_name'], sg,
extra_specs)
if source_nf_sg in source_storage_group_list:
serial_number, no_slo_sg_name,
None, None, None, extra_specs)
self._check_add_child_sg_to_parent_sg(
serial_number, no_slo_sg_name, parent_sg_name, extra_specs)
self.move_volume_between_storage_groups(
array, device_id, source_nf_sg,
source_sg, extra_specs)
is_descendant = self.rest.is_child_sg_in_parent_sg(
array, source_nf_sg, source_parent_sg)
if is_descendant:
self.rest.remove_child_sg_from_parent_sg(
array, source_nf_sg, source_parent_sg, extra_specs)
# Delete non fast storage group
self.rest.delete_storage_group(array, source_nf_sg)
serial_number, device_id, fast_source_sg_name,
no_slo_sg_name, extra_specs)
# Clean up the fast managed group, if required
self._clean_up_child_storage_group(
serial_number, fast_source_sg_name,
parent_sg_name, extra_specs)
except Exception:
# Move it back to original storage group, if required
self._return_volume_to_fast_managed_group(
serial_number, device_id, parent_sg_name,
fast_source_sg_name, no_slo_sg_name, extra_specs)
exception_message = (_("Unable to setup for multiattach because "
"of the following error: %(error_msg)s.")
% {'error_msg': sys.exc_info()[1]})
raise exception.VolumeBackendAPIException(
data=exception_message)
return mv_dict

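To find the FAST-managed source group, pre_multiattach lists the child storage groups holding the device and matches on the truncated SLO/workload combination. Roughly, with truncate_string approximated by plain slicing and example names:

    slo_wl_combo = ('Diamond' + 'DSS')[:10]  # 'DiamondDSS'
    sg_ids = ['OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG',
              'OS-HostX-No_SLO-OS-fibre-PG']
    fast_source_sg_name = next(
        (sg for sg in sg_ids if slo_wl_combo in sg), None)
    print(fast_source_sg_name)  # OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG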
def return_volume_to_fast_managed_group(
self, serial_number, device_id, extra_specs):
"""Return a volume to a fast managed group if slo is set.

On a detach on a multiattach volume, return the volume to its fast
managed group, if slo is set.
:param serial_number: the array serial number
:param device_id: the device id
:param extra_specs: the extra specifications
"""
if extra_specs[utils.SLO]:
# Get a parent storage group of the volume
sg_list = self.rest.get_storage_group_list(
serial_number, params={
'child': 'true', 'volumeId': device_id})
slo_wl_combo = '-No_SLO-'
for sg in sg_list.get('storageGroupId', []):
if slo_wl_combo in sg:
no_slo_sg_name = sg
masking_view_name = (
self.rest.get_masking_views_from_storage_group(
serial_number, no_slo_sg_name))[0]
port_group_name = self.rest.get_element_from_masking_view(
serial_number, masking_view_name, portgroup=True)
short_pg_name = self.utils.get_pg_short_name(
port_group_name)
short_host_name = masking_view_name.lstrip('OS-').rstrip(
'-%s-MV' % short_pg_name)[:-2]
extra_specs[utils.PORTGROUPNAME] = short_pg_name
fast_sg_name, _, _, _ = self.utils.get_child_sg_name(
short_host_name, extra_specs)
source_sg_details = self.rest.get_storage_group(
serial_number, no_slo_sg_name)
parent_sg_name = source_sg_details[
'parent_storage_group'][0]
self._return_volume_to_fast_managed_group(
serial_number, device_id, parent_sg_name,
fast_sg_name, no_slo_sg_name, extra_specs)
break

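A worked example of the short-host-name extraction above. str.lstrip and str.rstrip strip character sets rather than literal prefixes and suffixes, which happens to behave as intended for masking view names of this shape (name illustrative):

    masking_view_name = 'OS-HostX-F-OS-fibre-PG-MV'
    short_pg_name = 'OS-fibre-PG'
    short_host_name = masking_view_name.lstrip('OS-').rstrip(
        '-%s-MV' % short_pg_name)[:-2]
    print(short_host_name)  # HostX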
def _return_volume_to_fast_managed_group(
self, serial_number, device_id, parent_sg_name,
fast_sg_name, no_slo_sg_name, extra_specs):
"""Return a volume to its fast managed group.

On a detach, or failed attach, on a multiattach volume, return the
volume to its fast managed group, if required.
:param serial_number: the array serial number
:param device_id: the device id
:param parent_sg_name: the parent sg name
:param fast_sg_name: the fast managed sg name
:param no_slo_sg_name: the no slo sg name
:param extra_specs: the extra specifications
"""
sg_list = self.rest.get_storage_groups_from_volume(
serial_number, device_id)
in_fast_sg = True if fast_sg_name in sg_list else False
if in_fast_sg is False:
disable_compr = self.utils.is_compression_disabled(extra_specs)
mv_dict = {utils.DISABLECOMPRESSION: disable_compr,
utils.VOL_NAME: device_id}
# Get or create the fast child sg
self._get_or_create_storage_group(
serial_number, mv_dict, fast_sg_name, extra_specs)
# Add child sg to parent sg if required
self.add_child_sg_to_parent_sg(
serial_number, fast_sg_name, parent_sg_name, extra_specs)
# Add or move volume to fast sg
self._move_vol_from_default_sg(
serial_number, device_id, device_id,
no_slo_sg_name, fast_sg_name, extra_specs)
else:
LOG.debug("Volume already a member of the FAST managed storage "
"group.")
# Check if non-fast storage group needs to be cleaned up
self._clean_up_child_storage_group(
serial_number, no_slo_sg_name, parent_sg_name, extra_specs)

def _clean_up_child_storage_group(self, serial_number, child_sg_name,
parent_sg_name, extra_specs):
"""Clean up an empty child storage group, if required.

:param serial_number: the array serial number
:param child_sg_name: the child storage group
:param parent_sg_name: the parent storage group
:param extra_specs: extra specifications
"""
child_sg = self.rest.get_storage_group(serial_number, child_sg_name)
if child_sg:
num_vol_in_sg = self.rest.get_num_vols_in_sg(
serial_number, child_sg_name)
if num_vol_in_sg == 0:
if self.rest.is_child_sg_in_parent_sg(
serial_number, child_sg_name, parent_sg_name):
self.rest.remove_child_sg_from_parent_sg(
serial_number, child_sg_name,
parent_sg_name, extra_specs)
self.rest.delete_storage_group(
serial_number, child_sg_name)

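Note the ordering inside this helper: the empty child is first unlinked from its parent and only then deleted, presumably because the array will not delete a group that is still part of a cascade. A hedged usage sketch (self is a VMAXMasking instance; names illustrative):

    self._clean_up_child_storage_group(
        serial_number, 'OS-HostX-No_SLO-OS-fibre-PG',
        'OS-HostX-F-OS-fibre-PG', extra_specs)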
def attempt_ig_cleanup(self, connector, protocol, serial_number, force):
"""Attempt to cleanup an orphan initiator group
@ -1717,6 +1769,7 @@ class VMAXMasking(object):
:param connector: connector object
:param protocol: iscsi or fc
:param serial_number: the array serial number
:param force: flag to indicate if operation should be forced
"""
protocol = self.utils.get_short_protocol_type(protocol)
host_name = connector['host']
@ -511,6 +511,16 @@ class VMAXRest(object):
array, SLOPROVISIONING, 'storagegroup',
resource_name=storage_group_name)

def get_storage_group_list(self, array, params=None):
"""Return a list of storage groups, optionally filtered.

:param array: the array serial number
:param params: dict of optional filters
:returns: storage group list dict or None
"""
return self.get_resource(
array, SLOPROVISIONING, 'storagegroup', params=params)

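As used by the multiattach paths, the optional filters narrow the listing, for example to child groups containing a given device; the return value is the raw Unisphere response keyed by storageGroupId. A sketch with illustrative serial and device id:

    sg_list = self.get_storage_group_list(
        '000197800123', params={'child': 'true', 'volumeId': '00001'})
    for sg_id in (sg_list or {}).get('storageGroupId', []):
        print(sg_id)  # e.g. 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'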
def get_num_vols_in_sg(self, array, storage_group_name):
"""Get the number of volumes in a storage group.

@ -545,24 +555,6 @@ class VMAXRest(object):
self, array, child_sg, parent_sg, extra_specs):
"""Add a storage group to a parent storage group.

This method adds an existing storage group to another storage
group, i.e. cascaded storage groups.
:param array: the array serial number
:param child_sg: the name of the child sg
:param parent_sg: the name of the parent sg
:param extra_specs: the extra specifications
"""
payload = {"editStorageGroupActionParam": {
"expandStorageGroupParam": {
"addExistingStorageGroupParam": {
"storageGroupId": [child_sg]}}}}
sc, job = self.modify_storage_group(array, parent_sg, payload)
self.wait_for_job('Add child sg to parent sg', sc, job, extra_specs)

def add_empty_child_sg_to_parent_sg(
self, array, child_sg, parent_sg, extra_specs):
"""Add an empty storage group to a parent storage group.

This method adds an existing storage group to another storage
group, i.e. cascaded storage groups.
:param array: the array serial number
@ -28,6 +28,7 @@ import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types


@ -56,6 +57,7 @@ PARENT_SG_NAME = 'parent_sg_name'
CONNECTOR = 'connector'
VOL_NAME = 'volume_name'
EXTRA_SPECS = 'extra_specs'
HOST_NAME = 'short_host_name'
IS_RE = 'replication_enabled'
DISABLECOMPRESSION = 'storagetype:disablecompression'
REP_SYNC = 'Synchronous'
@ -71,6 +73,11 @@ RDF_ACTIVE = 'active'
RDF_ACTIVEACTIVE = 'activeactive'
RDF_ACTIVEBIAS = 'activebias'
METROBIAS = 'metro_bias'
# Multiattach constants
IS_MULTIATTACH = 'multiattach'
OTHER_PARENT_SG = 'other_parent_sg_name'
FAST_SG = 'fast_managed_sg'
NO_SLO_SG = 'no_slo_sg'

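These keys carry the multiattach state between the masking layers: pre_multiattach stores them in the masking view dict so a failed attach can be unwound later. Roughly, with illustrative group names:

    mv_dict[utils.OTHER_PARENT_SG] = 'OS-HostX-I-OS-iscsi-PG'
    mv_dict[utils.FAST_SG] = 'OS-HostX-SRP_1-DiamondDSS-OS-iscsi-PG'
    mv_dict[utils.NO_SLO_SG] = 'OS-HostX-No_SLO-OS-fibre-PG'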
# Cinder.conf vmax configuration
VMAX_SERVER_IP = 'san_ip'
@ -824,3 +831,51 @@ class VMAXUtils(object):
LOG.debug("The temp rdf managed group name is %(name)s",
{'name': temp_grp_name})
return temp_grp_name

def get_child_sg_name(self, host_name, extra_specs):
"""Get the child storage group name for a masking view.

:param host_name: the short host name
:param extra_specs: the extra specifications
:return: child sg name, compression flag, rep flag, short pg name
"""
do_disable_compression = False
pg_name = self.get_pg_short_name(extra_specs[PORTGROUPNAME])
rep_enabled = self.is_replication_enabled(extra_specs)
if extra_specs[SLO]:
slo_wl_combo = self.truncate_string(
extra_specs[SLO] + extra_specs[WORKLOAD], 10)
unique_name = self.truncate_string(extra_specs[SRP], 12)
child_sg_name = (
"OS-%(shortHostName)s-%(srpName)s-%(combo)s-%(pg)s"
% {'shortHostName': host_name,
'srpName': unique_name,
'combo': slo_wl_combo,
'pg': pg_name})
do_disable_compression = self.is_compression_disabled(
extra_specs)
if do_disable_compression:
child_sg_name = ("%(child_sg_name)s-CD"
% {'child_sg_name': child_sg_name})
else:
child_sg_name = (
"OS-%(shortHostName)s-No_SLO-%(pg)s"
% {'shortHostName': host_name, 'pg': pg_name})
if rep_enabled:
rep_mode = extra_specs.get(REP_MODE, None)
child_sg_name += self.get_replication_prefix(rep_mode)
return child_sg_name, do_disable_compression, rep_enabled, pg_name

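A simplified re-implementation, for illustration only, of the names this produces; the real method also truncates the SLO/workload combination and SRP name and appends a replication suffix when needed:

    def child_sg_name(host, pg, slo=None, workload='', srp='', cd=False):
        if slo:
            name = "OS-%s-%s-%s%s-%s" % (host, srp, slo, workload, pg)
            return name + "-CD" if cd else name
        return "OS-%s-No_SLO-%s" % (host, pg)

    print(child_sg_name('HostX', 'OS-fibre-PG', 'Diamond', 'DSS', 'SRP_1'))
    # OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG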
@staticmethod
def change_multiattach(extra_specs, new_type_extra_specs):
"""Check if a change in multiattach is required for retype.

:param extra_specs: the source type extra specs
:param new_type_extra_specs: the target type extra specs
:return: bool
"""
is_src_multiattach = vol_utils.is_replicated_str(
extra_specs.get('multiattach'))
is_tgt_multiattach = vol_utils.is_replicated_str(
new_type_extra_specs.get('multiattach'))
return is_src_multiattach != is_tgt_multiattach

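A retype only needs multiattach handling when the flag differs between source and target types. vol_utils.is_replicated_str is reused here because the multiattach extra spec uses the same '<is> True' syntax. For example:

    from cinder.volume import utils as vol_utils

    src = {'multiattach': '<is> True'}
    tgt = {}  # target type does not set multiattach
    print(vol_utils.is_replicated_str(src.get('multiattach')) !=
          vol_utils.is_replicated_str(tgt.get('multiattach')))  # True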
@ -0,0 +1,4 @@
---
features:
- Dell EMC VMAX driver has added multiattach support.