From 22eb9b69c1c7ee11ab5cfdec4957ce7b86ccbf14 Mon Sep 17 00:00:00 2001 From: Helen Walsh Date: Thu, 18 May 2017 16:45:46 +0100 Subject: [PATCH] VMAX driver - Replication, replacing SMI-S with REST In VMAX driver version 3.0, SMI-S has been replaced with unisphere REST. This is porting Replication V2.1 from SMIS to REST. See original https://review.openstack.org/#/c/409079/ for more details Change-Id: I9cb8d931bd40cb34429f228f1723bb162a75443f Partially-Implements: blueprint vmax-rest --- .../volume/drivers/dell_emc/vmax/test_vmax.py | 1099 ++++++++++++++--- cinder/volume/drivers/dell_emc/vmax/common.py | 857 ++++++++++++- cinder/volume/drivers/dell_emc/vmax/fc.py | 31 +- cinder/volume/drivers/dell_emc/vmax/iscsi.py | 31 +- .../volume/drivers/dell_emc/vmax/masking.py | 29 +- .../volume/drivers/dell_emc/vmax/provision.py | 62 +- cinder/volume/drivers/dell_emc/vmax/rest.py | 286 ++++- cinder/volume/drivers/dell_emc/vmax/utils.py | 71 +- ...max-rest-replication-612fcfd136cc076e.yaml | 4 + 9 files changed, 2136 insertions(+), 334 deletions(-) create mode 100644 releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py index 1e2094bb229..5bd8a15b1c5 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py @@ -26,6 +26,7 @@ import six from cinder import context from cinder import exception +from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume @@ -46,6 +47,7 @@ class VMAXCommonData(object): # array info array = '000197800123' srp = 'SRP_1' + srp2 = 'SRP_2' slo = 'Diamond' workload = 'DSS' port_group_name_f = 'OS-fibre-PG' @@ -61,11 +63,17 @@ class VMAXCommonData(object): defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG' default_sg_no_slo = 'OS-no_SLO-SG' 
default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG' + default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG' failed_resource = 'OS-failed-resource' fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123' new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123' version = '3.0.0' volume_wwn = '600000345' + remote_array = '000197800124' + device_id = '00001' + device_id2 = '00002' + rdf_group_name = '23_24_007' + rdf_group_no = '70' # connector info wwpn1 = "123456789012345" @@ -116,13 +124,16 @@ class VMAXCommonData(object): # cinder volume info ctx = context.RequestContext('admin', 'fake', True) provider_location = {'array': six.text_type(array), - 'device_id': '00001'} + 'device_id': device_id} provider_location2 = {'array': six.text_type(array), - 'device_id': '00002'} + 'device_id': device_id2} + + provider_location3 = {'array': six.text_type(remote_array), + 'device_id': device_id2} snap_location = {'snap_name': '12345', - 'source_id': '00001'} + 'source_id': device_id} test_volume_type = fake_volume.fake_volume_type_obj( context=ctx @@ -131,8 +142,8 @@ class VMAXCommonData(object): test_volume = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, provider_location=six.text_type(provider_location), - volume_type=test_volume_type, - host=fake_host) + volume_type=test_volume_type, host=fake_host, + replication_driver_data=six.text_type(provider_location3)) test_clone_volume = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, @@ -159,6 +170,9 @@ class VMAXCommonData(object): vol_type_extra_specs_compr_disabled = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'storagetype:disablecompression': "true"} + vol_type_extra_specs_rep_enabled = { + 'pool_name': u'Diamond+DSS+SRP_1+000197800123', + 'replication_enabled': ' True'} extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'slo': slo, 'workload': workload, @@ -171,12 +185,19 @@ class VMAXCommonData(object): extra_specs_intervals_set 
= deepcopy(extra_specs) extra_specs_intervals_set['interval'] = 1 extra_specs_intervals_set['retries'] = 1 + extra_specs_rep_enabled = deepcopy(extra_specs) + extra_specs_rep_enabled['replication_enabled'] = True + rep_extra_specs = deepcopy(extra_specs_rep_enabled) + rep_extra_specs['array'] = remote_array + rep_extra_specs['interval'] = 0 + rep_extra_specs['retries'] = 0 + rep_extra_specs['srp'] = srp2 # masking view dict masking_view_dict = { 'array': array, 'connector': connector, - 'device_id': '00001', + 'device_id': device_id, 'init_group_name': initiatorgroup_name_f, 'initiator_check': False, 'maskingview_name': masking_view_name_f, @@ -187,39 +208,23 @@ class VMAXCommonData(object): 'slo': slo, 'storagegroup_name': storagegroup_name_f, 'volume_name': test_volume.name, - 'workload': workload} + 'workload': workload, + 'replication_enabled': False} - masking_view_dict_no_slo = { - 'array': array, - 'connector': connector, - 'device_id': '00001', - 'init_group_name': initiatorgroup_name_f, - 'initiator_check': False, - 'maskingview_name': masking_view_name_f, - 'srp': srp, - 'storagetype:disablecompression': False, - 'port_group_name': port_group_name_f, - 'slo': None, - 'parent_sg_name': parent_sg_f, - 'storagegroup_name': 'OS-HostX-No_SLO-OS-fibre-PG', - 'volume_name': test_volume.name, - 'workload': None} + masking_view_dict_no_slo = deepcopy(masking_view_dict) + masking_view_dict_no_slo.update( + {'slo': None, 'workload': None, + 'storagegroup_name': 'OS-HostX-No_SLO-OS-fibre-PG'}) - masking_view_dict_compression_disabled = { - 'array': array, - 'connector': connector, - 'device_id': '00001', - 'init_group_name': initiatorgroup_name_f, - 'initiator_check': False, - 'maskingview_name': masking_view_name_f, - 'srp': srp, - 'storagetype:disablecompression': True, - 'port_group_name': port_group_name_f, - 'slo': slo, - 'parent_sg_name': parent_sg_f, - 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD', - 'volume_name': test_volume['name'], - 
'workload': workload} + masking_view_dict_compression_disabled = deepcopy(masking_view_dict) + masking_view_dict_compression_disabled.update( + {'storagetype:disablecompression': True, + 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD'}) + + masking_view_dict_replication_enabled = deepcopy(masking_view_dict) + masking_view_dict_replication_enabled.update( + {'replication_enabled': True, + 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'}) # vmax data # sloprovisioning @@ -332,7 +337,7 @@ class VMAXCommonData(object): volume_details = [{"cap_gb": 2, "num_of_storage_groups": 1, - "volumeId": "00001", + "volumeId": device_id, "volume_identifier": "1", "wwn": volume_wwn, "snapvx_target": 'false', @@ -341,17 +346,17 @@ class VMAXCommonData(object): storagegroup_name_f]}, {"cap_gb": 1, "num_of_storage_groups": 1, - "volumeId": "00002", + "volumeId": device_id2, "volume_identifier": "OS-2", "wwn": '600012345', "storageGroupId": [defaultstoragegroup_name, storagegroup_name_f]}] volume_list = [ - {"resultList": {"result": [{"volumeId": "00001"}]}}, - {"resultList": {"result": [{"volumeId": "00002"}]}}, - {"resultList": {"result": [{"volumeId": "00001"}, - {"volumeId": "00002"}]}}] + {"resultList": {"result": [{"volumeId": device_id}]}}, + {"resultList": {"result": [{"volumeId": device_id2}]}}, + {"resultList": {"result": [{"volumeId": device_id}, + {"volumeId": device_id2}]}}] private_vol_details = { "resultList": { @@ -361,13 +366,13 @@ class VMAXCommonData(object): {"srcSnapshotGenInfo": [ {"snapshotHeader": { "snapshotName": "temp-1", - "device": "00001"}, + "device": device_id}, "lnkSnapshotGenInfo": [ - {"targetDevice": "00002"}]}]}, + {"targetDevice": device_id2}]}]}, {"tgtSrcSnapshotGenInfo": { "snapshotName": "temp-1", - "targetDevice": "00002", - "sourceDevice": "00001"}}], + "targetDevice": device_id2, + "sourceDevice": device_id}}], "snapVXSrc": 'true', "snapVXTgt": 'true'}}]}} @@ -380,7 +385,7 @@ class VMAXCommonData(object): 
"snapshotSrcs": [ {"generation": 0, "linkedDevices": [ - {"targetDevice": "00002", + {"targetDevice": device_id2, "percentageCopied": 100, "state": "Copied", "copy": True, @@ -395,6 +400,27 @@ class VMAXCommonData(object): "snapVxCapable": True, "rdfCapable": True}]} + rdf_group_list = {"rdfGroupID": [{"rdfgNumber": rdf_group_no, + "label": rdf_group_name}]} + rdf_group_details = {"modes": ["Synchronous"], + "remoteSymmetrix": remote_array, + "label": rdf_group_name, + "type": "Dynamic", + "numDevices": 1, + "remoteRdfgNumber": rdf_group_no, + "rdfgNumber": rdf_group_no} + rdf_group_vol_details = {"remoteRdfGroupNumber": rdf_group_no, + "localSymmetrixId": array, + "volumeConfig": "RDF1+TDEV", + "localRdfGroupNumber": rdf_group_no, + "localVolumeName": device_id, + "rdfpairState": "Synchronized", + "remoteVolumeName": device_id2, + "localVolumeState": "Ready", + "rdfMode": "Synchronous", + "remoteVolumeState": "Write Disabled", + "remoteSymmetrixId": remote_array} + # system job_list = [{"status": "SUCCEEDED", "jobId": "12345", @@ -558,7 +584,14 @@ class FakeRequestsSession(object): def _replication(self, url): return_object = None - if 'volume' in url: + if 'rdf_group' in url: + if self.data.device_id in url: + return_object = self.data.rdf_group_vol_details + elif self.data.rdf_group_no in url: + return_object = self.data.rdf_group_details + else: + return_object = self.data.rdf_group_list + elif 'snapshot' in url: return_object = self.data.volume_snap_vx elif 'capabilities' in url: return_object = self.data.capabilities @@ -606,12 +639,14 @@ class FakeRequestsSession(object): class FakeConfiguration(object): def __init__(self, emc_file=None, volume_backend_name=None, - intervals=0, retries=0): + interval=0, retries=0, replication_device=None): self.cinder_dell_emc_config_file = emc_file - self.intervals = intervals + self.interval = interval self.retries = retries self.volume_backend_name = volume_backend_name self.config_group = volume_backend_name + if 
replication_device: + self.replication_device = [replication_device] def safe_get(self, key): try: @@ -913,7 +948,7 @@ class VMAXUtilsTest(test.TestCase): def test_get_temp_snap_name(self): clone_name = "12345" - source_device_id = "00001" + source_device_id = self.data.device_id ref_name = "temp-00001-12345" snap_name = self.utils.get_temp_snap_name( clone_name, source_device_id) @@ -952,7 +987,7 @@ class VMAXUtilsTest(test.TestCase): self.assertTrue(do_disable_compression) def test_is_compression_disabled_false(self): - # Path 1: no compressiion extra spec set + # Path 1: no compression extra spec set extra_specs = self.data.extra_specs do_disable_compression = self.utils.is_compression_disabled( extra_specs) @@ -980,6 +1015,49 @@ class VMAXUtilsTest(test.TestCase): source_compr_disabled_true, new_type_compr_disabled) self.assertFalse(ans) + def test_is_replication_enabled(self): + is_re = self.utils.is_replication_enabled( + self.data.vol_type_extra_specs_rep_enabled) + self.assertTrue(is_re) + is_re2 = self.utils.is_replication_enabled(self.data.extra_specs) + self.assertFalse(is_re2) + + def test_get_replication_config(self): + # Success, allow_extend false + rep_device_list1 = [{'target_device_id': self.data.remote_array, + 'remote_pool': self.data.srp, + 'remote_port_group': self.data.port_group_name_f, + 'rdf_group_label': self.data.rdf_group_name}] + rep_config1 = self.utils.get_replication_config(rep_device_list1) + self.assertEqual(self.data.remote_array, rep_config1['array']) + # Success, allow_extend true + rep_device_list2 = [{'target_device_id': self.data.remote_array, + 'remote_pool': self.data.srp, + 'rdf_group_label': self.data.rdf_group_name, + 'remote_port_group': self.data.port_group_name_f, + 'allow_extend': 'true'}] + rep_config2 = self.utils.get_replication_config(rep_device_list2) + self.assertTrue(rep_config2['allow_extend']) + # No rep_device_list + rep_device_list3 = [] + rep_config3 = self.utils.get_replication_config(rep_device_list3) 
+ self.assertIsNone(rep_config3) + # Exception + rep_device_list4 = [{'target_device_id': self.data.remote_array, + 'remote_pool': self.data.srp}] + self.assertRaises(exception.VolumeBackendAPIException, + self.utils.get_replication_config, rep_device_list4) + + def test_is_volume_failed_over(self): + vol = deepcopy(self.data.test_volume) + vol.replication_status = fields.ReplicationStatus.FAILED_OVER + is_fo1 = self.utils.is_volume_failed_over(vol) + self.assertTrue(is_fo1) + is_fo2 = self.utils.is_volume_failed_over(self.data.test_volume) + self.assertFalse(is_fo2) + is_fo3 = self.utils.is_volume_failed_over(None) + self.assertFalse(is_fo3) + class VMAXRestTest(test.TestCase): def setUp(self): @@ -1303,7 +1381,7 @@ class VMAXRestTest(test.TestCase): status_code = 202 message = self.data.job_list[0] with mock.patch.object(self.rest, 'wait_for_job'): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id self.rest.add_vol_to_sg( self.data.array, self.data.storagegroup_name_f, device_id, self.data.extra_specs) @@ -1311,7 +1389,7 @@ class VMAXRestTest(test.TestCase): operation, status_code, message, self.data.extra_specs) def test_add_vol_to_sg_failed(self): - device_id = [self.data.volume_details[0]['volumeId']] + device_id = [self.data.device_id] self.assertRaises( exception.VolumeBackendAPIException, self.rest.add_vol_to_sg, self.data.array, @@ -1323,7 +1401,7 @@ class VMAXRestTest(test.TestCase): status_code = 202 message = self.data.job_list[0] with mock.patch.object(self.rest, 'wait_for_job'): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id self.rest.remove_vol_from_sg( self.data.array, self.data.storagegroup_name_f, device_id, self.data.extra_specs) @@ -1396,24 +1474,24 @@ class VMAXRestTest(test.TestCase): self.data.array, self.data.parent_sg_f, payload) def test_get_volume_list(self): - ref_volumes = ['00001', '00002'] + ref_volumes = [self.data.device_id, self.data.device_id2] volumes 
= self.rest.get_volume_list(self.data.array, {}) self.assertEqual(ref_volumes, volumes) def test_get_volume(self): ref_volumes = self.data.volume_details[0] - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volumes = self.rest.get_volume(self.data.array, device_id) self.assertEqual(ref_volumes, volumes) def test_get_private_volume(self): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_volume = self.data.private_vol_details['resultList']['result'][0] volume = self.rest._get_private_volume(self.data.array, device_id) self.assertEqual(ref_volume, volume) def test_get_private_volume_exception(self): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id with mock.patch.object(self.rest, 'get_resource', return_value={}): self.assertRaises(exception.VolumeBackendAPIException, @@ -1422,7 +1500,7 @@ class VMAXRestTest(test.TestCase): def test_modify_volume_success(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id payload = {'someKey': 'someValue'} with mock.patch.object(self.rest, 'modify_resource'): self.rest._modify_volume(array, device_id, payload) @@ -1432,14 +1510,14 @@ class VMAXRestTest(test.TestCase): def test_modify_volume_failed(self): payload = {'someKey': self.data.failed_resource} - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id self.assertRaises( exception.VolumeBackendAPIException, self.rest._modify_volume, self.data.array, device_id, payload) def test_extend_volume(self): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id new_size = '3' extend_vol_payload = {"executionOption": "ASYNCHRONOUS", "editVolumeActionParam": { @@ -1455,7 +1533,7 @@ class VMAXRestTest(test.TestCase): self.data.array, device_id, extend_vol_payload) def test_delete_volume(self): - device_id = 
self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id deallocate_vol_payload = {"editVolumeActionParam": { "freeVolumeParam": {"free_volume": 'true'}}} with mock.patch.object(self.rest, 'delete_resource'): @@ -1467,7 +1545,7 @@ class VMAXRestTest(test.TestCase): self.data.array, 'sloprovisioning', 'volume', device_id) def test_rename_volume(self): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id payload = {"editVolumeActionParam": { "modifyVolumeIdentifierParam": { "volumeIdentifier": { @@ -1479,7 +1557,7 @@ class VMAXRestTest(test.TestCase): self.data.array, device_id, payload) def test_find_mv_connections_for_vol(self): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_lun_id = int((self.data.maskingview[0]['maskingViewConnection'] [0]['host_lun_address']), 16) host_lun_id = self.rest.find_mv_connections_for_vol( @@ -1501,7 +1579,7 @@ class VMAXRestTest(test.TestCase): def test_get_storage_groups_from_volume(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_list = self.data.volume_details[0]['storageGroupId'] sg_list = self.rest.get_storage_groups_from_volume(array, device_id) self.assertEqual(ref_list, sg_list) @@ -1521,7 +1599,7 @@ class VMAXRestTest(test.TestCase): def test_is_volume_in_storagegroup(self): # True array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id storagegroup = self.data.defaultstoragegroup_name is_vol1 = self.rest.is_volume_in_storagegroup( array, device_id, storagegroup) @@ -1536,7 +1614,7 @@ class VMAXRestTest(test.TestCase): def test_find_volume_device_number(self): array = self.data.array volume_name = self.data.volume_details[0]['volume_identifier'] - ref_device = self.data.volume_details[0]['volumeId'] + ref_device = self.data.device_id device_number = self.rest.find_volume_device_id(array, volume_name) 
self.assertEqual(ref_device, device_number) @@ -1550,7 +1628,7 @@ class VMAXRestTest(test.TestCase): def test_get_volume_success(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_volume = self.data.volume_details[0] volume = self.rest.get_volume(array, device_id) self.assertEqual(ref_volume, volume) @@ -1564,21 +1642,21 @@ class VMAXRestTest(test.TestCase): def test_find_volume_identifier(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_name = self.data.volume_details[0]['volume_identifier'] vol_name = self.rest.find_volume_identifier(array, device_id) self.assertEqual(ref_name, vol_name) def test_get_volume_size(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_size = self.data.test_volume.size size = self.rest.get_size_of_device_on_array(array, device_id) self.assertEqual(ref_size, size) def test_get_volume_size_exception(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id with mock.patch.object(self.rest, 'get_volume', return_value=None): size = self.rest.get_size_of_device_on_array( @@ -1876,7 +1954,7 @@ class VMAXRestTest(test.TestCase): def test_create_volume_snap(self): snap_name = (self.data.volume_snap_vx ['snapshotSrcs'][0]['snapshotName']) - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id extra_specs = self.data.extra_specs payload = {"deviceNameListSource": [{"name": device_id}], "bothSides": 'false', "star": 'false', @@ -1892,7 +1970,7 @@ class VMAXRestTest(test.TestCase): def test_modify_volume_snap(self): array = self.data.array - source_id = self.data.volume_details[0]['volumeId'] + source_id = self.data.device_id target_id = (self.data.volume_snap_vx ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) snap_name = 
(self.data.volume_snap_vx @@ -1935,7 +2013,7 @@ class VMAXRestTest(test.TestCase): array = self.data.array snap_name = (self.data.volume_snap_vx ['snapshotSrcs'][0]['snapshotName']) - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id payload = {"deviceNameListSource": [{"name": source_device_id}]} with mock.patch.object(self.rest, 'delete_resource'): self.rest.delete_volume_snap(array, snap_name, source_device_id) @@ -1945,7 +2023,7 @@ class VMAXRestTest(test.TestCase): def test_get_volume_snap_info(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id ref_snap_info = self.data.volume_snap_vx snap_info = self.rest.get_volume_snap_info(array, source_device_id) self.assertEqual(ref_snap_info, snap_info) @@ -1954,7 +2032,7 @@ class VMAXRestTest(test.TestCase): array = self.data.array snap_name = (self.data.volume_snap_vx ['snapshotSrcs'][0]['snapshotName']) - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id ref_snap = self.data.volume_snap_vx['snapshotSrcs'][0] snap = self.rest.get_volume_snap(array, device_id, snap_name) self.assertEqual(ref_snap, snap) @@ -1963,7 +2041,7 @@ class VMAXRestTest(test.TestCase): array = self.data.array snap_name = (self.data.volume_snap_vx ['snapshotSrcs'][0]['snapshotName']) - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id with mock.patch.object(self.rest, 'get_volume_snap_info', return_value=None): snap = self.rest.get_volume_snap(array, device_id, snap_name) @@ -1975,7 +2053,7 @@ class VMAXRestTest(test.TestCase): def test_is_sync_complete(self): array = self.data.array - source_id = self.data.volume_details[0]['volumeId'] + source_id = self.data.device_id target_id = (self.data.volume_snap_vx ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) snap_name = (self.data.volume_snap_vx @@ -1987,7 +2065,7 @@ class 
VMAXRestTest(test.TestCase): def test_is_sync_complete_exception(self): array = self.data.array - source_id = self.data.volume_details[0]['volumeId'] + source_id = self.data.device_id target_id = (self.data.volume_snap_vx ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) snap_name = (self.data.volume_snap_vx @@ -2002,35 +2080,35 @@ class VMAXRestTest(test.TestCase): def test_get_sync_session(self): array = self.data.array - source_id = self.data.volume_details[0]['volumeId'] + source_id = self.data.device_id target_id = (self.data.volume_snap_vx ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) snap_name = (self.data.volume_snap_vx ['snapshotSrcs'][0]['snapshotName']) ref_sync = (self.data.volume_snap_vx ['snapshotSrcs'][0]['linkedDevices'][0]) - sync = self.rest._get_sync_session( + sync = self.rest.get_sync_session( array, source_id, snap_name, target_id) self.assertEqual(ref_sync, sync) def test_find_snap_vx_sessions(self): array = self.data.array - source_id = self.data.volume_details[0]['volumeId'] + source_id = self.data.device_id ref_sessions = [{'snap_name': 'temp-1', - 'source_vol': '00001', - 'target_vol_list': ['00002']}, + 'source_vol': self.data.device_id, + 'target_vol_list': [self.data.device_id2]}, {'snap_name': 'temp-1', - 'source_vol': '00001', - 'target_vol_list': ['00002']}] + 'source_vol': self.data.device_id, + 'target_vol_list': [self.data.device_id2]}] sessions = self.rest.find_snap_vx_sessions(array, source_id) self.assertEqual(ref_sessions, sessions) def test_find_snap_vx_sessions_tgt_only(self): array = self.data.array - source_id = self.data.volume_details[0]['volumeId'] + source_id = self.data.device_id ref_sessions = [{'snap_name': 'temp-1', - 'source_vol': '00001', - 'target_vol_list': ['00002']}] + 'source_vol': self.data.device_id, + 'target_vol_list': [self.data.device_id2]}] sessions = self.rest.find_snap_vx_sessions( array, source_id, tgt_only=True) self.assertEqual(ref_sessions, sessions) @@ -2072,6 +2150,104 @@ 
class VMAXRestTest(test.TestCase): array, "OS-QOS-SG", extra_specs) self.assertFalse(return_value) + def test_get_rdf_group(self): + with mock.patch.object(self.rest, 'get_resource') as mock_get: + self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no) + mock_get.assert_called_once_with( + self.data.array, 'replication', 'rdf_group', + self.data.rdf_group_no) + + def test_get_rdf_group_list(self): + rdf_list = self.rest.get_rdf_group_list(self.data.array) + self.assertEqual(self.data.rdf_group_list, rdf_list) + + def test_get_rdf_group_volume(self): + with mock.patch.object(self.rest, 'get_resource') as mock_get: + self.rest.get_rdf_group_volume( + self.data.array, self.data.rdf_group_no, self.data.device_id) + mock_get.assert_called_once_with( + self.data.array, 'replication', 'rdf_group', "70/volume/00001") + + def test_are_vols_rdf_paired(self): + are_vols1, local_state, pair_state = self.rest.are_vols_rdf_paired( + self.data.array, self.data.remote_array, self.data.device_id, + self.data.device_id2, self.data.rdf_group_no) + self.assertTrue(are_vols1) + are_vols2, local_state, pair_state = self.rest.are_vols_rdf_paired( + self.data.array, "00012345", self.data.device_id, + self.data.device_id2, self.data.rdf_group_no) + self.assertFalse(are_vols2) + with mock.patch.object(self.rest, "get_rdf_group_volume", + return_value=None): + are_vols3, local, pair = self.rest.are_vols_rdf_paired( + self.data.array, self.data.remote_array, self.data.device_id, + self.data.device_id2, self.data.rdf_group_no) + self.assertFalse(are_vols3) + + def test_get_rdf_group_number(self): + rdfg_num = self.rest.get_rdf_group_number( + self.data.array, self.data.rdf_group_name) + self.assertEqual(self.data.rdf_group_no, rdfg_num) + with mock.patch.object(self.rest, 'get_rdf_group_list', + return_value=None): + rdfg_num2 = self.rest.get_rdf_group_number( + self.data.array, self.data.rdf_group_name) + self.assertIsNone(rdfg_num2) + with mock.patch.object(self.rest, 
'get_rdf_group', + return_value=None): + rdfg_num3 = self.rest.get_rdf_group_number( + self.data.array, self.data.rdf_group_name) + self.assertIsNone(rdfg_num3) + + def test_create_rdf_device_pair(self): + ref_dict = {'array': self.data.remote_array, + 'device_id': self.data.device_id2} + rdf_dict = self.rest.create_rdf_device_pair( + self.data.array, self.data.device_id, self.data.rdf_group_no, + self.data.device_id2, self.data.remote_array, "OS-2", + self.data.extra_specs) + self.assertEqual(ref_dict, rdf_dict) + + def test_modify_rdf_device_pair(self): + resource_name = "70/volume/00001" + common_opts = {"force": 'false', + "symForce": 'false', + "star": 'false', + "hop2": 'false', + "bypass": 'false'} + split_opts = deepcopy(common_opts) + split_opts.update({"immediate": 'false'}) + split_payload = {"action": "Split", + 'executionOption': 'ASYNCHRONOUS', + "split": split_opts} + + failover_opts = deepcopy(common_opts) + failover_opts.update({"establish": 'true', + "restore": 'false', + "remote": 'false', + "immediate": 'false'}) + failover_payload = {"action": "Failover", + 'executionOption': 'ASYNCHRONOUS', + "failover": failover_opts} + with mock.patch.object( + self.rest, "modify_resource", + return_value=(200, self.data.job_list[0])) as mock_mod: + self.rest.modify_rdf_device_pair( + self.data.array, self.data.device_id, self.data.rdf_group_no, + self.data.extra_specs, split=True) + mock_mod.assert_called_once_with( + self.data.array, 'replication', 'rdf_group', + split_payload, resource_name=resource_name, + private='/private') + mock_mod.reset_mock() + self.rest.modify_rdf_device_pair( + self.data.array, self.data.device_id, self.data.rdf_group_no, + self.data.extra_specs, split=False) + mock_mod.assert_called_once_with( + self.data.array, 'replication', 'rdf_group', + failover_payload, resource_name=resource_name, + private='/private') + class VMAXProvisionTest(test.TestCase): def setUp(self): @@ -2115,7 +2291,7 @@ class VMAXProvisionTest(test.TestCase): 
def test_delete_volume_from_srp(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volume_name = self.data.volume_details[0]['volume_identifier'] with mock.patch.object(self.provision.rest, 'delete_volume'): self.provision.delete_volume_from_srp( @@ -2125,7 +2301,7 @@ class VMAXProvisionTest(test.TestCase): def test_create_volume_snap_vx(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object(self.provision.rest, 'create_volume_snap'): @@ -2136,10 +2312,8 @@ class VMAXProvisionTest(test.TestCase): def test_create_volume_replica_create_snap_true(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] - target_device_id = ( - self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + source_device_id = self.data.device_id + target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object(self.provision, 'create_volume_snapvx'): @@ -2155,10 +2329,8 @@ class VMAXProvisionTest(test.TestCase): def test_create_volume_replica_create_snap_false(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] - target_device_id = ( - self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + source_device_id = self.data.device_id + target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object(self.provision, 'create_volume_snapvx'): @@ -2173,10 +2345,8 @@ class VMAXProvisionTest(test.TestCase): def test_break_replication_relationship_sync_wait_true(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] - 
target_device_id = ( - self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + source_device_id = self.data.device_id + target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object(self.provision.rest, 'modify_volume_snap'): @@ -2196,10 +2366,8 @@ class VMAXProvisionTest(test.TestCase): def test_break_replication_relationship_sync_wait_false(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] - target_device_id = ( - self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + source_device_id = self.data.device_id + target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object(self.provision.rest, 'modify_volume_snap'): @@ -2216,7 +2384,7 @@ class VMAXProvisionTest(test.TestCase): def test_delete_volume_snap(self): array = self.data.array - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] with mock.patch.object(self.provision.rest, 'delete_volume_snap'): self.provision.delete_volume_snap( @@ -2226,7 +2394,7 @@ class VMAXProvisionTest(test.TestCase): def test_extend_volume(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id new_size = '3' extra_specs = self.data.extra_specs with mock.patch.object(self.provision.rest, 'extend_volume'): @@ -2363,6 +2531,46 @@ class VMAXProvisionTest(test.TestCase): self.data.array, 'no_workload_sg')) self.assertEqual(ref_settings2, sg_slo_settings2) + def test_break_rdf_relationship(self): + array = self.data.array + device_id = self.data.device_id + target_device = self.data.device_id2 + rdf_group_name = self.data.rdf_group_name + rep_extra_specs = self.data.rep_extra_specs + with mock.patch.object( + 
self.provision.rest, 'modify_rdf_device_pair') as mod_rdf: + with mock.patch.object( + self.provision.rest, 'delete_rdf_pair') as del_rdf: + self.provision.break_rdf_relationship( + array, device_id, target_device, + rdf_group_name, rep_extra_specs, "Synchronized") + mod_rdf.assert_called_once_with( + array, device_id, rdf_group_name, rep_extra_specs, + split=True) + del_rdf.assert_called_once_with( + array, device_id, rdf_group_name) + + def test_failover_volume(self): + array = self.data.array + device_id = self.data.device_id + rdf_group_name = self.data.rdf_group_name + extra_specs = self.data.extra_specs + with mock.patch.object( + self.provision.rest, 'modify_rdf_device_pair') as mod_rdf: + self.provision.failover_volume( + array, device_id, rdf_group_name, + extra_specs, '', True) + mod_rdf.assert_called_once_with( + array, device_id, rdf_group_name, extra_specs, + split=False) + mod_rdf.reset_mock() + self.provision.failover_volume( + array, device_id, rdf_group_name, + extra_specs, '', False) + mod_rdf.assert_called_once_with( + array, device_id, rdf_group_name, extra_specs, + split=False) + class VMAXCommonTest(test.TestCase): def setUp(self): @@ -2466,7 +2674,7 @@ class VMAXCommonTest(test.TestCase): def test_remove_members(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs @@ -2477,7 +2685,7 @@ class VMAXCommonTest(test.TestCase): def test_unmap_lun(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volume = self.data.test_volume extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs['port_group_name'] = self.data.port_group_name_f @@ -2563,7 +2771,7 @@ class VMAXCommonTest(test.TestCase): self.common._attach_volume, volume, connector, extra_specs, masking_view_dict) - device_id = 
self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id (self.masking. check_if_rollback_action_for_masking_required. assert_called_once_with(self.data.array, device_id, {})) @@ -2579,7 +2787,7 @@ class VMAXCommonTest(test.TestCase): def test_extend_volume_success(self): volume = self.data.test_volume array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id new_size = self.data.test_volume.size ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs['port_group_name'] = self.data.port_group_name_f @@ -2642,7 +2850,7 @@ class VMAXCommonTest(test.TestCase): def test_find_device_on_array_success(self): volume = self.data.test_volume extra_specs = self.data.extra_specs - ref_device_id = self.data.volume_details[0]['volumeId'] + ref_device_id = self.data.device_id founddevice_id = self.common._find_device_on_array(volume, extra_specs) self.assertEqual(ref_device_id, founddevice_id) @@ -2689,7 +2897,7 @@ class VMAXCommonTest(test.TestCase): def test_get_masking_views_from_volume(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id host = 'HostX' ref_mv_list = [self.data.masking_view_name_f] maskingview_list = self.common.get_masking_views_from_volume( @@ -2698,7 +2906,7 @@ class VMAXCommonTest(test.TestCase): def test_get_masking_views_from_volume_wrong_host(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id host = 'DifferentHost' maskingview_list = self.common.get_masking_views_from_volume( array, device_id, host) @@ -2803,7 +3011,7 @@ class VMAXCommonTest(test.TestCase): volume, source_volume, extra_specs) def test_parse_snap_info_found(self): - ref_device_id = self.data.volume_details[0]['volumeId'] + ref_device_id = self.data.device_id ref_snap_name = self.data.snap_location['snap_name'] sourcedevice_id, foundsnap_name = 
self.common._parse_snap_info( self.data.array, self.data.test_snapshot) @@ -2836,9 +3044,9 @@ class VMAXCommonTest(test.TestCase): def test_create_snapshot_success(self): array = self.data.array snapshot = self.data.test_snapshot - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id extra_specs = self.data.extra_specs - ref_dict = {'snap_name': '12345', 'source_id': '00001'} + ref_dict = {'snap_name': '12345', 'source_id': self.data.device_id} snap_dict = self.common._create_snapshot( array, snapshot, source_device_id, extra_specs) self.assertEqual(ref_dict, snap_dict) @@ -2846,7 +3054,7 @@ class VMAXCommonTest(test.TestCase): def test_create_snapshot_exception(self): array = self.data.array snapshot = self.data.test_snapshot - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id extra_specs = self.data.extra_specs with mock.patch.object( self.provision, 'create_volume_snapvx', @@ -2855,9 +3063,10 @@ class VMAXCommonTest(test.TestCase): self.common._create_snapshot, array, snapshot, source_device_id, extra_specs) - def test_delete_volume_from_srp(self): + @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') + def test_delete_volume_from_srp(self, mock_rm): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volume_name = self.data.test_volume.name ref_extra_specs = self.data.extra_specs_intervals_set ref_extra_specs['port_group_name'] = self.data.port_group_name_f @@ -2975,29 +3184,50 @@ class VMAXCommonTest(test.TestCase): def test_delete_volume_from_srp_success(self): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs - with mock.patch.object(self.masking, 'remove_and_reset_members'): + with mock.patch.object( + self.provision, 'delete_volume_from_srp') as 
mock_del: self.common._delete_from_srp(array, device_id, volume_name, extra_specs) - self.masking.remove_and_reset_members.assert_called_once_with( - array, device_id, volume_name, extra_specs, False) + mock_del.assert_called_once_with(array, device_id, volume_name) def test_delete_volume_from_srp_failed(self): array = self.data.array device_id = self.data.failed_resource volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs - with mock.patch.object(self.masking, 'remove_and_reset_members'): - with mock.patch.object(self.masking, - 'return_volume_to_default_storage_group'): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._delete_from_srp, array, - device_id, volume_name, extra_specs) - (self.masking.return_volume_to_default_storage_group. - assert_called_once_with( - array, device_id, volume_name, extra_specs)) + with mock.patch.object(self.masking, + 'add_volume_to_default_storage_group'): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._delete_from_srp, array, + device_id, volume_name, extra_specs) + (self.masking.add_volume_to_default_storage_group. 
+ assert_called_once_with( + array, device_id, volume_name, extra_specs)) + + @mock.patch.object(utils.VMAXUtils, 'is_replication_enabled', + side_effect=[False, True]) + def test_remove_vol_and_cleanup_replication(self, mock_rep_enabled): + array = self.data.array + device_id = self.data.device_id + volume = self.data.test_volume + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object( + self.masking, 'remove_and_reset_members') as mock_rm: + with mock.patch.object( + self.common, 'cleanup_lun_replication') as mock_clean: + self.common._remove_vol_and_cleanup_replication( + array, device_id, volume_name, extra_specs) + mock_rm.assert_called_once_with( + array, device_id, volume_name, extra_specs, False) + mock_clean.assert_not_called() + self.common._remove_vol_and_cleanup_replication( + array, device_id, volume_name, extra_specs, volume) + mock_clean.assert_called_once_with( + volume, volume_name, device_id, extra_specs) def test_get_target_wwns_from_masking_view(self): target_wwns = self.common.get_target_wwns_from_masking_view( @@ -3059,7 +3289,7 @@ class VMAXCommonTest(test.TestCase): def test_create_replica_snap_name(self): array = self.data.array clone_volume = self.data.test_clone_volume - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] ref_dict = self.data.provider_location clone_dict = self.common._create_replica( @@ -3070,7 +3300,7 @@ class VMAXCommonTest(test.TestCase): def test_create_replica_no_snap_name(self): array = self.data.array clone_volume = self.data.test_clone_volume - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id snap_name = "temp-" + source_device_id + clone_volume.id ref_dict = self.data.provider_location with mock.patch.object(self.utils, 'get_temp_snap_name', @@ -3085,7 +3315,7 @@ class VMAXCommonTest(test.TestCase): def 
test_create_replica_failed_cleanup_target(self): array = self.data.array clone_volume = self.data.test_clone_volume - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id snap_name = self.data.failed_resource clone_name = 'OS-' + clone_volume.id extra_specs = self.data.extra_specs @@ -3101,7 +3331,7 @@ class VMAXCommonTest(test.TestCase): def test_create_replica_failed_no_target(self): array = self.data.array clone_volume = self.data.test_clone_volume - source_device_id = self.data.volume_details[0]['volumeId'] + source_device_id = self.data.device_id snap_name = self.data.failed_resource with mock.patch.object(self.common, '_create_volume', return_value={'device_id': None}): @@ -3118,12 +3348,12 @@ class VMAXCommonTest(test.TestCase): def test_cleanup_target_sync_present(self, mock_remove): array = self.data.array clone_volume = self.data.test_clone_volume - source_device_id = self.data.volume_details[0]['volumeId'] - target_device_id = self.data.volume_details[1]['volumeId'] + source_device_id = self.data.device_id + target_device_id = self.data.device_id2 snap_name = self.data.failed_resource clone_name = clone_volume.name extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, '_get_sync_session', + with mock.patch.object(self.rest, 'get_sync_session', return_value='session'): with mock.patch.object(self.provision, 'break_replication_relationship'): @@ -3131,19 +3361,19 @@ class VMAXCommonTest(test.TestCase): array, target_device_id, source_device_id, clone_name, snap_name, extra_specs) (self.provision.break_replication_relationship. 
- assert_called_once_with( + assert_called_with( array, target_device_id, source_device_id, snap_name, extra_specs)) def test_cleanup_target_no_sync(self): array = self.data.array clone_volume = self.data.test_clone_volume - source_device_id = self.data.volume_details[0]['volumeId'] - target_device_id = self.data.volume_details[1]['volumeId'] + source_device_id = self.data.device_id + target_device_id = self.data.device_id2 snap_name = self.data.failed_resource clone_name = clone_volume.name extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, '_get_sync_session', + with mock.patch.object(self.rest, 'get_sync_session', return_value=None): with mock.patch.object(self.common, '_delete_from_srp'): @@ -3162,7 +3392,7 @@ class VMAXCommonTest(test.TestCase): 'break_replication_relationship') def test_sync_check_temp_snap(self, mock_break, mock_delete): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id target = self.data.volume_details[1]['volumeId'] volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs @@ -3185,7 +3415,7 @@ class VMAXCommonTest(test.TestCase): 'break_replication_relationship') def test_sync_check_not_temp_snap(self, mock_break, mock_delete): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id target = self.data.volume_details[1]['volumeId'] volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs @@ -3207,7 +3437,7 @@ class VMAXCommonTest(test.TestCase): 'break_replication_relationship') def test_sync_check_no_sessions(self, mock_break): array = self.data.array - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'find_snap_vx_sessions', @@ -3218,11 +3448,8 @@ class VMAXCommonTest(test.TestCase): def 
test_manage_existing_success(self): external_ref = {u'source-name': u'00002'} - volume_name = self.utils.get_volume_element_name( - self.data.test_volume.id) provider_location = {'device_id': u'00002', 'array': u'000197800123'} - ref_update = {'provider_location': six.text_type(provider_location), - 'display_name': volume_name} + ref_update = {'provider_location': six.text_type(provider_location)} with mock.patch.object( self.common, '_check_lun_valid_for_cinder_management'): model_update = self.common.manage_existing( @@ -3273,12 +3500,23 @@ class VMAXCommonTest(test.TestCase): self.data.test_volume, external_ref) self.assertEqual(2, size) - def test_unmanage_success(self): + def test_manage_existing_get_size_exception(self): + external_ref = {u'source-name': u'00001'} + with mock.patch.object(self.rest, 'get_size_of_device_on_array', + return_value=3.5): + self.assertRaises(exception.ManageExistingInvalidReference, + self.common.manage_existing_get_size, + self.data.test_volume, external_ref) + + @mock.patch.object(common.VMAXCommon, + '_remove_vol_and_cleanup_replication') + def test_unmanage_success(self, mock_rm): volume = self.data.test_volume with mock.patch.object(self.rest, 'rename_volume'): self.common.unmanage(volume) self.rest.rename_volume.assert_called_once_with( - self.data.array, '00001', self.data.test_volume.id) + self.data.array, self.data.device_id, + self.data.test_volume.id) def test_unmanage_device_not_found(self): volume = self.data.test_volume @@ -3291,8 +3529,8 @@ class VMAXCommonTest(test.TestCase): @mock.patch.object(common.VMAXCommon, '_slo_workload_migration') def test_retype(self, mock_migrate): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs_intervals_set extra_specs['port_group_name'] = self.data.port_group_name_f volume = self.data.test_volume @@ -3308,8 +3546,8 @@ 
class VMAXCommonTest(test.TestCase): mock_migrate.assert_not_called() def test_slo_workload_migration_valid(self): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs new_type = {'extra_specs': {}} volume = self.data.test_volume @@ -3323,8 +3561,8 @@ class VMAXCommonTest(test.TestCase): 'OLTP', volume_name, new_type, extra_specs) def test_slo_workload_migration_not_valid(self): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs volume = self.data.test_volume new_type = {'extra_specs': {}} @@ -3337,8 +3575,8 @@ class VMAXCommonTest(test.TestCase): self.assertFalse(migrate_status) def test_slo_workload_migration_same_hosts(self): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs volume = self.data.test_volume host = {'host': self.data.fake_host} @@ -3348,8 +3586,8 @@ class VMAXCommonTest(test.TestCase): self.assertFalse(migrate_status) def test_slo_workload_migration_same_host_change_compression(self): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs volume = self.data.test_volume host = {'host': self.data.fake_host} @@ -3371,8 +3609,8 @@ class VMAXCommonTest(test.TestCase): def test_migrate_volume_success(self, mock_remove): with mock.patch.object(self.rest, 'is_volume_in_storagegroup', return_value=True): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = 
self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs new_type = {'extra_specs': {}} migrate_status = self.common._migrate_volume( @@ -3393,8 +3631,8 @@ class VMAXCommonTest(test.TestCase): @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') def test_migrate_volume_failed_get_new_sg_failed(self, mock_remove): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs new_type = {'extra_specs': {}} with mock.patch.object( @@ -3406,8 +3644,8 @@ class VMAXCommonTest(test.TestCase): self.assertFalse(migrate_status) def test_migrate_volume_failed_vol_not_added(self): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs new_type = {'extra_specs': {}} with mock.patch.object( @@ -3419,9 +3657,9 @@ class VMAXCommonTest(test.TestCase): self.assertFalse(migrate_status) def test_is_valid_for_storage_assisted_migration_true(self): - device_id = self.data.volume_details[0]['volumeId'] + device_id = self.data.device_id host = {'host': self.data.new_host} - volume_name = self.data.test_volume['name'] + volume_name = self.data.test_volume.name ref_return = (True, 'Silver', 'OLTP') return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, @@ -3436,8 +3674,8 @@ class VMAXCommonTest(test.TestCase): self.assertEqual(ref_return, return_val) def test_is_valid_for_storage_assisted_migration_false(self): - device_id = self.data.volume_details[0]['volumeId'] - volume_name = self.data.test_volume['name'] + device_id = self.data.device_id + volume_name = self.data.test_volume.name ref_return = (False, None, None) # IndexError host = {'host': 
'HostX@Backend#Silver+SRP_1+000197800123'} @@ -3684,6 +3922,14 @@ class VMAXFCTest(test.TestCase): self.common.retype.assert_called_once_with( self.data.test_volume, new_type, host) + def test_failover_host(self): + with mock.patch.object( + self.common, 'failover_host', + return_value=(self.data.remote_array, [], [])) as mock_fo: + self.driver.failover_host(self.data.ctx, [self.data.test_volume]) + mock_fo.assert_called_once_with([self.data.test_volume], None, + None) + class VMAXISCSITest(test.TestCase): def setUp(self): @@ -3915,6 +4161,13 @@ class VMAXISCSITest(test.TestCase): self.common.retype.assert_called_once_with( self.data.test_volume, new_type, host) + def test_failover_host(self): + with mock.patch.object(self.common, 'failover_host', + return_value={}) as mock_fo: + self.driver.failover_host({}, [self.data.test_volume]) + mock_fo.assert_called_once_with([self.data.test_volume], None, + None) + class VMAXMaskingTest(test.TestCase): def setUp(self): @@ -3939,7 +4192,7 @@ class VMAXMaskingTest(test.TestCase): self.maskingviewdict = self.driver._populate_masking_dict( self.data.test_volume, self.data.connector, self.extra_specs) self.maskingviewdict['extra_specs'] = self.extra_specs - self.device_id = self.data.volume_details[0]['volumeId'] + self.device_id = self.data.device_id self.volume_name = self.data.volume_details[0]['volume_identifier'] def tearDown(self): @@ -3992,7 +4245,7 @@ class VMAXMaskingTest(test.TestCase): @mock.patch.object( masking.VMAXMasking, - 'remove_volume_from_sg') + 'remove_vol_from_storage_group') @mock.patch.object( rest.VMAXRest, 'is_volume_in_storagegroup', @@ -4269,13 +4522,13 @@ class VMAXMaskingTest(test.TestCase): with mock.patch.object( rest.VMAXRest, 'is_volume_in_storagegroup', side_effect=[False, True]): - self.driver.masking._remove_vol_from_storage_group( + self.driver.masking.remove_vol_from_storage_group( self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs) 
mock_remove_volume.assert_called_once() self.assertRaises( exception.VolumeBackendAPIException, - self.driver.masking._remove_vol_from_storage_group, + self.driver.masking.remove_vol_from_storage_group, self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs) @@ -4422,7 +4675,7 @@ class VMAXMaskingTest(test.TestCase): @mock.patch.object(masking.VMAXMasking, '_cleanup_deletion') @mock.patch.object(masking.VMAXMasking, - 'return_volume_to_default_storage_group') + 'add_volume_to_default_storage_group') def test_remove_and_reset_members(self, mock_ret_to_sg, mock_cleanup): self.mask.remove_and_reset_members(self.data.array, self.device_id, self.volume_name, self.extra_specs, @@ -4529,7 +4782,7 @@ class VMAXMaskingTest(test.TestCase): self.assertEqual(1, mock_remove.call_count) @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg') - @mock.patch.object(masking.VMAXMasking, '_remove_vol_from_storage_group') + @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') def test_multiple_vols_in_sg(self, mock_remove_vol, mock_get_volumes): self.mask._multiple_vols_in_sg( self.data.array, self.device_id, self.data.storagegroup_name_i, @@ -4557,9 +4810,9 @@ class VMAXMaskingTest(test.TestCase): @mock.patch.object(masking.VMAXMasking, 'get_or_create_default_storage_group') @mock.patch.object(masking.VMAXMasking, 'add_volume_to_storage_group') - def test_return_volume_to_default_storage_group(self, mock_add_sg, - mock_get_sg): - self.mask.return_volume_to_default_storage_group( + def test_add_volume_to_default_storage_group( + self, mock_add_sg, mock_get_sg): + self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, self.extra_specs) mock_add_sg.assert_called_once() @@ -4586,7 +4839,7 @@ class VMAXMaskingTest(test.TestCase): self.data.workload, self.extra_specs) @mock.patch.object(rest.VMAXRest, 'delete_storage_group') - @mock.patch.object(masking.VMAXMasking, 
'_remove_vol_from_storage_group') + @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') def test_remove_last_vol_and_delete_sg(self, mock_delete_sg, mock_vol_sg): self.mask._remove_last_vol_and_delete_sg( self.data.array, self.device_id, self.volume_name, @@ -4697,3 +4950,475 @@ class VMAXMaskingTest(test.TestCase): self.data.array, self.data.masking_view_name_f, self.data.parent_sg_f) self.assertEqual(2, mock_delete.call_count) + + +class VMAXCommonReplicationTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXCommonReplicationTest, self).setUp() + config_group = 'CommonReplicationTests' + self.fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_f) + self.replication_device = { + 'target_device_id': self.data.remote_array, + 'remote_port_group': self.data.port_group_name_f, + 'remote_pool': self.data.srp2, + 'rdf_group_label': self.data.rdf_group_name, + 'allow_extend': 'True'} + configuration = FakeConfiguration( + self.fake_xml, config_group, + replication_device=self.replication_device) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = fc.VMAXFCDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.masking = self.common.masking + self.provision = self.common.provision + self.rest = self.common.rest + self.utils = self.common.utils + self.utils.get_volumetype_extra_specs = ( + mock.Mock( + return_value=self.data.vol_type_extra_specs_rep_enabled)) + self.extra_specs = deepcopy(self.data.extra_specs_rep_enabled) + self.extra_specs['retries'] = 0 + self.extra_specs['interval'] = 0 + + def test_get_replication_info(self): + self.common._get_replication_info() + self.assertTrue(self.common.replication_enabled) + + def test_create_replicated_volume(self): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + vol_identifier = 
self.utils.get_volume_element_name( + self.data.test_volume.id) + with mock.patch.object(self.common, '_replicate_volume', + return_value={}) as mock_rep: + self.common.create_volume(self.data.test_volume) + volume_dict = self.data.provider_location + mock_rep.assert_called_once_with( + self.data.test_volume, vol_identifier, volume_dict, + extra_specs) + + def test_create_cloned_replicated_volume(self): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + with mock.patch.object(self.common, '_replicate_volume', + return_value={}) as mock_rep: + self.common.create_cloned_volume( + self.data.test_clone_volume, self.data.test_volume) + volume_dict = self.data.provider_location + mock_rep.assert_called_once_with( + self.data.test_clone_volume, + self.data.test_clone_volume.name, volume_dict, extra_specs) + + def test_create_replicated_volume_from_snap(self): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + with mock.patch.object(self.common, '_replicate_volume', + return_value={}) as mock_rep: + self.common.create_volume_from_snapshot( + self.data.test_clone_volume, self.data.test_snapshot) + volume_dict = self.data.provider_location + mock_rep.assert_called_once_with( + self.data.test_clone_volume, "snapshot-12345", volume_dict, + extra_specs) + + def test_replicate_volume(self): + volume_dict = self.data.provider_location + rs_enabled = fields.ReplicationStatus.ENABLED + with mock.patch.object(self.common, 'setup_volume_replication', + return_value=(rs_enabled, {})) as mock_setup: + self.common._replicate_volume( + self.data.test_volume, "1", volume_dict, self.extra_specs) + mock_setup.assert_called_once_with( + self.data.array, self.data.test_volume, + self.data.device_id, self.extra_specs) + + def test_replicate_volume_exception(self): + volume_dict = self.data.provider_location + with mock.patch.object( + self.common, 'setup_volume_replication', + 
side_effect=exception.VolumeBackendAPIException(data='')): + with mock.patch.object( + self.common, '_cleanup_replication_source') as mock_clean: + self.assertRaises(exception.VolumeBackendAPIException, + self.common._replicate_volume, + self.data.test_volume, + "1", volume_dict, self.extra_specs) + mock_clean.assert_called_once_with( + self.data.array, self.data.test_volume, "1", + volume_dict, self.extra_specs) + + @mock.patch.object(common.VMAXCommon, '_remove_members') + @mock.patch.object(common.VMAXCommon, + '_get_replication_extra_specs', + return_value=VMAXCommonData.rep_extra_specs) + @mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over', + return_value=True) + def test_unmap_lun_volume_failed_over(self, mock_fo, mock_es, mock_rm): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + rep_config = self.utils.get_replication_config( + [self.replication_device]) + self.common._unmap_lun(self.data.test_volume, self.data.connector) + mock_es.assert_called_once_with(extra_specs, rep_config) + + @mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over', + return_value=True) + def test_initialize_connection_vol_failed_over(self, mock_fo): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + rep_extra_specs = deepcopy(VMAXCommonData.rep_extra_specs) + rep_extra_specs['port_group_name'] = self.data.port_group_name_f + rep_config = self.utils.get_replication_config( + [self.replication_device]) + with mock.patch.object(self.common, '_get_replication_extra_specs', + return_value=rep_extra_specs) as mock_es: + self.common.initialize_connection( + self.data.test_volume, self.data.connector) + mock_es.assert_called_once_with(extra_specs, rep_config) + + @mock.patch.object(common.VMAXCommon, '_sync_check') + def test_extend_volume_rep_enabled(self, mock_sync): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = 
self.data.port_group_name_f + volume_name = self.data.test_volume.name + with mock.patch.object(self.rest, 'is_vol_in_rep_session', + return_value=(False, False, None)): + with mock.patch.object( + self.common, 'extend_volume_is_replicated') as mock_ex_re: + self.common.extend_volume(self.data.test_volume, '5') + mock_ex_re.assert_called_once_with( + self.data.array, self.data.test_volume, + self.data.device_id, volume_name, "5", extra_specs) + + def test_set_config_file_get_extra_specs_rep_enabled(self): + extra_specs, _, _ = self.common._set_config_file_and_get_extra_specs( + self.data.test_volume) + self.assertTrue(extra_specs['replication_enabled']) + + def test_populate_masking_dict_is_re(self): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + masking_dict = self.common._populate_masking_dict( + self.data.test_volume, self.data.connector, extra_specs) + self.assertTrue(masking_dict['replication_enabled']) + self.assertEqual('OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE', + masking_dict[utils.SG_NAME]) + + @mock.patch.object(common.VMAXCommon, + '_replicate_volume', + return_value={}) + def test_manage_existing_is_replicated(self, mock_rep): + extra_specs = deepcopy(self.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + external_ref = {u'source-name': u'00002'} + volume_name = self.utils.get_volume_element_name( + self.data.test_volume.id) + provider_location = {'device_id': u'00002', 'array': self.data.array} + with mock.patch.object( + self.common, '_check_lun_valid_for_cinder_management'): + self.common.manage_existing( + self.data.test_volume, external_ref) + mock_rep.assert_called_once_with( + self.data.test_volume, volume_name, provider_location, + extra_specs, delete_src=False) + + @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') + def test_setup_volume_replication(self, mock_rm): + rep_status, rep_data = self.common.setup_volume_replication( + 
self.data.array, self.data.test_volume, self.data.device_id, + self.extra_specs) + self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status) + self.assertEqual({'array': self.data.remote_array, + 'device_id': self.data.device_id}, rep_data) + + @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') + @mock.patch.object(common.VMAXCommon, '_create_volume') + def test_setup_volume_replication_target(self, mock_create, mock_rm): + rep_status, rep_data = self.common.setup_volume_replication( + self.data.array, self.data.test_volume, self.data.device_id, + self.extra_specs, self.data.device_id2) + self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status) + self.assertEqual({'array': self.data.remote_array, + 'device_id': self.data.device_id2}, rep_data) + mock_create.assert_not_called() + + @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') + @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') + def test_cleanup_lun_replication_success(self, mock_clean, mock_rm): + rep_extra_specs = deepcopy(self.data.rep_extra_specs) + rep_extra_specs['port_group_name'] = self.data.port_group_name_f + self.common.cleanup_lun_replication( + self.data.test_volume, "1", self.data.device_id, + self.extra_specs) + mock_clean.assert_called_once_with( + self.data.array, self.data.remote_array, self.data.device_id, + self.data.device_id2, self.data.rdf_group_no, "1", + rep_extra_specs) + mock_rm.assert_called_once_with( + self.data.remote_array, self.data.device_id2, "1", + rep_extra_specs, False) + + @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') + def test_cleanup_lun_replication_no_target(self, mock_clean): + with mock.patch.object(self.common, 'get_remote_target_device', + return_value=(None, '', '', '', '')): + self.common.cleanup_lun_replication( + self.data.test_volume, "1", self.data.device_id, + self.extra_specs) + mock_clean.assert_not_called() + + def test_cleanup_lun_replication_exception(self): + 
self.assertRaises(exception.VolumeBackendAPIException, + self.common.cleanup_lun_replication, + self.data.test_volume, "1", self.data.device_id, + self.extra_specs) + + @mock.patch.object(common.VMAXCommon, '_delete_from_srp') + @mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship') + def test_cleanup_remote_target(self, mock_break, mock_del): + with mock.patch.object(self.rest, 'are_vols_rdf_paired', + return_value=(False, '', '')): + self.common._cleanup_remote_target( + self.data.array, self.data.remote_array, self.data.device_id, + self.data.device_id2, self.data.rdf_group_name, + "vol1", self.data.rep_extra_specs) + mock_break.assert_not_called() + self.common._cleanup_remote_target( + self.data.array, self.data.remote_array, self.data.device_id, + self.data.device_id2, self.data.rdf_group_name, + "vol1", self.data.rep_extra_specs) + mock_break.assert_called_once_with( + self.data.array, self.data.device_id, + self.data.device_id2, self.data.rdf_group_name, + self.data.rep_extra_specs, "Synchronized") + + @mock.patch.object(common.VMAXCommon, + '_remove_vol_and_cleanup_replication') + @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') + @mock.patch.object(common.VMAXCommon, '_delete_from_srp') + def test_cleanup_replication_source(self, mock_del, mock_rm, mock_clean): + self.common._cleanup_replication_source( + self.data.array, self.data.test_volume, "vol1", + {'device_id': self.data.device_id}, self.extra_specs) + mock_del.assert_called_once_with( + self.data.array, self.data.device_id, "vol1", self.extra_specs) + + def test_get_rdf_details(self): + rdf_group_no, remote_array = self.common.get_rdf_details( + self.data.array) + self.assertEqual(self.data.rdf_group_no, rdf_group_no) + self.assertEqual(self.data.remote_array, remote_array) + + def test_get_rdf_details_exception(self): + with mock.patch.object(self.rest, 'get_rdf_group_number', + return_value=None): + self.assertRaises(exception.VolumeBackendAPIException, + 
self.common.get_rdf_details, self.data.array) + + def test_failover_host(self): + volumes = [self.data.test_volume, self.data.test_clone_volume] + with mock.patch.object(self.common, '_failover_volume', + return_value={}) as mock_fo: + self.common.failover_host(volumes) + self.assertEqual(2, mock_fo.call_count) + + def test_failover_host_exception(self): + volumes = [self.data.test_volume, self.data.test_clone_volume] + self.assertRaises(exception.VolumeBackendAPIException, + self.common.failover_host, + volumes, secondary_id="default") + + def test_failover_volume(self): + ref_model_update = { + 'volume_id': self.data.test_volume.id, + 'updates': + {'replication_status': fields.ReplicationStatus.FAILED_OVER, + 'replication_driver_data': six.text_type( + self.data.provider_location), + 'provider_location': six.text_type( + self.data.provider_location3)}} + model_update = self.common._failover_volume( + self.data.test_volume, True, self.extra_specs) + self.assertEqual(ref_model_update, model_update) + ref_model_update2 = { + 'volume_id': self.data.test_volume.id, + 'updates': + {'replication_status': fields.ReplicationStatus.ENABLED, + 'replication_driver_data': six.text_type( + self.data.provider_location), + 'provider_location': six.text_type( + self.data.provider_location3)}} + model_update2 = self.common._failover_volume( + self.data.test_volume, False, self.extra_specs) + self.assertEqual(ref_model_update2, model_update2) + + def test_failover_volume_exception(self): + with mock.patch.object( + self.provision, 'failover_volume', + side_effect=exception.VolumeBackendAPIException): + ref_model_update = { + 'volume_id': self.data.test_volume.id, + 'updates': {'replication_status': + fields.ReplicationStatus.FAILOVER_ERROR, + 'replication_driver_data': six.text_type( + self.data.provider_location3), + 'provider_location': six.text_type( + self.data.provider_location)}} + model_update = self.common._failover_volume( + self.data.test_volume, True, self.extra_specs) + 
self.assertEqual(ref_model_update, model_update) + + @mock.patch.object( + common.VMAXCommon, '_find_device_on_array', + side_effect=[None, VMAXCommonData.device_id, + VMAXCommonData.device_id, VMAXCommonData.device_id]) + @mock.patch.object( + common.VMAXCommon, 'get_masking_views_from_volume', + side_effect=['OS-host-MV', None, exception.VolumeBackendAPIException]) + def test_recover_volumes_on_failback(self, mock_mv, mock_dev): + recovery1 = self.common.recover_volumes_on_failback( + self.data.test_volume, self.extra_specs) + self.assertEqual('error', recovery1['updates']['status']) + recovery2 = self.common.recover_volumes_on_failback( + self.data.test_volume, self.extra_specs) + self.assertEqual('in-use', recovery2['updates']['status']) + recovery3 = self.common.recover_volumes_on_failback( + self.data.test_volume, self.extra_specs) + self.assertEqual('available', recovery3['updates']['status']) + recovery4 = self.common.recover_volumes_on_failback( + self.data.test_volume, self.extra_specs) + self.assertEqual('available', recovery4['updates']['status']) + + def test_get_remote_target_device(self): + target_device1, _, _, _, _ = ( + self.common.get_remote_target_device( + self.data.array, self.data.test_volume, self.data.device_id)) + self.assertEqual(self.data.device_id2, target_device1) + target_device2, _, _, _, _ = ( + self.common.get_remote_target_device( + self.data.array, self.data.test_clone_volume, + self.data.device_id)) + self.assertIsNone(target_device2) + with mock.patch.object(self.rest, 'are_vols_rdf_paired', + return_value=(False, '')): + target_device3, _, _, _, _ = ( + self.common.get_remote_target_device( + self.data.array, self.data.test_volume, + self.data.device_id)) + self.assertIsNone(target_device3) + with mock.patch.object(self.rest, 'get_volume', + return_value=None): + target_device4, _, _, _, _ = ( + self.common.get_remote_target_device( + self.data.array, self.data.test_volume, + self.data.device_id)) + 
self.assertIsNone(target_device4) + + @mock.patch.object(common.VMAXCommon, 'setup_volume_replication') + @mock.patch.object(provision.VMAXProvision, 'extend_volume') + @mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship') + @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') + def test_extend_volume_is_replicated(self, mock_remove, + mock_break, mock_extend, mock_setup): + self.common.extend_volume_is_replicated( + self.data.array, self.data.test_volume, self.data.device_id, + 'vol1', '5', self.data.extra_specs_rep_enabled) + self.assertEqual(2, mock_remove.call_count) + self.assertEqual(2, mock_extend.call_count) + + def test_extend_volume_is_replicated_exception(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.common.extend_volume_is_replicated, + self.data.failed_resource, self.data.test_volume, + self.data.device_id, 'vol1', '1', + self.data.extra_specs_rep_enabled) + + @mock.patch.object(common.VMAXCommon, 'add_volume_to_replication_group') + @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') + def test_enable_rdf(self, mock_remove, mock_add): + rep_config = self.utils.get_replication_config( + [self.replication_device]) + self.common.enable_rdf( + self.data.array, self.data.device_id, self.data.rdf_group_no, + rep_config, 'OS-1', self.data.remote_array, self.data.device_id2, + self.extra_specs) + self.assertEqual(2, mock_remove.call_count) + self.assertEqual(2, mock_add.call_count) + + @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') + @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') + def test_enable_rdf_exception(self, mock_cleanup, mock_rm): + rep_config = self.utils.get_replication_config( + [self.replication_device]) + self.assertRaises( + exception.VolumeBackendAPIException, self.common.enable_rdf, + self.data.array, self.data.device_id, + self.data.failed_resource, rep_config, 'OS-1', + self.data.remote_array, self.data.device_id2, 
self.extra_specs) + self.assertEqual(1, mock_cleanup.call_count) + + def test_add_volume_to_replication_group(self): + sg_name = self.common.add_volume_to_replication_group( + self.data.array, self.data.device_id, 'vol1', + self.extra_specs) + self.assertEqual(self.data.default_sg_re_enabled, sg_name) + + @mock.patch.object(masking.VMAXMasking, + 'get_or_create_default_storage_group', + side_effect=exception.VolumeBackendAPIException) + def test_add_volume_to_replication_group_exception(self, mock_get): + self.assertRaises( + exception.VolumeBackendAPIException, + self.common.add_volume_to_replication_group, + self.data.array, self.data.device_id, 'vol1', + self.extra_specs) + + def test_get_replication_extra_specs(self): + rep_config = self.utils.get_replication_config( + [self.replication_device]) + # Path one - disable compression + extra_specs1 = deepcopy(self.extra_specs) + extra_specs1[utils.DISABLECOMPRESSION] = "true" + ref_specs1 = deepcopy(self.data.rep_extra_specs) + ref_specs1['port_group_name'] = self.data.port_group_name_f + rep_extra_specs1 = self.common._get_replication_extra_specs( + extra_specs1, rep_config) + self.assertEqual(ref_specs1, rep_extra_specs1) + # Path two - disable compression, not all flash + ref_specs2 = deepcopy(self.data.rep_extra_specs) + ref_specs2['port_group_name'] = self.data.port_group_name_f + with mock.patch.object(self.rest, 'is_compression_capable', + return_value=False): + rep_extra_specs2 = self.common._get_replication_extra_specs( + extra_specs1, rep_config) + self.assertEqual(ref_specs2, rep_extra_specs2) + # Path three - slo not valid + extra_specs3 = deepcopy(self.extra_specs) + ref_specs3 = deepcopy(ref_specs1) + ref_specs3['slo'] = None + ref_specs3['workload'] = None + with mock.patch.object(self.provision, 'verify_slo_workload', + return_value=(False, False)): + rep_extra_specs3 = self.common._get_replication_extra_specs( + extra_specs3, rep_config) + self.assertEqual(ref_specs3, rep_extra_specs3) + + def 
test_get_secondary_stats(self): + rep_config = self.utils.get_replication_config( + [self.replication_device]) + array_map = self.utils.parse_file_to_get_array_map( + self.common.pool_info['config_file']) + finalarrayinfolist = self.common._get_slo_workload_combinations( + array_map) + array_info = finalarrayinfolist[0] + ref_info = deepcopy(array_info) + ref_info['SerialNumber'] = six.text_type(rep_config['array']) + ref_info['srpName'] = rep_config['srp'] + secondary_info = self.common.get_secondary_stats_info( + rep_config, array_info) + self.assertEqual(ref_info, secondary_info) diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py index cda50999a84..77c4ad3e363 100644 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ b/cinder/volume/drivers/dell_emc/vmax/common.py @@ -14,6 +14,7 @@ # under the License. import ast +from copy import deepcopy import os.path import sys @@ -24,6 +25,7 @@ import six from cinder import exception from cinder.i18n import _ +from cinder.objects import fields from cinder.volume import configuration from cinder.volume.drivers.dell_emc.vmax import masking from cinder.volume.drivers.dell_emc.vmax import provision @@ -39,6 +41,14 @@ CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_dell_emc_config.xml' CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_dell_emc_config_' CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' BACKENDNAME = 'volume_backend_name' +PREFIXBACKENDNAME = 'capabilities:volume_backend_name' + +# Replication +REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED +REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED +REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER +FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR +REPLICATION_ERROR = fields.ReplicationStatus.ERROR vmax_opts = [ @@ -46,10 +56,10 @@ vmax_opts = [ default=CINDER_EMC_CONFIG_FILE, help='Use this file for cinder emc plugin ' 'config data.'), - cfg.StrOpt('intervals', + cfg.StrOpt('interval', 
default=3, help='Use this value to specify ' - 'length of intervals in seconds.'), + 'length of the interval in seconds.'), cfg.StrOpt('retries', default=200, help='Use this value to specify ' @@ -78,15 +88,19 @@ class VMAXCommon(object): 'storage_protocol': None, 'total_capacity_gb': 0, 'vendor_name': 'Dell EMC', - 'volume_backend_name': None} + 'volume_backend_name': None, + 'replication_enabled': False, + 'replication_targets': None} pool_info = {'backend_name': None, 'config_file': None, 'arrays_info': {}, 'max_over_subscription_ratio': None, - 'reserved_percentage': 0} + 'reserved_percentage': 0, + 'replication_enabled': False} - def __init__(self, prtcl, version, configuration=None): + def __init__(self, prtcl, version, configuration=None, + active_backend_id=None): self.protocol = prtcl self.configuration = configuration @@ -96,6 +110,13 @@ class VMAXCommon(object): self.masking = masking.VMAXMasking(prtcl, self.rest) self.provision = provision.VMAXProvision(self.rest) self.version = version + # replication + self.replication_enabled = False + self.extend_replicated_vol = False + self.rep_devices = None + self.active_backend_id = active_backend_id + self.failover = False + self._get_replication_info() self._gather_info() def _gather_info(self): @@ -116,7 +137,7 @@ class VMAXCommon(object): else: self.pool_info['config_file'] = ( self.configuration.safe_get('cinder_dell_emc_config_file')) - self.intervals = self.configuration.safe_get('intervals') + self.interval = self.configuration.safe_get('interval') self.retries = self.configuration.safe_get('retries') self.pool_info['backend_name'] = ( self.configuration.safe_get('volume_backend_name')) @@ -143,6 +164,30 @@ class VMAXCommon(object): ret_val = True return ret_val + def _get_replication_info(self): + """Gather replication information, if provided.""" + self.rep_config = None + self.replication_targets = None + if hasattr(self.configuration, 'replication_device'): + self.rep_devices = 
self.configuration.safe_get( + 'replication_device') + if self.rep_devices and len(self.rep_devices) == 1: + self.rep_config = self.utils.get_replication_config( + self.rep_devices) + if self.rep_config: + self.replication_targets = [self.rep_config['array']] + if self.active_backend_id == self.rep_config['array']: + self.failover = True + self.extend_replicated_vol = self.rep_config['allow_extend'] + # use self.replication_enabled for update_volume_stats + self.replication_enabled = True + LOG.debug("The replication configuration is %(rep_config)s.", + {'rep_config': self.rep_config}) + elif self.rep_devices and len(self.rep_devices) > 1: + LOG.error("More than one replication target is configured. " + "Dell EMC VMAX only supports a single replication " + "target. Replication will not be enabled.") + def _get_slo_workload_combinations(self, array_info): """Method to query the array for SLO and Workloads. @@ -154,6 +199,8 @@ """ try: array = array_info['SerialNumber'] + if self.failover: + array = self.active_backend_id # Get the srp slo & workload settings slo_settings = self.rest.get_slo_list(array) # Remove 'None' from the list (so a 'None' slo is not combined @@ -203,6 +250,12 @@ volume_dict = (self._create_volume( volume_name, volume_size, extra_specs)) + # Set-up volume replication, if enabled + if self.utils.is_replication_enabled(extra_specs): + rep_update = self._replicate_volume(volume, volume_name, + volume_dict, extra_specs) + model_update.update(rep_update) + + LOG.info("Leaving create_volume: %(name)s. 
Volume dict: %(dict)s.", {'name': volume_name, 'dict': volume_dict}) model_update.update( @@ -225,6 +278,12 @@ volume, snapshot, extra_specs, is_snapshot=False, from_snapvx=True) + # Set-up volume replication, if enabled + if self.utils.is_replication_enabled(extra_specs): + rep_update = self._replicate_volume(volume, snapshot['name'], + clone_dict, extra_specs) + model_update.update(rep_update) + model_update.update( {'provider_location': six.text_type(clone_dict)}) return model_update @@ -241,10 +300,43 @@ clone_dict = self._create_cloned_volume(clone_volume, source_volume, extra_specs) + # Set-up volume replication, if enabled + if self.utils.is_replication_enabled(extra_specs): + rep_update = self._replicate_volume( + clone_volume, clone_volume.name, clone_dict, extra_specs) + model_update.update(rep_update) + model_update.update( {'provider_location': six.text_type(clone_dict)}) return model_update + def _replicate_volume(self, volume, volume_name, volume_dict, extra_specs, + delete_src=True): + """Set up remote replication for a volume. + + :param volume: the volume object + :param volume_name: the volume name + :param volume_dict: the volume dict + :param extra_specs: the extra specifications + :param delete_src: flag to indicate if source should be deleted + if replication fails + :returns: replication model_update + """ + array = volume_dict['array'] + try: + device_id = volume_dict['device_id'] + replication_status, replication_driver_data = ( + self.setup_volume_replication( + array, volume, device_id, extra_specs)) + except Exception: + if delete_src: + self._cleanup_replication_source( + array, volume, volume_name, volume_dict, extra_specs) + raise + return ({'replication_status': replication_status, + 'replication_driver_data': six.text_type( + replication_driver_data)}) + def delete_volume(self, volume): """Deletes a EMC(VMAX) volume. 
@@ -308,16 +400,23 @@ class VMAXCommon(object): :param volume: the volume Object :param connector: the connector Object - :raises VolumeBackendAPIException: """ - device_info = {} extra_specs = self._initial_setup(volume) + if self.utils.is_volume_failed_over(volume): + extra_specs = self._get_replication_extra_specs( + extra_specs, self.rep_config) volume_name = volume.name LOG.info("Unmap volume: %(volume)s.", {'volume': volume_name}) - if connector is not None: - device_info = self.find_host_lun_id( - volume, connector['host'], extra_specs) + if connector is None: + exception_message = ( + _("Connector must not be None - Cannot get the required " + "information needed to unmap the volume")) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + device_info = self.find_host_lun_id( + volume, connector['host'], extra_specs) if 'hostlunid' not in device_info: LOG.info("Volume %s is not mapped. No volume to unmap.", volume_name) @@ -354,7 +453,6 @@ class VMAXCommon(object): :param volume: volume Object :param connector: the connector Object :returns: dict -- device_info_dict - device information dict - :raises VolumeBackendAPIException: """ extra_specs = self._initial_setup(volume) is_multipath = connector.get('multipath', False) @@ -362,6 +460,9 @@ class VMAXCommon(object): volume_name = volume.name LOG.info("Initialize connection: %(volume)s.", {'volume': volume_name}) + if self.utils.is_volume_failed_over(volume): + extra_specs = self._get_replication_extra_specs( + extra_specs, self.rep_config) device_info_dict = self.find_host_lun_id( volume, connector['host'], extra_specs) masking_view_dict = self._populate_masking_dict( @@ -452,7 +553,7 @@ class VMAXCommon(object): extra_specs = self._initial_setup(volume) device_id = self._find_device_on_array(volume, extra_specs) array = extra_specs[utils.ARRAY] - # check if volume is part of an on-going clone operation + # Check if volume is part of an on-going clone 
operation self._sync_check(array, device_id, volume_name, extra_specs) if device_id is None: exception_message = (_("Cannot find Volume: %(volume_name)s. " @@ -480,7 +581,13 @@ class VMAXCommon(object): LOG.info("Extending volume %(volume)s to %(new_size)d GBs", {'volume': volume_name, 'new_size': int(new_size)}) - self.provision.extend_volume(array, device_id, new_size, extra_specs) + if self.utils.is_replication_enabled(extra_specs): + # Extra logic required if volume is replicated + self.extend_volume_is_replicated( + array, volume, device_id, volume_name, new_size, extra_specs) + else: + self.provision.extend_volume( + array, device_id, new_size, extra_specs) LOG.debug("Leaving extend_volume: %(volume_name)s. ", {'volume_name': volume_name}) @@ -506,6 +613,9 @@ class VMAXCommon(object): array_info_list = self.pool_info['arrays_info'] already_queried = False for array_info in array_info_list: + if self.failover: + array_info = self.get_secondary_stats_info( + self.rep_config, array_info) # Add both SLO & Workload name in the pool name # Query the SRP only once if WLP is not enabled # Only insert the array details in the dict once @@ -549,7 +659,8 @@ class VMAXCommon(object): 'thick_provisioning_support': False, 'max_over_subscription_ratio': max_oversubscription_ratio, - 'reserved_percentage': reserved_percentage} + 'reserved_percentage': reserved_percentage, + 'replication_enabled': self.replication_enabled} if arrays[array_info['SerialNumber']][3]: if reserved_percentage: if (arrays[array_info['SerialNumber']][3] > @@ -572,7 +683,9 @@ class VMAXCommon(object): 'consistent_group_snapshot_enabled': False, 'max_over_subscription_ratio': max_oversubscription_ratio, - 'reserved_percentage': reserved_percentage} + 'reserved_percentage': reserved_percentage, + 'replication_enabled': self.replication_enabled + } if array_reserve_percent: if isinstance(reserved_percentage, int): if array_reserve_percent > reserved_percentage: @@ -597,6 +710,8 @@ class 
VMAXCommon(object): 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, + 'replication_enabled': self.replication_enabled, + 'replication_targets': self.replication_targets, 'pools': pools} return data @@ -661,6 +776,8 @@ class VMAXCommon(object): # If there are no extra specs then the default case is assumed. if extra_specs: config_group = self.configuration.config_group + if extra_specs.get('replication_enabled') == ' True': + extra_specs[utils.IS_RE] = True config_file = self._register_config_file_from_config_group( config_group) return extra_specs, config_file, qos_specs @@ -680,7 +797,10 @@ class VMAXCommon(object): if isinstance(loc, six.string_types): name = ast.literal_eval(loc) array = extra_specs[utils.ARRAY] - device_id = name['device_id'] + try: + device_id = name['device_id'] + except KeyError: + device_id = name['keybindings']['DeviceID'] element_name = self.utils.get_volume_element_name( volume_name) founddevice_id = self.rest.find_volume_device_id( @@ -750,7 +870,7 @@ class VMAXCommon(object): :param array: array serial number :param device_id: the volume device id :param host: the host - :return: masking view list + :returns: masking view list """ LOG.debug("Getting masking views from volume") maskingview_list = [] @@ -856,8 +976,10 @@ class VMAXCommon(object): protocol = self.utils.get_short_protocol_type(self.protocol) short_host_name = self.utils.get_host_short_name(host_name) masking_view_dict[utils.DISABLECOMPRESSION] = False + masking_view_dict['replication_enabled'] = False slo = extra_specs[utils.SLO] workload = extra_specs[utils.WORKLOAD] + rep_enabled = self.utils.is_replication_enabled(extra_specs) short_pg_name = self.utils.get_pg_short_name( extra_specs[utils.PORTGROUPNAME]) masking_view_dict[utils.SLO] = slo @@ -890,7 +1012,9 @@ class VMAXCommon(object): "OS-%(shortHostName)s-No_SLO-%(pg)s" % {'shortHostName': short_host_name, 'pg': short_pg_name}) - + if rep_enabled: + child_sg_name += "-RE" + 
masking_view_dict['replication_enabled'] = True mv_prefix = ( "OS-%(shortHostName)s-%(protocol)s-%(pg)s" % {'shortHostName': short_host_name, @@ -981,7 +1105,7 @@ class VMAXCommon(object): :param array: the array serial number :param snapshot: the snapshot object - :return: sourcedevice_id, foundsnap_name + :returns: sourcedevice_id, foundsnap_name """ foundsnap_name = None sourcedevice_id = None @@ -1026,7 +1150,7 @@ class VMAXCommon(object): :param snapshot: the snapshot object :param source_device_id: the source device id :param extra_specs: the extra specifications - :return: snap_dict + :returns: snap_dict """ clone_name = self.utils.get_volume_element_name(snapshot.id) snap_name = self.utils.truncate_string(clone_name, 19) @@ -1061,8 +1185,11 @@ class VMAXCommon(object): return volume_name array = extra_specs[utils.ARRAY] - # check if volume is snap source + # Check if volume is snap source self._sync_check(array, device_id, volume_name, extra_specs) + # Remove from any storage groups and cleanup replication + self._remove_vol_and_cleanup_replication( + array, device_id, volume_name, extra_specs, volume) self._delete_from_srp( array, device_id, volume_name, extra_specs) return volume_name @@ -1163,9 +1290,9 @@ class VMAXCommon(object): LOG.exception(error_message) raise exception.VolumeBackendAPIException(data=error_message) - extra_specs[utils.INTERVAL] = self.intervals + extra_specs[utils.INTERVAL] = self.interval LOG.debug("The interval is set at: %(intervalInSecs)s.", - {'intervalInSecs': self.intervals}) + {'intervalInSecs': self.interval}) extra_specs[utils.RETRIES] = self.retries LOG.debug("Retries are set at: %(retries)s.", {'retries': self.retries}) @@ -1228,22 +1355,16 @@ class VMAXCommon(object): :param extra_specs: the extra specifications :raises VolumeBackendAPIException: """ - # Check if it is part of a storage group and delete it - # extra logic for case when volume is the last member. 
- self.masking.remove_and_reset_members( - array, device_id, volume_name, extra_specs, False) - try: LOG.debug("Delete Volume: %(name)s. device_id: %(device_id)s.", {'name': volume_name, 'device_id': device_id}) self.provision.delete_volume_from_srp( array, device_id, volume_name) - except Exception as e: # If we cannot successfully delete the volume, then we want to # return the volume to the default storage group, # which should be the SG it previously belonged to. - self.masking.return_volume_to_default_storage_group( + self.masking.add_volume_to_default_storage_group( array, device_id, volume_name, extra_specs) error_message = (_("Failed to delete volume %(volume_name)s. " @@ -1253,6 +1374,24 @@ class VMAXCommon(object): LOG.exception(error_message) raise exception.VolumeBackendAPIException(data=error_message) + def _remove_vol_and_cleanup_replication( + self, array, device_id, volume_name, extra_specs, volume=None): + """Remove a volume from its storage groups and cleanup replication. + + :param array: the array serial number + :param device_id: the device id + :param volume_name: the volume name + :param extra_specs: the extra specifications + :param volume: the volume object + """ + # Remove from any storage groups + self.masking.remove_and_reset_members( + array, device_id, volume_name, extra_specs, False) + # Cleanup remote replication + if self.utils.is_replication_enabled(extra_specs): + self.cleanup_lun_replication(volume, volume_name, + device_id, extra_specs) + def get_target_wwns_from_masking_view( self, volume, connector): """Find target WWNs via the masking view. 
@@ -1306,7 +1445,7 @@ class VMAXCommon(object): :param array: the array serial number :param portgroup_name: port group name :param initiator_group_name: ig name - :return: list of masking views + :returns: list of masking views """ LOG.debug("Finding Masking Views for port group %(pg)s and %(ig)s.", {'pg': portgroup_name, 'ig': initiator_group_name}) @@ -1336,7 +1475,7 @@ class VMAXCommon(object): :returns: ip_and_iqn - list of dicts """ ips_and_iqns = [] - LOG.debug("The portgroup name for iscsiadm is %(pg)s.", + LOG.debug("The portgroup name for iscsiadm is %(pg)s", {'pg': port_group_name}) ports = self.rest.get_port_ids(array, port_group_name) for port in ports: @@ -1391,8 +1530,9 @@ class VMAXCommon(object): raise return clone_dict - def _cleanup_target(self, array, target_device_id, source_device_id, - clone_name, snap_name, extra_specs): + def _cleanup_target( + self, array, target_device_id, source_device_id, + clone_name, snap_name, extra_specs): """Cleanup target volume on failed clone/ snapshot creation. 
:param array: the array serial number @@ -1401,7 +1541,7 @@ class VMAXCommon(object): :param clone_name: the name of the clone volume :param extra_specs: the extra specifications """ - snap_session = self.rest._get_sync_session( + snap_session = self.rest.get_sync_session( array, source_device_id, snap_name, target_device_id) if snap_session: self.provision.break_replication_relationship( @@ -1455,17 +1595,29 @@ class VMAXCommon(object): # Check if the existing volume is valid for cinder management self._check_lun_valid_for_cinder_management( array, device_id, volume_id, external_ref) - # Rename the volume + extra_specs = self._initial_setup(volume) + volume_name = self.utils.get_volume_element_name(volume_id) - LOG.debug("Rename volume %(vol)s to %(elementName)s.", + # Rename the volume + LOG.debug("Rename volume %(vol)s to %(element_name)s.", {'vol': volume_id, - 'elementName': volume_name}) + 'element_name': volume_name}) self.rest.rename_volume(array, device_id, volume_name) - provider_location = {'device_id': device_id, 'array': array} + model_update = {'provider_location': six.text_type(provider_location)} + + # Set-up volume replication, if enabled + if self.utils.is_replication_enabled(extra_specs): + rep_update = self._replicate_volume(volume, volume_name, + provider_location, + extra_specs, delete_src=False) + model_update.update(rep_update) + + else: + # Add volume to default storage group + self.masking.add_volume_to_default_storage_group( + array, device_id, volume_name, extra_specs) - model_update = {'provider_location': six.text_type(provider_location), - 'display_name': volume_name} return model_update def _check_lun_valid_for_cinder_management( @@ -1529,9 +1681,28 @@ class VMAXCommon(object): {'volume': volume}) array, device_id = self.utils.get_array_and_device_id( volume, external_ref) + # Ensure the volume exists on the array + volume_details = self.rest.get_volume(array, device_id) + if not volume_details: + msg = (_('Unable to retrieve volume 
details from array for ' + 'device %(device_id)s') % {'device_id': device_id}) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + size = float(self.rest.get_size_of_device_on_array(array, device_id)) - LOG.debug("Size of volume %(device_id)s is %(volumeSize)s GB.", - {'device_id': device_id, 'volumeSize': int(size)}) + if not size.is_integer(): + exception_message = ( + _("Cannot manage existing VMAX volume %(device_id)s " + "- it has a size of %(vol_size)s but only whole GB " + "sizes are supported. Please extend the " + "volume to the nearest GB value before importing.") + % {'device_id': device_id, 'vol_size': size, }) + LOG.exception(exception_message) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=exception_message) + + LOG.debug("Size of volume %(device_id)s is %(vol_size)s GB.", + {'device_id': device_id, 'vol_size': int(size)}) return int(size) def unmanage(self, volume): @@ -1539,7 +1710,6 @@ class VMAXCommon(object): Leave the volume intact on the backend array. :param volume: the volume object - :raises VolumeBackendAPIException: """ volume_name = volume.name volume_id = volume.id @@ -1552,6 +1722,14 @@ class VMAXCommon(object): "unmanage operation. Exiting...", {'id': volume_id}) else: + # Check if volume is snap source + self._sync_check(extra_specs['array'], device_id, + volume_name, extra_specs) + # Remove volume from any openstack storage groups + # and remove any replication + self._remove_vol_and_cleanup_replication( + extra_specs['array'], device_id, + volume_name, extra_specs, volume) # Rename the volume to volumeId, thus remove the 'OS-' prefix. 
self.rest.rename_volume( extra_specs[utils.ARRAY], device_id, volume_id) @@ -1578,6 +1756,14 @@ class VMAXCommon(object): {'name': volume_name}) return False + if self.utils.is_replication_enabled(extra_specs): + LOG.error("Volume %(name)s is replicated - " + "Replicated volumes are not eligible for " + "storage assisted retype. Host assisted " + "retype is supported.", + {'name': volume_name}) + return False + return self._slo_workload_migration(device_id, volume, host, volume_name, new_type, extra_specs) @@ -1640,7 +1826,7 @@ class VMAXCommon(object): :param volume_name: the volume name :param new_type: the volume type to migrate to :param extra_specs: the extra specifications - :return: bool + :returns: bool """ storagegroups = self.rest.get_storage_groups_from_volume( array, device_id) @@ -1758,3 +1944,582 @@ class VMAXCommon(object): return false_ret return True, target_slo, target_workload + + def setup_volume_replication(self, array, volume, device_id, + extra_specs, target_device_id=None): + """Setup replication for volume, if enabled. + + Called on create volume, create cloned volume, create volume from + snapshot, manage_existing, and re-establishing a replication + relationship after extending. 
+ :param array: the array serial number + :param volume: the volume object + :param device_id: the device id + :param extra_specs: the extra specifications + :param target_device_id: the target device id + :returns: replication_status -- str, replication_driver_data -- dict + """ + source_name = volume.name + LOG.debug('Starting replication setup ' + 'for volume: %s.', source_name) + # Get rdf details + rdf_group_no, remote_array = self.get_rdf_details(array) + rdf_vol_size = volume.size + if rdf_vol_size == 0: + rdf_vol_size = self.rest.get_size_of_device_on_array( + array, device_id) + + # Give the target volume the same Volume Element Name as the + # source volume + target_name = self.utils.get_volume_element_name(volume.id) + + if not target_device_id: + # Create a target volume on the target array + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, self.rep_config) + volume_dict = self._create_volume( + target_name, rdf_vol_size, rep_extra_specs) + target_device_id = volume_dict['device_id'] + + LOG.debug("Create volume replica: Target device: %(target)s " + "Source Device: %(source)s " + "Volume identifier: %(name)s.", + {'target': target_device_id, + 'source': device_id, + 'name': target_name}) + + # Enable rdf replication and establish the link + rdf_dict = self.enable_rdf( + array, device_id, rdf_group_no, self.rep_config, + target_name, remote_array, target_device_id, extra_specs) + + LOG.info('Successfully setup replication for %s.', + target_name) + replication_status = REPLICATION_ENABLED + replication_driver_data = rdf_dict + + return replication_status, replication_driver_data + + def cleanup_lun_replication(self, volume, volume_name, + device_id, extra_specs): + """Cleanup target volume on delete. + + Extra logic if target is last in group. 
+ :param volume: the volume object + :param volume_name: the volume name + :param device_id: the device id + :param extra_specs: extra specifications + :raises: VolumeBackendAPIException + """ + LOG.debug('Starting cleanup replication from volume: ' + '%s.', volume_name) + try: + loc = volume.provider_location + rep_data = volume.replication_driver_data + + if (isinstance(loc, six.string_types) + and isinstance(rep_data, six.string_types)): + name = ast.literal_eval(loc) + array = name['array'] + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, self.rep_config) + (target_device, remote_array, rdf_group_no, + local_vol_state, pair_state) = ( + self.get_remote_target_device(array, volume, device_id)) + + if target_device is not None: + # Clean-up target + self.masking.remove_and_reset_members( + remote_array, target_device, volume_name, + rep_extra_specs, False) + self._cleanup_remote_target( + array, remote_array, device_id, target_device, + rdf_group_no, volume_name, rep_extra_specs) + LOG.info('Successfully destroyed replication for ' + 'volume: %(volume)s', + {'volume': volume_name}) + else: + LOG.warning('Replication target not found for ' + 'replication-enabled volume: %(volume)s', + {'volume': volume_name}) + except Exception as e: + exception_message = ( + _('Cannot get necessary information to cleanup ' + 'replication target for volume: %(volume)s. ' + 'The exception received was: %(e)s. Manual ' + 'clean-up may be required. Please contact ' + 'your administrator.') + % {'volume': volume_name, 'e': six.text_type(e)}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + def _cleanup_remote_target( + self, array, remote_array, device_id, target_device, + rdf_group, volume_name, rep_extra_specs): + """Clean-up remote replication target after exception or on deletion. 
+ + :param array: the array serial number + :param remote_array: the remote array serial number + :param device_id: the source device id + :param target_device: the target device id + :param rdf_group: the RDF group + :param volume_name: the volume name + :param rep_extra_specs: replication extra specifications + """ + are_vols_paired, local_vol_state, pair_state = ( + self.rest.are_vols_rdf_paired( + array, remote_array, device_id, target_device, rdf_group)) + if are_vols_paired: + # Break the sync relationship. + self.provision.break_rdf_relationship( + array, device_id, target_device, rdf_group, + rep_extra_specs, pair_state) + self._delete_from_srp( + remote_array, target_device, volume_name, rep_extra_specs) + + def _cleanup_replication_source( + self, array, volume, volume_name, volume_dict, extra_specs): + """Cleanup a remote replication source volume on failure. + + If replication setup fails at any stage on a new volume create, + we must clean-up the source instance as the cinder database won't + be updated with the provider_location. This means the volume cannot + be properly deleted from the array by cinder. + :param array: the array serial number + :param volume: the volume object + :param volume_name: the name of the volume + :param volume_dict: the source volume dictionary + :param extra_specs: the extra specifications + """ + LOG.warning( + "Replication failed. Cleaning up the source volume. " + "Volume name: %(sourceName)s ", + {'sourceName': volume_name}) + device_id = volume_dict['device_id'] + # Remove from any storage groups and cleanup replication + self._remove_vol_and_cleanup_replication( + array, device_id, volume_name, extra_specs, volume) + self._delete_from_srp( + array, device_id, volume_name, extra_specs) + + def get_rdf_details(self, array): + """Retrieves an SRDF group instance. 
+ + :param array: the array serial number + :returns: rdf_group_no, remote_array + """ + if not self.rep_config: + exception_message = (_("Replication is not configured on " + "backend: %(backend)s.") % + {'backend': self.configuration.safe_get( + 'volume_backend_name')}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + remote_array = self.rep_config['array'] + rdf_group_label = self.rep_config['rdf_group_label'] + LOG.info("Replication group: %(RDFGroup)s.", + {'RDFGroup': rdf_group_label}) + rdf_group_no = self.rest.get_rdf_group_number(array, rdf_group_label) + if rdf_group_no is None: + exception_message = (_("Cannot find replication group: " + "%(RDFGroup)s. Please check the name " + "and the array") % + {'RDFGroup': rdf_group_label}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.info("Found RDF group number: %(RDFGroup)s.", + {'RDFGroup': rdf_group_no}) + + return rdf_group_no, remote_array + + def failover_host(self, volumes, secondary_id=None, groups=None): + """Fails over the volumes on a host back and forth. + + Driver needs to update following info for failed-over volume: + 1. provider_location: update array details + 2. replication_status: new status for replication-enabled volume + :param volumes: the list of volumes to be failed over + :param secondary_id: the target backend + :param groups: replication groups + :returns: secondary_id, volume_update_list, group_update_list + """ + volume_update_list = [] + if secondary_id != 'default': + if not self.failover: + self.failover = True + if self.rep_config: + secondary_id = self.rep_config['array'] + else: + exception_message = (_( + "Backend %(backend)s is already failed over. 
" + "If you wish to failback, please append " + "'--backend_id default' to your command.") + % {'backend': self.configuration.safe_get( + 'volume_backend_name')}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + else: + if self.failover: + self.failover = False + secondary_id = None + else: + exception_message = (_( + "Cannot failback backend %(backend)s- backend not " + "in failed over state. If you meant to failover, please " + "omit the '--backend_id default' from the command") + % {'backend': self.configuration.safe_get( + 'volume_backend_name')}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + for volume in volumes: + extra_specs = self._initial_setup(volume) + if self.utils.is_replication_enabled(extra_specs): + model_update = self._failover_volume( + volume, self.failover, extra_specs) + volume_update_list.append(model_update) + else: + if self.failover: + # Since the array has been failed-over, + # volumes without replication should be in error. + volume_update_list.append({ + 'volume_id': volume.id, + 'updates': {'status': 'error'}}) + else: + # This is a failback, so we will attempt + # to recover non-failed over volumes + recovery = self.recover_volumes_on_failback( + volume, extra_specs) + volume_update_list.append(recovery) + + LOG.info("Failover host complete.") + return secondary_id, volume_update_list, [] + + def _failover_volume(self, vol, failover, extra_specs): + """Failover a volume. 
+ + :param vol: the volume object + :param failover: flag to indicate failover or failback -- bool + :param extra_specs: the extra specifications + :returns: model_update -- dict + """ + loc = vol.provider_location + rep_data = vol.replication_driver_data + try: + name = ast.literal_eval(loc) + replication_keybindings = ast.literal_eval(rep_data) + array = name['array'] + device_id = self._find_device_on_array(vol, {utils.ARRAY: array}) + + (target_device, remote_array, rdf_group, + local_vol_state, pair_state) = ( + self.get_remote_target_device(array, vol, device_id)) + + self._sync_check(array, device_id, vol.name, extra_specs) + self.provision.failover_volume( + array, device_id, rdf_group, extra_specs, + local_vol_state, failover) + + if failover: + new_status = REPLICATION_FAILOVER + else: + new_status = REPLICATION_ENABLED + + # Transfer ownership to secondary_backend_id and + # update provider_location field + loc = six.text_type(replication_keybindings) + rep_data = six.text_type(name) + + except Exception as ex: + msg = ('Failed to failover volume %(volume_id)s. ' + 'Error: %(error)s.') + LOG.error(msg, {'volume_id': vol.id, + 'error': ex}, ) + new_status = FAILOVER_ERROR + + model_update = {'volume_id': vol.id, + 'updates': + {'replication_status': new_status, + 'replication_driver_data': rep_data, + 'provider_location': loc}} + return model_update + + def recover_volumes_on_failback(self, volume, extra_specs): + """Recover volumes on failback. + + On failback, attempt to recover non RE(replication enabled) + volumes from primary array. 
+ :param volume: the volume object + :param extra_specs: the extra specifications + :returns: volume_update + """ + # Check if volume still exists on the primary + volume_update = {'volume_id': volume.id} + device_id = self._find_device_on_array(volume, extra_specs) + if not device_id: + volume_update['updates'] = {'status': 'error'} + else: + try: + maskingview = self.get_masking_views_from_volume( + extra_specs[utils.ARRAY], device_id, '') + except Exception: + maskingview = None + LOG.debug("Unable to determine if volume is in masking view.") + if not maskingview: + volume_update['updates'] = {'status': 'available'} + else: + volume_update['updates'] = {'status': 'in-use'} + return volume_update + + def get_remote_target_device(self, array, volume, device_id): + """Get the remote target for a given volume. + + :param array: the array serial number + :param volume: the volume object + :param device_id: the device id + :returns: target_device, target_array, rdf_group, state + """ + target_device, local_vol_state, pair_state = None, '', '' + rdf_group, remote_array = self.get_rdf_details(array) + try: + rep_target_data = volume.replication_driver_data + replication_keybindings = ast.literal_eval(rep_target_data) + remote_array = replication_keybindings['array'] + remote_device = replication_keybindings['device_id'] + target_device_info = self.rest.get_volume( + remote_array, remote_device) + if target_device_info is not None: + target_device = remote_device + are_vols_paired, local_vol_state, pair_state = ( + self.rest.are_vols_rdf_paired( + array, remote_array, device_id, + target_device, rdf_group)) + if not are_vols_paired: + target_device = None + except (KeyError, ValueError): + target_device = None + return (target_device, remote_array, rdf_group, + local_vol_state, pair_state) + + def extend_volume_is_replicated( + self, array, volume, device_id, volume_name, + new_size, extra_specs): + """Extend a replication-enabled volume. 
+ + Cannot extend volumes in a synchronization pair. Must first break the + relationship, extend them separately, then recreate the pair + :param array: the array serial number + :param volume: the volume objcet + :param device_id: the volume device id + :param volume_name: the volume name + :param new_size: the new size the volume should be + :param extra_specs: extra specifications + """ + if self.extend_replicated_vol is True: + try: + (target_device, remote_array, rdf_group, + local_vol_state, pair_state) = ( + self.get_remote_target_device(array, volume, device_id)) + + # Volume must be removed from replication (storage) group + # before the replication relationship can be ended (cannot + # have a mix of replicated and non-replicated volumes as + # the SRDF groups become unmanageable). + self.masking.remove_and_reset_members( + array, device_id, volume_name, extra_specs, False) + + # Repeat on target side + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, self.rep_config) + self.masking.remove_and_reset_members( + remote_array, target_device, volume_name, + rep_extra_specs, False) + + LOG.info("Breaking replication relationship...") + self.provision.break_rdf_relationship( + array, device_id, target_device, + rdf_group, rep_extra_specs, pair_state) + + # Extend the source volume + LOG.info("Extending source volume...") + self.provision.extend_volume( + array, device_id, new_size, extra_specs) + + # Extend the target volume + LOG.info("Extending target volume...") + self.provision.extend_volume( + remote_array, target_device, new_size, rep_extra_specs) + + # Re-create replication relationship + LOG.info("Recreating replication relationship...") + self.setup_volume_replication( + array, volume, device_id, extra_specs, target_device) + + except Exception as e: + exception_message = (_("Error extending volume. 
" + "Error received was %(e)s") % + {'e': e}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + else: + exception_message = (_( + "Extending a replicated volume is not " + "permitted on this backend. Please contact " + "your administrator.")) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + def enable_rdf(self, array, device_id, rdf_group_no, rep_config, + target_name, remote_array, target_device, extra_specs): + """Create a replication relationship with a target volume. + + :param array: the array serial number + :param device_id: the device id + :param rdf_group_no: the rdf group number + :param rep_config: the replication config + :param target_name: the target volume name + :param remote_array: the remote array serial number + :param target_device: the target device id + :param extra_specs: the extra specifications + :returns: rdf_dict + """ + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, rep_config) + try: + # Remove source and target instances from their + # default storage groups + self.masking.remove_and_reset_members( + array, device_id, target_name, extra_specs, False) + + self.masking.remove_and_reset_members( + remote_array, target_device, target_name, + rep_extra_specs, False) + + # Establish replication relationship + rdf_dict = self.rest.create_rdf_device_pair( + array, device_id, rdf_group_no, target_device, remote_array, + target_name, extra_specs) + + # Add source and target instances to their replication groups + LOG.debug("Adding source device to default replication group.") + self.add_volume_to_replication_group( + array, device_id, target_name, extra_specs) + LOG.debug("Adding target device to default replication group.") + self.add_volume_to_replication_group( + remote_array, target_device, target_name, rep_extra_specs) + + except Exception as e: + LOG.warning( + ("Remote replication failed. 
Cleaning up the target " + "volume and returning source volume to default storage " + "group. Volume name: %(name)s "), + {'name': target_name}) + self.masking.remove_and_reset_members( + remote_array, target_device, target_name, + rep_extra_specs, False) + self._cleanup_remote_target( + array, remote_array, device_id, target_device, + rdf_group_no, target_name, rep_extra_specs) + # Re-throw the exception. + exception_message = (_("Remote replication failed with exception:" + " %(e)s") + % {'e': six.text_type(e)}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + return rdf_dict + + def add_volume_to_replication_group( + self, array, device_id, volume_name, extra_specs): + """Add a volume to the default replication group. + + Replication groups are VMAX storage groups that contain only + RDF-paired volumes. We can use our normal storage group operations. + :param array: array serial number + :param device_id: the device id + :param volume_name: the volume name + :param extra_specs: the extra specifications + :returns: storagegroup_name + """ + do_disable_compression = self.utils.is_compression_disabled( + extra_specs) + try: + storagegroup_name = ( + self.masking.get_or_create_default_storage_group( + array, extra_specs[utils.SRP], extra_specs[utils.SLO], + extra_specs[utils.WORKLOAD], extra_specs, + do_disable_compression, is_re=True)) + except Exception as e: + exception_message = (_("Failed to get or create replication" + "group. Exception received: %(e)s") + % {'e': six.text_type(e)}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + self.masking.add_volume_to_storage_group( + array, device_id, storagegroup_name, volume_name, extra_specs) + + return storagegroup_name + + def _get_replication_extra_specs(self, extra_specs, rep_config): + """Get replication extra specifications. 
+ + Called when target array operations are necessary - + on create, extend, etc and when volume is failed over. + :param extra_specs: the extra specifications + :param rep_config: the replication configuration + :returns: repExtraSpecs - dict + """ + rep_extra_specs = deepcopy(extra_specs) + rep_extra_specs[utils.ARRAY] = rep_config['array'] + rep_extra_specs[utils.SRP] = rep_config['srp'] + rep_extra_specs[utils.PORTGROUPNAME] = rep_config['portgroup'] + + # If disable compression is set, check if target array is all flash + do_disable_compression = self.utils.is_compression_disabled( + extra_specs) + if do_disable_compression: + if not self.rest.is_compression_capable( + rep_extra_specs[utils.ARRAY]): + rep_extra_specs.pop(utils.DISABLECOMPRESSION, None) + + # Check to see if SLO and Workload are configured on the target array. + if extra_specs[utils.SLO]: + is_valid_slo, is_valid_workload = ( + self.provision.verify_slo_workload( + rep_extra_specs[utils.ARRAY], + extra_specs[utils.SLO], + rep_extra_specs[utils.WORKLOAD], + rep_extra_specs[utils.SRP])) + if not is_valid_slo or not is_valid_workload: + LOG.warning("The target array does not support the storage " + "pool setting for SLO %(slo)s or workload " + "%(workload)s. Not assigning any SLO or " + "workload.", + {'slo': extra_specs[utils.SLO], + 'workload': extra_specs[utils.WORKLOAD]}) + rep_extra_specs[utils.SLO] = None + if extra_specs[utils.WORKLOAD]: + rep_extra_specs[utils.WORKLOAD] = None + + return rep_extra_specs + + def get_secondary_stats_info(self, rep_config, array_info): + """On failover, report on secondary array statistics. 
+ + :param rep_config: the replication configuration + :param array_info: the array info + :returns: secondary_info - dict + """ + secondary_info = array_info.copy() + secondary_info['SerialNumber'] = six.text_type(rep_config['array']) + secondary_info['srpName'] = rep_config['srp'] + return secondary_info diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py index b3d7217129c..d4101b67f19 100644 --- a/cinder/volume/drivers/dell_emc/vmax/fc.py +++ b/cinder/volume/drivers/dell_emc/vmax/fc.py @@ -79,6 +79,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): - Retype (storage-assisted migration) - QoS support - Support for compression on All Flash + - Support for volume replication """ VERSION = "3.0.0" @@ -89,10 +90,12 @@ class VMAXFCDriver(driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(VMAXFCDriver, self).__init__(*args, **kwargs) + self.active_backend_id = kwargs.get('active_backend_id', None) self.common = common.VMAXCommon( 'FC', self.VERSION, - configuration=self.configuration) + configuration=self.configuration, + active_backend_id=self.active_backend_id) self.zonemanager_lookup_service = fczm_utils.create_lookup_service() def check_for_setup_error(self): @@ -102,7 +105,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): """Creates a VMAX volume. 
:param volume: the cinder volume object - :return: provider location dict + :returns: provider location dict """ return self.common.create_volume(volume) @@ -111,7 +114,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): :param volume: the cinder volume object :param snapshot: the cinder snapshot object - :return: provider location dict + :returns: provider location dict """ return self.common.create_volume_from_snapshot( volume, snapshot) @@ -121,7 +124,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): :param volume: the cinder volume object :param src_vref: the source volume reference - :return: provider location dict + :returns: provider location dict """ return self.common.create_cloned_volume(volume, src_vref) @@ -136,7 +139,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): """Creates a snapshot. :param snapshot: the cinder snapshot object - :return: provider location dict + :returns: provider location dict """ src_volume = snapshot.volume return self.common.create_snapshot(snapshot, src_volume) @@ -215,7 +218,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): } :param volume: the cinder volume object :param connector: the connector object - :return: dict -- the target_wwns and initiator_target_map + :returns: dict -- the target_wwns and initiator_target_map """ device_info = self.common.initialize_connection( volume, connector) @@ -349,7 +352,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): :param volume: the cinder volume object :param connector: the connector object - :return: target_wwns -- list, init_targ_map -- dict + :returns: target_wwns -- list, init_targ_map -- dict """ target_wwns, init_targ_map = [], {} initiator_wwns = connector['wwpns'] @@ -406,7 +409,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): Also need to consider things like QoS, Emulation, account/tenant. 
:param volume: the volume object :param external_ref: the reference for the VMAX volume - :return: model_update + :returns: model_update """ return self.common.manage_existing(volume, external_ref) @@ -440,3 +443,15 @@ class VMAXFCDriver(driver.FibreChannelDriver): :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(volume, new_type, host) + + def failover_host(self, context, volumes, secondary_id=None, groups=None): + """Failover volumes to a secondary host/ backend. + + :param context: the context + :param volumes: the list of volumes to be failed over + :param secondary_id: the backend to be failed over to, is 'default' + if fail back + :param groups: replication groups + :returns: secondary_id, volume_update_list, group_update_list + """ + return self.common.failover_host(volumes, secondary_id, groups) diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py index 555167efa9c..eae2399f464 100644 --- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py +++ b/cinder/volume/drivers/dell_emc/vmax/iscsi.py @@ -84,6 +84,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): - Retype (storage-assisted migration) - QoS support - Support for compression on All Flash + - Support for volume replication """ VERSION = "3.0.0" @@ -94,11 +95,13 @@ class VMAXISCSIDriver(driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(VMAXISCSIDriver, self).__init__(*args, **kwargs) + self.active_backend_id = kwargs.get('active_backend_id', None) self.common = ( common.VMAXCommon( 'iSCSI', self.VERSION, - configuration=self.configuration)) + configuration=self.configuration, + active_backend_id=self.active_backend_id)) def check_for_setup_error(self): pass @@ -107,7 +110,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): """Creates a VMAX volume. 
:param volume: the cinder volume object - :return: provider location dict + :returns: provider location dict """ return self.common.create_volume(volume) @@ -116,7 +119,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): :param volume: the cinder volume object :param snapshot: the cinder snapshot object - :return: provider location dict + :returns: provider location dict """ return self.common.create_volume_from_snapshot( volume, snapshot) @@ -126,7 +129,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): :param volume: the cinder volume object :param src_vref: the source volume reference - :return: provider location dict + :returns: provider location dict """ return self.common.create_cloned_volume(volume, src_vref) @@ -141,7 +144,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): """Creates a snapshot. :param snapshot: the cinder snapshot object - :return: provider location dict + :returns: provider location dict """ src_volume = snapshot.volume return self.common.create_snapshot(snapshot, src_volume) @@ -220,7 +223,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): } :param volume: the cinder volume object :param connector: the connector object - :return: dict -- the iscsi dict + :returns: dict -- the iscsi dict """ device_info = self.common.initialize_connection( volume, connector) @@ -231,7 +234,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): :param device_info: device info dict :param volume: volume object - :return: iscsi dict + :returns: iscsi dict """ try: ip_and_iqn = device_info['ip_and_iqn'] @@ -273,7 +276,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): :param ip_and_iqn: list of ip and iqn dicts :param is_multipath: flag for multipath :param host_lun_id: the host lun id of the device - :return: properties + :returns: properties """ properties = {} if len(ip_and_iqn) > 1 and is_multipath: @@ -384,3 +387,15 @@ class VMAXISCSIDriver(driver.ISCSIDriver): :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(volume, new_type, host) + 
+ def failover_host(self, context, volumes, secondary_id=None, groups=None): + """Failover volumes to a secondary host/ backend. + + :param context: the context + :param volumes: the list of volumes to be failed over + :param secondary_id: the backend to be failed over to, is 'default' + if fail back + :param groups: replication groups + :returns: secondary_id, volume_update_list, group_update_list + """ + return self.common.failover_host(volumes, secondary_id, groups) diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py index 1a51fd0b6da..55e652044d4 100644 --- a/cinder/volume/drivers/dell_emc/vmax/masking.py +++ b/cinder/volume/drivers/dell_emc/vmax/masking.py @@ -142,14 +142,15 @@ class VMAXMasking(object): masking_view_dict[utils.SRP], masking_view_dict[utils.SLO], masking_view_dict[utils.WORKLOAD], - masking_view_dict[utils.DISABLECOMPRESSION]) + masking_view_dict[utils.DISABLECOMPRESSION], + masking_view_dict[utils.IS_RE]) check_vol = self.rest.is_volume_in_storagegroup( serial_number, device_id, default_sg_name) if check_vol: - self.remove_volume_from_sg( - serial_number, device_id, volume_name, default_sg_name, - extra_specs) + self.remove_vol_from_storage_group( + serial_number, device_id, default_sg_name, + volume_name, extra_specs) else: LOG.warning( "Volume: %(volume_name)s does not belong " @@ -587,7 +588,7 @@ class VMAXMasking(object): LOG.info("Added volume: %(vol_name)s to storage group %(sg_name)s.", {'vol_name': volume_name, 'sg_name': storagegroup_name}) - def _remove_vol_from_storage_group( + def remove_vol_from_storage_group( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs): """Remove a volume from a storage group. 
@@ -890,7 +891,7 @@ class VMAXMasking(object): self._cleanup_deletion( serial_number, device_id, volume_name, extra_specs) if reset: - self.return_volume_to_default_storage_group( + self.add_volume_to_default_storage_group( serial_number, device_id, volume_name, extra_specs) def _cleanup_deletion( @@ -1139,7 +1140,7 @@ class VMAXMasking(object): :param storagegroup_name: storage group name :param extra_specs: extra specifications """ - self._remove_vol_from_storage_group( + self.remove_vol_from_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs) @@ -1204,7 +1205,7 @@ class VMAXMasking(object): LOG.info("Masking view %(maskingview)s successfully deleted.", {'maskingview': masking_view}) - def return_volume_to_default_storage_group( + def add_volume_to_default_storage_group( self, serial_number, device_id, volume_name, extra_specs): """Return volume to its default storage group. @@ -1215,9 +1216,11 @@ class VMAXMasking(object): """ do_disable_compression = self.utils.is_compression_disabled( extra_specs) + rep_enabled = self.utils.is_replication_enabled(extra_specs) storagegroup_name = self.get_or_create_default_storage_group( serial_number, extra_specs[utils.SRP], extra_specs[utils.SLO], - extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression) + extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression, + rep_enabled) self._check_adding_volume_to_storage_group( serial_number, device_id, storagegroup_name, volume_name, @@ -1225,7 +1228,7 @@ class VMAXMasking(object): def get_or_create_default_storage_group( self, serial_number, srp, slo, workload, extra_specs, - do_disable_compression=False): + do_disable_compression=False, is_re=False): """Get or create a default storage group. 
:param serial_number: the array serial number @@ -1234,12 +1237,14 @@ class VMAXMasking(object): :param workload: the workload :param extra_specs: extra specifications :param do_disable_compression: flag for compression + :param is_re: is replication enabled :returns: storagegroup_name :raises: VolumeBackendAPIException """ storagegroup, storagegroup_name = ( self.rest.get_vmax_default_storage_group( - serial_number, srp, slo, workload, do_disable_compression)) + serial_number, srp, slo, workload, do_disable_compression, + is_re)) if storagegroup is None: self.provision.create_storage_group( serial_number, storagegroup_name, srp, slo, workload, @@ -1278,7 +1283,7 @@ class VMAXMasking(object): :param extra_specs: extra specifications :param parent_sg_name: the parent sg name """ - self._remove_vol_from_storage_group( + self.remove_vol_from_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs) diff --git a/cinder/volume/drivers/dell_emc/vmax/provision.py b/cinder/volume/drivers/dell_emc/vmax/provision.py index 25e04cad40e..a8483390597 100644 --- a/cinder/volume/drivers/dell_emc/vmax/provision.py +++ b/cinder/volume/drivers/dell_emc/vmax/provision.py @@ -24,6 +24,8 @@ from cinder.volume.drivers.dell_emc.vmax import utils LOG = logging.getLogger(__name__) +WRITE_DISABLED = "Write Disabled" + class VMAXProvision(object): """Provisioning Class for Dell EMC VMAX volume drivers. 
@@ -238,7 +240,7 @@ class VMAXProvision(object): :param device_id: the volume device id :param new_size: the new size (GB) :param extra_specs: the extra specifications - :return: status_code + :returns: status_code """ start_time = time.time() self.rest.extend_volume(array, device_id, new_size, extra_specs) @@ -310,7 +312,7 @@ class VMAXProvision(object): :param array: the array serial number :param srp: the srp name :param array_info: array info dict - :return: remaining_capacity + :returns: remaining_capacity """ remaining_capacity = -1 if array_info['SLO']: @@ -336,13 +338,15 @@ class VMAXProvision(object): """ is_valid_slo, is_valid_workload = False, False - if workload: - if workload.lower() == 'none': - workload = None + if workload and workload.lower() == 'none': + workload = None if not workload: is_valid_workload = True + if slo and slo.lower() == 'none': + slo = None + valid_slos = self.rest.get_slo_list(array) valid_workloads = self.rest.get_workload_settings(array) for valid_slo in valid_slos: @@ -380,7 +384,7 @@ class VMAXProvision(object): :param array: the array serial number :param sg_name: the storage group name - :return: storage group slo settings + :returns: storage group slo settings """ slo = 'NONE' workload = 'NONE' @@ -398,3 +402,49 @@ class VMAXProvision(object): LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload} + + def break_rdf_relationship(self, array, device_id, target_device, + rdf_group, rep_extra_specs, state): + """Break the rdf relationship between a pair of devices. 
+ + :param array: the array serial number + :param device_id: the source device id + :param target_device: target device id + :param rdf_group: the rdf group number + :param rep_extra_specs: replication extra specs + :param state: the state of the rdf pair + """ + LOG.info("Splitting rdf pair: source device: %(src)s " + "target device: %(tgt)s.", + {'src': device_id, 'tgt': target_device}) + if state == 'Synchronized': + self.rest.modify_rdf_device_pair( + array, device_id, rdf_group, rep_extra_specs, split=True) + LOG.info("Deleting rdf pair: source device: %(src)s " + "target device: %(tgt)s.", + {'src': device_id, 'tgt': target_device}) + self.rest.delete_rdf_pair(array, device_id, rdf_group) + + def failover_volume(self, array, device_id, rdf_group, + extra_specs, local_vol_state, failover): + """Failover or back a volume pair. + + :param array: the array serial number + :param device_id: the source device id + :param rdf_group: the rdf group number + :param extra_specs: extra specs + :param local_vol_state: the local volume state + :param failover: flag to indicate failover or failback -- bool + """ + if local_vol_state == WRITE_DISABLED: + LOG.info("Volume %(dev)s is already failed over.", + {'dev': device_id}) + return + if failover: + action = "Failing over" + else: + action = "Failing back" + LOG.info("%(action)s rdf pair: source device: %(src)s ", + {'action': action, 'src': device_id}) + self.rest.modify_rdf_device_pair( + array, device_id, rdf_group, extra_specs, split=False) diff --git a/cinder/volume/drivers/dell_emc/vmax/rest.py b/cinder/volume/drivers/dell_emc/vmax/rest.py index f604d9dfce5..19a6c732b62 100644 --- a/cinder/volume/drivers/dell_emc/vmax/rest.py +++ b/cinder/volume/drivers/dell_emc/vmax/rest.py @@ -106,7 +106,7 @@ class VMAXRest(object): :param method: The method (GET, POST, PUT, or DELETE) :param params: Additional URL parameters :param request_object: request payload (dict) - :return: server response object (dict) + :returns: server 
response object (dict) :raises: VolumeBackendAPIException """ message, status_code = None, None @@ -158,8 +158,8 @@ class VMAXRest(object): :param job: the job dict :param extra_specs: the extra_specs dict. - :return rc -- int, result -- string, status -- string, - task -- list of dicts detailing tasks in the job + :returns: rc -- int, result -- string, status -- string, + task -- list of dicts detailing tasks in the job :raises: VolumeBackendAPIException """ res, tasks = None, None @@ -259,7 +259,7 @@ class VMAXRest(object): :param status_code: the status code :param job: the job :param extra_specs: the extra specifications - :return: task -- list of dicts detailing tasks in the job + :returns: task -- list of dicts detailing tasks in the job :raises: VolumeBackendAPIException """ task = None @@ -288,7 +288,7 @@ class VMAXRest(object): :param resource_type: the resource type e.g. maskingview :param resource_name: the name of a specific resource :param private: empty string or '/private' if private url - :return: target url, string + :returns: target url, string """ target_uri = ('%(private)s/%(version)s/%(category)s/symmetrix/' '%(array)s/%(resource_type)s' @@ -306,7 +306,7 @@ class VMAXRest(object): :param target_uri: the target uri :param resource_type: the resource type, e.g. 
maskingview :param params: optional dict of filter params - :return: resource_object -- dict or None + :returns: resource_object -- dict or None """ resource_object = None sc, message = self.request(target_uri, GET, params=params) @@ -330,7 +330,7 @@ class VMAXRest(object): :param resource_name: the name of a specific resource :param params: query parameters :param private: empty string or '/private' if private url - :return: resource object -- dict or None + :returns: resource object -- dict or None """ target_uri = self._build_uri(array, category, resource_type, resource_name, private) @@ -345,7 +345,7 @@ class VMAXRest(object): :param resource_type: the resource type :param payload: the payload :param private: empty string or '/private' if private url - :return: status_code -- int, message -- string, server response + :returns: status_code -- int, message -- string, server response """ target_uri = self._build_uri(array, category, resource_type, None, private) @@ -366,7 +366,7 @@ class VMAXRest(object): :param payload: the payload :param resource_name: the resource name :param private: empty string or '/private' if private url - :return: status_code -- int, message -- string (server response) + :returns: status_code -- int, message -- string (server response) """ target_uri = self._build_uri(array, category, resource_type, resource_name, private) @@ -378,7 +378,7 @@ class VMAXRest(object): def delete_resource( self, array, category, resource_type, resource_name, - payload=None, private=''): + payload=None, private='', params=None): """Delete a provisioning resource. 
:param array: the array serial number @@ -387,11 +387,13 @@ :param resource_name: the name of the resource to be deleted :param payload: the payload, optional :param private: empty string or '/private' if private url + :param params: dict of optional query params """ target_uri = self._build_uri(array, category, resource_type, resource_name, private) status_code, message = self.request(target_uri, DELETE, - request_object=payload) + request_object=payload, + params=params) operation = 'delete %(res)s resource' % {'res': resource_type} self.check_status_code_success(operation, status_code, message) @@ -399,7 +401,7 @@ """Get an array from its serial number. :param array: the array serial number - :return: array_details -- dict or None + :returns: array_details -- dict or None """ target_uri = '/%s/system/symmetrix/%s' % (U4V_VERSION, array) array_details = self._get_request(target_uri, 'system') @@ -422,14 +424,14 @@ return srp_details def get_slo_list(self, array): - """Returns the list of service levels associated with an srp. + """Retrieve the list of SLOs from the array. :param array: the array serial number - :return slo_list -- list of service level names + :returns: slo_list -- list of service level names """ slo_list = [] slo_dict = self.get_resource(array, SLOPROVISIONING, 'slo') - if slo_dict: + if slo_dict and slo_dict.get('sloId'): slo_list = slo_dict['sloId'] return slo_list @@ -437,7 +439,7 @@ """Get valid workload options from array.
:param array: the array serial number - :return: workload_setting -- list of workload names + :returns: workload_setting -- list of workload names """ workload_setting = [] wl_details = self.get_resource(array, SLOPROVISIONING, 'workloadtype') @@ -452,7 +454,7 @@ class VMAXRest(object): :param srp: the storage resource srp :param slo: the service level :param workload: the workload - :return remaining_capacity -- string, or None + :returns: remaining_capacity -- string, or None """ params = {'srp': srp, 'slo': slo, 'workloadtype': workload} try: @@ -484,7 +486,7 @@ class VMAXRest(object): :param array: the array serial number :param storage_group_name: the name of the storage group - :return: storage group dict or None + :returns: storage group dict or None """ return self.get_resource( array, SLOPROVISIONING, 'storagegroup', @@ -495,7 +497,7 @@ class VMAXRest(object): :param array: the array serial number :param params: optional filter parameters - :return: storage group list + :returns: storage group list """ sg_list = [] sg_details = self.get_resource(array, SLOPROVISIONING, @@ -509,7 +511,7 @@ class VMAXRest(object): :param array: the array serial number :param storage_group_name: the storage group name - :return: num_vols -- int + :returns: num_vols -- int """ num_vols = 0 storagegroup = self.get_storage_group(array, storage_group_name) @@ -525,7 +527,7 @@ class VMAXRest(object): :param array: the array serial number :param child_name: the child sg name :param parent_name: the parent sg name - :return: bool + :returns: bool """ parent_sg = self.get_storage_group(array, parent_name) if parent_sg and parent_sg.get('child_storage_group'): @@ -575,7 +577,7 @@ class VMAXRest(object): :param array: the array serial number :param payload: the payload -- dict - :return: status_code -- int, message -- string, server response + :returns: status_code -- int, message -- string, server response """ return self.create_resource( array, SLOPROVISIONING, 'storagegroup', 
payload) @@ -625,7 +627,7 @@ class VMAXRest(object): :param array: the array serial number :param storagegroup: storage group name :param payload: the request payload - :return: status_code -- int, message -- string, server response + :returns: status_code -- int, message -- string, server response """ return self.modify_resource( array, SLOPROVISIONING, 'storagegroup', payload, @@ -806,8 +808,9 @@ class VMAXRest(object): return_value = False return return_value - def get_vmax_default_storage_group(self, array, srp, slo, workload, - do_disable_compression=False): + def get_vmax_default_storage_group( + self, array, srp, slo, workload, + do_disable_compression=False, is_re=False): """Get the default storage group. :param array: the array serial number @@ -815,10 +818,11 @@ class VMAXRest(object): :param slo: the SLO :param workload: the workload :param do_disable_compression: flag for disabling compression + :param is_re: flag for replication :returns: the storage group dict (or None), the storage group name """ storagegroup_name = self.utils.get_default_storage_group_name( - srp, slo, workload) + srp, slo, workload, do_disable_compression, is_re) storagegroup = self.get_storage_group(array, storagegroup_name) return storagegroup, storagegroup_name @@ -837,7 +841,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the volume device id - :return: volume dict + :returns: volume dict :raises: VolumeBackendAPIException """ volume_dict = self.get_resource( @@ -854,7 +858,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the volume device id - :return: volume dict + :returns: volume dict :raises: VolumeBackendAPIException """ try: @@ -864,7 +868,7 @@ class VMAXRest(object): array, SLOPROVISIONING, 'volume', params=params, private='/private') volume_dict = volume_info['resultList']['result'][0] - except KeyError: + except (KeyError, TypeError): exception_message = (_("Volume %(deviceID)s not found.") % 
{'deviceID': device_id}) LOG.error(exception_message) @@ -878,7 +882,7 @@ class VMAXRest(object): very large and could affect performance if called often. :param array: the array serial number :param params: filter parameters - :return: device_ids -- list + :returns: device_ids -- list """ device_ids = [] volumes = self.get_resource( @@ -960,7 +964,7 @@ class VMAXRest(object): :param array: the array serial number :param maskingview: the masking view name :param device_id: the device ID - :return: host_lun_id -- int + :returns: host_lun_id -- int """ host_lun_id = None resource_name = ('%(maskingview)s/connections' @@ -990,7 +994,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the volume device id - :return: storagegroup_list + :returns: storagegroup_list """ sg_list = [] vol = self.get_volume(array, device_id) @@ -1008,7 +1012,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the device id :param storagegroup: the storage group name - :return: bool + :returns: bool """ is_vol_in_sg = False sg_list = self.get_storage_groups_from_volume(array, device_id) @@ -1021,7 +1025,7 @@ class VMAXRest(object): :param array: the array serial number :param volume_name: the volume name (OS-) - :return: device_id + :returns: device_id """ device_id = None params = {"volume_identifier": volume_name} @@ -1039,7 +1043,7 @@ class VMAXRest(object): :param array: array serial number :param device_id: the device id - :return: the volume identifier -- string + :returns: the volume identifier -- string """ vol = self.get_volume(array, device_id) return vol['volume_identifier'] @@ -1049,7 +1053,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the volume device id - :return: size -- or None + :returns: size -- or None """ cap = None try: @@ -1066,7 +1070,7 @@ class VMAXRest(object): :param array: array serial number :param portgroup: the portgroup name - :return: portgroup dict or None 
+ :returns: portgroup dict or None """ return self.get_resource( array, SLOPROVISIONING, 'portgroup', resource_name=portgroup) @@ -1076,7 +1080,7 @@ class VMAXRest(object): :param array: the array serial number :param portgroup: the name of the portgroup - :return: list of port ids, e.g. ['FA-3D:35', 'FA-4D:32'] + :returns: list of port ids, e.g. ['FA-3D:35', 'FA-4D:32'] """ portlist = [] portgroup_info = self.get_portgroup(array, portgroup) @@ -1092,7 +1096,7 @@ class VMAXRest(object): :param array: the array serial number :param port_id: the port id - :return: port dict, or None + :returns: port dict, or None """ dir_id = port_id.split(':')[0] port_no = port_id.split(':')[1] @@ -1107,7 +1111,7 @@ class VMAXRest(object): :param array: the array serial number :param port_id: the director port identifier - :return: (list of ip_addresses, iqn) + :returns: (list of ip_addresses, iqn) """ ip_addresses, iqn = None, None port_details = self.get_port(array, port_id) @@ -1142,7 +1146,7 @@ class VMAXRest(object): :param array: the array serial number :param initiator_group: the initaitor group name :param params: optional filter parameters - :return: initiator group dict, or None + :returns: initiator group dict, or None """ return self.get_resource( array, SLOPROVISIONING, 'host', @@ -1153,7 +1157,7 @@ class VMAXRest(object): :param array: the array serial number :param initiator_id: the initiator id - :return: initiator dict, or None + :returns: initiator dict, or None """ return self.get_resource( array, SLOPROVISIONING, 'initiator', @@ -1164,7 +1168,7 @@ class VMAXRest(object): :param array: the array serial number :param params: dict of optional params - :return: list of initiators + :returns: list of initiators """ init_dict = self.get_resource( array, SLOPROVISIONING, 'initiator', params=params) @@ -1180,7 +1184,7 @@ class VMAXRest(object): Gets the list of initiators from the array which are in hosts/ initiator groups. 
:param array: the array serial number - :return: init_list + :returns: init_list """ params = {'in_a_host': 'true'} return self.get_initiator_list(array, params) @@ -1190,7 +1194,7 @@ class VMAXRest(object): :param array: the array serial number :param initiator: the initiator id - :return: found_init_group_name -- string + :returns: found_init_group_name -- string """ found_init_group_name = None init_details = self.get_initiator(array, initiator) @@ -1231,7 +1235,7 @@ class VMAXRest(object): :param array: array serial number :param masking_view_name: the masking view name - :return: masking view dict + :returns: masking view dict """ return self.get_resource( array, SLOPROVISIONING, 'maskingview', masking_view_name) @@ -1241,7 +1245,7 @@ class VMAXRest(object): :param array: array serial number :param params: optional GET parameters - :return: masking view list + :returns: masking view list """ masking_view_list = [] masking_view_details = self.get_resource( @@ -1257,7 +1261,7 @@ class VMAXRest(object): :param array: the array serial number :param storagegroup: the storage group name - :return: masking view list + :returns: masking view list """ maskingviewlist = [] storagegroup = self.get_storage_group(array, storagegroup) @@ -1296,7 +1300,7 @@ class VMAXRest(object): :param portgroup: the port group name - optional :param host: the host name - optional :param storagegroup: the storage group name - optional - :return: name of the specified element -- string + :returns: name of the specified element -- string :raises: VolumeBackendAPIException """ element = None @@ -1320,7 +1324,7 @@ class VMAXRest(object): :param array: the array serial number :param portgroup_name: the port group name :param ig_name: the initiator group name - :return: masking view list + :returns: masking view list """ params = {'port_group_name': portgroup_name, 'host_or_host_group_name': ig_name} @@ -1473,7 +1477,7 @@ class VMAXRest(object): :param array: the array serial number :param 
source_device_id: the source volume device ID - :return: message -- dict, or None + :returns: message -- dict, or None """ resource_name = ("%(device_id)s/snapshot" % {'device_id': source_device_id}) @@ -1486,7 +1490,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the source volume device id :param snap_name: the name of the snapshot - :return: snapshot dict, or None + :returns: snapshot dict, or None """ snapshot = None snap_info = self.get_volume_snap_info(array, device_id) @@ -1503,7 +1507,7 @@ class VMAXRest(object): :param array: the array serial number :param source_device_id: the osurce device id - :return: snapshot list or None + :returns: snapshot list or None """ snapshot_list = [] snap_info = self.get_volume_snap_info(array, source_device_id) @@ -1517,7 +1521,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the device id - :return: snapvx_tgt -- bool, snapvx_src -- bool, + :returns: snapvx_tgt -- bool, snapvx_src -- bool, rdf_grp -- list or None """ snapvx_src = False @@ -1544,7 +1548,7 @@ class VMAXRest(object): :param target_device_id: target device id :param snap_name: snapshot name :param extra_specs: extra specifications - :return: bool + :returns: bool """ def _wait_for_sync(): @@ -1579,8 +1583,7 @@ class VMAXRest(object): kwargs = {'retries': 0, 'wait_for_sync_called': False} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync) - rc = timer.start(interval=int( - extra_specs[utils.INTERVAL])).wait() + rc = timer.start(interval=extra_specs[utils.INTERVAL]).wait() return rc def _is_sync_complete(self, array, source_device_id, snap_name, @@ -1591,24 +1594,24 @@ class VMAXRest(object): :param source_device_id: source device id :param snap_name: the snapshot name :param target_device_id: the target device id - :return: defined -- bool + :returns: defined -- bool """ defined = True - session = self._get_sync_session( + session = self.get_sync_session( array, 
source_device_id, snap_name, target_device_id) if session: defined = session['defined'] return defined - def _get_sync_session(self, array, source_device_id, snap_name, - target_device_id): + def get_sync_session(self, array, source_device_id, snap_name, + target_device_id): """Get a particular sync session. :param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name :param target_device_id: the target device id - :return: sync session -- dict, or None + :returns: sync session -- dict, or None """ session = None linked_device_list = self.get_snap_linked_device_list( @@ -1623,7 +1626,7 @@ class VMAXRest(object): :param array: the array serial number :param source_device_id: the source device id - :return: list of snapshot dicts + :returns: list of snapshot dicts """ snap_dict_list = [] snapshots = self.get_volume_snapshot_list(array, source_device_id) @@ -1640,7 +1643,7 @@ class VMAXRest(object): :param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name - :return: linked_device_list + :returns: linked_device_list """ linked_device_list = [] snap_list = self._find_snap_vx_source_sessions(array, source_device_id) @@ -1655,7 +1658,7 @@ class VMAXRest(object): :param array: the array serial number :param device_id: the device id :param tgt_only: Flag - return only sessions where device is target - :return: list of snapshot dicts + :returns: list of snapshot dicts """ snap_dict_list, sessions = [], [] vol_details = self._get_private_volume(array, device_id) @@ -1691,3 +1694,156 @@ class VMAXRest(object): 'source_vol': source_vol} snap_dict_list.append(link_info) return snap_dict_list + + def get_rdf_group(self, array, rdf_number): + """Get specific rdf group details. 
+ + :param array: the array serial number + :param rdf_number: the rdf number + """ + return self.get_resource(array, REPLICATION, 'rdf_group', + rdf_number) + + def get_rdf_group_list(self, array): + """Get rdf group list from array. + + :param array: the array serial number + """ + return self.get_resource(array, REPLICATION, 'rdf_group') + + def get_rdf_group_volume(self, array, rdf_number, device_id): + """Get specific volume details, from an RDF group. + + :param array: the array serial number + :param rdf_number: the rdf group number + :param device_id: the device id + """ + resource_name = "%(rdf)s/volume/%(dev)s" % { + 'rdf': rdf_number, 'dev': device_id} + return self.get_resource(array, REPLICATION, 'rdf_group', + resource_name) + + def are_vols_rdf_paired(self, array, remote_array, device_id, + target_device, rdf_group): + """Check if a pair of volumes are RDF paired. + + :param array: the array serial number + :param remote_array: the remote array serial number + :param device_id: the device id + :param target_device: the target device id + :param rdf_group: the rdf group + :returns: paired -- bool, state -- string + """ + paired, local_vol_state, rdf_pair_state = False, '', '' + volume = self.get_rdf_group_volume(array, rdf_group, device_id) + if volume: + remote_volume = volume['remoteVolumeName'] + remote_symm = volume['remoteSymmetrixId'] + if (remote_volume == target_device + and remote_array == remote_symm): + paired = True + local_vol_state = volume['localVolumeState'] + rdf_pair_state = volume['rdfpairState'] + else: + LOG.warning("Cannot locate source RDF volume %s", device_id) + return paired, local_vol_state, rdf_pair_state + + def get_rdf_group_number(self, array, rdf_group_label): + """Given an rdf_group_label, return the associated group number. 
+ + :param array: the array serial number + :param rdf_group_label: the group label + :returns: rdf_group_number + """ + number = None + rdf_list = self.get_rdf_group_list(array) + if rdf_list and rdf_list.get('rdfGroupID'): + number = [rdf['rdfgNumber'] for rdf in rdf_list['rdfGroupID'] + if rdf['label'] == rdf_group_label][0] + if number: + rdf_group = self.get_rdf_group(array, number) + if not rdf_group: + number = None + return number + + def create_rdf_device_pair(self, array, device_id, rdf_group_no, + target_device, remote_array, + target_vol_name, extra_specs): + """Create an RDF pairing. + + Create a remote replication relationship between source and target + devices. + :param array: the array serial number + :param device_id: the device id + :param rdf_group_no: the rdf group number + :param target_device: the target device id + :param remote_array: the remote array serial + :param target_vol_name: the name of the target volume + :param extra_specs: the extra specs + :returns: rdf_dict + """ + payload = ({"deviceNameListSource": [{"name": device_id}], + "deviceNameListTarget": [{"name": target_device}], + "replicationMode": "Synchronous", + "establish": 'true', + "rdfType": 'RDF1'}) + resource_type = ("rdf_group/%(rdf_num)s/volume" + % {'rdf_num': rdf_group_no}) + status_code, job = self.create_resource(array, REPLICATION, + resource_type, payload, + private="/private") + self.wait_for_job('Create rdf pair', status_code, + job, extra_specs) + rdf_dict = {'array': remote_array, 'device_id': target_device} + return rdf_dict + + def modify_rdf_device_pair( + self, array, device_id, rdf_group, extra_specs, split=False): + """Modify an rdf device pair. 
+ + :param array: the array serial number + :param device_id: the device id + :param rdf_group: the rdf group + :param extra_specs: the extra specs + :param split: flag to indicate "split" action + """ + common_opts = {"force": 'false', + "symForce": 'false', + "star": 'false', + "hop2": 'false', + "bypass": 'false'} + if split: + common_opts.update({"immediate": 'false'}) + payload = {"action": "Split", + "executionOption": "ASYNCHRONOUS", + "split": common_opts} + + else: + common_opts.update({"establish": 'true', + "restore": 'false', + "remote": 'false', + "immediate": 'false'}) + payload = {"action": "Failover", + "executionOption": "ASYNCHRONOUS", + "failover": common_opts} + resource_name = ("%(rdf_num)s/volume/%(device_id)s" + % {'rdf_num': rdf_group, 'device_id': device_id}) + sc, job = self.modify_resource( + array, REPLICATION, 'rdf_group', + payload, resource_name=resource_name, private="/private") + self.wait_for_job('Modify device pair', sc, + job, extra_specs) + + def delete_rdf_pair(self, array, device_id, rdf_group): + """Delete an rdf pair. 
+ + :param array: the array serial number + :param device_id: the device id + :param rdf_group: the rdf group + """ + params = {'half': 'false', 'force': 'true', 'symforce': 'false', + 'star': 'false', 'bypass': 'false'} + resource_name = ("%(rdf_num)s/volume/%(device_id)s" + % {'rdf_num': rdf_group, 'device_id': device_id}) + self.delete_resource(array, REPLICATION, 'rdf_group', resource_name, + private="/private", params=params) diff --git a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py index 653ea2662e6..b95edc60178 100644 --- a/cinder/volume/drivers/dell_emc/vmax/utils.py +++ b/cinder/volume/drivers/dell_emc/vmax/utils.py @@ -25,6 +25,7 @@ import six from cinder import exception from cinder.i18n import _ +from cinder.objects import fields from cinder.volume import volume_types @@ -53,6 +54,7 @@ PARENT_SG_NAME = 'parent_sg_name' CONNECTOR = 'connector' VOL_NAME = 'volume_name' EXTRA_SPECS = 'extra_specs' +IS_RE = 'replication_enabled' DISABLECOMPRESSION = 'storagetype:disablecompression' @@ -147,13 +149,15 @@ class VMAXUtils(object): @staticmethod def get_default_storage_group_name( - srp_name, slo, workload, is_compression_disabled=False): + srp_name, slo, workload, is_compression_disabled=False, + is_re=False): """Determine default storage group from extra_specs. 
:param srp_name: the name of the srp on the array :param slo: the service level string e.g Bronze :param workload: the workload string e.g DSS :param is_compression_disabled: flag for disabling compression + :param is_re: flag for replication :returns: storage_group_name """ if slo and workload: @@ -166,6 +170,8 @@ class VMAXUtils(object): else: prefix = "OS-no_SLO" + if is_re: + prefix += "-RE" storage_group_name = ("%(prefix)s-SG" % {'prefix': prefix}) return storage_group_name @@ -374,7 +380,7 @@ class VMAXUtils(object): :param clone_name: the name of the clone :param source_device_id: the source device id - :return: snap_name + :returns: snap_name """ trunc_clone = self.truncate_string(clone_name, 10) snap_name = ("temp-%(device)s-%(clone)s" @@ -434,3 +440,64 @@ class VMAXUtils(object): return False else: return True + + @staticmethod + def is_replication_enabled(extra_specs): + """Check if replication is to be enabled. + + :param extra_specs: extra specifications + :returns: bool - true if enabled, else false + """ + replication_enabled = False + if IS_RE in extra_specs: + replication_enabled = True + return replication_enabled + + def get_replication_config(self, rep_device_list): + """Gather necessary replication configuration info. + + :param rep_device_list: the replication device list from cinder.conf + :returns: rep_config, replication configuration dict + """ + rep_config = {} + if not rep_device_list: + return None + else: + target = rep_device_list[0] + try: + rep_config['array'] = target['target_device_id'] + rep_config['srp'] = target['remote_pool'] + rep_config['rdf_group_label'] = target['rdf_group_label'] + rep_config['portgroup'] = target['remote_port_group'] + + except KeyError as ke: + error_message = (_("Failed to retrieve all necessary SRDF " + "information. 
Error received: %(ke)s.") % + {'ke': six.text_type(ke)}) + LOG.exception(error_message) + raise exception.VolumeBackendAPIException(data=error_message) + + try: + allow_extend = target['allow_extend'] + if strutils.bool_from_string(allow_extend): + rep_config['allow_extend'] = True + else: + rep_config['allow_extend'] = False + except KeyError: + rep_config['allow_extend'] = False + + return rep_config + + @staticmethod + def is_volume_failed_over(volume): + """Check if a volume has been failed over. + + :param volume: the volume object + :returns: bool + """ + if volume is not None: + if volume.get('replication_status') and ( + volume.replication_status == + fields.ReplicationStatus.FAILED_OVER): + return True + return False diff --git a/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml b/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml new file mode 100644 index 00000000000..92638b54939 --- /dev/null +++ b/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adding Replication V2.1 functionality to VMAX driver version 3.0.