From 67a2178eb490e35320138bd25da650eddc9cd79a Mon Sep 17 00:00:00 2001 From: Helen Walsh Date: Fri, 9 Dec 2016 10:58:02 +0000 Subject: [PATCH] VMAX driver - Implement volume replication for VMAX Volume replication supports disaster recovery solution where there has been a catastrophic event in your data centre for the VMAX array. Change-Id: I2aafe564cdb31895756b4b8884af2635b054ae59 Implements: blueprint add-vmax-replication --- .../unit/volume/drivers/emc/test_emc_vmax.py | 695 +++++++++++- cinder/volume/drivers/emc/emc_vmax_common.py | 999 +++++++++++++++++- cinder/volume/drivers/emc/emc_vmax_fc.py | 42 +- cinder/volume/drivers/emc/emc_vmax_iscsi.py | 45 +- cinder/volume/drivers/emc/emc_vmax_masking.py | 9 +- .../drivers/emc/emc_vmax_provision_v3.py | 203 +++- cinder/volume/drivers/emc/emc_vmax_utils.py | 229 ++-- ...add-vmax-replication-490202c15503ae03.yaml | 3 + 8 files changed, 2036 insertions(+), 189 deletions(-) create mode 100644 releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml diff --git a/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py b/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py index 5e116be2a..b650b5e7e 100644 --- a/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py +++ b/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py @@ -106,6 +106,10 @@ class Symm_ArrayChassis(dict): pass +class CIM_ConnectivityCollection(dict): + pass + + class SE_ReplicationSettingData(dict): def __init__(self, *args, **kwargs): self['DefaultInstance'] = self.createInstance() @@ -288,6 +292,16 @@ class EMCVMAXCommonData(object): lunmaskctrl_name = ( 'OS-fakehost-gold-I-MV') + rdf_group = 'test_rdf' + srdf_group_instance = ( + '//10.73.28.137/root/emc:Symm_RemoteReplicationCollection.' 
+ 'InstanceID="SYMMETRIX-+-000197200056-+-8-+-000195900551-+-8"') + rg_instance_name = { + 'CreationClassName': 'CIM_DeviceMaskingGroup', + 'ElementName': 'OS-SRP_1-gold-DSS-RE-SG', + 'SystemName': 'SYMMETRIX+000197200056' + } + initiatorgroup_id = ( 'SYMMETRIX+000195900551+OS-fakehost-IG') initiatorgroup_name = 'OS-fakehost-I-IG' @@ -316,6 +330,7 @@ class EMCVMAXCommonData(object): storagepoolid = 'SYMMETRIX+000195900551+U+gold' storagegroupname = 'OS-fakehost-gold-I-SG' defaultstoragegroupname = 'OS_default_GOLD1_SG' + re_storagegroup = 'OS-SRP_1-gold-DSS-RE-SG' storagevolume_creationclass = 'EMC_StorageVolume' policyrule = 'gold' poolname = 'gold' @@ -347,8 +362,13 @@ class EMCVMAXCommonData(object): 'SystemName': u'SYMMETRIX+000195900551', 'DeviceID': u'10', 'SystemCreationClassName': u'Symm_StorageSystem'} + re_keybindings = {'CreationClassName': u'Symm_StorageVolume', + 'SystemName': u'SYMMETRIX+000195900551', + 'DeviceID': u'1', + 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings} + 'keybindings': keybindings, + 'version': '2.5.0'} provider_location2 = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings2} provider_location3 = {'classname': 'Symm_StorageVolume', @@ -356,6 +376,7 @@ class EMCVMAXCommonData(object): provider_location_multi_pool = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings, 'version': '2.2.0'} + replication_driver_data = re_keybindings block_size = 512 majorVersion = 1 minorVersion = 2 @@ -539,6 +560,43 @@ class EMCVMAXCommonData(object): six.text_type(provider_location), 'display_description': 'snapshot source volume'} + test_volume_re = {'name': 'vol1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '1', + 'device_id': '1', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'test volume', + 'volume_type_id': 'abc', + 'provider_location': six.text_type( + provider_location), + 
'status': 'available', + 'replication_status': fields.ReplicationStatus.ENABLED, + 'host': fake_host, + 'NumberOfBlocks': 100, + 'BlockSize': block_size, + 'replication_driver_data': six.text_type( + replication_driver_data)} + + test_failed_re_volume = {'name': 'vol1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '1', + 'device_id': '1', + 'display_name': 'vol1', + 'volume_type_id': 'abc', + 'provider_location': six.text_type( + {'keybindings': 'fake_keybindings'}), + 'replication_status': ( + fields.ReplicationStatus.ENABLED), + 'replication_driver_data': 'fake_data', + 'host': fake_host, + 'NumberOfBlocks': 100, + 'BlockSize': block_size + } + test_CG = consistencygroup.ConsistencyGroup( context=None, name='myCG1', id='12345abcde', volume_type_id='abc', status=fields.ConsistencyGroupStatus.AVAILABLE) @@ -629,6 +687,16 @@ class EMCVMAXCommonData(object): 'portgroupname': u'OS-portgroup-PG', 'pool_name': u'Bronze+DSS+SRP_1+1234567891011'} + extra_specs_is_re = {'storagetype:pool': u'SRP_1', + 'volume_backend_name': 'VMAXReplication', + 'storagetype:workload': u'DSS', + 'storagetype:slo': u'Bronze', + 'storagetype:array': u'1234567891011', + 'isV3': True, + 'portgroupname': u'OS-portgroup-PG', + 'replication_enabled': True, + 'MultiPoolSupport': False} + remainingSLOCapacity = '123456789' SYNCHRONIZED = 4 UNSYNCHRONIZED = 3 @@ -651,11 +719,11 @@ class FakeEcomConnection(object): Operation=None, Synchronization=None, TheElements=None, TheElement=None, LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None, - ProtocolControllers=None, + ProtocolControllers=None, ConnectivityCollection=None, MaskingGroup=None, Members=None, HardwareId=None, ElementSource=None, EMCInPools=None, CompositeType=None, EMCNumberOfMembers=None, - EMCBindElements=None, + EMCBindElements=None, Mode=None, InElements=None, TargetPool=None, RequestedState=None, ReplicationGroup=None, ReplicationType=None, ReplicationSettingData=None, GroupName=None, Force=None, @@ -870,6 +938,8 @@ class 
FakeEcomConnection(object): result = self._assoc_lunmaskctrls() elif ResultClass == 'CIM_TargetMaskingGroup': result = self._assoc_portgroup() + elif ResultClass == 'CIM_ConnectivityCollection': + result = self._assoc_rdfgroup() else: result = self._default_assoc(objectpath) return result @@ -1152,6 +1222,14 @@ class FakeEcomConnection(object): assocs.append(assoc) return assocs + def _assoc_rdfgroup(self): + assocs = [] + assoc = CIM_ConnectivityCollection() + assoc['ElementName'] = self.data.rdf_group + assoc.path = self.data.srdf_group_instance + assocs.append(assoc) + return assocs + def _default_assoc(self, objectpath): return objectpath @@ -2137,12 +2215,12 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): def test_get_random_pg_from_list(self): portGroupNames = ['pg1', 'pg2', 'pg3', 'pg4'] portGroupName = ( - self.driver.common.utils._get_random_pg_from_list(portGroupNames)) + self.driver.common.utils.get_random_pg_from_list(portGroupNames)) self.assertIn('pg', portGroupName) portGroupNames = ['pg1'] portGroupName = ( - self.driver.common.utils._get_random_pg_from_list(portGroupNames)) + self.driver.common.utils.get_random_pg_from_list(portGroupNames)) self.assertEqual('pg1', portGroupName) def test_get_random_portgroup(self): @@ -3500,9 +3578,9 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.driver.create_snapshot(self.data.test_snapshot) @mock.patch.object( - emc_vmax_common.EMCVMAXCommon, - '_validate_pool', - return_value=('Bogus_Pool')) + emc_vmax_utils.EMCVMAXUtils, + 'parse_file_to_get_array_map', + return_value=None) def test_create_snapshot_no_fast_failed(self, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, @@ -4456,9 +4534,9 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase): self.driver.create_snapshot(self.data.test_snapshot) @mock.patch.object( - emc_vmax_common.EMCVMAXCommon, - '_validate_pool', - return_value=('Bogus_Pool')) + 
emc_vmax_utils.EMCVMAXUtils, + 'parse_file_to_get_array_map', + return_value=None) def test_create_snapshot_fast_failed(self, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, @@ -5686,9 +5764,9 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase): self.driver.create_snapshot(self.data.test_snapshot) @mock.patch.object( - emc_vmax_common.EMCVMAXCommon, - '_validate_pool', - return_value=('Bogus_Pool')) + emc_vmax_utils.EMCVMAXUtils, + 'parse_file_to_get_array_map', + return_value=None) def test_create_snapshot_fast_failed(self, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, @@ -5963,7 +6041,7 @@ class EMCV3DriverTestCase(test.TestCase): self.set_configuration() def set_configuration(self): - configuration = mock.Mock() + configuration = mock.MagicMock() configuration.cinder_emc_config_file = self.config_file_path configuration.config_group = 'V3' @@ -8723,7 +8801,8 @@ class EMCVMAXCommonTest(test.TestCase): 'workload': 'DSS', 'slo': 'Bronze'} self.driver.common._extend_volume( - volumeInstance, volumeName, new_size_gb, old_size_gbs, extraSpecs) + self.data.test_volume, volumeInstance, volumeName, + new_size_gb, old_size_gbs, extraSpecs) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, @@ -9069,6 +9148,34 @@ class EMCVMAXCommonTest(test.TestCase): self.data.test_host_1_v3, volumeName, 'retyping', new_type, extraSpecs)) + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_initial_setup', + return_value=EMCVMAXCommonData.extra_specs) + def test_failover_not_replicated(self, mock_setup): + common = self.driver.common + common.conn = FakeEcomConnection() + volumes = [self.data.test_volume] + # Path 1: Failover non replicated volume + verify_update_fo = [{'volume_id': volumes[0]['id'], + 'updates': {'status': 'error'}}] + secondary_id, volume_update = ( + common.failover_host('context', volumes, None)) + 
self.assertEqual(verify_update_fo, volume_update) + # Path 2: Failback non replicated volume + # Path 2a: Volume still available on primary + verify_update_fb1 = [{'volume_id': volumes[0]['id'], + 'updates': {'status': 'available'}}] + secondary_id, volume_update_1 = ( + common.failover_host('context', volumes, 'default')) + self.assertEqual(verify_update_fb1, volume_update_1) + # Path 2a: Volume not still available on primary + with mock.patch.object(common, '_find_lun', + return_value=None): + secondary_id, volume_update_2 = ( + common.failover_host('context', volumes, 'default')) + self.assertEqual(verify_update_fo, volume_update_2) + class EMCVMAXProvisionTest(test.TestCase): def setUp(self): @@ -9214,3 +9321,559 @@ class EMCVMAXISCSITest(test.TestCase): self.data.test_snapshot_v3, self.data.connector) common._unmap_lun.assert_called_once_with( self.data.test_snapshot_v3, self.data.connector) + + +class EMCV3ReplicationTest(test.TestCase): + + def setUp(self): + self.data = EMCVMAXCommonData() + + self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') + + self.tempdir = tempfile.mkdtemp() + super(EMCV3ReplicationTest, self).setUp() + self.config_file_path = None + self.create_fake_config_file_v3() + self.addCleanup(self._cleanup) + self.set_configuration() + + def set_configuration(self): + self.replication_device = [ + {'target_device_id': u'000195900551', + 'remote_port_group': self.data.port_group, + 'remote_pool': 'SRP_1', + 'rdf_group_label': self.data.rdf_group, + 'allow_extend': 'True'}] + self.configuration = mock.Mock( + replication_device=self.replication_device, + cinder_emc_config_file=self.config_file_path, + config_group='V3') + + def safe_get(key): + return getattr(self.configuration, key) + self.configuration.safe_get = safe_get + + self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', + self.fake_ecom_connection) + instancename = FakeCIMInstanceName() + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', + 
instancename.fake_getinstancename) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', + self.fake_is_v3) + self.mock_object(volume_types, 'get_volume_type_extra_specs', + self.fake_volume_type_extra_specs) + self.mock_object(emc_vmax_common.EMCVMAXCommon, + '_get_multi_pool_support_enabled_flag', + self.fake_get_multi_pool) + self.mock_object(emc_vmax_utils.EMCVMAXUtils, + 'get_existing_instance', + self.fake_get_existing_instance) + self.mock_object(cinder_utils, 'get_bool_param', + return_value=False) + self.patcher = mock.patch( + 'oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + self.patcher.start() + + driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=self.configuration) + driver.db = FakeDB() + self.driver = driver + + def create_fake_config_file_v3(self): + doc = minidom.Document() + emc = doc.createElement("EMC") + doc.appendChild(emc) + + ecomserverip = doc.createElement("EcomServerIp") + ecomserveriptext = doc.createTextNode("1.1.1.1") + emc.appendChild(ecomserverip) + ecomserverip.appendChild(ecomserveriptext) + + ecomserverport = doc.createElement("EcomServerPort") + ecomserverporttext = doc.createTextNode("10") + emc.appendChild(ecomserverport) + ecomserverport.appendChild(ecomserverporttext) + + ecomusername = doc.createElement("EcomUserName") + ecomusernametext = doc.createTextNode("user") + emc.appendChild(ecomusername) + ecomusername.appendChild(ecomusernametext) + + ecompassword = doc.createElement("EcomPassword") + ecompasswordtext = doc.createTextNode("pass") + emc.appendChild(ecompassword) + ecompassword.appendChild(ecompasswordtext) + + portgroup = doc.createElement("PortGroup") + portgrouptext = doc.createTextNode(self.data.port_group) + portgroup.appendChild(portgrouptext) + + pool = doc.createElement("Pool") + pooltext = doc.createTextNode("SRP_1") + emc.appendChild(pool) + pool.appendChild(pooltext) + + array = doc.createElement("Array") + arraytext = doc.createTextNode("1234567891011") 
+ emc.appendChild(array) + array.appendChild(arraytext) + + slo = doc.createElement("ServiceLevel") + slotext = doc.createTextNode("Bronze") + emc.appendChild(slo) + slo.appendChild(slotext) + + workload = doc.createElement("Workload") + workloadtext = doc.createTextNode("DSS") + emc.appendChild(workload) + workload.appendChild(workloadtext) + + portgroups = doc.createElement("PortGroups") + portgroups.appendChild(portgroup) + emc.appendChild(portgroups) + + timeout = doc.createElement("Timeout") + timeouttext = doc.createTextNode("0") + emc.appendChild(timeout) + timeout.appendChild(timeouttext) + + filename = 'cinder_emc_config_V3.xml' + + self.config_file_path = self.tempdir + '/' + filename + + f = open(self.config_file_path, 'w') + doc.writexml(f) + f.close() + + def fake_ecom_connection(self): + self.conn = FakeEcomConnection() + return self.conn + + def fake_is_v3(self, conn, serialNumber): + return True + + def fake_volume_type_extra_specs(self, volume_type): + extraSpecs = {'volume_backend_name': 'VMAXReplication', + 'replication_enabled': ' True'} + return extraSpecs + + def fake_get_multi_pool(self): + return False + + def fake_get_existing_instance(self, conn, instancename): + return instancename + + def _cleanup(self): + bExists = os.path.exists(self.config_file_path) + if bExists: + os.remove(self.config_file_path) + shutil.rmtree(self.tempdir) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + 'get_target_instance', + return_value='volume_instance') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_get_pool_and_storage_system', + return_value=(None, EMCVMAXCommonData.storage_system)) + def test_setup_volume_replication_success(self, mock_pool, + mock_target): + common = self.driver.common + common.conn = self.fake_ecom_connection() + sourceVolume = self.data.test_volume_re + volumeDict = self.data.provider_location + with mock.patch.object( + common, 'create_remote_replica', + return_value=(0, self.data.provider_location2)): + 
extraSpecs = self.data.extra_specs_is_re + rep_status, rep_driver_data = common.setup_volume_replication( + common.conn, sourceVolume, volumeDict, extraSpecs) + self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status) + self.assertEqual(self.data.keybindings2, rep_driver_data) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_get_pool_and_storage_system', + return_value=(None, EMCVMAXCommonData.storage_system)) + def test_setup_volume_replication_failed(self, mock_pool): + common = self.driver.common + common.conn = self.fake_ecom_connection() + sourceVolume = self.data.test_volume_re + volumeDict = self.data.provider_location + extraSpecs = self.data.extra_specs_is_re + self.assertRaises( + exception.VolumeBackendAPIException, + common.setup_volume_replication, common.conn, sourceVolume, + volumeDict, extraSpecs) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_cleanup_remote_target') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_get_pool_and_storage_system', + return_value=(None, EMCVMAXCommonData.storage_system)) + def test_cleanup_lun_replication(self, mock_pool, mock_delete): + common = self.driver.common + common.conn = self.fake_ecom_connection() + volume = self.data.test_volume_re + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + sourceInstance = common.conn.GetInstance(volumeInstanceName) + extraSpecs = self.data.extra_specs_is_re + common.cleanup_lun_replication(common.conn, volume, volume['name'], + sourceInstance, extraSpecs) + with mock.patch.object( + common.utils, 'find_volume_instance', + return_value={'ElementName': self.data.test_volume_re['id']}): + targetInstance = sourceInstance + repServiceInstanceName = common.conn.EnumerateInstanceNames( + 'EMC_ReplicationService')[0] + rep_config = common.utils.get_replication_config( + self.replication_device) + repExtraSpecs = common._get_replication_extraSpecs( + extraSpecs, rep_config) + 
common._cleanup_remote_target.assert_called_once_with( + common.conn, repServiceInstanceName, sourceInstance, + targetInstance, extraSpecs, repExtraSpecs) + + def test_get_rdf_details(self): + common = self.driver.common + conn = self.fake_ecom_connection() + rdfGroupInstance, repServiceInstanceName = ( + common.get_rdf_details(conn, self.data.storage_system)) + self.assertEqual(rdfGroupInstance, self.data.srdf_group_instance) + self.assertEqual(repServiceInstanceName, + conn.EnumerateInstanceNames( + 'EMC_ReplicationService')[0]) + + @mock.patch.object( + emc_vmax_provision_v3.EMCVMAXProvisionV3, + '_check_sync_state', + return_value=6) + def test_failover_volume_success(self, mock_sync): + volumes = [self.data.test_volume_re] + rep_data = self.data.replication_driver_data + loc = six.text_type(self.data.provider_location) + rep_data = six.text_type(rep_data) + check_update_list = ( + [{'volume_id': self.data.test_volume_re['id'], + 'updates': + {'replication_status': fields.ReplicationStatus.ENABLED, + 'provider_location': loc, + 'replication_driver_data': rep_data}}]) + secondary_id, volume_update_list = ( + self.driver.failover_host('context', volumes, 'default')) + self.assertEqual(check_update_list, volume_update_list) + + def test_failover_volume_failed(self): + fake_vol = self.data.test_failed_re_volume + fake_location = six.text_type( + {'keybindings': 'fake_keybindings'}) + fake_volumes = [fake_vol] + check_update_list = ( + [{'volume_id': fake_vol['id'], + 'updates': + {'replication_status': ( + fields.ReplicationStatus.FAILOVER_ERROR), + 'provider_location': fake_location, + 'replication_driver_data': 'fake_data'}}]) + secondary_id, volume_update_list = ( + self.driver.failover_host('context', fake_volumes, None)) + self.assertEqual(check_update_list, volume_update_list) + + @mock.patch.object( + emc_vmax_provision_v3.EMCVMAXProvisionV3, + '_check_sync_state', + return_value=12) + def test_failback_volume_success(self, mock_sync): + volumes = 
[self.data.test_volume_re] + provider_location = self.data.provider_location + loc = six.text_type(provider_location) + rep_data = six.text_type(self.data.replication_driver_data) + check_update_list = ( + [{'volume_id': self.data.test_volume_re['id'], + 'updates': + {'replication_status': fields.ReplicationStatus.ENABLED, + 'replication_driver_data': rep_data, + 'provider_location': loc}}]) + secondary_id, volume_update_list = ( + self.driver.failover_host('context', volumes, 'default')) + self.assertEqual(check_update_list, volume_update_list) + + def test_failback_volume_failed(self): + fake_vol = self.data.test_failed_re_volume + fake_location = six.text_type( + {'keybindings': 'fake_keybindings'}) + fake_volumes = [fake_vol] + check_update_list = ( + [{'volume_id': fake_vol['id'], + 'updates': + {'replication_status': ( + fields.ReplicationStatus.FAILOVER_ERROR), + 'provider_location': fake_location, + 'replication_driver_data': 'fake_data'}}]) + secondary_id, volume_update_list = ( + self.driver.failover_host('context', fake_volumes, 'default')) + self.assertEqual(check_update_list, volume_update_list) + + @mock.patch.object( + emc_vmax_utils.EMCVMAXUtils, + 'compare_size', + return_value=0) + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + 'add_volume_to_replication_group', + return_value=EMCVMAXCommonData.re_storagegroup) + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_create_remote_replica', + return_value=(0, EMCVMAXCommonData.provider_location)) + def test_extend_volume_is_replicated_success( + self, mock_replica, mock_sg, mock_size): + common = self.driver.common + common.conn = self.fake_ecom_connection() + volume = self.data.test_volume_re + new_size = '2' + newSizeBits = common.utils.convert_gb_to_bits(new_size) + extendedVolumeInstance = self.data.volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + extendedVolumeSize = common.utils.get_volume_size( + self.conn, extendedVolumeInstance) + 
self.driver.extend_volume(volume, new_size) + common.utils.compare_size.assert_called_once_with( + newSizeBits, extendedVolumeSize) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_create_remote_replica', + return_value=(1, 'error')) + def test_extend_volume_is_replicated_failed(self, mock_replica): + volume = self.data.test_volume_re + new_size = '2' + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, volume, new_size) + + @mock.patch.object( + emc_vmax_masking.EMCVMAXMasking, + 'remove_and_reset_members') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + 'add_volume_to_replication_group', + return_value=EMCVMAXCommonData.re_storagegroup) + @mock.patch.object( + emc_vmax_provision_v3.EMCVMAXProvisionV3, + 'get_volume_dict_from_job', + return_value=EMCVMAXCommonData.provider_location) + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_get_pool_and_storage_system', + return_value=(None, EMCVMAXCommonData.storage_system)) + def test_create_remote_replica_success(self, mock_pool, mock_volume_dict, + mock_sg, mock_return): + common = self.driver.common + common.conn = self.fake_ecom_connection() + repServiceInstanceName = common.conn.EnumerateInstanceNames( + 'EMC_ReplicationService')[0] + rdfGroupInstance = self.data.srdf_group_instance + sourceVolume = self.data.test_volume_re + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + sourceInstance = common.conn.GetInstance(volumeInstanceName) + targetInstance = sourceInstance + extraSpecs = self.data.extra_specs_is_re + rep_config = common.utils.get_replication_config( + self.replication_device) + referenceDict = EMCVMAXCommonData.provider_location + rc, rdfDict = common.create_remote_replica( + common.conn, repServiceInstanceName, rdfGroupInstance, + sourceVolume, sourceInstance, targetInstance, + extraSpecs, rep_config) + self.assertEqual(referenceDict, rdfDict) + + @mock.patch.object( + emc_vmax_masking.EMCVMAXMasking, + 
'remove_and_reset_members') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_cleanup_remote_target') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_get_pool_and_storage_system', + return_value=(None, EMCVMAXCommonData.storage_system)) + def test_create_remote_replica_failed(self, mock_pool, + mock_cleanup, mock_return): + common = self.driver.common + common.conn = self.fake_ecom_connection() + repServiceInstanceName = common.conn.EnumerateInstanceNames( + 'EMC_ReplicationService')[0] + rdfGroupInstance = self.data.srdf_group_instance + sourceVolume = self.data.test_volume_re + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + sourceInstance = common.conn.GetInstance(volumeInstanceName) + targetInstance = sourceInstance + extraSpecs = self.data.extra_specs_is_re + rep_config = common.utils.get_replication_config( + self.replication_device) + repExtraSpecs = common._get_replication_extraSpecs( + extraSpecs, rep_config) + with mock.patch.object(common.provisionv3, + '_create_element_replica_extra_params', + return_value=(9, 'error')): + with mock.patch.object(common.utils, + 'wait_for_job_complete', + return_value=(9, 'error')): + self.assertRaises( + exception.VolumeBackendAPIException, + common.create_remote_replica, common.conn, + repServiceInstanceName, rdfGroupInstance, sourceVolume, + sourceInstance, targetInstance, extraSpecs, rep_config) + common._cleanup_remote_target.assert_called_once_with( + common.conn, repServiceInstanceName, sourceInstance, + targetInstance, extraSpecs, repExtraSpecs) + + @mock.patch.object( + emc_vmax_masking.EMCVMAXMasking, + 'get_masking_view_from_storage_group', + return_value=None) + def test_add_volume_to_replication_group_success(self, mock_mv): + common = self.driver.common + common.conn = self.fake_ecom_connection() + controllerConfigService = ( + common.utils.find_controller_configuration_service( + common.conn, self.data.storage_system)) + volumeInstanceName = ( + 
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + volumeInstance = common.conn.GetInstance(volumeInstanceName) + volumeName = self.data.test_volume_re['name'] + extraSpecs = self.data.extra_specs_is_re + with mock.patch.object( + common.utils, 'find_storage_masking_group', + return_value=self.data.default_sg_instance_name): + common.add_volume_to_replication_group( + common.conn, controllerConfigService, + volumeInstance, volumeName, extraSpecs) + + def test_add_volume_to_replication_group_failed(self): + common = self.driver.common + common.conn = self.fake_ecom_connection() + controllerConfigService = ( + common.utils.find_controller_configuration_service( + common.conn, self.data.storage_system)) + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + volumeInstance = common.conn.GetInstance(volumeInstanceName) + volumeName = self.data.test_volume_re['name'] + extraSpecs = self.data.extra_specs_is_re + with mock.patch.object( + common.utils, 'find_storage_masking_group', + return_value=None): + self.assertRaises(exception.VolumeBackendAPIException, + common.add_volume_to_replication_group, + common.conn, controllerConfigService, + volumeInstance, volumeName, extraSpecs) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + 'add_volume_to_replication_group') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_create_v3_volume', + return_value=(0, EMCVMAXCommonData.provider_location, + EMCVMAXCommonData.storage_system)) + def test_create_replicated_volume_success(self, mock_create, mock_add): + model_update = self.driver.create_volume( + self.data.test_volume_re) + rep_status = model_update['replication_status'] + rep_data = model_update['replication_driver_data'] + self.assertEqual(fields.ReplicationStatus.ENABLED, + rep_status) + self.assertIsNotNone(rep_data) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_cleanup_replication_source') + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + 
'_create_v3_volume', + return_value=(0, EMCVMAXCommonData.provider_location, + EMCVMAXCommonData.storage_system)) + def test_create_replicated_volume_failed(self, mock_create, mock_cleanup): + common = self.driver.common + common.conn = self.fake_ecom_connection() + volumeName = self.data.test_volume_re['id'] + volumeDict = self.data.provider_location + extraSpecs = self.data.extra_specs_is_re + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, self.data.test_volume_re) + common._cleanup_replication_source.assert_called_once_with( + common.conn, volumeName, volumeDict, extraSpecs) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_delete_from_pool_v3') + def test_cleanup_replication_source(self, mock_delete): + common = self.driver.common + common.conn = self.fake_ecom_connection() + volumeName = self.data.test_volume_re['name'] + volumeDict = self.data.provider_location + extraSpecs = self.data.extra_specs_is_re + storageConfigService = ( + common.utils.find_storage_configuration_service( + common.conn, self.data.storage_system)) + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + sourceInstance = common.conn.GetInstance(volumeInstanceName) + deviceId = self.data.test_volume_re['device_id'] + common._cleanup_replication_source( + common.conn, volumeName, volumeDict, extraSpecs) + common._delete_from_pool_v3.assert_called_once_with( + storageConfigService, sourceInstance, + volumeName, deviceId, extraSpecs) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_delete_from_pool_v3') + def test_cleanup_remote_target(self, mock_delete): + common = self.driver.common + common.conn = self.fake_ecom_connection() + repServiceInstanceName = common.conn.EnumerateInstanceNames( + 'EMC_ReplicationService')[0] + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + sourceInstance = common.conn.GetInstance(volumeInstanceName) + targetInstance = 
sourceInstance.copy() + targetStorageConfigService = ( + common.utils.find_storage_configuration_service( + common.conn, self.data.storage_system)) + deviceId = targetInstance['DeviceID'] + volumeName = targetInstance['Name'] + extraSpecs = self.data.extra_specs_is_re + rep_config = common.utils.get_replication_config( + self.replication_device) + repExtraSpecs = common._get_replication_extraSpecs( + extraSpecs, rep_config) + common._cleanup_remote_target( + common.conn, repServiceInstanceName, sourceInstance, + targetInstance, extraSpecs, repExtraSpecs) + common._delete_from_pool_v3.assert_called_once_with( + targetStorageConfigService, targetInstance, volumeName, + deviceId, repExtraSpecs) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + 'cleanup_lun_replication') + def test_delete_re_volume(self, mock_cleanup): + common = self.driver.common + common.conn = self.fake_ecom_connection() + volume = self.data.test_volume_re + volumeName = volume['name'] + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + volumeInstance = common.conn.GetInstance(volumeInstanceName) + extraSpecs = self.data.extra_specs_is_re + self.driver.delete_volume(volume) + common.cleanup_lun_replication.assert_called_once_with( + common.conn, volume, volumeName, volumeInstance, extraSpecs) diff --git a/cinder/volume/drivers/emc/emc_vmax_common.py b/cinder/volume/drivers/emc/emc_vmax_common.py index abe5ae7cf..cc9f99869 100644 --- a/cinder/volume/drivers/emc/emc_vmax_common.py +++ b/cinder/volume/drivers/emc/emc_vmax_common.py @@ -56,7 +56,6 @@ EMC_ROOT = 'root/emc' POOL = 'storagetype:pool' ARRAY = 'storagetype:array' FASTPOLICY = 'storagetype:fastpolicy' -BACKENDNAME = 'volume_backend_name' COMPOSITETYPE = 'storagetype:compositetype' MULTI_POOL_SUPPORT = 'MultiPoolSupport' STRIPECOUNT = 'storagetype:stripecount' @@ -76,15 +75,26 @@ SNAPVX = 7 DISSOLVE_SNAPVX = 9 CREATE_NEW_TARGET = 2 SNAPVX_REPLICATION_TYPE = 6 +# Replication +IS_RE = 
'replication_enabled' +REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED +REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED +REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER +FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR +REPLICATION_ERROR = fields.ReplicationStatus.ERROR + +SUSPEND_SRDF = 22 +DETACH_SRDF = 8 +MIRROR_SYNC_TYPE = 6 emc_opts = [ cfg.StrOpt('cinder_emc_config_file', default=CINDER_EMC_CONFIG_FILE, - help='use this file for cinder emc plugin ' + help='Use this file for cinder emc plugin ' 'config data'), cfg.StrOpt('multi_pool_support', default=False, - help='use this value to specify' + help='Use this value to specify ' 'multi-pool support for VMAX3')] CONF.register_opts(emc_opts) @@ -105,16 +115,20 @@ class EMCVMAXCommon(object): 'storage_protocol': None, 'total_capacity_gb': 0, 'vendor_name': 'EMC', - 'volume_backend_name': None} + 'volume_backend_name': None, + 'replication_enabled': False, + 'replication_targets': None} pool_info = {'backend_name': None, 'config_file': None, 'arrays_info': {}, 'max_over_subscription_ratio': None, - 'reserved_percentage': None + 'reserved_percentage': None, + 'replication_enabled': False } - def __init__(self, prtcl, version, configuration=None): + def __init__(self, prtcl, version, configuration=None, + active_backend_id=None): if not pywbemAvailable: LOG.info(_LI( @@ -134,6 +148,12 @@ class EMCVMAXCommon(object): self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl) self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl) self.version = version + # replication + self.replication_enabled = False + self.extendReplicatedVolume = False + self.active_backend_id = active_backend_id + self.failover = False + self._get_replication_info() self.multiPoolSupportEnabled = False self._gather_info() @@ -173,6 +193,30 @@ class EMCVMAXCommon(object): arrayInfoList) self.pool_info['arrays_info'] = finalArrayInfoList + def _get_replication_info(self): + """Gather replication 
information, if provided.""" + self.rep_config = None + self.replication_targets = None + if hasattr(self.configuration, 'replication_device'): + self.rep_devices = self.configuration.safe_get( + 'replication_device') + if self.rep_devices and len(self.rep_devices) == 1: + self.rep_config = self.utils.get_replication_config( + self.rep_devices) + if self.rep_config: + self.replication_targets = [self.rep_config['array']] + if self.active_backend_id == self.rep_config['array']: + self.failover = True + self.extendReplicatedVolume = self.rep_config['allow_extend'] + # use self.replication_enabled for update_volume_stats + self.replication_enabled = True + LOG.debug("The replication configuration is %(rep_config)s.", + {'rep_config': self.rep_config}) + elif self.rep_devices and len(self.rep_devices) > 1: + LOG.error(_LE("More than one replication target is configured. " + "EMC VMAX only suppports a single replication " + "target. Replication will not be enabled.")) + def _get_slo_workload_combinations(self, arrayInfoList): """Method to query the array for SLO and Workloads. @@ -246,8 +290,9 @@ class EMCVMAXCommon(object): EMCNumberOfMembers is what the user specifies. 
:param volume: volume Object - :returns: dict -- volumeDict - the volume dictionary + :returns: model_update, dict """ + model_update = {} volumeSize = int(self.utils.convert_gb_to_bits(volume['size'])) volumeId = volume['id'] extraSpecs = self._initial_setup(volume) @@ -265,6 +310,21 @@ class EMCVMAXCommon(object): self._create_composite_volume(volume, volumeName, volumeSize, extraSpecs)) + # set-up volume replication, if enabled (V3 only) + if self.utils.is_replication_enabled(extraSpecs): + try: + replication_status, replication_driver_data = ( + self.setup_volume_replication( + self.conn, volume, volumeDict, extraSpecs)) + except Exception: + self._cleanup_replication_source(self.conn, volumeName, + volumeDict, extraSpecs) + raise + model_update.update( + {'replication_status': replication_status, + 'replication_driver_data': six.text_type( + replication_driver_data)}) + # If volume is created as part of a consistency group. if 'consistencygroup_id' in volume and volume['consistencygroup_id']: volumeInstance = self.utils.find_volume_instance( @@ -293,7 +353,10 @@ class EMCVMAXCommon(object): # Adding version information volumeDict['version'] = self.version - return volumeDict + model_update.update( + {'provider_location': six.text_type(volumeDict)}) + + return model_update def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. 
@@ -302,28 +365,70 @@ class EMCVMAXCommon(object): :param volume: volume Object :param snapshot: snapshot object - :returns: dict -- the cloned volume dictionary + :returns: model_update, dict :raises: VolumeBackendAPIException """ LOG.debug("Entering create_volume_from_snapshot.") extraSpecs = self._initial_setup(snapshot, host=volume['host']) + model_update = {} self.conn = self._get_ecom_connection() snapshotInstance = self._find_lun(snapshot) self._sync_check(snapshotInstance, snapshot['name'], extraSpecs) - return self._create_cloned_volume(volume, snapshot, extraSpecs, False) + cloneDict = self._create_cloned_volume(volume, snapshot, + extraSpecs, False) + # set-up volume replication, if enabled + if self.utils.is_replication_enabled(extraSpecs): + try: + replication_status, replication_driver_data = ( + self.setup_volume_replication( + self.conn, volume, cloneDict, extraSpecs)) + except Exception: + self._cleanup_replication_source(self.conn, snapshot['name'], + cloneDict, extraSpecs) + raise + model_update.update( + {'replication_status': six.text_type(replication_status), + 'replication_driver_data': replication_driver_data}) + + cloneDict['version'] = self.version + model_update.update( + {'provider_location': six.text_type(cloneDict)}) + + return model_update def create_cloned_volume(self, cloneVolume, sourceVolume): """Creates a clone of the specified volume. 
:param cloneVolume: clone volume Object :param sourceVolume: volume object - :returns: cloneVolumeDict -- the cloned volume dictionary + :returns: model_update, dict """ + model_update = {} extraSpecs = self._initial_setup(sourceVolume) - return self._create_cloned_volume(cloneVolume, sourceVolume, - extraSpecs, False) + cloneDict = self._create_cloned_volume(cloneVolume, sourceVolume, + extraSpecs, False) + + # set-up volume replication, if enabled + if self.utils.is_replication_enabled(extraSpecs): + try: + replication_status, replication_driver_data = ( + self.setup_volume_replication( + self.conn, cloneVolume, cloneDict, extraSpecs)) + except Exception: + self._cleanup_replication_source( + self.conn, cloneVolume['name'], cloneDict, extraSpecs) + raise + model_update.update( + {'replication_status': six.text_type(replication_status), + 'replication_driver_data': replication_driver_data}) + + cloneDict['version'] = self.version + model_update.update( + {'provider_location': six.text_type(cloneDict)}) + + return model_update def delete_volume(self, volume): """Deletes a EMC(VMAX) volume. 
@@ -393,6 +498,9 @@ class EMCVMAXCommon(object): :raises: VolumeBackendAPIException """ extraSpecs = self._initial_setup(volume) + if self.utils.is_volume_failed_over(volume): + extraSpecs = self._get_replication_extraSpecs( + extraSpecs, self.rep_config) volumename = volume['name'] LOG.info(_LI("Unmap volume: %(volume)s."), {'volume': volumename}) @@ -467,6 +575,9 @@ class EMCVMAXCommon(object): self.conn = self._get_ecom_connection() deviceInfoDict = self._wrap_find_device_number( volume, connector['host']) + if self.utils.is_volume_failed_over(volume): + extraSpecs = self._get_replication_extraSpecs( + extraSpecs, self.rep_config) maskingViewDict = self._populate_masking_dict( volume, connector, extraSpecs) @@ -659,20 +770,21 @@ class EMCVMAXCommon(object): LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return self._extend_volume( - volumeInstance, volumeName, newSize, originalVolumeSize, - extraSpecs) + volume, volumeInstance, volumeName, newSize, + originalVolumeSize, extraSpecs) def _extend_volume( - self, volumeInstance, volumeName, newSize, originalVolumeSize, - extraSpecs): + self, volume, volumeInstance, volumeName, newSize, + originalVolumeSize, extraSpecs): """Extends an existing volume. 
- :params volumeInstance: the volume Instance - :params volumeName: the volume name - :params newSize: the new size to increase the volume to - :params originalVolumeSize: the original size - :params extraSpecs: extra specifications - :returns: dict -- modifiedVolumeDict - the extended volume Object + :param volume: the volume Object + :param volumeInstance: the volume instance + :param volumeName: the volume name + :param newSize: the new size to increase the volume to + :param originalVolumeSize: + :param extraSpecs: extra specifications + :return: dict -- modifiedVolumeDict - the extended volume Object :raises: VolumeBackendAPIException """ if int(originalVolumeSize) > int(newSize): @@ -690,8 +802,14 @@ class EMCVMAXCommon(object): additionalVolumeSize) if extraSpecs[ISV3]: - rc, modifiedVolumeDict = self._extend_v3_volume( - volumeInstance, volumeName, newSize, extraSpecs) + if self.utils.is_replication_enabled(extraSpecs): + # extra logic required if volume is replicated + rc, modifiedVolumeDict = self.extend_volume_is_replicated( + volume, volumeInstance, volumeName, newSize, + extraSpecs) + else: + rc, modifiedVolumeDict = self._extend_v3_volume( + volumeInstance, volumeName, newSize, extraSpecs) else: # This is V2. 
rc, modifiedVolumeDict = self._extend_composite_volume( @@ -752,6 +870,9 @@ class EMCVMAXCommon(object): isV3 = self.utils.isArrayV3(self.conn, arrayInfo['SerialNumber']) if isV3: + if self.failover: + arrayInfo = self.get_secondary_stats_info( + self.rep_config, arrayInfo) # Report only the SLO name in the pool name for # backward compatibility if self.multiPoolSupportEnabled is False: @@ -792,8 +913,7 @@ class EMCVMAXCommon(object): % {'poolName': arrayInfo['PoolName'], 'array': arrayInfo['SerialNumber']}) - if (alreadyQueried - is True and self.multiPoolSupportEnabled is True): + if alreadyQueried and self.multiPoolSupportEnabled: # The dictionary will only have one key per VMAX3 # Construct the location info temp_location_info = ( @@ -815,7 +935,8 @@ class EMCVMAXCommon(object): 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'max_over_subscription_ratio': - max_oversubscription_ratio + max_oversubscription_ratio, + 'replication_enabled': self.replication_enabled } if ( arrays[arrayInfo['SerialNumber']][3] and @@ -836,7 +957,8 @@ class EMCVMAXCommon(object): 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'max_over_subscription_ratio': - max_oversubscription_ratio + max_oversubscription_ratio, + 'replication_enabled': self.replication_enabled } if ( array_reserve_percent and @@ -862,6 +984,8 @@ class EMCVMAXCommon(object): 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, + 'replication_enabled': self.replication_enabled, + 'replication_targets': self.replication_targets, 'pools': pools} return data @@ -931,6 +1055,14 @@ class EMCVMAXCommon(object): return False if extraSpecs[ISV3]: + if self.utils.is_replication_enabled(extraSpecs): + LOG.error(_LE("Volume %(name)s is replicated - " + "Replicated volumes are not eligible for " + "storage assisted retype. 
Host assisted " + "retype is supported."), + {'name': volumeName}) + return False + return self._slo_workload_migration(volumeInstance, volume, host, volumeName, volumeStatus, new_type, extraSpecs) @@ -1480,7 +1612,8 @@ class EMCVMAXCommon(object): :returns: dict -- the extra specs dict :returns: string -- configuration file """ - extraSpecs = self.utils.get_volumetype_extraspecs(volume, volumeTypeId) + extraSpecs = self.utils.get_volumetype_extraspecs( + volume, volumeTypeId) qosSpecs = self.utils.get_volumetype_qosspecs(volume, volumeTypeId) configGroup = None # If there are no extra specs then the default case is assumed. @@ -1491,6 +1624,8 @@ class EMCVMAXCommon(object): self.multiPoolSupportEnabled = ( self._get_multi_pool_support_enabled_flag()) extraSpecs[MULTI_POOL_SUPPORT] = self.multiPoolSupportEnabled + if extraSpecs.get('replication_enabled') == ' True': + extraSpecs[IS_RE] = True return extraSpecs, configurationFile, qosSpecs def _get_multi_pool_support_enabled_flag(self): @@ -1965,6 +2100,8 @@ class EMCVMAXCommon(object): if arrayInfo is not None: if extraSpecs['MultiPoolSupport'] is True: poolRecord = arrayInfo[0] + elif len(arrayInfo) == 1: + poolRecord = arrayInfo[0] else: poolRecord = self.utils.extract_record(arrayInfo, pool) @@ -2040,8 +2177,10 @@ class EMCVMAXCommon(object): shortHostName = self.utils.get_host_short_name(hostName) if isV3: maskingViewDict['isCompressionDisabled'] = False + maskingViewDict['replication_enabled'] = False slo = extraSpecs[SLO] workload = extraSpecs[WORKLOAD] + rep_enabled = self.utils.is_replication_enabled(extraSpecs) maskingViewDict['slo'] = slo maskingViewDict['workload'] = workload maskingViewDict['pool'] = uniqueName @@ -2065,6 +2204,9 @@ class EMCVMAXCommon(object): ("OS-%(shortHostName)s-No_SLO-%(protocol)s" % {'shortHostName': shortHostName, 'protocol': protocol})) + if rep_enabled: + prefix += "-RE" + maskingViewDict['replication_enabled'] = True else: maskingViewDict['fastPolicy'] = 
extraSpecs[FASTPOLICY] if maskingViewDict['fastPolicy']: @@ -2379,8 +2521,8 @@ class EMCVMAXCommon(object): cloneInstance = self.utils.find_volume_instance( self.conn, cloneDict, cloneName) self._extend_volume( - cloneInstance, cloneName, cloneVolume['size'], - old_size_gbs, extraSpecs) + cloneVolume, cloneInstance, cloneName, + cloneVolume['size'], old_size_gbs, extraSpecs) LOG.debug("Leaving _create_cloned_volume: Volume: " "%(cloneName)s Source Volume: %(sourceName)s " @@ -2439,7 +2581,7 @@ class EMCVMAXCommon(object): cloneDict, cloneName, storageConfigService, storageSystemName, fastPolicyName, extraSpecs) - def _delete_volume(self, volume, host=None): + def _delete_volume(self, volume, isSnapshot=False, host=None): """Helper function to delete the specified volume. :param volume: volume object to be deleted @@ -2469,9 +2611,14 @@ class EMCVMAXCommon(object): deviceId = volumeInstance['DeviceID'] if extraSpecs[ISV3]: - rc = self._delete_from_pool_v3( - storageConfigService, volumeInstance, volumeName, - deviceId, extraSpecs) + if isSnapshot: + rc = self._delete_from_pool_v3( + storageConfigService, volumeInstance, volumeName, + deviceId, extraSpecs) + else: + rc = self._delete_from_pool_v3( + storageConfigService, volumeInstance, volumeName, + deviceId, extraSpecs, volume) else: rc = self._delete_from_pool(storageConfigService, volumeInstance, volumeName, deviceId, @@ -2637,7 +2784,7 @@ class EMCVMAXCommon(object): self.conn = self._get_ecom_connection() # Delete the target device. - rc, snapshotname = self._delete_volume(snapshot, host) + rc, snapshotname = self._delete_volume(snapshot, True, host) LOG.info(_LI("Leaving delete_snapshot: %(ssname)s Return code: " "%(rc)lu."), {'ssname': snapshotname, @@ -3171,7 +3318,7 @@ class EMCVMAXCommon(object): def _get_or_create_storage_group_v3( self, poolName, slo, workload, doDisableCompression, - storageSystemName, extraSpecs): + storageSystemName, extraSpecs, is_re=False): """Get or create storage group_v3 (V3). 
:param poolName: the SRP pool nsmr @@ -3180,12 +3327,13 @@ class EMCVMAXCommon(object): :param doDisableCompression: flag for compression :param storageSystemName: storage system name :param extraSpecs: extra specifications + :param is_re: flag for replication :returns: sgInstanceName """ storageGroupName, controllerConfigService, sgInstanceName = ( self.utils.get_v3_default_sg_instance_name( self.conn, poolName, slo, workload, storageSystemName, - doDisableCompression)) + doDisableCompression, is_re)) if sgInstanceName is None: sgInstanceName = self.provisionv3.create_storage_group_v3( self.conn, controllerConfigService, storageGroupName, @@ -3726,7 +3874,7 @@ class EMCVMAXCommon(object): return rc def _delete_from_pool_v3(self, storageConfigService, volumeInstance, - volumeName, deviceId, extraSpecs): + volumeName, deviceId, extraSpecs, volume=None): """Delete from pool (v3). :param storageConfigService: the storage config service @@ -3734,6 +3882,7 @@ class EMCVMAXCommon(object): :param volumeName: the volume Name :param deviceId: the device ID of the volume :param extraSpecs: extra specifications + :param volume: the cinder volume object :returns: int -- return code :raises: VolumeBackendAPIException """ @@ -3748,6 +3897,10 @@ class EMCVMAXCommon(object): self.conn, controllerConfigurationService, volumeInstance, volumeName, extraSpecs, None, False) + if volume and self.utils.is_replication_enabled(extraSpecs): + self.cleanup_lun_replication(self.conn, volume, volumeName, + volumeInstance, extraSpecs) + LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool " "ConfigServic: %(service)s TheElement: %(vol_instance)s " "DeviceId: %(deviceId)s.", @@ -4421,9 +4574,19 @@ class EMCVMAXCommon(object): provider_location['classname'] = volpath['CreationClassName'] provider_location['keybindings'] = keys + # set-up volume replication, if enabled + if self.utils.is_replication_enabled(extraSpecs): + replication_status, replication_driver_data = ( + 
self.setup_volume_replication( + self.conn, volume, provider_location, extraSpecs)) + model_update.update( + {'replication_status': six.text_type(replication_status)}) + model_update.update( + {'replication_driver_data': replication_driver_data}) + model_update.update({'display_name': volumeElementName}) - volume['provider_location'] = six.text_type(provider_location) - model_update.update({'provider_location': volume['provider_location']}) + model_update.update( + {'provider_location': six.text_type(provider_location)}) return model_update def manage_existing_get_size(self, volume, external_ref): @@ -4843,7 +5006,7 @@ class EMCVMAXCommon(object): return cgName def _sync_check(self, volumeInstance, volumeName, extraSpecs): - """Check if volume is part of a sync process. + """Check if volume is part of a snapshot/clone sync process. :param volumeInstance: volume instance :param volumeName: volume name @@ -4873,3 +5036,753 @@ class EMCVMAXCommon(object): else: self.provision.delete_clone_relationship( self.conn, repservice, syncInstanceName, extraSpecs, True) + + def setup_volume_replication(self, conn, sourceVolume, volumeDict, + extraSpecs, targetInstance=None): + """Setup replication for volume, if enabled. + + Called on create volume, create cloned volume, + create volume from snapshot, manage_existing, + and re-establishing a replication relationship after extending. 
+ + :param conn: the connection to the ecom server + :param sourceVolume: the source volume object + :param volumeDict: the source volume dict (the provider_location) + :param extraSpecs: extra specifications + :param targetInstance: optional, target on secondary array + :return: rep_update - dict + """ + isTargetV3 = self.utils.isArrayV3(conn, self.rep_config['array']) + if not extraSpecs[ISV3] or not isTargetV3: + exception_message = (_("Replication is not supported on " + "VMAX 2")) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + sourceName = sourceVolume['name'] + sourceInstance = self.utils.find_volume_instance( + conn, volumeDict, sourceName) + LOG.debug('Starting replication setup ' + 'for volume: %s.', sourceVolume['name']) + storageSystem = sourceInstance['SystemName'] + # get rdf details + rdfGroupInstance, repServiceInstanceName = ( + self.get_rdf_details(conn, storageSystem)) + rdf_vol_size = sourceVolume['size'] + + # give the target volume the same Volume Element Name as the + # source volume + targetName = self.utils.get_volume_element_name( + sourceVolume['id']) + + if not targetInstance: + # create a target volume on the target array + # target must be passed in on remote replication + targetInstance = self.get_target_instance( + sourceVolume, self.rep_config, rdf_vol_size, + targetName, extraSpecs) + + LOG.debug("Create volume replica: Remote Volume: %(targetName)s " + "Source Volume: %(sourceName)s " + "Method: CreateElementReplica " + "ReplicationService: %(service)s ElementName: " + "%(elementname)s SyncType: 6 SourceElement: " + "%(sourceelement)s.", + {'targetName': targetName, + 'sourceName': sourceName, + 'service': repServiceInstanceName, + 'elementname': targetName, + 'sourceelement': sourceInstance.path}) + + # create the remote replica and establish the link + rc, rdfDict = self.create_remote_replica( + conn, repServiceInstanceName, rdfGroupInstance, + sourceVolume, 
sourceInstance, targetInstance, extraSpecs, + self.rep_config) + + LOG.info(_LI('Successfully setup replication for %s.'), + sourceVolume['name']) + replication_status = REPLICATION_ENABLED + replication_driver_data = rdfDict['keybindings'] + + return replication_status, replication_driver_data + + # called on delete volume after remove_and_reset_members + def cleanup_lun_replication(self, conn, volume, volumeName, + sourceInstance, extraSpecs): + """Cleanup target volume on delete. + + Extra logic if target is last in group. + :param conn: the connection to the ecom server + :param volume: the volume object + :param volumeName: the volume name + :param sourceInstance: the source volume instance + :param extraSpecs: extra specification + """ + LOG.debug('Starting cleanup replication from volume: ' + '%s.', volumeName) + try: + loc = volume['provider_location'] + rep_data = volume['replication_driver_data'] + + if (isinstance(loc, six.string_types) + and isinstance(rep_data, six.string_types)): + name = eval(loc) + replication_keybindings = eval(rep_data) + storageSystem = replication_keybindings['SystemName'] + rdfGroupInstance, repServiceInstanceName = ( + self.get_rdf_details(conn, storageSystem)) + repExtraSpecs = self._get_replication_extraSpecs( + extraSpecs, self.rep_config) + + targetVolumeDict = {'classname': name['classname'], + 'keybindings': replication_keybindings} + + targetInstance = self.utils.find_volume_instance( + conn, targetVolumeDict, volumeName) + # Ensure element name matches openstack id. + volumeElementName = (self.utils. 
+ get_volume_element_name(volume['id'])) + if volumeElementName != targetInstance['ElementName']: + targetInstance = None + + if targetInstance is not None: + # clean-up target + targetControllerConfigService = ( + self.utils.find_controller_configuration_service( + conn, storageSystem)) + self.masking.remove_and_reset_members( + conn, targetControllerConfigService, targetInstance, + volumeName, repExtraSpecs, None, False) + self._cleanup_remote_target( + conn, repServiceInstanceName, sourceInstance, + targetInstance, extraSpecs, repExtraSpecs) + LOG.info(_LI('Successfully destroyed replication for ' + 'volume: %(volume)s'), + {'volume': volumeName}) + else: + LOG.warning(_LW('Replication target not found for ' + 'replication-enabled volume: %(volume)s'), + {'volume': volumeName}) + except Exception as e: + LOG.error(_LE('Cannot get necessary information to cleanup ' + 'replication target for volume: %(volume)s. ' + 'The exception received was: %(e)s. Manual ' + 'clean-up may be required. Please contact ' + 'your administrator.'), + {'volume': volumeName, 'e': e}) + + def _cleanup_remote_target( + self, conn, repServiceInstanceName, sourceInstance, + targetInstance, extraSpecs, repExtraSpecs): + """Clean-up remote replication target after exception or on deletion. + + :param conn: connection to the ecom server + :param repServiceInstanceName: the replication service + :param sourceInstance: the source volume instance + :param targetInstance: the target volume instance + :param extraSpecs: extra specifications + :param repExtraSpecs: replication extra specifications + """ + storageSystem = sourceInstance['SystemName'] + targetStorageSystem = targetInstance['SystemName'] + syncInstanceName = self.utils.find_rdf_storage_sync_sv_sv( + conn, sourceInstance, storageSystem, + targetInstance, targetStorageSystem, + extraSpecs, False) + if syncInstanceName is not None: + # Break the sync relationship. 
+ self.break_rdf_relationship( + conn, repServiceInstanceName, syncInstanceName, extraSpecs) + targetStorageConfigService = ( + self.utils.find_storage_configuration_service( + conn, targetStorageSystem)) + deviceId = targetInstance['DeviceID'] + volumeName = targetInstance['Name'] + self._delete_from_pool_v3( + targetStorageConfigService, targetInstance, volumeName, + deviceId, repExtraSpecs) + + def _cleanup_replication_source( + self, conn, volumeName, volumeDict, extraSpecs): + """Cleanup a remote replication source volume on failure. + + If replication setup fails at any stage on a new volume create, + we must clean-up the source instance as the cinder database won't + be updated with the provider_location. This means the volume can not + be properly deleted from the array by cinder. + + :param conn: the connection to the ecom server + :param volumeName: the name of the volume + :param volumeDict: the source volume dictionary + :param extraSpecs: the extra specifications + """ + LOG.warning(_LW( + "Replication failed. Cleaning up the source volume. " + "Volume name: %(sourceName)s "), + {'sourceName': volumeName}) + sourceInstance = self.utils.find_volume_instance( + conn, volumeDict, volumeName) + storageSystem = sourceInstance['SystemName'] + deviceId = sourceInstance['DeviceID'] + volumeName = sourceInstance['Name'] + storageConfigService = ( + self.utils.find_storage_configuration_service( + conn, storageSystem)) + self._delete_from_pool_v3( + storageConfigService, sourceInstance, volumeName, + deviceId, extraSpecs) + + def break_rdf_relationship(self, conn, repServiceInstanceName, + syncInstanceName, extraSpecs): + # Break the sync relationship. 
+ LOG.debug("Suspending the SRDF relationship...") + self.provisionv3.break_replication_relationship( + conn, repServiceInstanceName, syncInstanceName, + SUSPEND_SRDF, extraSpecs, True) + LOG.debug("Detaching the SRDF relationship...") + self.provisionv3.break_replication_relationship( + conn, repServiceInstanceName, syncInstanceName, + DETACH_SRDF, extraSpecs, True) + + def get_rdf_details(self, conn, storageSystem): + """Retrieves an SRDF group instance. + + :param conn: connection to the ecom server + :param storageSystem: the storage system name + :return: + """ + if not self.rep_config: + exception_message = (_("Replication is not configured on " + "backend: %(backend)s.") % + {'backend': self.configuration.safe_get( + 'volume_backend_name')}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + repServiceInstanceName = self.utils.find_replication_service( + conn, storageSystem) + RDFGroupName = self.rep_config['rdf_group_label'] + LOG.info(_LI("Replication group: %(RDFGroup)s."), + {'RDFGroup': RDFGroupName}) + rdfGroupInstance = self.provisionv3.get_rdf_group_instance( + conn, repServiceInstanceName, RDFGroupName) + LOG.info(_LI("Found RDF group instance: %(RDFGroup)s."), + {'RDFGroup': rdfGroupInstance}) + if rdfGroupInstance is None: + exception_message = (_("Cannot find replication group: " + "%(RDFGroup)s.") % + {'RDFGroup': rdfGroupInstance}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + return rdfGroupInstance, repServiceInstanceName + + def failover_host(self, context, volumes, secondary_id=None): + """Fails over the volume back and forth. + + Driver needs to update following info for failed-over volume: + 1. provider_location: update array details + 2. 
replication_status: new status for replication-enabled volume + :param context: the context + :param volumes: the list of volumes to be failed over + :param secondary_id: the target backend + :return: secondary_id, volume_update_list + """ + volume_update_list = [] + if not self.conn: + self.conn = self._get_ecom_connection() + if secondary_id != 'default': + self.failover = True + if self.rep_config: + secondary_id = self.rep_config['array'] + else: + self.failover = False + secondary_id = None + + def failover_volume(vol, failover): + loc = vol['provider_location'] + rep_data = vol['replication_driver_data'] + try: + name = eval(loc) + replication_keybindings = eval(rep_data) + keybindings = name['keybindings'] + storageSystem = keybindings['SystemName'] + sourceInstance = self._find_lun(vol) + volumeDict = {'classname': name['classname'], + 'keybindings': replication_keybindings} + + targetInstance = self.utils.find_volume_instance( + self.conn, volumeDict, vol['name']) + targetStorageSystem = ( + replication_keybindings['SystemName']) + repServiceInstanceName = ( + self.utils.find_replication_service( + self.conn, storageSystem)) + + if failover: + storageSynchronizationSv = ( + self.utils.find_rdf_storage_sync_sv_sv( + self.conn, sourceInstance, storageSystem, + targetInstance, targetStorageSystem, + extraSpecs)) + self.provisionv3.failover_volume( + self.conn, repServiceInstanceName, + storageSynchronizationSv, + extraSpecs) + new_status = REPLICATION_FAILOVER + + else: + storageSynchronizationSv = ( + self.utils.find_rdf_storage_sync_sv_sv( + self.conn, targetInstance, targetStorageSystem, + sourceInstance, storageSystem, + extraSpecs, False)) + self.provisionv3.failback_volume( + self.conn, repServiceInstanceName, + storageSynchronizationSv, + extraSpecs) + new_status = REPLICATION_ENABLED + + # Transfer ownership to secondary_backend_id and + # update provider_location field + provider_location, replication_driver_data = ( + 
self.utils.failover_provider_location( + name, replication_keybindings)) + loc = six.text_type(provider_location) + rep_data = six.text_type(replication_driver_data) + + except Exception as ex: + msg = _LE( + 'Failed to failover volume %(volume_id)s. ' + 'Error: %(error)s.') + LOG.error(msg, {'volume_id': vol['id'], + 'error': ex}, ) + new_status = FAILOVER_ERROR + + model_update = {'volume_id': vol['id'], + 'updates': + {'replication_status': new_status, + 'replication_driver_data': rep_data, + 'provider_location': loc}} + volume_update_list.append(model_update) + + for volume in volumes: + extraSpecs = self._initial_setup(volume) + if self.utils.is_replication_enabled(extraSpecs): + failover_volume(volume, self.failover) + else: + if self.failover: + # Since the array has been failed-over, + # volumes without replication should be in error. + volume_update_list.append({ + 'volume_id': volume['id'], + 'updates': {'status': 'error'}}) + else: + # This is a failback, so we will attempt + # to recover non-failed over volumes + recovery = self.recover_volumes_on_failback(volume) + volume_update_list.append(recovery) + + LOG.info(_LI("Failover host complete")) + + return secondary_id, volume_update_list + + def recover_volumes_on_failback(self, volume): + """Recover volumes on failback. + + On failback, attempt to recover non RE(replication enabled) + volumes from primary array. 
+ + :param volume: + :return: volume_update + """ + + # check if volume still exists on the primary + volume_update = {'volume_id': volume['id']} + volumeInstance = self._find_lun(volume) + if not volumeInstance: + volume_update['updates'] = {'status': 'error'} + else: + try: + maskingview = self._is_volume_in_masking_view(volumeInstance) + except Exception: + maskingview = None + LOG.debug("Unable to determine if volume is in masking view.") + if not maskingview: + volume_update['updates'] = {'status': 'available'} + else: + volume_update['updates'] = {'status': 'in-use'} + return volume_update + + def _is_volume_in_masking_view(self, volumeInstance): + """Helper function to check if a volume is in a masking view. + + :param volumeInstance: the volume instance + :return: maskingview + """ + maskingView = None + volumeInstanceName = volumeInstance.path + storageGroups = self.utils.get_storage_groups_from_volume( + self.conn, volumeInstanceName) + if storageGroups: + for storageGroup in storageGroups: + maskingView = self.utils.get_masking_view_from_storage_group( + self.conn, storageGroup) + if maskingView: + break + return maskingView + + def extend_volume_is_replicated(self, volume, volumeInstance, + volumeName, newSize, extraSpecs): + """Extend a replication-enabled volume. + + Cannot extend volumes in a synchronization pair. 
+ Must first break the relationship, extend them + separately, then recreate the pair + :param volume: the volume objcet + :param volumeInstance: the volume instance + :param volumeName: the volume name + :param newSize: the new size the volume should be + :param extraSpecs: extra specifications + :return: rc, volumeDict + """ + if self.extendReplicatedVolume is True: + storageSystem = volumeInstance['SystemName'] + loc = volume['provider_location'] + rep_data = volume['replication_driver_data'] + try: + name = eval(loc) + replication_keybindings = eval(rep_data) + targetStorageSystem = replication_keybindings['SystemName'] + targetVolumeDict = {'classname': name['classname'], + 'keybindings': replication_keybindings} + targetVolumeInstance = self.utils.find_volume_instance( + self.conn, targetVolumeDict, volumeName) + repServiceInstanceName = self.utils.find_replication_service( + self.conn, targetStorageSystem) + storageSynchronizationSv = ( + self.utils.find_rdf_storage_sync_sv_sv( + self.conn, volumeInstance, storageSystem, + targetVolumeInstance, targetStorageSystem, + extraSpecs)) + + # volume must be removed from replication (storage) group + # before the replication relationship can be ended (cannot + # have a mix of replicated and non-replicated volumes as + # the SRDF groups become unmanageable). 
+ controllerConfigService = ( + self.utils.find_controller_configuration_service( + self.conn, storageSystem)) + self.masking.remove_and_reset_members( + self.conn, controllerConfigService, volumeInstance, + volumeName, extraSpecs, None, False) + + # repeat on target side + targetControllerConfigService = ( + self.utils.find_controller_configuration_service( + self.conn, targetStorageSystem)) + repExtraSpecs = self._get_replication_extraSpecs( + extraSpecs, self.rep_config) + self.masking.remove_and_reset_members( + self.conn, targetControllerConfigService, + targetVolumeInstance, volumeName, repExtraSpecs, + None, False) + + LOG.info(_LI("Breaking replication relationship...")) + self.break_rdf_relationship( + self.conn, repServiceInstanceName, + storageSynchronizationSv, extraSpecs) + + # extend the source volume + + LOG.info(_LI("Extending source volume...")) + rc, volumeDict = self._extend_v3_volume( + volumeInstance, volumeName, newSize, extraSpecs) + + # extend the target volume + LOG.info(_LI("Extending target volume...")) + self._extend_v3_volume(targetVolumeInstance, volumeName, + newSize, repExtraSpecs) + + # re-create replication relationship + LOG.info(_LI("Recreating replication relationship...")) + self.setup_volume_replication( + self.conn, volume, volumeDict, + extraSpecs, targetVolumeInstance) + + except Exception as e: + exception_message = (_("Error extending volume. " + "Error received was %(e)s") % + {'e': e}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + return rc, volumeDict + + else: + exceptionMessage = (_( + "Extending a replicated volume is not " + "permitted on this backend. 
Please contact " + "your administrator.")) + LOG.error(exceptionMessage) + raise exception.VolumeBackendAPIException(data=exceptionMessage) + + def create_remote_replica(self, conn, repServiceInstanceName, + rdfGroupInstance, sourceVolume, sourceInstance, + targetInstance, extraSpecs, rep_config): + """Create a replication relationship with a target volume. + + :param conn: the connection to the ecom server + :param repServiceInstanceName: the replication service + :param rdfGroupInstance: the SRDF group instance + :param sourceVolume: the source volume object + :param sourceInstance: the source volume instance + :param targetInstance: the target volume instance + :param extraSpecs: extra specifications + :param rep_config: the replication configuration + :return: rc, rdfDict - the target volume dictionary + """ + # remove source and target instances from their default storage groups + volumeName = sourceVolume['name'] + storageSystemName = sourceInstance['SystemName'] + controllerConfigService = ( + self.utils.find_controller_configuration_service( + conn, storageSystemName)) + repExtraSpecs = self._get_replication_extraSpecs(extraSpecs, + rep_config) + try: + self.masking.remove_and_reset_members( + conn, controllerConfigService, sourceInstance, + volumeName, extraSpecs, connector=None, reset=False) + + targetStorageSystemName = targetInstance['SystemName'] + targetControllerConfigService = ( + self.utils.find_controller_configuration_service( + conn, targetStorageSystemName)) + self.masking.remove_and_reset_members( + conn, targetControllerConfigService, targetInstance, + volumeName, repExtraSpecs, connector=None, reset=False) + + # establish replication relationship + rc, rdfDict = self._create_remote_replica( + conn, repServiceInstanceName, rdfGroupInstance, volumeName, + sourceInstance, targetInstance, extraSpecs, + controllerConfigService, repExtraSpecs) + + # add source and target instances to their replication groups + LOG.debug("Adding sourceInstance to 
default replication group.") + self.add_volume_to_replication_group(conn, controllerConfigService, + sourceInstance, volumeName, + extraSpecs) + LOG.debug("Adding targetInstance to default replication group.") + self.add_volume_to_replication_group( + conn, targetControllerConfigService, targetInstance, + volumeName, repExtraSpecs) + + except Exception as e: + LOG.warning( + _LW("Remote replication failed. Cleaning up the target " + "volume and returning source volume to default storage " + "group. Volume name: %(cloneName)s "), + {'cloneName': volumeName}) + + self._cleanup_remote_target( + conn, repServiceInstanceName, sourceInstance, + targetInstance, extraSpecs, repExtraSpecs) + # Re-throw the exception. + exception_message = (_("Remote replication failed with exception:" + " %(e)s") + % {'e': six.text_type(e)}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + return rc, rdfDict + + def add_volume_to_replication_group(self, conn, controllerConfigService, + volumeInstance, volumeName, + extraSpecs): + """Add a volume to the default replication group. + + SE_ReplicationGroups are actually VMAX storage groups under + the covers, so we can use our normal storage group operations. 
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller config service
+        :param volumeInstance: the volume instance
+        :param volumeName: the name of the volume
+        :param extraSpecs: extra specifications
+        :return: storageGroupInstanceName
+        """
+        storageGroupName = self.utils.get_v3_storage_group_name(
+            extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
+            False, True)
+        storageSystemName = volumeInstance['SystemName']
+        doDisableCompression = self.utils.is_compression_disabled(extraSpecs)
+        try:
+            storageGroupInstanceName = self._get_or_create_storage_group_v3(
+                extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
+                doDisableCompression, storageSystemName, extraSpecs,
+                is_re=True)
+        except Exception as e:
+            exception_message = (_("Failed to get or create replication "
+                                   "group. Exception received: %(e)s")
+                                 % {'e': six.text_type(e)})
+            LOG.exception(exception_message)
+            raise exception.VolumeBackendAPIException(
+                data=exception_message)
+
+        self.masking.add_volume_to_storage_group(
+            conn, controllerConfigService, storageGroupInstanceName,
+            volumeInstance, volumeName, storageGroupName, extraSpecs)
+
+        return storageGroupInstanceName
+
+    def _create_remote_replica(
+            self, conn, repServiceInstanceName, rdfGroupInstance,
+            volumeName, sourceInstance, targetInstance, extraSpecs,
+            controllerConfigService, repExtraSpecs):
+        """Helper function to establish a replication relationship.
+ + :param conn: the connection to the ecom server + :param repServiceInstanceName: replication service instance + :param rdfGroupInstance: rdf group instance + :param volumeName: volume name + :param sourceInstance: the source volume instance + :param targetInstance: the target volume instance + :param extraSpecs: extra specifications + :param controllerConfigService: the controller config service + :param repExtraSpecs: replication extra specifications + :return: rc, rdfDict - the target volume dictionary + """ + syncType = MIRROR_SYNC_TYPE + rc, job = self.provisionv3.create_remote_element_replica( + conn, repServiceInstanceName, volumeName, syncType, + sourceInstance, targetInstance, rdfGroupInstance, extraSpecs) + rdfDict = self.provisionv3.get_volume_dict_from_job( + self.conn, job['Job']) + + return rc, rdfDict + + def get_target_instance(self, sourceVolume, rep_config, + rdf_vol_size, targetName, extraSpecs): + """Create a replication target for a given source volume. + + :param sourceVolume: the source volume + :param rep_config: the replication configuration + :param rdf_vol_size: the size of the volume + :param targetName: the Element Name for the new volume + :param extraSpecs: the extra specifications + :return: the target instance + """ + repExtraSpecs = self._get_replication_extraSpecs( + extraSpecs, rep_config) + volumeSize = int(self.utils.convert_gb_to_bits(rdf_vol_size)) + rc, volumeDict, storageSystemName = self._create_v3_volume( + sourceVolume, targetName, volumeSize, repExtraSpecs) + targetInstance = self.utils.find_volume_instance( + self.conn, volumeDict, targetName) + return targetInstance + + def _get_replication_extraSpecs(self, extraSpecs, rep_config): + """Get replication extra specifications. + + Called when target array operations are necessary - + on create, extend, etc and when volume is failed over. 
+ :param extraSpecs: the extra specifications + :param rep_config: the replication configuration + :return: repExtraSpecs - dict + """ + repExtraSpecs = extraSpecs.copy() + repExtraSpecs[ARRAY] = rep_config['array'] + repExtraSpecs[POOL] = rep_config['pool'] + repExtraSpecs[PORTGROUPNAME] = rep_config['portgroup'] + + # if disable compression is set, check if target array is all flash + doDisableCompression = self.utils.is_compression_disabled( + extraSpecs) + if doDisableCompression: + if not self.utils.is_all_flash(self.conn, repExtraSpecs[ARRAY]): + repExtraSpecs.pop(self.utils.DISABLECOMPRESSION, None) + + # Check to see if SLO and Workload are configured on the target array. + poolInstanceName, storageSystemName = ( + self._get_pool_and_storage_system(repExtraSpecs)) + storagePoolCapability = self.provisionv3.get_storage_pool_capability( + self.conn, poolInstanceName) + if extraSpecs[SLO]: + if storagePoolCapability: + try: + self.provisionv3.get_storage_pool_setting( + self.conn, storagePoolCapability, extraSpecs[SLO], + extraSpecs[WORKLOAD]) + except Exception: + LOG.warning( + _LW("The target array does not support the storage " + "pool setting for SLO %(slo)s or workload " + "%(workload)s. Not assigning any SLO or " + "workload."), + {'slo': extraSpecs[SLO], + 'workload': extraSpecs[WORKLOAD]}) + repExtraSpecs[SLO] = None + if extraSpecs[WORKLOAD]: + repExtraSpecs[WORKLOAD] = None + + else: + LOG.warning(_LW("Cannot determine storage pool settings of " + "target array. Not assigning any SLO or " + "workload")) + repExtraSpecs[SLO] = None + if extraSpecs[WORKLOAD]: + repExtraSpecs[WORKLOAD] = None + + return repExtraSpecs + + def get_secondary_stats_info(self, rep_config, arrayInfo): + """On failover, report on secondary array statistics. 
+ + :param rep_config: the replication configuration + :param arrayInfo: the array info + :return: secondaryInfo - dict + """ + secondaryInfo = arrayInfo.copy() + secondaryInfo['SerialNumber'] = six.text_type(rep_config['array']) + secondaryInfo['PoolName'] = rep_config['pool'] + pool_info_specs = {ARRAY: secondaryInfo['SerialNumber'], + POOL: rep_config['pool'], + ISV3: True} + # Check to see if SLO and Workload are configured on the target array. + poolInstanceName, storageSystemName = ( + self._get_pool_and_storage_system(pool_info_specs)) + storagePoolCapability = self.provisionv3.get_storage_pool_capability( + self.conn, poolInstanceName) + if arrayInfo['SLO']: + if storagePoolCapability: + try: + self.provisionv3.get_storage_pool_setting( + self.conn, storagePoolCapability, arrayInfo['SLO'], + arrayInfo['Workload']) + except Exception: + LOG.info( + _LI("The target array does not support the storage " + "pool setting for SLO %(slo)s or workload " + "%(workload)s. SLO stats will not be reported."), + {'slo': arrayInfo['SLO'], + 'workload': arrayInfo['Workload']}) + secondaryInfo['SLO'] = None + if arrayInfo['Workload']: + secondaryInfo['Workload'] = None + if self.multiPoolSupportEnabled: + self.multiPoolSupportEnabled = False + + else: + LOG.info(_LI("Cannot determine storage pool settings of " + "target array. 
SLO stats will not be reported.")) + secondaryInfo['SLO'] = None + if arrayInfo['Workload']: + secondaryInfo['Workload'] = None + if self.multiPoolSupportEnabled: + self.multiPoolSupportEnabled = False + return secondaryInfo diff --git a/cinder/volume/drivers/emc/emc_vmax_fc.py b/cinder/volume/drivers/emc/emc_vmax_fc.py index a678df3ef..cc564881c 100644 --- a/cinder/volume/drivers/emc/emc_vmax_fc.py +++ b/cinder/volume/drivers/emc/emc_vmax_fc.py @@ -75,6 +75,7 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver): - Storage assisted volume migration via retype (bp vmax-volume-migration) - Support for compression on All Flash + - Volume replication 2.1 (bp add-vmax-replication) """ @@ -86,44 +87,32 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(EMCVMAXFCDriver, self).__init__(*args, **kwargs) + self.active_backend_id = kwargs.get('active_backend_id', None) self.common = emc_vmax_common.EMCVMAXCommon( 'FC', self.VERSION, - configuration=self.configuration) + configuration=self.configuration, + active_backend_id=self.active_backend_id) self.zonemanager_lookup_service = fczm_utils.create_lookup_service() def check_for_setup_error(self): pass def create_volume(self, volume): - """Creates a EMC(VMAX/VNX) volume.""" - volpath = self.common.create_volume(volume) - - model_update = {} - volume['provider_location'] = six.text_type(volpath) - model_update['provider_location'] = volume['provider_location'] - return model_update + """Creates a VMAX volume.""" + return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" - volpath = self.common.create_volume_from_snapshot(volume, snapshot) - - model_update = {} - volume['provider_location'] = six.text_type(volpath) - model_update['provider_location'] = volume['provider_location'] - return model_update + return self.common.create_volume_from_snapshot( + volume, snapshot) def create_cloned_volume(self, 
volume, src_vref): """Creates a cloned volume.""" - volpath = self.common.create_cloned_volume(volume, src_vref) - - model_update = {} - volume['provider_location'] = six.text_type(volpath) - model_update['provider_location'] = volume['provider_location'] - return model_update + return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): - """Deletes an EMC volume.""" + """Deletes an VMAX volume.""" self.common.delete_volume(volume) def create_snapshot(self, snapshot): @@ -524,3 +513,14 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver): def backup_use_temp_snapshot(self): return True + + def failover_host(self, context, volumes, secondary_id=None): + """Failover volumes to a secondary host/ backend. + + :param context: the context + :param volumes: the list of volumes to be failed over + :param secondary_id: the backend to be failed over to, is 'default' + if fail back + :return: secondary_id, volume_update_list + """ + return self.common.failover_host(context, volumes, secondary_id) diff --git a/cinder/volume/drivers/emc/emc_vmax_iscsi.py b/cinder/volume/drivers/emc/emc_vmax_iscsi.py index 9403e2a8b..09cd61a63 100644 --- a/cinder/volume/drivers/emc/emc_vmax_iscsi.py +++ b/cinder/volume/drivers/emc/emc_vmax_iscsi.py @@ -81,6 +81,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): - Storage assisted volume migration via retype (bp vmax-volume-migration) - Support for compression on All Flash + - Volume replication 2.1 (bp add-vmax-replication) """ @@ -92,43 +93,32 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(EMCVMAXISCSIDriver, self).__init__(*args, **kwargs) + self.active_backend_id = kwargs.get('active_backend_id', None) self.common = ( - emc_vmax_common.EMCVMAXCommon('iSCSI', - self.VERSION, - configuration=self.configuration)) + emc_vmax_common.EMCVMAXCommon( + 'iSCSI', + self.VERSION, + configuration=self.configuration, + active_backend_id=self.active_backend_id)) def 
check_for_setup_error(self):
         pass
 
     def create_volume(self, volume):
         """Creates a VMAX volume."""
-        volpath = self.common.create_volume(volume)
-
-        model_update = {}
-        volume['provider_location'] = six.text_type(volpath)
-        model_update['provider_location'] = volume['provider_location']
-        return model_update
+        return self.common.create_volume(volume)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot."""
-        volpath = self.common.create_volume_from_snapshot(volume, snapshot)
-
-        model_update = {}
-        volume['provider_location'] = six.text_type(volpath)
-        model_update['provider_location'] = volume['provider_location']
-        return model_update
+        return self.common.create_volume_from_snapshot(
+            volume, snapshot)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a cloned volume."""
-        volpath = self.common.create_cloned_volume(volume, src_vref)
-
-        model_update = {}
-        volume['provider_location'] = six.text_type(volpath)
-        model_update['provider_location'] = volume['provider_location']
-        return model_update
+        return self.common.create_cloned_volume(volume, src_vref)
 
     def delete_volume(self, volume):
-        """Deletes an EMC volume."""
+        """Deletes a VMAX volume."""
         self.common.delete_volume(volume)
 
     def create_snapshot(self, snapshot):
@@ -448,3 +438,14 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
 
     def backup_use_temp_snapshot(self):
         return True
+
+    def failover_host(self, context, volumes, secondary_id=None):
+        """Failover volumes to a secondary host/ backend.
+ + :param context: the context + :param volumes: the list of volumes to be failed over + :param secondary_id: the backend to be failed over to, is 'default' + if fail back + :return: secondary_id, volume_update_list + """ + return self.common.failover_host(context, volumes, secondary_id) diff --git a/cinder/volume/drivers/emc/emc_vmax_masking.py b/cinder/volume/drivers/emc/emc_vmax_masking.py index cb0f253a2..532e29e81 100644 --- a/cinder/volume/drivers/emc/emc_vmax_masking.py +++ b/cinder/volume/drivers/emc/emc_vmax_masking.py @@ -204,7 +204,8 @@ class EMCVMAXMasking(object): maskingviewdict['pool'], maskingviewdict['slo'], maskingviewdict['workload'], - maskingviewdict['isCompressionDisabled']) + maskingviewdict['isCompressionDisabled'], + maskingviewdict['replication_enabled']) assocStorageGroupInstanceNames = ( self.utils.get_storage_groups_from_volume( conn, volumeinstance.path)) @@ -2230,16 +2231,18 @@ class EMCVMAXMasking(object): """Return volume to the default storage group in v3. 
:param conn: the ecom connection - :param controllerConfigService: controller config service + :param controllerConfigurationService: controller config service :param volumeInstance: volumeInstance :param volumeName: the volume name :param extraSpecs: additional info :raises: VolumeBackendAPIException """ + rep_enabled = self.utils.is_replication_enabled(extraSpecs) isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) storageGroupName = self.utils.get_v3_storage_group_name( extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD], isCompressionDisabled) + extraSpecs[self.utils.WORKLOAD], isCompressionDisabled, + rep_enabled) storageGroupInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigurationService, storageGroupName) diff --git a/cinder/volume/drivers/emc/emc_vmax_provision_v3.py b/cinder/volume/drivers/emc/emc_vmax_provision_v3.py index e2fe4c58e..9dc5c65b6 100644 --- a/cinder/volume/drivers/emc/emc_vmax_provision_v3.py +++ b/cinder/volume/drivers/emc/emc_vmax_provision_v3.py @@ -35,6 +35,12 @@ INFO_SRC_V3 = 3 ACTIVATESNAPVX = 4 DEACTIVATESNAPVX = 19 SNAPSYNCTYPE = 7 +RDF_FAILOVER = 10 +RDF_FAILBACK = 11 +RDF_RESYNC = 14 +RDF_SYNC_MODE = 2 +RDF_SYNCHRONIZED = 6 +RDF_FAILEDOVER = 12 class EMCVMAXProvisionV3(object): @@ -231,6 +237,29 @@ class EMCVMAXProvisionV3(object): return volumeDict + def get_or_create_default_sg(self, conn, extraSpecs, storageSystemName, + doDisableCompression): + """Get or create default storage group for a replica. 
+ + :param conn: the connection to the ecom server + :param extraSpecs: the extra specifications + :param storageSystemName: the storage system name + :param doDisableCompression: flag for compression + :returns: sgInstanceName, instance of storage group + """ + pool = extraSpecs[self.utils.POOL] + slo = extraSpecs[self.utils.SLO] + workload = extraSpecs[self.utils.WORKLOAD] + storageGroupName, controllerConfigService, sgInstanceName = ( + self.utils.get_v3_default_sg_instance_name( + conn, pool, slo, workload, storageSystemName, + doDisableCompression)) + if sgInstanceName is None: + sgInstanceName = self.create_storage_group_v3( + conn, controllerConfigService, storageGroupName, + pool, slo, workload, extraSpecs, doDisableCompression) + return sgInstanceName + def create_element_replica( self, conn, repServiceInstanceName, cloneName, syncType, sourceInstance, extraSpecs, @@ -257,12 +286,9 @@ class EMCVMAXProvisionV3(object): 'source': sourceInstance.path}) storageSystemName = sourceInstance['SystemName'] doDisableCompression = self.utils.is_compression_disabled(extraSpecs) - __, __, sgInstanceName = ( - self.utils.get_v3_default_sg_instance_name( - conn, extraSpecs[self.utils.POOL], - extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD], storageSystemName, - doDisableCompression)) + sgInstanceName = ( + self.get_or_create_default_sg( + conn, extraSpecs, storageSystemName, doDisableCompression)) try: storageGroupInstance = conn.GetInstance(sgInstanceName) except Exception: @@ -309,9 +335,54 @@ class EMCVMAXProvisionV3(object): return rc, job return do_create_element_replica() + def create_remote_element_replica( + self, conn, repServiceInstanceName, cloneName, syncType, + sourceInstance, targetInstance, rdfGroupInstance, extraSpecs): + """Create a replication relationship between source and target. 
+ + :param conn: the ecom connection + :param repServiceInstanceName: the replication service + :param cloneName: the name of the target volume + :param syncType: the synchronization type + :param sourceInstance: the source volume instance + :param targetInstance: the target volume instance + :param rdfGroupInstance: the rdf group instance + :param extraSpecs: additional info + :return: rc, job + """ + startTime = time.time() + LOG.debug("Setup replication relationship: %(source)s " + "syncType: %(syncType)s Source: %(target)s.", + {'source': sourceInstance.path, + 'syncType': syncType, + 'target': targetInstance.path}) + rc, job = self._create_element_replica_extra_params( + conn, repServiceInstanceName, cloneName, syncType, + sourceInstance, targetInstance, None, None, rdfGroupInstance) + if rc != 0: + rc, errordesc = self.utils.wait_for_job_complete(conn, job, + extraSpecs) + if rc != 0: + exceptionMessage = ( + _("Error Create Cloned Volume: %(cloneName)s " + "Return code: %(rc)lu. Error: %(error)s.") + % {'cloneName': cloneName, + 'rc': rc, + 'error': errordesc}) + LOG.error(exceptionMessage) + raise exception.VolumeBackendAPIException( + data=exceptionMessage) + + LOG.debug("InvokeMethod CreateElementReplica " + "took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(startTime, + time.time())}) + return rc, job + def _create_element_replica_extra_params( self, conn, repServiceInstanceName, cloneName, syncType, - sourceInstance, targetInstance, rsdInstance, sgInstanceName): + sourceInstance, targetInstance, rsdInstance, sgInstanceName, + rdfGroupInstance=None): """CreateElementReplica using extra parameters. 
:param conn: the connection to the ecom server @@ -326,6 +397,7 @@ class EMCVMAXProvisionV3(object): :returns: job - job object of the replica creation operation """ syncType = self.utils.get_num(syncType, '16') + modeType = self.utils.get_num(RDF_SYNC_MODE, '16') if targetInstance and rsdInstance: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, @@ -334,13 +406,14 @@ class EMCVMAXProvisionV3(object): SourceElement=sourceInstance.path, TargetElement=targetInstance.path, ReplicationSettingData=rsdInstance) - elif targetInstance: + elif targetInstance and rdfGroupInstance: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, SyncType=syncType, + Mode=modeType, SourceElement=sourceInstance.path, - TargetElement=targetInstance.path) + TargetElement=targetInstance.path, + ConnectivityCollection=rdfGroupInstance) elif rsdInstance: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, @@ -349,7 +422,13 @@ class EMCVMAXProvisionV3(object): SourceElement=sourceInstance.path, ReplicationSettingData=rsdInstance, Collections=[sgInstanceName]) - + elif targetInstance: + rc, job = conn.InvokeMethod( + 'CreateElementReplica', repServiceInstanceName, + ElementName=cloneName, + SyncType=syncType, + SourceElement=sourceInstance.path, + TargetElement=targetInstance.path) return rc, job def break_replication_relationship( @@ -871,3 +950,105 @@ class EMCVMAXProvisionV3(object): # Find the newly created volume. volumeDict = self.get_volume_dict_from_job(conn, job['Job']) return volumeDict, rc + + def get_rdf_group_instance(self, conn, repServiceInstanceName, + RDFGroupName): + """Get the SRDF group instance. 
+ + :param conn: the connection to the ecom server + :param repServiceInstanceName: the replication service + :param RDFGroupName: the element name of the RDF group + :return: foundRDFGroupInstanceName + """ + foundRDFGroupInstanceName = None + + RDFGroupInstances = ( + conn.Associators(repServiceInstanceName, + ResultClass='CIM_ConnectivityCollection')) + + for RDFGroupInstance in RDFGroupInstances: + + if RDFGroupName == ( + six.text_type(RDFGroupInstance['ElementName'])): + # Check that it has not been deleted recently. + instance = self.utils.get_existing_instance( + conn, RDFGroupInstance.path) + if instance is None: + # SRDF group not found. + foundRDFGroupInstanceName = None + else: + foundRDFGroupInstanceName = ( + RDFGroupInstance.path) + break + return foundRDFGroupInstanceName + + def failover_volume(self, conn, repServiceInstanceName, + storageSynchronizationSv, + extraSpecs): + """Failover a volume to its target device. + + :param conn: the connection to the ecom server + :param repServiceInstanceName: the replication service + :param storageSynchronizationSv: the storage synchronized object + :param extraSpecs: the extra specifications + """ + operation = RDF_FAILOVER + # check if volume already in failover state + syncState = self._check_sync_state(conn, storageSynchronizationSv) + if syncState == RDF_FAILEDOVER: + return + + else: + LOG.debug("Failover: %(sv)s operation: %(operation)s.", + {'sv': storageSynchronizationSv, 'operation': operation}) + + return self._modify_replica_synchronization( + conn, repServiceInstanceName, storageSynchronizationSv, + operation, extraSpecs) + + def failback_volume(self, conn, repServiceInstanceName, + storageSynchronizationSv, + extraSpecs): + """Failback a volume to the source device. 
+ + :param conn: the connection to the ecom server + :param repServiceInstanceName: the replication service + :param storageSynchronizationSv: the storage synchronized object + :param extraSpecs: the extra specifications + """ + failback_operation = RDF_FAILBACK + # check if volume already in failback state + syncState = self._check_sync_state(conn, storageSynchronizationSv) + if syncState == RDF_SYNCHRONIZED: + return + + else: + LOG.debug("Failback: %(sv)s operation: %(operation)s.", + {'sv': storageSynchronizationSv, + 'operation': failback_operation}) + + return self._modify_replica_synchronization( + conn, repServiceInstanceName, storageSynchronizationSv, + failback_operation, extraSpecs) + + def _check_sync_state(self, conn, syncName): + """Get the copy state of a sync name. + + :param conn: the connection to the ecom server + :param syncName: the storage sync sv name + :return: the copy state + """ + try: + syncInstance = conn.GetInstance(syncName, + LocalOnly=False) + syncState = syncInstance['syncState'] + LOG.debug("syncState is %(syncState)lu.", + {'syncState': syncState}) + return syncState + except Exception as ex: + exceptionMessage = ( + _("Getting sync instance failed with: %(ex)s.") + % {'ex': six.text_type(ex)}) + LOG.exception(exceptionMessage) + raise exception.VolumeBackendAPIException( + data=exceptionMessage) diff --git a/cinder/volume/drivers/emc/emc_vmax_utils.py b/cinder/volume/drivers/emc/emc_vmax_utils.py index 1d01f9279..2a1a522c5 100644 --- a/cinder/volume/drivers/emc/emc_vmax_utils.py +++ b/cinder/volume/drivers/emc/emc_vmax_utils.py @@ -30,6 +30,7 @@ import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW +from cinder.objects import fields from cinder.volume import volume_types @@ -63,7 +64,10 @@ RETRIES = 'storagetype:retries' CIM_ERR_NOT_FOUND = 6 VOLUME_ELEMENT_NAME_PREFIX = 'OS-' SYNCHRONIZED = 4 +RDF_FAILOVER = 10 SMI_VERSION_83 = 830 +IS_RE = 'replication_enabled' 
+REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER class EMCVMAXUtils(object): @@ -1574,30 +1578,33 @@ class EMCVMAXUtils(object): return isValidSLO, isValidWorkload def get_v3_storage_group_name(self, poolName, slo, workload, - isCompressionDisabled): + isCompressionDisabled, rep_enabled=False): """Determine default v3 storage group from extraSpecs. :param poolName: the poolName :param slo: the SLO string e.g Bronze :param workload: the workload string e.g DSS :param isCompressionDisabled: is compression disabled + :param rep_enabled: True if replication enabled :returns: storageGroupName """ if slo and workload: + + prefix = ("OS-%(poolName)s-%(slo)s-%(workload)s" + % {'poolName': poolName, + 'slo': slo, + 'workload': workload}) + if isCompressionDisabled: - postfix = 'CD-SG' - else: - postfix = 'SG' - - storageGroupName = ( - "OS-%(poolName)s-%(slo)s-%(workload)s-%(postfix)s" - % {'poolName': poolName, - 'slo': slo, - 'workload': workload, - 'postfix': postfix}) + prefix += "-CD" else: - storageGroupName = ("OS-no_SLO-SG") + prefix = "OS-no_SLO" + if rep_enabled: + prefix += "-RE" + + storageGroupName = ("%(prefix)s-SG" + % {'prefix': prefix}) return storageGroupName def _get_fast_settings_from_storage_group(self, storageGroupInstance): @@ -1913,63 +1920,6 @@ class EMCVMAXUtils(object): return kwargs - def _single_pool_support(self, fileName): - """Single pool support. 
- - VMAX2 - - 10.108.246.202 - 5988 - admin - #1Password - - OS-PORTGROUP1-PG - - 000198700439 - FC_SLVR1 - - VMAX3 - - :param fileName: the configuration file - :returns: list - """ - myList = [] - kwargs = {} - connargs = {} - myFile = open(fileName, 'r') - data = myFile.read() - myFile.close() - dom = minidom.parseString(data) - try: - connargs = self._get_connection_info(dom) - interval = self._process_tag(dom, 'Interval') - retries = self._process_tag(dom, 'Retries') - portGroup = self._get_random_portgroup(dom) - - serialNumber = self._process_tag(dom, 'Array') - if serialNumber is None: - LOG.error(_LE( - "Array Serial Number must be in the file " - "%(fileName)s."), - {'fileName': fileName}) - poolName = self._process_tag(dom, 'Pool') - if poolName is None: - LOG.error(_LE( - "PoolName must be in the file " - "%(fileName)s."), - {'fileName': fileName}) - kwargs = self._fill_record( - connargs, serialNumber, poolName, portGroup, dom) - if interval: - kwargs['Interval'] = interval - if retries: - kwargs['Retries'] = retries - - myList.append(kwargs) - except IndexError: - pass - return myList - def parse_file_to_get_array_map(self, fileName): """Parses a file and gets array map. 
@@ -2100,14 +2050,14 @@ class EMCVMAXUtils(object): portGroupNames.append(portGroupName.strip()) portGroupNames = EMCVMAXUtils._filter_list(portGroupNames) if len(portGroupNames) > 0: - return EMCVMAXUtils._get_random_pg_from_list(portGroupNames) + return EMCVMAXUtils.get_random_pg_from_list(portGroupNames) exception_message = (_("No Port Group elements found in config file.")) LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) @staticmethod - def _get_random_pg_from_list(portgroupnames): + def get_random_pg_from_list(portgroupnames): """From list of portgroup, choose one randomly :param portGroupNames: list of available portgroups @@ -2624,7 +2574,7 @@ class EMCVMAXUtils(object): def get_v3_default_sg_instance_name( self, conn, poolName, slo, workload, storageSystemName, - isCompressionDisabled): + isCompressionDisabled, is_re=False): """Get the V3 default instance name :param conn: the connection to the ecom server @@ -2636,7 +2586,7 @@ class EMCVMAXUtils(object): :returns: the storage group instance name """ storageGroupName = self.get_v3_storage_group_name( - poolName, slo, workload, isCompressionDisabled) + poolName, slo, workload, isCompressionDisabled, is_re) controllerConfigService = ( self.find_controller_configuration_service( conn, storageSystemName)) @@ -2908,3 +2858,136 @@ class EMCVMAXUtils(object): :returns: boolean """ return value.lower() in ("yes", "true") + + def is_replication_enabled(self, extraSpecs): + """Check if replication is to be enabled. + + :param extraSpecs: extra specifications + :returns: bool - true if enabled, else false + """ + replication_enabled = False + if IS_RE in extraSpecs: + replication_enabled = True + return replication_enabled + + def get_replication_config(self, rep_device_list): + """Gather necessary replication configuration info. 
+ + :param rep_device_list: the replication device list from cinder.conf + :returns: rep_config, replication configuration dict + """ + rep_config = {} + if not rep_device_list: + return None + else: + target = rep_device_list[0] + try: + rep_config['array'] = target['target_device_id'] + rep_config['pool'] = target['remote_pool'] + rep_config['rdf_group_label'] = target['rdf_group_label'] + rep_config['portgroup'] = target['remote_port_group'] + + except KeyError as ke: + errorMessage = (_("Failed to retrieve all necessary SRDF " + "information. Error received: %(ke)s.") % + {'ke': six.text_type(ke)}) + LOG.exception(errorMessage) + raise exception.VolumeBackendAPIException(data=errorMessage) + + try: + allow_extend = target['allow_extend'] + if self.str2bool(allow_extend): + rep_config['allow_extend'] = True + else: + rep_config['allow_extend'] = False + except KeyError: + rep_config['allow_extend'] = False + + return rep_config + + def failover_provider_location(self, provider_location, + replication_keybindings): + """Transfer ownership of a volume from one array to another. + + :param provider_location: the provider location + :param replication_keybindings: the rep keybindings + :return: updated provider_location + """ + if isinstance(provider_location, six.text_type): + provider_location = eval(provider_location) + if isinstance(replication_keybindings, six.text_type): + replication_keybindings = eval(replication_keybindings) + + keybindings = provider_location['keybindings'] + provider_location['keybindings'] = replication_keybindings + replication_driver_data = keybindings + return provider_location, replication_driver_data + + def find_rdf_storage_sync_sv_sv( + self, conn, sourceInstance, storageSystem, + targetInstance, targetStorageSystem, + extraSpecs, waitforsync=True): + """Find the storage synchronized name. 
+
+        :param conn: the connection to the ecom server
+        :param sourceInstance: the source instance
+        :param storageSystem: the source storage system name
+        :param targetInstance: the target instance
+        :param targetStorageSystem: the target storage system name
+        :param extraSpecs: the extra specifications
+        :param waitforsync: flag for waiting until sync is complete
+        :return: foundSyncInstanceName
+        """
+
+        foundSyncInstanceName = None
+        syncInstanceNames = conn.EnumerateInstanceNames(
+            'SE_StorageSynchronized_SV_SV')
+        for syncInstanceName in syncInstanceNames:
+            syncSvTarget = syncInstanceName['SyncedElement']
+            syncSvSource = syncInstanceName['SystemElement']
+            if storageSystem != syncSvSource['SystemName'] or (
+                    targetStorageSystem != syncSvTarget['SystemName']):
+                continue
+            if syncSvTarget['DeviceID'] == targetInstance['DeviceID'] and (
+                    syncSvSource['DeviceID'] == sourceInstance['DeviceID']):
+                # Check that it hasn't recently been deleted.
+                try:
+                    conn.GetInstance(syncInstanceName)
+                    foundSyncInstanceName = syncInstanceName
+                    LOG.debug("Found sync Name: %(sync_name)s.",
+                              {'sync_name': foundSyncInstanceName})
+                except Exception:
+                    foundSyncInstanceName = None
+                break
+
+        if foundSyncInstanceName:
+            # Wait for SE_StorageSynchronized_SV_SV to be fully synced.
+            if waitforsync:
+                LOG.warning(_LW(
+                    "Expect a performance hit as volume is not fully "
+                    "synced on %(deviceId)s."),
+                    {'deviceId': sourceInstance['DeviceID']})
+                startTime = time.time()
+                self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs)
+                LOG.warning(_LW(
+                    "Synchronization process took: %(delta)s H:MM:SS."),
+                    {'delta': self.get_time_delta(startTime,
+                                                  time.time())})
+
+        return foundSyncInstanceName
+
+    @staticmethod
+    def is_volume_failed_over(volume):
+        """Check if a volume has been failed over. 
+ + :param volume: the volume object + :return: bool + """ + if volume is None: + return False + else: + if volume.get('replication_status'): + if volume['replication_status'] == REPLICATION_FAILOVER: + return True + else: + return False diff --git a/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml b/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml new file mode 100644 index 000000000..ae747bbed --- /dev/null +++ b/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add v2.1 volume replication support in VMAX driver.