VMAX driver - Implement volume replication for VMAX
Volume replication supports disaster recovery solution where there has been a catastrophic event in your data centre for the VMAX array. Change-Id: I2aafe564cdb31895756b4b8884af2635b054ae59 Implements: blueprint add-vmax-replication
This commit is contained in:
parent
54e7c1deaf
commit
67a2178eb4
@ -106,6 +106,10 @@ class Symm_ArrayChassis(dict):
|
||||
pass
|
||||
|
||||
|
||||
class CIM_ConnectivityCollection(dict):
|
||||
pass
|
||||
|
||||
|
||||
class SE_ReplicationSettingData(dict):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self['DefaultInstance'] = self.createInstance()
|
||||
@ -288,6 +292,16 @@ class EMCVMAXCommonData(object):
|
||||
lunmaskctrl_name = (
|
||||
'OS-fakehost-gold-I-MV')
|
||||
|
||||
rdf_group = 'test_rdf'
|
||||
srdf_group_instance = (
|
||||
'//10.73.28.137/root/emc:Symm_RemoteReplicationCollection.'
|
||||
'InstanceID="SYMMETRIX-+-000197200056-+-8-+-000195900551-+-8"')
|
||||
rg_instance_name = {
|
||||
'CreationClassName': 'CIM_DeviceMaskingGroup',
|
||||
'ElementName': 'OS-SRP_1-gold-DSS-RE-SG',
|
||||
'SystemName': 'SYMMETRIX+000197200056'
|
||||
}
|
||||
|
||||
initiatorgroup_id = (
|
||||
'SYMMETRIX+000195900551+OS-fakehost-IG')
|
||||
initiatorgroup_name = 'OS-fakehost-I-IG'
|
||||
@ -316,6 +330,7 @@ class EMCVMAXCommonData(object):
|
||||
storagepoolid = 'SYMMETRIX+000195900551+U+gold'
|
||||
storagegroupname = 'OS-fakehost-gold-I-SG'
|
||||
defaultstoragegroupname = 'OS_default_GOLD1_SG'
|
||||
re_storagegroup = 'OS-SRP_1-gold-DSS-RE-SG'
|
||||
storagevolume_creationclass = 'EMC_StorageVolume'
|
||||
policyrule = 'gold'
|
||||
poolname = 'gold'
|
||||
@ -347,8 +362,13 @@ class EMCVMAXCommonData(object):
|
||||
'SystemName': u'SYMMETRIX+000195900551',
|
||||
'DeviceID': u'10',
|
||||
'SystemCreationClassName': u'Symm_StorageSystem'}
|
||||
re_keybindings = {'CreationClassName': u'Symm_StorageVolume',
|
||||
'SystemName': u'SYMMETRIX+000195900551',
|
||||
'DeviceID': u'1',
|
||||
'SystemCreationClassName': u'Symm_StorageSystem'}
|
||||
provider_location = {'classname': 'Symm_StorageVolume',
|
||||
'keybindings': keybindings}
|
||||
'keybindings': keybindings,
|
||||
'version': '2.5.0'}
|
||||
provider_location2 = {'classname': 'Symm_StorageVolume',
|
||||
'keybindings': keybindings2}
|
||||
provider_location3 = {'classname': 'Symm_StorageVolume',
|
||||
@ -356,6 +376,7 @@ class EMCVMAXCommonData(object):
|
||||
provider_location_multi_pool = {'classname': 'Symm_StorageVolume',
|
||||
'keybindings': keybindings,
|
||||
'version': '2.2.0'}
|
||||
replication_driver_data = re_keybindings
|
||||
block_size = 512
|
||||
majorVersion = 1
|
||||
minorVersion = 2
|
||||
@ -539,6 +560,43 @@ class EMCVMAXCommonData(object):
|
||||
six.text_type(provider_location),
|
||||
'display_description': 'snapshot source volume'}
|
||||
|
||||
test_volume_re = {'name': 'vol1',
|
||||
'size': 1,
|
||||
'volume_name': 'vol1',
|
||||
'id': '1',
|
||||
'device_id': '1',
|
||||
'provider_auth': None,
|
||||
'project_id': 'project',
|
||||
'display_name': 'vol1',
|
||||
'display_description': 'test volume',
|
||||
'volume_type_id': 'abc',
|
||||
'provider_location': six.text_type(
|
||||
provider_location),
|
||||
'status': 'available',
|
||||
'replication_status': fields.ReplicationStatus.ENABLED,
|
||||
'host': fake_host,
|
||||
'NumberOfBlocks': 100,
|
||||
'BlockSize': block_size,
|
||||
'replication_driver_data': six.text_type(
|
||||
replication_driver_data)}
|
||||
|
||||
test_failed_re_volume = {'name': 'vol1',
|
||||
'size': 1,
|
||||
'volume_name': 'vol1',
|
||||
'id': '1',
|
||||
'device_id': '1',
|
||||
'display_name': 'vol1',
|
||||
'volume_type_id': 'abc',
|
||||
'provider_location': six.text_type(
|
||||
{'keybindings': 'fake_keybindings'}),
|
||||
'replication_status': (
|
||||
fields.ReplicationStatus.ENABLED),
|
||||
'replication_driver_data': 'fake_data',
|
||||
'host': fake_host,
|
||||
'NumberOfBlocks': 100,
|
||||
'BlockSize': block_size
|
||||
}
|
||||
|
||||
test_CG = consistencygroup.ConsistencyGroup(
|
||||
context=None, name='myCG1', id='12345abcde',
|
||||
volume_type_id='abc', status=fields.ConsistencyGroupStatus.AVAILABLE)
|
||||
@ -629,6 +687,16 @@ class EMCVMAXCommonData(object):
|
||||
'portgroupname': u'OS-portgroup-PG',
|
||||
'pool_name': u'Bronze+DSS+SRP_1+1234567891011'}
|
||||
|
||||
extra_specs_is_re = {'storagetype:pool': u'SRP_1',
|
||||
'volume_backend_name': 'VMAXReplication',
|
||||
'storagetype:workload': u'DSS',
|
||||
'storagetype:slo': u'Bronze',
|
||||
'storagetype:array': u'1234567891011',
|
||||
'isV3': True,
|
||||
'portgroupname': u'OS-portgroup-PG',
|
||||
'replication_enabled': True,
|
||||
'MultiPoolSupport': False}
|
||||
|
||||
remainingSLOCapacity = '123456789'
|
||||
SYNCHRONIZED = 4
|
||||
UNSYNCHRONIZED = 3
|
||||
@ -651,11 +719,11 @@ class FakeEcomConnection(object):
|
||||
Operation=None, Synchronization=None,
|
||||
TheElements=None, TheElement=None,
|
||||
LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None,
|
||||
ProtocolControllers=None,
|
||||
ProtocolControllers=None, ConnectivityCollection=None,
|
||||
MaskingGroup=None, Members=None,
|
||||
HardwareId=None, ElementSource=None, EMCInPools=None,
|
||||
CompositeType=None, EMCNumberOfMembers=None,
|
||||
EMCBindElements=None,
|
||||
EMCBindElements=None, Mode=None,
|
||||
InElements=None, TargetPool=None, RequestedState=None,
|
||||
ReplicationGroup=None, ReplicationType=None,
|
||||
ReplicationSettingData=None, GroupName=None, Force=None,
|
||||
@ -870,6 +938,8 @@ class FakeEcomConnection(object):
|
||||
result = self._assoc_lunmaskctrls()
|
||||
elif ResultClass == 'CIM_TargetMaskingGroup':
|
||||
result = self._assoc_portgroup()
|
||||
elif ResultClass == 'CIM_ConnectivityCollection':
|
||||
result = self._assoc_rdfgroup()
|
||||
else:
|
||||
result = self._default_assoc(objectpath)
|
||||
return result
|
||||
@ -1152,6 +1222,14 @@ class FakeEcomConnection(object):
|
||||
assocs.append(assoc)
|
||||
return assocs
|
||||
|
||||
def _assoc_rdfgroup(self):
|
||||
assocs = []
|
||||
assoc = CIM_ConnectivityCollection()
|
||||
assoc['ElementName'] = self.data.rdf_group
|
||||
assoc.path = self.data.srdf_group_instance
|
||||
assocs.append(assoc)
|
||||
return assocs
|
||||
|
||||
def _default_assoc(self, objectpath):
|
||||
return objectpath
|
||||
|
||||
@ -2137,12 +2215,12 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
|
||||
def test_get_random_pg_from_list(self):
|
||||
portGroupNames = ['pg1', 'pg2', 'pg3', 'pg4']
|
||||
portGroupName = (
|
||||
self.driver.common.utils._get_random_pg_from_list(portGroupNames))
|
||||
self.driver.common.utils.get_random_pg_from_list(portGroupNames))
|
||||
self.assertIn('pg', portGroupName)
|
||||
|
||||
portGroupNames = ['pg1']
|
||||
portGroupName = (
|
||||
self.driver.common.utils._get_random_pg_from_list(portGroupNames))
|
||||
self.driver.common.utils.get_random_pg_from_list(portGroupNames))
|
||||
self.assertEqual('pg1', portGroupName)
|
||||
|
||||
def test_get_random_portgroup(self):
|
||||
@ -3500,9 +3578,9 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
|
||||
self.driver.create_snapshot(self.data.test_snapshot)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_validate_pool',
|
||||
return_value=('Bogus_Pool'))
|
||||
emc_vmax_utils.EMCVMAXUtils,
|
||||
'parse_file_to_get_array_map',
|
||||
return_value=None)
|
||||
def test_create_snapshot_no_fast_failed(self, mock_pool):
|
||||
self.data.test_volume['volume_name'] = "vmax-1234567"
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
@ -4456,9 +4534,9 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase):
|
||||
self.driver.create_snapshot(self.data.test_snapshot)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_validate_pool',
|
||||
return_value=('Bogus_Pool'))
|
||||
emc_vmax_utils.EMCVMAXUtils,
|
||||
'parse_file_to_get_array_map',
|
||||
return_value=None)
|
||||
def test_create_snapshot_fast_failed(self, mock_pool):
|
||||
self.data.test_volume['volume_name'] = "vmax-1234567"
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
@ -5686,9 +5764,9 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase):
|
||||
self.driver.create_snapshot(self.data.test_snapshot)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_validate_pool',
|
||||
return_value=('Bogus_Pool'))
|
||||
emc_vmax_utils.EMCVMAXUtils,
|
||||
'parse_file_to_get_array_map',
|
||||
return_value=None)
|
||||
def test_create_snapshot_fast_failed(self, mock_pool):
|
||||
self.data.test_volume['volume_name'] = "vmax-1234567"
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
@ -5963,7 +6041,7 @@ class EMCV3DriverTestCase(test.TestCase):
|
||||
self.set_configuration()
|
||||
|
||||
def set_configuration(self):
|
||||
configuration = mock.Mock()
|
||||
configuration = mock.MagicMock()
|
||||
configuration.cinder_emc_config_file = self.config_file_path
|
||||
configuration.config_group = 'V3'
|
||||
|
||||
@ -8723,7 +8801,8 @@ class EMCVMAXCommonTest(test.TestCase):
|
||||
'workload': 'DSS',
|
||||
'slo': 'Bronze'}
|
||||
self.driver.common._extend_volume(
|
||||
volumeInstance, volumeName, new_size_gb, old_size_gbs, extraSpecs)
|
||||
self.data.test_volume, volumeInstance, volumeName,
|
||||
new_size_gb, old_size_gbs, extraSpecs)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
@ -9069,6 +9148,34 @@ class EMCVMAXCommonTest(test.TestCase):
|
||||
self.data.test_host_1_v3, volumeName, 'retyping', new_type,
|
||||
extraSpecs))
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_initial_setup',
|
||||
return_value=EMCVMAXCommonData.extra_specs)
|
||||
def test_failover_not_replicated(self, mock_setup):
|
||||
common = self.driver.common
|
||||
common.conn = FakeEcomConnection()
|
||||
volumes = [self.data.test_volume]
|
||||
# Path 1: Failover non replicated volume
|
||||
verify_update_fo = [{'volume_id': volumes[0]['id'],
|
||||
'updates': {'status': 'error'}}]
|
||||
secondary_id, volume_update = (
|
||||
common.failover_host('context', volumes, None))
|
||||
self.assertEqual(verify_update_fo, volume_update)
|
||||
# Path 2: Failback non replicated volume
|
||||
# Path 2a: Volume still available on primary
|
||||
verify_update_fb1 = [{'volume_id': volumes[0]['id'],
|
||||
'updates': {'status': 'available'}}]
|
||||
secondary_id, volume_update_1 = (
|
||||
common.failover_host('context', volumes, 'default'))
|
||||
self.assertEqual(verify_update_fb1, volume_update_1)
|
||||
# Path 2a: Volume not still available on primary
|
||||
with mock.patch.object(common, '_find_lun',
|
||||
return_value=None):
|
||||
secondary_id, volume_update_2 = (
|
||||
common.failover_host('context', volumes, 'default'))
|
||||
self.assertEqual(verify_update_fo, volume_update_2)
|
||||
|
||||
|
||||
class EMCVMAXProvisionTest(test.TestCase):
|
||||
def setUp(self):
|
||||
@ -9214,3 +9321,559 @@ class EMCVMAXISCSITest(test.TestCase):
|
||||
self.data.test_snapshot_v3, self.data.connector)
|
||||
common._unmap_lun.assert_called_once_with(
|
||||
self.data.test_snapshot_v3, self.data.connector)
|
||||
|
||||
|
||||
class EMCV3ReplicationTest(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.data = EMCVMAXCommonData()
|
||||
|
||||
self.flags(rpc_backend='oslo_messaging._drivers.impl_fake')
|
||||
|
||||
self.tempdir = tempfile.mkdtemp()
|
||||
super(EMCV3ReplicationTest, self).setUp()
|
||||
self.config_file_path = None
|
||||
self.create_fake_config_file_v3()
|
||||
self.addCleanup(self._cleanup)
|
||||
self.set_configuration()
|
||||
|
||||
def set_configuration(self):
|
||||
self.replication_device = [
|
||||
{'target_device_id': u'000195900551',
|
||||
'remote_port_group': self.data.port_group,
|
||||
'remote_pool': 'SRP_1',
|
||||
'rdf_group_label': self.data.rdf_group,
|
||||
'allow_extend': 'True'}]
|
||||
self.configuration = mock.Mock(
|
||||
replication_device=self.replication_device,
|
||||
cinder_emc_config_file=self.config_file_path,
|
||||
config_group='V3')
|
||||
|
||||
def safe_get(key):
|
||||
return getattr(self.configuration, key)
|
||||
self.configuration.safe_get = safe_get
|
||||
|
||||
self.mock_object(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection',
|
||||
self.fake_ecom_connection)
|
||||
instancename = FakeCIMInstanceName()
|
||||
self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name',
|
||||
instancename.fake_getinstancename)
|
||||
self.mock_object(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3',
|
||||
self.fake_is_v3)
|
||||
self.mock_object(volume_types, 'get_volume_type_extra_specs',
|
||||
self.fake_volume_type_extra_specs)
|
||||
self.mock_object(emc_vmax_common.EMCVMAXCommon,
|
||||
'_get_multi_pool_support_enabled_flag',
|
||||
self.fake_get_multi_pool)
|
||||
self.mock_object(emc_vmax_utils.EMCVMAXUtils,
|
||||
'get_existing_instance',
|
||||
self.fake_get_existing_instance)
|
||||
self.mock_object(cinder_utils, 'get_bool_param',
|
||||
return_value=False)
|
||||
self.patcher = mock.patch(
|
||||
'oslo_service.loopingcall.FixedIntervalLoopingCall',
|
||||
new=utils.ZeroIntervalLoopingCall)
|
||||
self.patcher.start()
|
||||
|
||||
driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=self.configuration)
|
||||
driver.db = FakeDB()
|
||||
self.driver = driver
|
||||
|
||||
def create_fake_config_file_v3(self):
|
||||
doc = minidom.Document()
|
||||
emc = doc.createElement("EMC")
|
||||
doc.appendChild(emc)
|
||||
|
||||
ecomserverip = doc.createElement("EcomServerIp")
|
||||
ecomserveriptext = doc.createTextNode("1.1.1.1")
|
||||
emc.appendChild(ecomserverip)
|
||||
ecomserverip.appendChild(ecomserveriptext)
|
||||
|
||||
ecomserverport = doc.createElement("EcomServerPort")
|
||||
ecomserverporttext = doc.createTextNode("10")
|
||||
emc.appendChild(ecomserverport)
|
||||
ecomserverport.appendChild(ecomserverporttext)
|
||||
|
||||
ecomusername = doc.createElement("EcomUserName")
|
||||
ecomusernametext = doc.createTextNode("user")
|
||||
emc.appendChild(ecomusername)
|
||||
ecomusername.appendChild(ecomusernametext)
|
||||
|
||||
ecompassword = doc.createElement("EcomPassword")
|
||||
ecompasswordtext = doc.createTextNode("pass")
|
||||
emc.appendChild(ecompassword)
|
||||
ecompassword.appendChild(ecompasswordtext)
|
||||
|
||||
portgroup = doc.createElement("PortGroup")
|
||||
portgrouptext = doc.createTextNode(self.data.port_group)
|
||||
portgroup.appendChild(portgrouptext)
|
||||
|
||||
pool = doc.createElement("Pool")
|
||||
pooltext = doc.createTextNode("SRP_1")
|
||||
emc.appendChild(pool)
|
||||
pool.appendChild(pooltext)
|
||||
|
||||
array = doc.createElement("Array")
|
||||
arraytext = doc.createTextNode("1234567891011")
|
||||
emc.appendChild(array)
|
||||
array.appendChild(arraytext)
|
||||
|
||||
slo = doc.createElement("ServiceLevel")
|
||||
slotext = doc.createTextNode("Bronze")
|
||||
emc.appendChild(slo)
|
||||
slo.appendChild(slotext)
|
||||
|
||||
workload = doc.createElement("Workload")
|
||||
workloadtext = doc.createTextNode("DSS")
|
||||
emc.appendChild(workload)
|
||||
workload.appendChild(workloadtext)
|
||||
|
||||
portgroups = doc.createElement("PortGroups")
|
||||
portgroups.appendChild(portgroup)
|
||||
emc.appendChild(portgroups)
|
||||
|
||||
timeout = doc.createElement("Timeout")
|
||||
timeouttext = doc.createTextNode("0")
|
||||
emc.appendChild(timeout)
|
||||
timeout.appendChild(timeouttext)
|
||||
|
||||
filename = 'cinder_emc_config_V3.xml'
|
||||
|
||||
self.config_file_path = self.tempdir + '/' + filename
|
||||
|
||||
f = open(self.config_file_path, 'w')
|
||||
doc.writexml(f)
|
||||
f.close()
|
||||
|
||||
def fake_ecom_connection(self):
|
||||
self.conn = FakeEcomConnection()
|
||||
return self.conn
|
||||
|
||||
def fake_is_v3(self, conn, serialNumber):
|
||||
return True
|
||||
|
||||
def fake_volume_type_extra_specs(self, volume_type):
|
||||
extraSpecs = {'volume_backend_name': 'VMAXReplication',
|
||||
'replication_enabled': '<is> True'}
|
||||
return extraSpecs
|
||||
|
||||
def fake_get_multi_pool(self):
|
||||
return False
|
||||
|
||||
def fake_get_existing_instance(self, conn, instancename):
|
||||
return instancename
|
||||
|
||||
def _cleanup(self):
|
||||
bExists = os.path.exists(self.config_file_path)
|
||||
if bExists:
|
||||
os.remove(self.config_file_path)
|
||||
shutil.rmtree(self.tempdir)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'get_target_instance',
|
||||
return_value='volume_instance')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_get_pool_and_storage_system',
|
||||
return_value=(None, EMCVMAXCommonData.storage_system))
|
||||
def test_setup_volume_replication_success(self, mock_pool,
|
||||
mock_target):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
sourceVolume = self.data.test_volume_re
|
||||
volumeDict = self.data.provider_location
|
||||
with mock.patch.object(
|
||||
common, 'create_remote_replica',
|
||||
return_value=(0, self.data.provider_location2)):
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
rep_status, rep_driver_data = common.setup_volume_replication(
|
||||
common.conn, sourceVolume, volumeDict, extraSpecs)
|
||||
self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status)
|
||||
self.assertEqual(self.data.keybindings2, rep_driver_data)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_get_pool_and_storage_system',
|
||||
return_value=(None, EMCVMAXCommonData.storage_system))
|
||||
def test_setup_volume_replication_failed(self, mock_pool):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
sourceVolume = self.data.test_volume_re
|
||||
volumeDict = self.data.provider_location
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
common.setup_volume_replication, common.conn, sourceVolume,
|
||||
volumeDict, extraSpecs)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_cleanup_remote_target')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_get_pool_and_storage_system',
|
||||
return_value=(None, EMCVMAXCommonData.storage_system))
|
||||
def test_cleanup_lun_replication(self, mock_pool, mock_delete):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
volume = self.data.test_volume_re
|
||||
volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
sourceInstance = common.conn.GetInstance(volumeInstanceName)
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
common.cleanup_lun_replication(common.conn, volume, volume['name'],
|
||||
sourceInstance, extraSpecs)
|
||||
with mock.patch.object(
|
||||
common.utils, 'find_volume_instance',
|
||||
return_value={'ElementName': self.data.test_volume_re['id']}):
|
||||
targetInstance = sourceInstance
|
||||
repServiceInstanceName = common.conn.EnumerateInstanceNames(
|
||||
'EMC_ReplicationService')[0]
|
||||
rep_config = common.utils.get_replication_config(
|
||||
self.replication_device)
|
||||
repExtraSpecs = common._get_replication_extraSpecs(
|
||||
extraSpecs, rep_config)
|
||||
common._cleanup_remote_target.assert_called_once_with(
|
||||
common.conn, repServiceInstanceName, sourceInstance,
|
||||
targetInstance, extraSpecs, repExtraSpecs)
|
||||
|
||||
def test_get_rdf_details(self):
|
||||
common = self.driver.common
|
||||
conn = self.fake_ecom_connection()
|
||||
rdfGroupInstance, repServiceInstanceName = (
|
||||
common.get_rdf_details(conn, self.data.storage_system))
|
||||
self.assertEqual(rdfGroupInstance, self.data.srdf_group_instance)
|
||||
self.assertEqual(repServiceInstanceName,
|
||||
conn.EnumerateInstanceNames(
|
||||
'EMC_ReplicationService')[0])
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_provision_v3.EMCVMAXProvisionV3,
|
||||
'_check_sync_state',
|
||||
return_value=6)
|
||||
def test_failover_volume_success(self, mock_sync):
|
||||
volumes = [self.data.test_volume_re]
|
||||
rep_data = self.data.replication_driver_data
|
||||
loc = six.text_type(self.data.provider_location)
|
||||
rep_data = six.text_type(rep_data)
|
||||
check_update_list = (
|
||||
[{'volume_id': self.data.test_volume_re['id'],
|
||||
'updates':
|
||||
{'replication_status': fields.ReplicationStatus.ENABLED,
|
||||
'provider_location': loc,
|
||||
'replication_driver_data': rep_data}}])
|
||||
secondary_id, volume_update_list = (
|
||||
self.driver.failover_host('context', volumes, 'default'))
|
||||
self.assertEqual(check_update_list, volume_update_list)
|
||||
|
||||
def test_failover_volume_failed(self):
|
||||
fake_vol = self.data.test_failed_re_volume
|
||||
fake_location = six.text_type(
|
||||
{'keybindings': 'fake_keybindings'})
|
||||
fake_volumes = [fake_vol]
|
||||
check_update_list = (
|
||||
[{'volume_id': fake_vol['id'],
|
||||
'updates':
|
||||
{'replication_status': (
|
||||
fields.ReplicationStatus.FAILOVER_ERROR),
|
||||
'provider_location': fake_location,
|
||||
'replication_driver_data': 'fake_data'}}])
|
||||
secondary_id, volume_update_list = (
|
||||
self.driver.failover_host('context', fake_volumes, None))
|
||||
self.assertEqual(check_update_list, volume_update_list)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_provision_v3.EMCVMAXProvisionV3,
|
||||
'_check_sync_state',
|
||||
return_value=12)
|
||||
def test_failback_volume_success(self, mock_sync):
|
||||
volumes = [self.data.test_volume_re]
|
||||
provider_location = self.data.provider_location
|
||||
loc = six.text_type(provider_location)
|
||||
rep_data = six.text_type(self.data.replication_driver_data)
|
||||
check_update_list = (
|
||||
[{'volume_id': self.data.test_volume_re['id'],
|
||||
'updates':
|
||||
{'replication_status': fields.ReplicationStatus.ENABLED,
|
||||
'replication_driver_data': rep_data,
|
||||
'provider_location': loc}}])
|
||||
secondary_id, volume_update_list = (
|
||||
self.driver.failover_host('context', volumes, 'default'))
|
||||
self.assertEqual(check_update_list, volume_update_list)
|
||||
|
||||
def test_failback_volume_failed(self):
|
||||
fake_vol = self.data.test_failed_re_volume
|
||||
fake_location = six.text_type(
|
||||
{'keybindings': 'fake_keybindings'})
|
||||
fake_volumes = [fake_vol]
|
||||
check_update_list = (
|
||||
[{'volume_id': fake_vol['id'],
|
||||
'updates':
|
||||
{'replication_status': (
|
||||
fields.ReplicationStatus.FAILOVER_ERROR),
|
||||
'provider_location': fake_location,
|
||||
'replication_driver_data': 'fake_data'}}])
|
||||
secondary_id, volume_update_list = (
|
||||
self.driver.failover_host('context', fake_volumes, 'default'))
|
||||
self.assertEqual(check_update_list, volume_update_list)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_utils.EMCVMAXUtils,
|
||||
'compare_size',
|
||||
return_value=0)
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'add_volume_to_replication_group',
|
||||
return_value=EMCVMAXCommonData.re_storagegroup)
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_create_remote_replica',
|
||||
return_value=(0, EMCVMAXCommonData.provider_location))
|
||||
def test_extend_volume_is_replicated_success(
|
||||
self, mock_replica, mock_sg, mock_size):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
volume = self.data.test_volume_re
|
||||
new_size = '2'
|
||||
newSizeBits = common.utils.convert_gb_to_bits(new_size)
|
||||
extendedVolumeInstance = self.data.volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
extendedVolumeSize = common.utils.get_volume_size(
|
||||
self.conn, extendedVolumeInstance)
|
||||
self.driver.extend_volume(volume, new_size)
|
||||
common.utils.compare_size.assert_called_once_with(
|
||||
newSizeBits, extendedVolumeSize)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_create_remote_replica',
|
||||
return_value=(1, 'error'))
|
||||
def test_extend_volume_is_replicated_failed(self, mock_replica):
|
||||
volume = self.data.test_volume_re
|
||||
new_size = '2'
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.extend_volume, volume, new_size)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_masking.EMCVMAXMasking,
|
||||
'remove_and_reset_members')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'add_volume_to_replication_group',
|
||||
return_value=EMCVMAXCommonData.re_storagegroup)
|
||||
@mock.patch.object(
|
||||
emc_vmax_provision_v3.EMCVMAXProvisionV3,
|
||||
'get_volume_dict_from_job',
|
||||
return_value=EMCVMAXCommonData.provider_location)
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_get_pool_and_storage_system',
|
||||
return_value=(None, EMCVMAXCommonData.storage_system))
|
||||
def test_create_remote_replica_success(self, mock_pool, mock_volume_dict,
|
||||
mock_sg, mock_return):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
repServiceInstanceName = common.conn.EnumerateInstanceNames(
|
||||
'EMC_ReplicationService')[0]
|
||||
rdfGroupInstance = self.data.srdf_group_instance
|
||||
sourceVolume = self.data.test_volume_re
|
||||
volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
sourceInstance = common.conn.GetInstance(volumeInstanceName)
|
||||
targetInstance = sourceInstance
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
rep_config = common.utils.get_replication_config(
|
||||
self.replication_device)
|
||||
referenceDict = EMCVMAXCommonData.provider_location
|
||||
rc, rdfDict = common.create_remote_replica(
|
||||
common.conn, repServiceInstanceName, rdfGroupInstance,
|
||||
sourceVolume, sourceInstance, targetInstance,
|
||||
extraSpecs, rep_config)
|
||||
self.assertEqual(referenceDict, rdfDict)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_masking.EMCVMAXMasking,
|
||||
'remove_and_reset_members')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_cleanup_remote_target')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_get_pool_and_storage_system',
|
||||
return_value=(None, EMCVMAXCommonData.storage_system))
|
||||
def test_create_remote_replica_failed(self, mock_pool,
|
||||
mock_cleanup, mock_return):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
repServiceInstanceName = common.conn.EnumerateInstanceNames(
|
||||
'EMC_ReplicationService')[0]
|
||||
rdfGroupInstance = self.data.srdf_group_instance
|
||||
sourceVolume = self.data.test_volume_re
|
||||
volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
sourceInstance = common.conn.GetInstance(volumeInstanceName)
|
||||
targetInstance = sourceInstance
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
rep_config = common.utils.get_replication_config(
|
||||
self.replication_device)
|
||||
repExtraSpecs = common._get_replication_extraSpecs(
|
||||
extraSpecs, rep_config)
|
||||
with mock.patch.object(common.provisionv3,
|
||||
'_create_element_replica_extra_params',
|
||||
return_value=(9, 'error')):
|
||||
with mock.patch.object(common.utils,
|
||||
'wait_for_job_complete',
|
||||
return_value=(9, 'error')):
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
common.create_remote_replica, common.conn,
|
||||
repServiceInstanceName, rdfGroupInstance, sourceVolume,
|
||||
sourceInstance, targetInstance, extraSpecs, rep_config)
|
||||
common._cleanup_remote_target.assert_called_once_with(
|
||||
common.conn, repServiceInstanceName, sourceInstance,
|
||||
targetInstance, extraSpecs, repExtraSpecs)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_masking.EMCVMAXMasking,
|
||||
'get_masking_view_from_storage_group',
|
||||
return_value=None)
|
||||
def test_add_volume_to_replication_group_success(self, mock_mv):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
controllerConfigService = (
|
||||
common.utils.find_controller_configuration_service(
|
||||
common.conn, self.data.storage_system))
|
||||
volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
volumeInstance = common.conn.GetInstance(volumeInstanceName)
|
||||
volumeName = self.data.test_volume_re['name']
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
with mock.patch.object(
|
||||
common.utils, 'find_storage_masking_group',
|
||||
return_value=self.data.default_sg_instance_name):
|
||||
common.add_volume_to_replication_group(
|
||||
common.conn, controllerConfigService,
|
||||
volumeInstance, volumeName, extraSpecs)
|
||||
|
||||
def test_add_volume_to_replication_group_failed(self):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
controllerConfigService = (
|
||||
common.utils.find_controller_configuration_service(
|
||||
common.conn, self.data.storage_system))
|
||||
volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
volumeInstance = common.conn.GetInstance(volumeInstanceName)
|
||||
volumeName = self.data.test_volume_re['name']
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
with mock.patch.object(
|
||||
common.utils, 'find_storage_masking_group',
|
||||
return_value=None):
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
common.add_volume_to_replication_group,
|
||||
common.conn, controllerConfigService,
|
||||
volumeInstance, volumeName, extraSpecs)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'add_volume_to_replication_group')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_create_v3_volume',
|
||||
return_value=(0, EMCVMAXCommonData.provider_location,
|
||||
EMCVMAXCommonData.storage_system))
|
||||
def test_create_replicated_volume_success(self, mock_create, mock_add):
|
||||
model_update = self.driver.create_volume(
|
||||
self.data.test_volume_re)
|
||||
rep_status = model_update['replication_status']
|
||||
rep_data = model_update['replication_driver_data']
|
||||
self.assertEqual(fields.ReplicationStatus.ENABLED,
|
||||
rep_status)
|
||||
self.assertIsNotNone(rep_data)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_cleanup_replication_source')
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_create_v3_volume',
|
||||
return_value=(0, EMCVMAXCommonData.provider_location,
|
||||
EMCVMAXCommonData.storage_system))
|
||||
def test_create_replicated_volume_failed(self, mock_create, mock_cleanup):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
volumeName = self.data.test_volume_re['id']
|
||||
volumeDict = self.data.provider_location
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.create_volume, self.data.test_volume_re)
|
||||
common._cleanup_replication_source.assert_called_once_with(
|
||||
common.conn, volumeName, volumeDict, extraSpecs)
|
||||
|
||||
@mock.patch.object(
|
||||
emc_vmax_common.EMCVMAXCommon,
|
||||
'_delete_from_pool_v3')
|
||||
def test_cleanup_replication_source(self, mock_delete):
|
||||
common = self.driver.common
|
||||
common.conn = self.fake_ecom_connection()
|
||||
volumeName = self.data.test_volume_re['name']
|
||||
volumeDict = self.data.provider_location
|
||||
extraSpecs = self.data.extra_specs_is_re
|
||||
storageConfigService = (
|
||||
common.utils.find_storage_configuration_service(
|
||||
common.conn, self.data.storage_system))
|
||||
volumeInstanceName = (
|
||||
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
|
||||
sourceInstance = common.conn.GetInstance(volumeInstanceName)
|
||||
deviceId = self.data.test_volume_re['device_id']
|
||||
common._cleanup_replication_source(
|
||||
common.conn, volumeName, volumeDict, extraSpecs)
|
||||
common._delete_from_pool_v3.assert_called_once_with(
|
||||
storageConfigService, sourceInstance,
|
||||
volumeName, deviceId, extraSpecs)
|
||||
|
||||
@mock.patch.object(
    emc_vmax_common.EMCVMAXCommon,
    '_delete_from_pool_v3')
def test_cleanup_remote_target(self, mock_delete):
    """Cleaning up a remote target deletes it with the rep extra specs."""
    common = self.driver.common
    common.conn = self.fake_ecom_connection()
    rep_service = common.conn.EnumerateInstanceNames(
        'EMC_ReplicationService')[0]
    source_instance = common.conn.GetInstance(
        common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
    target_instance = source_instance.copy()
    target_config_service = (
        common.utils.find_storage_configuration_service(
            common.conn, self.data.storage_system))
    device_id = target_instance['DeviceID']
    vol_name = target_instance['Name']
    specs = self.data.extra_specs_is_re
    rep_config = common.utils.get_replication_config(
        self.replication_device)
    rep_specs = common._get_replication_extraSpecs(specs, rep_config)
    common._cleanup_remote_target(
        common.conn, rep_service, source_instance,
        target_instance, specs, rep_specs)
    common._delete_from_pool_v3.assert_called_once_with(
        target_config_service, target_instance, vol_name,
        device_id, rep_specs)
|
||||
|
||||
@mock.patch.object(
    emc_vmax_common.EMCVMAXCommon,
    'cleanup_lun_replication')
def test_delete_re_volume(self, mock_cleanup):
    """Deleting a replicated volume triggers replication cleanup."""
    common = self.driver.common
    common.conn = self.fake_ecom_connection()
    volume = self.data.test_volume_re
    vol_name = volume['name']
    vol_instance = common.conn.GetInstance(
        common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
    specs = self.data.extra_specs_is_re
    self.driver.delete_volume(volume)
    common.cleanup_lun_replication.assert_called_once_with(
        common.conn, volume, vol_name, vol_instance, specs)
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -75,6 +75,7 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver):
|
||||
- Storage assisted volume migration via retype
|
||||
(bp vmax-volume-migration)
|
||||
- Support for compression on All Flash
|
||||
- Volume replication 2.1 (bp add-vmax-replication)
|
||||
|
||||
"""
|
||||
|
||||
@ -86,44 +87,32 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver):
|
||||
def __init__(self, *args, **kwargs):
    """Initialize the VMAX FC driver.

    The diff-tangled span retained both the old and new constructor
    calls; this is the final form, which records the active backend id
    (set after a replication failover) and hands it to the common layer.
    """
    super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
    # active_backend_id is non-None only after a failover to the
    # secondary array.
    self.active_backend_id = kwargs.get('active_backend_id', None)
    self.common = emc_vmax_common.EMCVMAXCommon(
        'FC',
        self.VERSION,
        configuration=self.configuration,
        active_backend_id=self.active_backend_id)
    self.zonemanager_lookup_service = fczm_utils.create_lookup_service()
|
||||
|
||||
def check_for_setup_error(self):
    """No driver-level setup validation is required."""
    pass
|
||||
|
||||
def create_volume(self, volume):
    """Creates a VMAX volume.

    The tangle retained the superseded implementation that built a
    model_update by hand; the common layer now returns it directly
    (including replication fields when enabled).

    :param volume: the cinder volume object
    :returns: model update dict from the common layer
    """
    return self.common.create_volume(volume)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a VMAX volume from a snapshot.

    The common layer builds and returns the model update itself.

    :param volume: the new cinder volume object
    :param snapshot: the source snapshot
    :returns: model update dict from the common layer
    """
    return self.common.create_volume_from_snapshot(
        volume, snapshot)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
    """Creates a cloned VMAX volume.

    The common layer builds and returns the model update itself.

    :param volume: the new cinder volume object
    :param src_vref: the source volume to clone
    :returns: model update dict from the common layer
    """
    return self.common.create_cloned_volume(volume, src_vref)
|
||||
|
||||
def delete_volume(self, volume):
    """Deletes a VMAX volume.

    (Also fixes the docstring grammar: "an VMAX" -> "a VMAX".)

    :param volume: the cinder volume object
    """
    self.common.delete_volume(volume)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
@ -524,3 +513,14 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver):
|
||||
|
||||
def backup_use_temp_snapshot(self):
|
||||
return True
|
||||
|
||||
def failover_host(self, context, volumes, secondary_id=None):
    """Failover volumes to a secondary host/backend.

    :param context: the request context
    :param volumes: the list of volumes to be failed over
    :param secondary_id: the backend to be failed over to; 'default'
        means fail back to the primary
    :returns: secondary_id, volume_update_list
    """
    return self.common.failover_host(context, volumes, secondary_id)
|
||||
|
@ -81,6 +81,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
|
||||
- Storage assisted volume migration via retype
|
||||
(bp vmax-volume-migration)
|
||||
- Support for compression on All Flash
|
||||
- Volume replication 2.1 (bp add-vmax-replication)
|
||||
|
||||
"""
|
||||
|
||||
@ -92,43 +93,32 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
|
||||
def __init__(self, *args, **kwargs):
    """Initialize the VMAX iSCSI driver.

    The diff-tangled span retained both the old and new constructor
    calls; this is the final form, which records the active backend id
    (set after a replication failover) and hands it to the common layer.
    """
    super(EMCVMAXISCSIDriver, self).__init__(*args, **kwargs)
    # active_backend_id is non-None only after a failover to the
    # secondary array.
    self.active_backend_id = kwargs.get('active_backend_id', None)
    self.common = (
        emc_vmax_common.EMCVMAXCommon(
            'iSCSI',
            self.VERSION,
            configuration=self.configuration,
            active_backend_id=self.active_backend_id))
|
||||
|
||||
def check_for_setup_error(self):
    """No driver-level setup validation is required."""
    pass
|
||||
|
||||
def create_volume(self, volume):
    """Creates a VMAX volume.

    The tangle retained the superseded implementation that built a
    model_update by hand; the common layer now returns it directly
    (including replication fields when enabled).

    :param volume: the cinder volume object
    :returns: model update dict from the common layer
    """
    return self.common.create_volume(volume)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a VMAX volume from a snapshot.

    The common layer builds and returns the model update itself.

    :param volume: the new cinder volume object
    :param snapshot: the source snapshot
    :returns: model update dict from the common layer
    """
    return self.common.create_volume_from_snapshot(
        volume, snapshot)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
    """Creates a cloned VMAX volume.

    The common layer builds and returns the model update itself.

    :param volume: the new cinder volume object
    :param src_vref: the source volume to clone
    :returns: model update dict from the common layer
    """
    return self.common.create_cloned_volume(volume, src_vref)
|
||||
|
||||
def delete_volume(self, volume):
    """Deletes a VMAX volume.

    (Also fixes the docstring grammar: "an VMAX" -> "a VMAX".)

    :param volume: the cinder volume object
    """
    self.common.delete_volume(volume)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
@ -448,3 +438,14 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
|
||||
|
||||
def backup_use_temp_snapshot(self):
|
||||
return True
|
||||
|
||||
def failover_host(self, context, volumes, secondary_id=None):
    """Failover volumes to a secondary host/backend.

    :param context: the request context
    :param volumes: the list of volumes to be failed over
    :param secondary_id: the backend to be failed over to; 'default'
        means fail back to the primary
    :returns: secondary_id, volume_update_list
    """
    return self.common.failover_host(context, volumes, secondary_id)
|
||||
|
@ -204,7 +204,8 @@ class EMCVMAXMasking(object):
|
||||
maskingviewdict['pool'],
|
||||
maskingviewdict['slo'],
|
||||
maskingviewdict['workload'],
|
||||
maskingviewdict['isCompressionDisabled'])
|
||||
maskingviewdict['isCompressionDisabled'],
|
||||
maskingviewdict['replication_enabled'])
|
||||
assocStorageGroupInstanceNames = (
|
||||
self.utils.get_storage_groups_from_volume(
|
||||
conn, volumeinstance.path))
|
||||
@ -2230,16 +2231,18 @@ class EMCVMAXMasking(object):
|
||||
"""Return volume to the default storage group in v3.
|
||||
|
||||
:param conn: the ecom connection
|
||||
:param controllerConfigService: controller config service
|
||||
:param controllerConfigurationService: controller config service
|
||||
:param volumeInstance: volumeInstance
|
||||
:param volumeName: the volume name
|
||||
:param extraSpecs: additional info
|
||||
:raises: VolumeBackendAPIException
|
||||
"""
|
||||
rep_enabled = self.utils.is_replication_enabled(extraSpecs)
|
||||
isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs)
|
||||
storageGroupName = self.utils.get_v3_storage_group_name(
|
||||
extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO],
|
||||
extraSpecs[self.utils.WORKLOAD], isCompressionDisabled)
|
||||
extraSpecs[self.utils.WORKLOAD], isCompressionDisabled,
|
||||
rep_enabled)
|
||||
storageGroupInstanceName = self.utils.find_storage_masking_group(
|
||||
conn, controllerConfigurationService, storageGroupName)
|
||||
|
||||
|
@ -35,6 +35,12 @@ INFO_SRC_V3 = 3
|
||||
ACTIVATESNAPVX = 4
|
||||
DEACTIVATESNAPVX = 19
|
||||
SNAPSYNCTYPE = 7
|
||||
RDF_FAILOVER = 10
|
||||
RDF_FAILBACK = 11
|
||||
RDF_RESYNC = 14
|
||||
RDF_SYNC_MODE = 2
|
||||
RDF_SYNCHRONIZED = 6
|
||||
RDF_FAILEDOVER = 12
|
||||
|
||||
|
||||
class EMCVMAXProvisionV3(object):
|
||||
@ -231,6 +237,29 @@ class EMCVMAXProvisionV3(object):
|
||||
|
||||
return volumeDict
|
||||
|
||||
def get_or_create_default_sg(self, conn, extraSpecs, storageSystemName,
                             doDisableCompression):
    """Return the default storage group for a replica, creating it if absent.

    :param conn: the connection to the ecom server
    :param extraSpecs: the extra specifications
    :param storageSystemName: the storage system name
    :param doDisableCompression: flag for compression
    :returns: sgInstanceName, instance of storage group
    """
    pool_name = extraSpecs[self.utils.POOL]
    slo = extraSpecs[self.utils.SLO]
    workload = extraSpecs[self.utils.WORKLOAD]
    sg_name, config_service, sg_instance_name = (
        self.utils.get_v3_default_sg_instance_name(
            conn, pool_name, slo, workload, storageSystemName,
            doDisableCompression))
    if sg_instance_name is None:
        # The default storage group does not exist on the array yet.
        sg_instance_name = self.create_storage_group_v3(
            conn, config_service, sg_name,
            pool_name, slo, workload, extraSpecs, doDisableCompression)
    return sg_instance_name
|
||||
|
||||
def create_element_replica(
|
||||
self, conn, repServiceInstanceName,
|
||||
cloneName, syncType, sourceInstance, extraSpecs,
|
||||
@ -257,12 +286,9 @@ class EMCVMAXProvisionV3(object):
|
||||
'source': sourceInstance.path})
|
||||
storageSystemName = sourceInstance['SystemName']
|
||||
doDisableCompression = self.utils.is_compression_disabled(extraSpecs)
|
||||
__, __, sgInstanceName = (
|
||||
self.utils.get_v3_default_sg_instance_name(
|
||||
conn, extraSpecs[self.utils.POOL],
|
||||
extraSpecs[self.utils.SLO],
|
||||
extraSpecs[self.utils.WORKLOAD], storageSystemName,
|
||||
doDisableCompression))
|
||||
sgInstanceName = (
|
||||
self.get_or_create_default_sg(
|
||||
conn, extraSpecs, storageSystemName, doDisableCompression))
|
||||
try:
|
||||
storageGroupInstance = conn.GetInstance(sgInstanceName)
|
||||
except Exception:
|
||||
@ -309,9 +335,54 @@ class EMCVMAXProvisionV3(object):
|
||||
return rc, job
|
||||
return do_create_element_replica()
|
||||
|
||||
def create_remote_element_replica(
        self, conn, repServiceInstanceName, cloneName, syncType,
        sourceInstance, targetInstance, rdfGroupInstance, extraSpecs):
    """Create an SRDF replication relationship between source and target.

    :param conn: the ecom connection
    :param repServiceInstanceName: the replication service
    :param cloneName: the name of the target volume
    :param syncType: the synchronization type
    :param sourceInstance: the source volume instance
    :param targetInstance: the target volume instance
    :param rdfGroupInstance: the rdf group instance
    :param extraSpecs: additional info
    :return: rc, job
    """
    start_time = time.time()
    LOG.debug("Setup replication relationship: %(source)s "
              "syncType: %(syncType)s Source: %(target)s.",
              {'source': sourceInstance.path,
               'syncType': syncType,
               'target': targetInstance.path})
    rc, job = self._create_element_replica_extra_params(
        conn, repServiceInstanceName, cloneName, syncType,
        sourceInstance, targetInstance, None, None, rdfGroupInstance)

    if rc != 0:
        # A non-zero return code may just mean the call went
        # asynchronous; wait for the job and only fail on a non-zero
        # final status.
        rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                         extraSpecs)
        if rc != 0:
            exceptionMessage = (
                _("Error Create Cloned Volume: %(cloneName)s "
                  "Return code: %(rc)lu. Error: %(error)s.")
                % {'cloneName': cloneName,
                   'rc': rc,
                   'error': errordesc})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

    LOG.debug("InvokeMethod CreateElementReplica "
              "took: %(delta)s H:MM:SS.",
              {'delta': self.utils.get_time_delta(start_time,
                                                  time.time())})
    return rc, job
|
||||
|
||||
def _create_element_replica_extra_params(
|
||||
self, conn, repServiceInstanceName, cloneName, syncType,
|
||||
sourceInstance, targetInstance, rsdInstance, sgInstanceName):
|
||||
sourceInstance, targetInstance, rsdInstance, sgInstanceName,
|
||||
rdfGroupInstance=None):
|
||||
"""CreateElementReplica using extra parameters.
|
||||
|
||||
:param conn: the connection to the ecom server
|
||||
@ -326,6 +397,7 @@ class EMCVMAXProvisionV3(object):
|
||||
:returns: job - job object of the replica creation operation
|
||||
"""
|
||||
syncType = self.utils.get_num(syncType, '16')
|
||||
modeType = self.utils.get_num(RDF_SYNC_MODE, '16')
|
||||
if targetInstance and rsdInstance:
|
||||
rc, job = conn.InvokeMethod(
|
||||
'CreateElementReplica', repServiceInstanceName,
|
||||
@ -334,13 +406,14 @@ class EMCVMAXProvisionV3(object):
|
||||
SourceElement=sourceInstance.path,
|
||||
TargetElement=targetInstance.path,
|
||||
ReplicationSettingData=rsdInstance)
|
||||
elif targetInstance:
|
||||
elif targetInstance and rdfGroupInstance:
|
||||
rc, job = conn.InvokeMethod(
|
||||
'CreateElementReplica', repServiceInstanceName,
|
||||
ElementName=cloneName,
|
||||
SyncType=syncType,
|
||||
Mode=modeType,
|
||||
SourceElement=sourceInstance.path,
|
||||
TargetElement=targetInstance.path)
|
||||
TargetElement=targetInstance.path,
|
||||
ConnectivityCollection=rdfGroupInstance)
|
||||
elif rsdInstance:
|
||||
rc, job = conn.InvokeMethod(
|
||||
'CreateElementReplica', repServiceInstanceName,
|
||||
@ -349,7 +422,13 @@ class EMCVMAXProvisionV3(object):
|
||||
SourceElement=sourceInstance.path,
|
||||
ReplicationSettingData=rsdInstance,
|
||||
Collections=[sgInstanceName])
|
||||
|
||||
elif targetInstance:
|
||||
rc, job = conn.InvokeMethod(
|
||||
'CreateElementReplica', repServiceInstanceName,
|
||||
ElementName=cloneName,
|
||||
SyncType=syncType,
|
||||
SourceElement=sourceInstance.path,
|
||||
TargetElement=targetInstance.path)
|
||||
return rc, job
|
||||
|
||||
def break_replication_relationship(
|
||||
@ -871,3 +950,105 @@ class EMCVMAXProvisionV3(object):
|
||||
# Find the newly created volume.
|
||||
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
|
||||
return volumeDict, rc
|
||||
|
||||
def get_rdf_group_instance(self, conn, repServiceInstanceName,
                           RDFGroupName):
    """Get the SRDF group instance.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: the replication service
    :param RDFGroupName: the element name of the RDF group
    :return: the RDF group instance name, or None if not found
    """
    rdf_groups = conn.Associators(
        repServiceInstanceName,
        ResultClass='CIM_ConnectivityCollection')

    for rdf_group in rdf_groups:
        if RDFGroupName == six.text_type(rdf_group['ElementName']):
            # Check that it has not been deleted recently.
            if self.utils.get_existing_instance(
                    conn, rdf_group.path) is None:
                # SRDF group not found.
                return None
            return rdf_group.path
    return None
|
||||
|
||||
def failover_volume(self, conn, repServiceInstanceName,
                    storageSynchronizationSv,
                    extraSpecs):
    """Failover a volume to its target device.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: the replication service
    :param storageSynchronizationSv: the storage synchronized object
    :param extraSpecs: the extra specifications
    """
    # Nothing to do if the pair has already been failed over.
    if self._check_sync_state(
            conn, storageSynchronizationSv) == RDF_FAILEDOVER:
        return

    LOG.debug("Failover: %(sv)s operation: %(operation)s.",
              {'sv': storageSynchronizationSv,
               'operation': RDF_FAILOVER})
    return self._modify_replica_synchronization(
        conn, repServiceInstanceName, storageSynchronizationSv,
        RDF_FAILOVER, extraSpecs)
|
||||
|
||||
def failback_volume(self, conn, repServiceInstanceName,
                    storageSynchronizationSv,
                    extraSpecs):
    """Failback a volume to the source device.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: the replication service
    :param storageSynchronizationSv: the storage synchronized object
    :param extraSpecs: the extra specifications
    """
    # Nothing to do if the pair is already synchronized (failed back).
    if self._check_sync_state(
            conn, storageSynchronizationSv) == RDF_SYNCHRONIZED:
        return

    LOG.debug("Failback: %(sv)s operation: %(operation)s.",
              {'sv': storageSynchronizationSv,
               'operation': RDF_FAILBACK})
    return self._modify_replica_synchronization(
        conn, repServiceInstanceName, storageSynchronizationSv,
        RDF_FAILBACK, extraSpecs)
|
||||
|
||||
def _check_sync_state(self, conn, syncName):
    """Get the copy state of a sync name.

    :param conn: the connection to the ecom server
    :param syncName: the storage sync sv name
    :return: the copy state
    :raises: VolumeBackendAPIException if the instance cannot be read
    """
    try:
        sync_instance = conn.GetInstance(syncName, LocalOnly=False)
        sync_state = sync_instance['syncState']
        LOG.debug("syncState is %(syncState)lu.",
                  {'syncState': sync_state})
        return sync_state
    except Exception as ex:
        exceptionMessage = (
            _("Getting sync instance failed with: %(ex)s.")
            % {'ex': six.text_type(ex)})
        LOG.exception(exceptionMessage)
        raise exception.VolumeBackendAPIException(
            data=exceptionMessage)
|
||||
|
@ -30,6 +30,7 @@ import six
|
||||
from cinder import context
|
||||
from cinder import exception
|
||||
from cinder.i18n import _, _LE, _LI, _LW
|
||||
from cinder.objects import fields
|
||||
from cinder.volume import volume_types
|
||||
|
||||
|
||||
@ -63,7 +64,10 @@ RETRIES = 'storagetype:retries'
|
||||
CIM_ERR_NOT_FOUND = 6
|
||||
VOLUME_ELEMENT_NAME_PREFIX = 'OS-'
|
||||
SYNCHRONIZED = 4
|
||||
RDF_FAILOVER = 10
|
||||
SMI_VERSION_83 = 830
|
||||
IS_RE = 'replication_enabled'
|
||||
REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER
|
||||
|
||||
|
||||
class EMCVMAXUtils(object):
|
||||
@ -1574,30 +1578,33 @@ class EMCVMAXUtils(object):
|
||||
return isValidSLO, isValidWorkload
|
||||
|
||||
def get_v3_storage_group_name(self, poolName, slo, workload,
                              isCompressionDisabled, rep_enabled=False):
    """Determine the default v3 storage group name.

    The name encodes pool/SLO/workload plus optional suffixes:
    '-CD' when compression is disabled and '-RE' when the group is
    replication enabled, e.g. 'OS-SRP_1-gold-DSS-RE-SG'.
    (The diff-tangled span retained the removed postfix-based
    implementation; this is the final prefix-based one.)

    :param poolName: the poolName
    :param slo: the SLO string e.g Bronze
    :param workload: the workload string e.g DSS
    :param isCompressionDisabled: is compression disabled
    :param rep_enabled: True if replication enabled
    :returns: storageGroupName
    """
    if slo and workload:
        prefix = ("OS-%(poolName)s-%(slo)s-%(workload)s"
                  % {'poolName': poolName,
                     'slo': slo,
                     'workload': workload})
        if isCompressionDisabled:
            prefix += "-CD"
    else:
        # No SLO/workload: the 'no SLO' group name does not encode
        # the compression state.
        prefix = "OS-no_SLO"

    if rep_enabled:
        prefix += "-RE"

    storageGroupName = ("%(prefix)s-SG" % {'prefix': prefix})
    return storageGroupName
|
||||
|
||||
def _get_fast_settings_from_storage_group(self, storageGroupInstance):
|
||||
@ -1913,63 +1920,6 @@ class EMCVMAXUtils(object):
|
||||
|
||||
return kwargs
|
||||
|
||||
def _single_pool_support(self, fileName):
    """Single pool support.

    Parses the legacy single-pool XML configuration file and returns
    a one-element list with the connection/pool record, e.g.:

    VMAX2
    <EMC>
    <EcomServerIp>10.108.246.202</EcomServerIp>
    <EcomServerPort>5988</EcomServerPort>
    <EcomUserName>admin</EcomUserName>
    <EcomPassword>#1Password</EcomPassword>
    <PortGroups>
    <PortGroup>OS-PORTGROUP1-PG</PortGroup>
    </PortGroups>
    <Array>000198700439</Array>
    <Pool>FC_SLVR1</Pool>
    </EMC>
    VMAX3

    :param fileName: the configuration file
    :returns: list
    """
    myList = []
    kwargs = {}
    connargs = {}
    myFile = open(fileName, 'r')
    data = myFile.read()
    myFile.close()
    dom = minidom.parseString(data)
    try:
        connargs = self._get_connection_info(dom)
        interval = self._process_tag(dom, 'Interval')
        retries = self._process_tag(dom, 'Retries')
        portGroup = self._get_random_portgroup(dom)

        serialNumber = self._process_tag(dom, 'Array')
        if serialNumber is None:
            # Missing serial number is logged but not fatal here; the
            # record is still built and validated downstream.
            LOG.error(_LE(
                "Array Serial Number must be in the file "
                "%(fileName)s."),
                {'fileName': fileName})
        poolName = self._process_tag(dom, 'Pool')
        if poolName is None:
            LOG.error(_LE(
                "PoolName must be in the file "
                "%(fileName)s."),
                {'fileName': fileName})
        kwargs = self._fill_record(
            connargs, serialNumber, poolName, portGroup, dom)
        # Interval/Retries are optional tuning tags.
        if interval:
            kwargs['Interval'] = interval
        if retries:
            kwargs['Retries'] = retries

        myList.append(kwargs)
    except IndexError:
        # Malformed/empty document: return whatever was collected
        # (possibly an empty list) rather than failing.
        pass
    return myList
|
||||
|
||||
def parse_file_to_get_array_map(self, fileName):
|
||||
"""Parses a file and gets array map.
|
||||
|
||||
@ -2100,14 +2050,14 @@ class EMCVMAXUtils(object):
|
||||
portGroupNames.append(portGroupName.strip())
|
||||
portGroupNames = EMCVMAXUtils._filter_list(portGroupNames)
|
||||
if len(portGroupNames) > 0:
|
||||
return EMCVMAXUtils._get_random_pg_from_list(portGroupNames)
|
||||
return EMCVMAXUtils.get_random_pg_from_list(portGroupNames)
|
||||
|
||||
exception_message = (_("No Port Group elements found in config file."))
|
||||
LOG.error(exception_message)
|
||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
||||
|
||||
@staticmethod
|
||||
def _get_random_pg_from_list(portgroupnames):
|
||||
def get_random_pg_from_list(portgroupnames):
|
||||
"""From list of portgroup, choose one randomly
|
||||
|
||||
:param portGroupNames: list of available portgroups
|
||||
@ -2624,7 +2574,7 @@ class EMCVMAXUtils(object):
|
||||
|
||||
def get_v3_default_sg_instance_name(
|
||||
self, conn, poolName, slo, workload, storageSystemName,
|
||||
isCompressionDisabled):
|
||||
isCompressionDisabled, is_re=False):
|
||||
"""Get the V3 default instance name
|
||||
|
||||
:param conn: the connection to the ecom server
|
||||
@ -2636,7 +2586,7 @@ class EMCVMAXUtils(object):
|
||||
:returns: the storage group instance name
|
||||
"""
|
||||
storageGroupName = self.get_v3_storage_group_name(
|
||||
poolName, slo, workload, isCompressionDisabled)
|
||||
poolName, slo, workload, isCompressionDisabled, is_re)
|
||||
controllerConfigService = (
|
||||
self.find_controller_configuration_service(
|
||||
conn, storageSystemName))
|
||||
@ -2908,3 +2858,136 @@ class EMCVMAXUtils(object):
|
||||
:returns: boolean
|
||||
"""
|
||||
return value.lower() in ("yes", "true")
|
||||
|
||||
def is_replication_enabled(self, extraSpecs):
    """Check if replication is to be enabled.

    :param extraSpecs: extra specifications
    :returns: bool - true if enabled, else false
    """
    # Mere presence of the 'replication_enabled' key turns
    # replication on; its value is not inspected here.
    return IS_RE in extraSpecs
|
||||
|
||||
def get_replication_config(self, rep_device_list):
    """Gather necessary replication configuration info.

    :param rep_device_list: the replication device list from cinder.conf
    :returns: rep_config, replication configuration dict, or None when
        no replication device is configured
    :raises: VolumeBackendAPIException if mandatory SRDF keys are missing
    """
    if not rep_device_list:
        return None

    # Only a single replication target is supported.
    target = rep_device_list[0]
    rep_config = {}
    try:
        rep_config['array'] = target['target_device_id']
        rep_config['pool'] = target['remote_pool']
        rep_config['rdf_group_label'] = target['rdf_group_label']
        rep_config['portgroup'] = target['remote_port_group']
    except KeyError as ke:
        errorMessage = (_("Failed to retrieve all necessary SRDF "
                          "information. Error received: %(ke)s.") %
                        {'ke': six.text_type(ke)})
        LOG.exception(errorMessage)
        raise exception.VolumeBackendAPIException(data=errorMessage)

    try:
        # 'allow_extend' is optional; absent means False.
        rep_config['allow_extend'] = self.str2bool(
            target['allow_extend'])
    except KeyError:
        rep_config['allow_extend'] = False

    return rep_config
|
||||
|
||||
def failover_provider_location(self, provider_location,
                               replication_keybindings):
    """Transfer ownership of a volume from one array to another.

    Swaps the keybindings in the provider location with the replication
    keybindings; the displaced keybindings become the new replication
    driver data.

    :param provider_location: the provider location (dict or its
        string representation)
    :param replication_keybindings: the rep keybindings (dict or its
        string representation)
    :return: updated provider_location, replication_driver_data
    """
    import ast

    # Security fix: these strings are repr()s of plain dicts persisted
    # by this driver; ast.literal_eval parses them without the code
    # execution risk of eval().
    if isinstance(provider_location, six.text_type):
        provider_location = ast.literal_eval(provider_location)
    if isinstance(replication_keybindings, six.text_type):
        replication_keybindings = ast.literal_eval(
            replication_keybindings)

    replication_driver_data = provider_location['keybindings']
    provider_location['keybindings'] = replication_keybindings
    return provider_location, replication_driver_data
|
||||
|
||||
def find_rdf_storage_sync_sv_sv(
        self, conn, sourceInstance, storageSystem,
        targetInstance, targetStorageSystem,
        extraSpecs, waitforsync=True):
    """Find the storage synchronized name of an SRDF pair.

    Enumerates SE_StorageSynchronized_SV_SV instances and matches on
    both system names and both device ids.

    :param conn: the connection to the ecom server
    :param sourceInstance: the source instance
    :param storageSystem: the source storage system name
    :param targetInstance: the target instance
    :param targetStorageSystem: the target storage system name
    :param extraSpecs: the extra specifications
    :param waitforsync: flag for waiting until sync is complete
    :return: foundSyncInstanceName, or None if no live match exists
    """
    foundSyncInstanceName = None
    syncInstanceNames = conn.EnumerateInstanceNames(
        'SE_StorageSynchronized_SV_SV')
    for syncInstanceName in syncInstanceNames:
        syncSvTarget = syncInstanceName['SyncedElement']
        syncSvSource = syncInstanceName['SystemElement']
        if storageSystem != syncSvSource['SystemName'] or (
                targetStorageSystem != syncSvTarget['SystemName']):
            continue
        if syncSvTarget['DeviceID'] == targetInstance['DeviceID'] and (
                syncSvSource['DeviceID'] == sourceInstance['DeviceID']):
            # Check that it hasn't recently been deleted.
            try:
                conn.GetInstance(syncInstanceName)
                foundSyncInstanceName = syncInstanceName
                LOG.debug("Found sync Name: %(sync_name)s.",
                          {'sync_name': foundSyncInstanceName})
            except Exception:
                foundSyncInstanceName = None
            break

    if foundSyncInstanceName:
        # Wait for SE_StorageSynchronized_SV_SV to be fully synced.
        if waitforsync:
            # Fix: message previously read "is not not fully synced".
            LOG.warning(_LW(
                "Expect a performance hit as volume is not fully "
                "synced on %(deviceId)s."),
                {'deviceId': sourceInstance['DeviceID']})
            startTime = time.time()
            self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs)
            LOG.warning(_LW(
                "Synchronization process took: %(delta)s H:MM:SS."),
                {'delta': self.get_time_delta(startTime,
                                              time.time())})

    return foundSyncInstanceName
|
||||
|
||||
@staticmethod
def is_volume_failed_over(volume):
    """Check if a volume has been failed over.

    Fix: the original fell through and returned an implicit None when
    'replication_status' was absent or falsy; now always returns a bool.

    :param volume: the volume object
    :return: bool
    """
    if volume is None:
        return False
    return volume.get('replication_status') == REPLICATION_FAILOVER
|
||||
|
@ -0,0 +1,3 @@
|
||||
---
|
||||
features:
|
||||
- Add v2.1 volume replication support in VMAX driver.
|
Loading…
Reference in New Issue
Block a user