VMAX driver - Implement Tiramisu feature on VMAX

In Tiramisu, a group construct is used to manage a set of volumes
that are replicated together, easing their management. This patch
adds support for replication groups to the VMAX driver.

Change-Id: I9fffa0c6dc3092f3230cfa5da1ea5f3ff1e3151b
Implements: blueprint vmax-replication-group
Ciara Stacke 2017-09-20 10:56:13 +01:00
parent 27fd333df9
commit c6b0c4bca6
8 changed files with 1198 additions and 204 deletions
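For context, a minimal sketch of how a backend can recognise a replication-enabled group from its group type specs. The spec keys and helper below are illustrative assumptions based on Cinder's Tiramisu design, not code from this patch; the driver itself relies on group.is_replicated and volume_utils.is_group_a_type.

# Illustrative sketch only - the spec keys and helper are assumptions,
# not part of this patch. The driver checks group.is_replicated instead.
GROUP_REPLICATION_SPECS = ('group_replication_enabled',
                           'consistent_group_replication_enabled')

def is_replication_group(group_type_specs):
    """Return True if the group type marks the group as replicated."""
    return any(group_type_specs.get(key) == '<is> True'
               for key in GROUP_REPLICATION_SPECS)

# Example: a group type created with consistent group replication enabled.
assert is_replication_group({'consistent_group_replication_enabled': '<is> True'})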


@ -26,6 +26,7 @@ import six
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.objects import group
from cinder.objects import group_snapshot
@ -252,12 +253,12 @@ class VMAXCommonData(object):
test_vol_grp_name_id_only = 'ec870a2f-6bf7-4152-aa41-75aad8e2ea96'
test_vol_grp_name = 'Grp_source_sg_%s' % test_vol_grp_name_id_only
test_fo_vol_group = 'fo_vol_group_%s' % test_vol_grp_name_id_only
test_group_1 = group.Group(
context=None, name=storagegroup_name_source,
group_id='abc', size=1,
id=test_vol_grp_name_id_only,
status='available',
id=test_vol_grp_name_id_only, status='available',
provider_auth=None, volume_type_ids=['abc'],
group_type_id='grptypeid',
volume_types=test_volume_type_list,
@ -272,17 +273,21 @@ class VMAXCommonData(object):
provider_auth=None, volume_type_ids=['abc'],
group_type_id='grptypeid',
volume_types=test_volume_type_list,
host=fake_host, provider_location=six.text_type(provider_location))
host=fake_host, provider_location=six.text_type(provider_location),
replication_status=fields.ReplicationStatus.DISABLED)
test_rep_group = fake_group.fake_group_obj(
context=ctx, name=storagegroup_name_source,
id=test_vol_grp_name_id_only, host=fake_host,
replication_status=fields.ReplicationStatus.ENABLED)
test_group = fake_group.fake_group_obj(
context=ctx, name=storagegroup_name_source,
id='7634bda4-6950-436f-998c-37c3e01bad30', host=fake_host)
id=test_vol_grp_name_id_only, host=fake_host)
test_group_without_name = fake_group.fake_group_obj(
context=ctx,
name=None,
id=test_vol_grp_name_id_only,
host=fake_host)
context=ctx, name=None,
id=test_vol_grp_name_id_only, host=fake_host)
test_group_snapshot_1 = group_snapshot.GroupSnapshot(
context=None, id='6560405d-b89a-4f79-9e81-ad1752f5a139',
@ -300,6 +305,13 @@ class VMAXCommonData(object):
status='available',
group=test_group_failed)
test_volume_group_member = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location),
volume_type=test_volume_type, host=fake_host,
replication_driver_data=six.text_type(provider_location3),
group_id=test_vol_grp_name_id_only)
# masking view dict
masking_view_dict = {
'array': array,
@ -441,6 +453,17 @@ class VMAXCommonData(object):
"symmetrixId": array,
"numSnapVXSnapshots": 1}]
sg_rdf_details = [{"storageGroupName": test_vol_grp_name,
"symmetrixId": array,
"modes": ["Synchronous"],
"rdfGroupNumber": rdf_group_no,
"states": ["Synchronized"]},
{"storageGroupName": test_fo_vol_group,
"symmetrixId": array,
"modes": ["Synchronous"],
"rdfGroupNumber": rdf_group_no,
"states": ["Failed Over"]}]
sg_list = {"storageGroupId": [storagegroup_name_f,
defaultstoragegroup_name]}
@ -498,7 +521,12 @@ class VMAXCommonData(object):
"targetDevice": device_id2,
"sourceDevice": device_id}}],
"snapVXSrc": 'true',
"snapVXTgt": 'true'}}]}}
"snapVXTgt": 'true'},
"rdfInfo": {"RDFSession": [
{"SRDFStatus": "Ready",
"pairState": "Synchronized",
"remoteDeviceID": device_id2,
"remoteSymmetrixID": remote_array}]}}]}}
workloadtype = {"workloadId": ["OLTP", "OLTP_REP", "DSS", "DSS_REP"]}
slo_details = {"sloId": ["Bronze", "Diamond", "Gold",
@ -741,15 +769,15 @@ class FakeRequestsSession(object):
def _replication(self, url):
return_object = None
if 'rdf_group' in url:
if 'storagegroup' in url:
return_object = self._replication_sg(url)
elif 'rdf_group' in url:
if self.data.device_id in url:
return_object = self.data.rdf_group_vol_details
elif self.data.rdf_group_no in url:
return_object = self.data.rdf_group_details
else:
return_object = self.data.rdf_group_list
elif 'storagegroup' in url:
return_object = self._replication_sg(url)
elif 'snapshot' in url:
return_object = self.data.volume_snap_vx
elif 'capabilities' in url:
@ -760,6 +788,11 @@ class FakeRequestsSession(object):
return_object = None
if 'generation' in url:
return_object = self.data.group_snap_vx
elif 'rdf_group' in url:
for sg in self.data.sg_rdf_details:
if sg['storageGroupName'] in url:
return_object = sg
break
elif 'storagegroup' in url:
return_object = self.data.sg_details_rep[0]
return return_object
@ -1247,19 +1280,6 @@ class VMAXUtilsTest(test.TestCase):
vol_grp_name = self.utils.update_volume_group_name(group)
self.assertEqual(ref_group_name, vol_grp_name)
def test_update_admin_metadata(self):
admin_metadata = {'targetVolumeName': '123456'}
ref_model_update = [{'id': '12345',
'admin_metadata': admin_metadata}]
volume_model_update = {'id': '12345'}
volumes_model_update = [volume_model_update]
key = 'targetVolumeName'
values = {}
values['12345'] = '123456'
self.utils.update_admin_metadata(
volumes_model_update, key, values)
self.assertEqual(ref_model_update, volumes_model_update)
def test_get_volume_group_utils(self):
group = self.data.test_group_1
array, extraspecs_dict = self.utils.get_volume_group_utils(
@ -1313,6 +1333,38 @@ class VMAXUtilsTest(test.TestCase):
volume_model_updates, volumes, 'abc')
self.assertEqual(ref_val, ret_val)
def test_check_replication_matched(self):
# Check 1: Volume is not part of a group
self.utils.check_replication_matched(
self.data.test_volume, self.data.extra_specs)
group_volume = deepcopy(self.data.test_volume)
group_volume.group = self.data.test_group
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
# Check 2: Both volume and group have the same rep status
self.utils.check_replication_matched(
group_volume, self.data.extra_specs)
# Check 3: Volume and group have different rep status
with mock.patch.object(self.utils, 'is_replication_enabled',
return_value=True):
self.assertRaises(exception.InvalidInput,
self.utils.check_replication_matched,
group_volume, self.data.extra_specs)
def test_check_rep_status_enabled(self):
# Check 1: not replication enabled
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
self.utils.check_rep_status_enabled(self.data.test_group)
# Check 2: replication enabled, status enabled
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=True):
self.utils.check_rep_status_enabled(self.data.test_rep_group)
# Check 3: replication enabled, status disabled
self.assertRaises(exception.InvalidInput,
self.utils.check_rep_status_enabled,
self.data.test_group)
class VMAXRestTest(test.TestCase):
def setUp(self):
@ -2408,6 +2460,14 @@ class VMAXRestTest(test.TestCase):
sg_value, qos_extra_spec, input_prop_dict)
self.assertEqual(input_prop_dict, ret_prop_dict)
@mock.patch.object(rest.VMAXRest, 'modify_storage_group',
return_value=(202, VMAXCommonData.job_list[0]))
def test_set_storagegroup_srp(self, mock_mod):
self.rest.set_storagegroup_srp(
self.data.array, self.data.test_vol_grp_name,
self.data.srp2, self.data.extra_specs)
mock_mod.assert_called_once()
def test_get_rdf_group(self):
with mock.patch.object(self.rest, 'get_resource') as mock_get:
self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no)
@ -2420,26 +2480,29 @@ class VMAXRestTest(test.TestCase):
self.assertEqual(self.data.rdf_group_list, rdf_list)
def test_get_rdf_group_volume(self):
with mock.patch.object(self.rest, 'get_resource') as mock_get:
vol_details = self.data.private_vol_details['resultList']['result'][0]
with mock.patch.object(
self.rest, '_get_private_volume', return_value=vol_details
) as mock_get:
self.rest.get_rdf_group_volume(
self.data.array, self.data.rdf_group_no, self.data.device_id)
self.data.array, self.data.device_id)
mock_get.assert_called_once_with(
self.data.array, 'replication', 'rdf_group', "70/volume/00001")
self.data.array, self.data.device_id)
def test_are_vols_rdf_paired(self):
are_vols1, local_state, pair_state = self.rest.are_vols_rdf_paired(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no)
self.data.device_id2)
self.assertTrue(are_vols1)
are_vols2, local_state, pair_state = self.rest.are_vols_rdf_paired(
self.data.array, "00012345", self.data.device_id,
self.data.device_id2, self.data.rdf_group_no)
self.data.device_id2)
self.assertFalse(are_vols2)
with mock.patch.object(self.rest, "get_rdf_group_volume",
return_value=None):
are_vols3, local, pair = self.rest.are_vols_rdf_paired(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no)
self.data.device_id2)
self.assertFalse(are_vols3)
def test_get_rdf_group_number(self):
@ -2536,6 +2599,40 @@ class VMAXRestTest(test.TestCase):
snap_name,
extra_specs)
def test_get_storagegroup_rdf_details(self):
details = self.rest.get_storagegroup_rdf_details(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no)
self.assertEqual(self.data.sg_rdf_details[0], details)
def test_verify_rdf_state(self):
verify1 = self.rest._verify_rdf_state(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failover')
self.assertTrue(verify1)
verify2 = self.rest._verify_rdf_state(
self.data.array, self.data.test_fo_vol_group,
self.data.rdf_group_no, 'Establish')
self.assertTrue(verify2)
def test_modify_storagegroup_rdf(self):
with mock.patch.object(
self.rest, 'modify_resource',
return_value=(202, self.data.job_list[0])) as mock_mod:
self.rest.modify_storagegroup_rdf(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failover',
self.data.extra_specs)
mock_mod.assert_called_once()
def test_delete_storagegroup_rdf(self):
with mock.patch.object(
self.rest, 'delete_resource') as mock_del:
self.rest.delete_storagegroup_rdf(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no)
mock_del.assert_called_once()
class VMAXProvisionTest(test.TestCase):
def setUp(self):
@ -2887,6 +2984,59 @@ class VMAXProvisionTest(test.TestCase):
self.assertEqual(2, mock_sg.call_count)
self.assertEqual(1, mock_create.call_count)
@mock.patch.object(rest.VMAXRest, 'create_resource',
return_value=(202, VMAXCommonData.job_list[0]))
def test_replicate_group(self, mock_create):
self.rest.replicate_group(
self.data.array, self.data.test_rep_group,
self.data.rdf_group_no, self.data.remote_array,
self.data.extra_specs)
mock_create.assert_called_once()
def test_enable_group_replication(self):
with mock.patch.object(self.rest,
'modify_storagegroup_rdf') as mock_mod:
self.provision.enable_group_replication(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_mod.assert_called_once()
def test_disable_group_replication(self):
with mock.patch.object(self.rest,
'modify_storagegroup_rdf') as mock_mod:
self.provision.disable_group_replication(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_mod.assert_called_once()
def test_failover_group(self):
with mock.patch.object(self.rest,
'modify_storagegroup_rdf') as mock_fo:
# Failover
self.provision.failover_group(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_fo.assert_called_once_with(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failover', self.data.extra_specs)
mock_fo.reset_mock()
# Failback
self.provision.failover_group(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs, False)
mock_fo.assert_called_once_with(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failback', self.data.extra_specs)
@mock.patch.object(rest.VMAXRest, 'modify_storagegroup_rdf')
@mock.patch.object(rest.VMAXRest, 'delete_storagegroup_rdf')
def test_delete_group_replication(self, mock_mod, mock_del):
self.provision.delete_group_replication(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_mod.assert_called_once()
mock_del.assert_called_once()
class VMAXCommonTest(test.TestCase):
def setUp(self):
@ -4178,15 +4328,14 @@ class VMAXCommonTest(test.TestCase):
group_snapshot,
snapshots)
def test_create_group(self):
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_type',
side_effect=[False, False])
def test_create_group(self, mock_type, mock_cg_type):
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
context = None
group = self.data.test_group_1
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update = self.common.create_group(context, group)
self.assertEqual(ref_model_update, model_update)
model_update = self.common.create_group(None, self.data.test_group_1)
self.assertEqual(ref_model_update, model_update)
def test_create_group_exception(self):
context = None
@ -4196,8 +4345,7 @@ class VMAXCommonTest(test.TestCase):
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.create_group,
context,
group)
context, group)
def test_delete_group_snapshot(self):
group_snapshot = self.data.test_group_snapshot_1
@ -4234,47 +4382,39 @@ class VMAXCommonTest(test.TestCase):
snapshots))
self.assertEqual(ref_model_update, model_update)
def test_update_group(self):
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group(self, mock_cg_type, mock_type_check):
group = self.data.test_group_1
add_vols = [self.data.test_volume]
remove_vols = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, __, __ = self.common.update_group(group,
add_vols,
remove_vols)
self.assertEqual(ref_model_update, model_update)
model_update, __, __ = self.common.update_group(group,
add_vols,
remove_vols)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.VMAXCommon, '_find_volume_group',
return_value=None)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_not_found(self, mock_check):
group = self.data.test_group_1
add_vols = []
remove_vols = []
with mock.patch.object(
self.common, '_find_volume_group',
return_value=None):
self.assertRaises(exception.GroupNotFound,
self.common.update_group,
group,
add_vols,
remove_vols)
def test_update_group_not_found(self, mock_check, mock_grp):
self.assertRaises(exception.GroupNotFound, self.common.update_group,
self.data.test_group_1, [], [])
@mock.patch.object(common.VMAXCommon, '_find_volume_group',
side_effect=exception.VolumeBackendAPIException)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_exception(self, mock_check):
group = self.data.test_group_1
add_vols = []
remove_vols = []
with mock.patch.object(
self.common, '_find_volume_group',
side_effect=exception.VolumeBackendAPIException):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.update_group,
group, add_vols, remove_vols)
def test_update_group_exception(self, mock_check, mock_grp):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.update_group,
self.data.test_group_1, [], [])
def test_delete_group(self):
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_delete_group(self, mock_check):
group = self.data.test_group_1
volumes = [self.data.test_volume]
context = None
@ -4287,7 +4427,8 @@ class VMAXCommonTest(test.TestCase):
context, group, volumes)
self.assertEqual(ref_model_update, model_update)
def test_delete_group_success(self):
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_delete_group_success(self, mock_check):
group = self.data.test_group_1
volumes = []
ref_model_update = {'status': fields.GroupStatus.DELETED}
@ -4307,9 +4448,10 @@ class VMAXCommonTest(test.TestCase):
model_update, __ = self.common._delete_group(group, volumes)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_failed(self, mock_check):
def test_delete_group_failed(self, mock_check, mock_type_check):
group = self.data.test_group_1
volumes = []
ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING}
@ -4320,7 +4462,11 @@ class VMAXCommonTest(test.TestCase):
group, volumes)
self.assertEqual(ref_model_update, model_update)
def test_create_group_from_src_success(self):
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
def test_create_group_from_src_success(self, mock_type, mock_cg_type):
context = None
group = self.data.test_group_1
group_snapshot = self.data.test_group_snapshot_1
@ -4329,13 +4475,11 @@ class VMAXCommonTest(test.TestCase):
source_group = None
source_vols = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, volumes_model_update = (
self.common.create_group_from_src(
context, group, volumes,
group_snapshot, snapshots,
source_group, source_vols))
model_update, volumes_model_update = (
self.common.create_group_from_src(
context, group, volumes,
group_snapshot, snapshots,
source_group, source_vols))
self.assertEqual(ref_model_update, model_update)
@ -4565,6 +4709,27 @@ class VMAXFCTest(test.TestCase):
mock_fo.assert_called_once_with([self.data.test_volume], None,
None)
def test_enable_replication(self):
with mock.patch.object(
self.common, 'enable_replication') as mock_er:
self.driver.enable_replication(
self.data.ctx, self.data.test_group, [self.data.test_volume])
mock_er.assert_called_once()
def test_disable_replication(self):
with mock.patch.object(
self.common, 'disable_replication') as mock_dr:
self.driver.disable_replication(
self.data.ctx, self.data.test_group, [self.data.test_volume])
mock_dr.assert_called_once()
def test_failover_replication(self):
with mock.patch.object(
self.common, 'failover_replication') as mock_fo:
self.driver.failover_replication(
self.data.ctx, self.data.test_group, [self.data.test_volume])
mock_fo.assert_called_once()
class VMAXISCSITest(test.TestCase):
def setUp(self):
@ -4804,6 +4969,27 @@ class VMAXISCSITest(test.TestCase):
mock_fo.assert_called_once_with([self.data.test_volume], None,
None)
def test_enable_replication(self):
with mock.patch.object(
self.common, 'enable_replication') as mock_er:
self.driver.enable_replication(
self.data.ctx, self.data.test_group, [self.data.test_volume])
mock_er.assert_called_once()
def test_disable_replication(self):
with mock.patch.object(
self.common, 'disable_replication') as mock_dr:
self.driver.disable_replication(
self.data.ctx, self.data.test_group, [self.data.test_volume])
mock_dr.assert_called_once()
def test_failover_replication(self):
with mock.patch.object(
self.common, 'failover_replication') as mock_fo:
self.driver.failover_replication(
self.data.ctx, self.data.test_group, [self.data.test_volume])
mock_fo.assert_called_once()
class VMAXMaskingTest(test.TestCase):
def setUp(self):
@ -5759,18 +5945,33 @@ class VMAXCommonReplicationTest(test.TestCase):
self.common._get_replication_info()
self.assertTrue(self.common.replication_enabled)
def test_create_replicated_volume(self):
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=False)
@mock.patch.object(objects.Group, 'get_by_id',
return_value=VMAXCommonData.test_rep_group)
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=True)
@mock.patch.object(utils.VMAXUtils, 'check_replication_matched',
return_value=True)
@mock.patch.object(masking.VMAXMasking, 'add_volume_to_storage_group')
@mock.patch.object(
common.VMAXCommon, '_replicate_volume',
return_value={
'replication_driver_data':
VMAXCommonData.test_volume.replication_driver_data})
def test_create_replicated_volume(self, mock_rep, mock_add, mock_match,
mock_check, mock_get, mock_cg):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
vol_identifier = self.utils.get_volume_element_name(
self.data.test_volume.id)
with mock.patch.object(self.common, '_replicate_volume',
return_value={}) as mock_rep:
self.common.create_volume(self.data.test_volume)
volume_dict = self.data.provider_location
mock_rep.assert_called_once_with(
self.data.test_volume, vol_identifier, volume_dict,
extra_specs)
self.common.create_volume(self.data.test_volume)
volume_dict = self.data.provider_location
mock_rep.assert_called_once_with(
self.data.test_volume, vol_identifier, volume_dict,
extra_specs)
# Add volume to replication group
self.common.create_volume(self.data.test_volume_group_member)
mock_add.assert_called_once()
def test_create_cloned_replicated_volume(self):
extra_specs = deepcopy(self.extra_specs)
@ -6013,6 +6214,17 @@ class VMAXCommonReplicationTest(test.TestCase):
self.common.failover_host,
volumes, secondary_id="default")
@mock.patch.object(common.VMAXCommon, 'failover_replication',
return_value=({}, {}))
@mock.patch.object(common.VMAXCommon, '_failover_volume',
return_value={})
def test_failover_host_groups(self, mock_fv, mock_fg):
volumes = [self.data.test_volume_group_member]
group1 = self.data.test_group
self.common.failover_host(volumes, None, [group1])
mock_fv.assert_not_called()
mock_fg.assert_called_once()
def test_failover_volume(self):
ref_model_update = {
'volume_id': self.data.test_volume.id,
@ -6215,3 +6427,164 @@ class VMAXCommonReplicationTest(test.TestCase):
secondary_info = self.common.get_secondary_stats_info(
rep_config, array_info)
self.assertEqual(ref_info, secondary_info)
def test_replicate_group(self):
volume_model_update = {
'id': self.data.test_volume.id,
'provider_location': self.data.test_volume.provider_location}
vols_model_update = self.common._replicate_group(
self.data.array, [volume_model_update],
self.data.test_vol_grp_name, self.extra_specs)
ref_rep_data = six.text_type({'array': self.data.remote_array,
'device_id': self.data.device_id2})
ref_vol_update = {
'id': self.data.test_volume.id,
'provider_location': self.data.test_volume.provider_location,
'replication_driver_data': ref_rep_data,
'replication_status': fields.ReplicationStatus.ENABLED}
self.assertEqual(ref_vol_update, vols_model_update[0])
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_type',
side_effect=[True, True])
def test_create_replicaton_group(self, mock_type, mock_cg_type):
ref_model_update = {
'status': fields.GroupStatus.AVAILABLE,
'replication_status': fields.ReplicationStatus.ENABLED}
model_update = self.common.create_group(None, self.data.test_group_1)
self.assertEqual(ref_model_update, model_update)
def test_enable_replication(self):
# Case 1: Group not replicated
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
self.assertRaises(NotImplementedError,
self.common.enable_replication,
None, self.data.test_group,
[self.data.test_volume])
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=True):
# Case 2: Empty group
model_update, __ = self.common.enable_replication(
None, self.data.test_group, [])
self.assertEqual({}, model_update)
# Case 3: Successfully enabled
model_update, __ = self.common.enable_replication(
None, self.data.test_group, [self.data.test_volume])
self.assertEqual(fields.ReplicationStatus.ENABLED,
model_update['replication_status'])
# Case 4: Exception
model_update, __ = self.common.enable_replication(
None, self.data.test_group_failed, [self.data.test_volume])
self.assertEqual(fields.ReplicationStatus.ERROR,
model_update['replication_status'])
def test_disable_replication(self):
# Case 1: Group not replicated
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
self.assertRaises(NotImplementedError,
self.common.disable_replication,
None, self.data.test_group,
[self.data.test_volume])
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=True):
# Case 2: Empty group
model_update, __ = self.common.disable_replication(
None, self.data.test_group, [])
self.assertEqual({}, model_update)
# Case 3: Successfully disabled
model_update, __ = self.common.disable_replication(
None, self.data.test_group, [self.data.test_volume])
self.assertEqual(fields.ReplicationStatus.DISABLED,
model_update['replication_status'])
# Case 4: Exception
model_update, __ = self.common.disable_replication(
None, self.data.test_group_failed, [self.data.test_volume])
self.assertEqual(fields.ReplicationStatus.ERROR,
model_update['replication_status'])
def test_failover_replication(self):
# Case 1: Group not replicated
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
self.assertRaises(NotImplementedError,
self.common.failover_replication,
None, self.data.test_group,
[self.data.test_volume])
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=True):
# Case 2: Empty group
model_update, __ = self.common.failover_replication(
None, self.data.test_group, [])
self.assertEqual({}, model_update)
# Case 3: Successfully failed over
model_update, __ = self.common.failover_replication(
None, self.data.test_group, [self.data.test_volume])
self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
model_update['replication_status'])
# Case 4: Successfully failed back
model_update, __ = self.common.failover_replication(
None, self.data.test_group, [self.data.test_volume],
secondary_backend_id='default')
self.assertEqual(fields.ReplicationStatus.ENABLED,
model_update['replication_status'])
# Case 5: Exception
model_update, __ = self.common.failover_replication(
None, self.data.test_group_failed, [self.data.test_volume])
self.assertEqual(fields.ReplicationStatus.ERROR,
model_update['replication_status'])
@mock.patch.object(utils.VMAXUtils, 'get_volume_group_utils',
return_value=(VMAXCommonData.array, []))
@mock.patch.object(common.VMAXCommon, '_cleanup_group_replication')
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=True)
def test_delete_replication_group(self, mock_check,
mock_cleanup, mock_utils):
self.common._delete_group(self.data.test_rep_group, [])
mock_cleanup.assert_called_once()
@mock.patch.object(masking.VMAXMasking,
'remove_volumes_from_storage_group')
@mock.patch.object(utils.VMAXUtils, 'check_rep_status_enabled')
@mock.patch.object(common.VMAXCommon,
'_remove_remote_vols_from_volume_group')
@mock.patch.object(common.VMAXCommon, '_add_remote_vols_to_volume_group')
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_replicated_group(self, mock_cg_type, mock_type_check,
mock_add, mock_remove, mock_check,
mock_rm):
add_vols = [self.data.test_volume]
remove_vols = [self.data.test_clone_volume]
self.common.update_group(
self.data.test_group_1, add_vols, remove_vols)
mock_add.assert_called_once()
mock_remove.assert_called_once()
@mock.patch.object(masking.VMAXMasking,
'add_volumes_to_storage_group')
def test_add_remote_vols_to_volume_group(self, mock_add):
self.common._add_remote_vols_to_volume_group(
self.data.remote_array, [self.data.test_volume],
self.data.test_rep_group, self.data.rep_extra_specs)
mock_add.assert_called_once()
@mock.patch.object(masking.VMAXMasking,
'remove_volumes_from_storage_group')
def test_remove_remote_vols_from_volume_group(self, mock_rm):
self.common._remove_remote_vols_from_volume_group(
self.data.remote_array, [self.data.test_volume],
self.data.test_rep_group, self.data.rep_extra_specs)
mock_rm.assert_called_once()
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
@mock.patch.object(masking.VMAXMasking,
'remove_volumes_from_storage_group')
def test_cleanup_group_replication(self, mock_rm, mock_rm_reset):
self.common._cleanup_group_replication(
self.data.array, self.data.test_vol_grp_name,
[self.data.device_id], self.extra_specs)
mock_rm.assert_called_once()


@ -22,7 +22,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six
import uuid
from cinder import coordination
from cinder import exception
@ -243,6 +242,7 @@ class VMAXCommon(object):
:returns: model_update - dict
"""
model_update = {}
rep_driver_data = {}
volume_id = volume.id
extra_specs = self._initial_setup(volume)
@ -253,24 +253,50 @@ class VMAXCommon(object):
volume_dict = (self._create_volume(
volume_name, volume_size, extra_specs))
if volume.group_id is not None:
group_name = self.provision.get_or_create_volume_group(
extra_specs[utils.ARRAY], volume.group, extra_specs)
self.masking.add_volume_to_storage_group(
extra_specs[utils.ARRAY], volume_dict['device_id'],
group_name, volume_name, extra_specs)
# Set-up volume replication, if enabled
if self.utils.is_replication_enabled(extra_specs):
rep_update = self._replicate_volume(volume, volume_name,
volume_dict, extra_specs)
rep_driver_data = rep_update['replication_driver_data']
model_update.update(rep_update)
# Add volume to group, if required
if volume.group_id is not None:
if (volume_utils.is_group_a_cg_snapshot_type(volume.group)
or volume.group.is_replicated):
self._add_new_volume_to_volume_group(
volume, volume_dict['device_id'], volume_name,
extra_specs, rep_driver_data)
LOG.info("Leaving create_volume: %(name)s. Volume dict: %(dict)s.",
{'name': volume_name, 'dict': volume_dict})
model_update.update(
{'provider_location': six.text_type(volume_dict)})
return model_update
def _add_new_volume_to_volume_group(self, volume, device_id, volume_name,
extra_specs, rep_driver_data=None):
"""Add a new volume to a volume group.
This may also be called after extending a replicated volume.
:param volume: the volume object
:param device_id: the device id
:param volume_name: the volume name
:param extra_specs: the extra specifications
:param rep_driver_data: the replication driver data, optional
"""
self.utils.check_replication_matched(volume, extra_specs)
group_name = self.provision.get_or_create_volume_group(
extra_specs[utils.ARRAY], volume.group, extra_specs)
self.masking.add_volume_to_storage_group(
extra_specs[utils.ARRAY], device_id,
group_name, volume_name, extra_specs)
# Add remote volume to remote group, if required
if volume.group.is_replicated:
self._add_remote_vols_to_volume_group(
extra_specs[utils.ARRAY],
[volume], volume.group, extra_specs, rep_driver_data)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
@ -757,7 +783,10 @@ class VMAXCommon(object):
'max_over_subscription_ratio':
max_oversubscription_ratio,
'reserved_percentage': reserved_percentage,
'replication_enabled': self.replication_enabled
'replication_enabled': self.replication_enabled,
'group_replication_enabled': self.replication_enabled,
'consistent_group_replication_enabled':
self.replication_enabled
}
if array_reserve_percent:
if isinstance(reserved_percentage, int):
@ -877,18 +906,8 @@ class VMAXCommon(object):
device_id = name['keybindings']['DeviceID']
else:
device_id = None
element_name = self.utils.get_volume_element_name(
volume_name)
admin_metadata = {}
if 'admin_metadata' in volume:
admin_metadata = volume.admin_metadata
if 'targetVolumeName' in admin_metadata:
target_vol_name = admin_metadata['targetVolumeName']
founddevice_id = self.rest.check_volume_device_id(
array, target_vol_name, device_id)
else:
founddevice_id = self.rest.check_volume_device_id(
array, device_id, element_name)
founddevice_id = self.rest.check_volume_device_id(
array, device_id, volume_name)
if founddevice_id is None:
LOG.debug("Volume %(volume_name)s not found on the array.",
@ -2227,7 +2246,7 @@ class VMAXCommon(object):
"""
are_vols_paired, local_vol_state, pair_state = (
self.rest.are_vols_rdf_paired(
array, remote_array, device_id, target_device, rdf_group))
array, remote_array, device_id, target_device))
if are_vols_paired:
# Break the sync relationship.
self.provision.break_rdf_relationship(
@ -2304,8 +2323,11 @@ class VMAXCommon(object):
:param secondary_id: the target backend
:param groups: replication groups
:returns: secondary_id, volume_update_list, group_update_list
:raises: VolumeBackendAPIException
"""
volume_update_list = []
group_update_list = []
group_fo = None
if secondary_id != 'default':
if not self.failover:
self.failover = True
@ -2325,6 +2347,7 @@ class VMAXCommon(object):
if self.failover:
self.failover = False
secondary_id = None
group_fo = 'default'
else:
exception_message = (_(
"Cannot failback backend %(backend)s- backend not "
@ -2336,6 +2359,20 @@ class VMAXCommon(object):
raise exception.VolumeBackendAPIException(
data=exception_message)
if groups:
for group in groups:
vol_list = []
for index, vol in enumerate(volumes):
if vol.group_id == group.id:
vol_list.append(volumes.pop(index))
grp_update, vol_updates = (
self.failover_replication(
None, group, vol_list, group_fo, host=True))
group_update_list.append({'group_id': group.id,
'updates': grp_update})
volume_update_list += vol_updates
for volume in volumes:
extra_specs = self._initial_setup(volume)
if self.utils.is_replication_enabled(extra_specs):
@ -2357,7 +2394,7 @@ class VMAXCommon(object):
volume_update_list.append(recovery)
LOG.info("Failover host complete.")
return secondary_id, volume_update_list, []
return secondary_id, volume_update_list, group_update_list
def _failover_volume(self, vol, failover, extra_specs):
"""Failover a volume.
@ -2460,8 +2497,7 @@ class VMAXCommon(object):
target_device = remote_device
are_vols_paired, local_vol_state, pair_state = (
self.rest.are_vols_rdf_paired(
array, remote_array, device_id,
target_device, rdf_group))
array, remote_array, device_id, target_device))
if not are_vols_paired:
target_device = None
except (KeyError, ValueError):
@ -2523,6 +2559,11 @@ class VMAXCommon(object):
self.setup_volume_replication(
array, volume, device_id, extra_specs, target_device)
# Check if volume needs to be returned to volume group
if volume.group_id:
self._add_new_volume_to_volume_group(
volume, device_id, volume_name, extra_specs)
except Exception as e:
exception_message = (_("Error extending volume. "
"Error received was %(e)s") %
@ -2676,7 +2717,8 @@ class VMAXCommon(object):
return rep_extra_specs
def get_secondary_stats_info(self, rep_config, array_info):
@staticmethod
def get_secondary_stats_info(rep_config, array_info):
"""On failover, report on secondary array statistics.
:param rep_config: the replication configuration
@ -2722,10 +2764,11 @@ class VMAXCommon(object):
:param context: the context
:param group: the group object to be created
:returns: dict -- modelUpdate = {'status': 'available'}
:returns: dict -- modelUpdate
:raises: VolumeBackendAPIException, NotImplementedError
"""
if not volume_utils.is_group_a_cg_snapshot_type(group):
if (not volume_utils.is_group_a_cg_snapshot_type(group)
and not group.is_replicated):
raise NotImplementedError()
model_update = {'status': fields.GroupStatus.AVAILABLE}
@ -2742,6 +2785,15 @@ class VMAXCommon(object):
self.interval, self.retries)
self.provision.create_volume_group(
array, vol_grp_name, interval_retries_dict)
if group.is_replicated:
LOG.debug("Group: %(group)s is a replication group.",
{'group': group.id})
# Create remote group
__, remote_array = self.get_rdf_details(array)
self.provision.create_volume_group(
remote_array, vol_grp_name, interval_retries_dict)
model_update.update({
'replication_status': fields.ReplicationStatus.ENABLED})
except Exception:
exception_message = (_("Failed to create generic volume group:"
" %(volGrpName)s.")
@ -2763,7 +2815,8 @@ class VMAXCommon(object):
"""
LOG.info("Delete generic volume group: %(group)s.",
{'group': group.id})
if not volume_utils.is_group_a_cg_snapshot_type(group):
if (not volume_utils.is_group_a_cg_snapshot_type(group)
and not group.is_replicated):
raise NotImplementedError()
model_update, volumes_model_update = self._delete_group(
group, volumes)
@ -2800,33 +2853,36 @@ class VMAXCommon(object):
intervals_retries_dict = self.utils.get_intervals_retries_dict(
self.interval, self.retries)
deleted_volume_device_ids = []
# Remove replication for group, if applicable
if group.is_replicated:
self._cleanup_group_replication(
array, vol_grp_name, volume_device_ids,
intervals_retries_dict)
try:
# If there are no volumes in sg then delete it
if not volume_device_ids:
self.rest.delete_storage_group(array, vol_grp_name)
model_update = {'status': fields.GroupStatus.DELETED}
volumes_model_update = self.utils.update_volume_model_updates(
volumes_model_update, volumes, group.id, status='deleted')
return model_update, volumes_model_update
# First remove all the volumes from the SG
self.masking.remove_volumes_from_storage_group(
array, volume_device_ids, vol_grp_name, intervals_retries_dict)
for vol in volumes:
for extraspecs_dict in extraspecs_dict_list:
if vol.volume_type_id in extraspecs_dict['volumeTypeId']:
extraspecs = extraspecs_dict.get(utils.EXTRA_SPECS)
device_id = self._find_device_on_array(vol,
extraspecs)
if device_id in volume_device_ids:
self._remove_vol_and_cleanup_replication(
array, device_id,
vol.name, extraspecs, vol)
self._delete_from_srp(
array, device_id, "group vol", extraspecs)
else:
LOG.debug("Volume not present in storage group.")
# Add the device id to the deleted list
deleted_volume_device_ids.append(device_id)
if volume_device_ids:
# First remove all the volumes from the SG
self.masking.remove_volumes_from_storage_group(
array, volume_device_ids, vol_grp_name,
intervals_retries_dict)
for vol in volumes:
for extraspecs_dict in extraspecs_dict_list:
if (vol.volume_type_id in
extraspecs_dict['volumeTypeId']):
extraspecs = extraspecs_dict.get(
utils.EXTRA_SPECS)
device_id = self._find_device_on_array(
vol, extraspecs)
if device_id in volume_device_ids:
self.masking.remove_and_reset_members(
array, vol, device_id, vol.name,
extraspecs, False)
self._delete_from_srp(
array, device_id, "group vol", extraspecs)
else:
LOG.debug("Volume not found on the array.")
# Add the device id to the deleted list
deleted_volume_device_ids.append(device_id)
# Once all volumes are deleted then delete the SG
self.rest.delete_storage_group(array, vol_grp_name)
model_update = {'status': fields.GroupStatus.DELETED}
@ -2865,6 +2921,38 @@ class VMAXCommon(object):
return model_update, volumes_model_update
def _cleanup_group_replication(
self, array, vol_grp_name, volume_device_ids, extra_specs):
"""Cleanup remote replication.
Break and delete the rdf replication relationship and
delete the remote storage group and member devices.
:param array: the array serial number
:param vol_grp_name: the volume group name
:param volume_device_ids: the device ids of the local volumes
:param extra_specs: the extra specifications
"""
rdf_group_no, remote_array = self.get_rdf_details(array)
# Delete replication for group, if applicable
if volume_device_ids:
self.provision.delete_group_replication(
array, vol_grp_name, rdf_group_no, extra_specs)
remote_device_ids = self._get_members_of_volume_group(
remote_array, vol_grp_name)
# Remove volumes from remote replication group
if remote_device_ids:
self.masking.remove_volumes_from_storage_group(
remote_array, remote_device_ids, vol_grp_name, extra_specs)
for device_id in remote_device_ids:
# Make sure they are not members of any other storage groups
self.masking.remove_and_reset_members(
remote_array, None, device_id, 'target_vol',
extra_specs, False)
self._delete_from_srp(
remote_array, device_id, "group vol", extra_specs)
# Once all volumes are deleted then delete the SG
self.rest.delete_storage_group(remote_array, vol_grp_name)
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Creates a generic volume group snapshot.
@ -3053,7 +3141,8 @@ class VMAXCommon(object):
"This adds and/or removes volumes from "
"a generic volume group.",
{'group': group.id})
if not volume_utils.is_group_a_cg_snapshot_type(group):
if (not volume_utils.is_group_a_cg_snapshot_type(group)
and not group.is_replicated):
raise NotImplementedError()
array, __ = self.utils.get_volume_group_utils(
@ -3065,25 +3154,35 @@ class VMAXCommon(object):
remove_device_ids = self._get_volume_device_ids(remove_vols, array)
vol_grp_name = None
try:
volume_group = self._find_volume_group(
array, group)
volume_group = self._find_volume_group(array, group)
if volume_group:
if 'name' in volume_group:
vol_grp_name = volume_group['name']
if vol_grp_name is None:
raise exception.GroupNotFound(
group_id=group.id)
raise exception.GroupNotFound(group_id=group.id)
interval_retries_dict = self.utils.get_intervals_retries_dict(
self.interval, self.retries)
# Add volume(s) to the group
if add_device_ids:
self.utils.check_rep_status_enabled(group)
for vol in add_vols:
extra_specs = self._initial_setup(vol)
self.utils.check_replication_matched(vol, extra_specs)
self.masking.add_volumes_to_storage_group(
array, add_device_ids, vol_grp_name, interval_retries_dict)
if group.is_replicated:
# Add remote volumes to remote storage group
self._add_remote_vols_to_volume_group(
array, add_vols, group, interval_retries_dict)
# Remove volume(s) from the group
if remove_device_ids:
self.masking.remove_volumes_from_storage_group(
array, remove_device_ids,
vol_grp_name, interval_retries_dict)
if group.is_replicated:
# Remove remote volumes from the remote storage group
self._remove_remote_vols_from_volume_group(
array, remove_vols, group, interval_retries_dict)
except exception.GroupNotFound:
raise
except Exception as ex:
@ -3096,6 +3195,57 @@ class VMAXCommon(object):
return model_update, None, None
def _add_remote_vols_to_volume_group(
self, array, volumes, group,
extra_specs, rep_driver_data=None):
"""Add the remote volumes to their volume group.
:param array: the array serial number
:param volumes: list of volumes
:param group: the group object
:param extra_specs: the extra specifications
:param rep_driver_data: replication driver data, optional
"""
remote_device_list = []
__, remote_array = self.get_rdf_details(array)
for vol in volumes:
try:
remote_loc = ast.literal_eval(vol.replication_driver_data)
except (ValueError, KeyError):
remote_loc = ast.literal_eval(rep_driver_data)
founddevice_id = self.rest.check_volume_device_id(
remote_array, remote_loc['device_id'], vol.id)
if founddevice_id is not None:
remote_device_list.append(founddevice_id)
group_name = self.provision.get_or_create_volume_group(
remote_array, group, extra_specs)
self.masking.add_volumes_to_storage_group(
remote_array, remote_device_list, group_name, extra_specs)
LOG.info("Added volumes to remote volume group.")
def _remove_remote_vols_from_volume_group(
self, array, volumes, group, extra_specs):
"""Remove the remote volumes from their volume group.
:param array: the array serial number
:param volumes: list of volumes
:param group: the group object
:param extra_specs: the extra specifications
"""
remote_device_list = []
__, remote_array = self.get_rdf_details(array)
for vol in volumes:
remote_loc = ast.literal_eval(vol.replication_driver_data)
founddevice_id = self.rest.check_volume_device_id(
remote_array, remote_loc['device_id'], vol.id)
if founddevice_id is not None:
remote_device_list.append(founddevice_id)
group_name = self.provision.get_or_create_volume_group(
array, group, extra_specs)
self.masking.remove_volumes_from_storage_group(
remote_array, remote_device_list, group_name, extra_specs)
LOG.info("Removed volumes from remote volume group.")
def _get_volume_device_ids(self, volumes, array):
"""Get volume device ids from volume.
@ -3159,7 +3309,6 @@ class VMAXCommon(object):
tgt_name = self.utils.update_volume_group_name(group)
self.create_group(context, group)
model_update = {'status': fields.GroupStatus.AVAILABLE}
snap_name = None
try:
array, extraspecs_dict_list = (
self.utils.get_volume_group_utils(
@ -3178,8 +3327,8 @@ class VMAXCommon(object):
if volume.volume_type_id in (
extraspecs_dict['volumeTypeId']):
extraspecs = extraspecs_dict.get(utils.EXTRA_SPECS)
# Create a random UUID and use it as volume name
target_volume_name = six.text_type(uuid.uuid4())
target_volume_name = (
self.utils.get_volume_element_name(volume.id))
volume_dict = self.provision.create_volume_from_sg(
array, target_volume_name,
tgt_name, volume_size, extraspecs)
@ -3209,7 +3358,6 @@ class VMAXCommon(object):
self.provision.link_and_break_replica(
array, vol_grp_name, tgt_name, snap_name,
interval_retries_dict, delete_snapshot=create_snapshot)
except Exception:
exception_message = (_("Failed to create vol grp %(volGrpName)s"
" from source %(grpSnapshot)s.")
@ -3220,16 +3368,203 @@ class VMAXCommon(object):
volumes_model_update = self.utils.update_volume_model_updates(
volumes_model_update, volumes, group.id, model_update['status'])
# Update the provider_location
# Update the provider_location & replication status
for volume_model_update in volumes_model_update:
if volume_model_update['id'] in dict_volume_dicts:
volume_model_update.update(
{'provider_location': six.text_type(
dict_volume_dicts[volume_model_update['id']])})
# Update the volumes_model_update with admin_metadata
self.utils.update_admin_metadata(volumes_model_update,
key='targetVolumeName',
values=target_volume_names)
if group.is_replicated:
volumes_model_update = self._replicate_group(
array, volumes_model_update,
tgt_name, interval_retries_dict)
model_update.update({
'replication_status': fields.ReplicationStatus.ENABLED})
return model_update, volumes_model_update
def _replicate_group(self, array, volumes_model_update,
group_name, extra_specs):
"""Replicate a cloned volume group.
:param array: the array serial number
:param volumes_model_update: the volumes model updates
:param group_name: the group name
:param extra_specs: the extra specs
:return: volumes_model_update
"""
rdf_group_no, remote_array = self.get_rdf_details(array)
self.rest.replicate_group(
array, group_name, rdf_group_no, remote_array, extra_specs)
# Need to set SRP to None for generic volume group - Not set
# automatically, and a volume can only be in one storage group
# managed by FAST
self.rest.set_storagegroup_srp(array, group_name, "None", extra_specs)
for volume_model_update in volumes_model_update:
vol_id = volume_model_update['id']
loc = ast.literal_eval(volume_model_update['provider_location'])
src_device_id = loc['device_id']
rdf_vol_details = self.rest.get_rdf_group_volume(
array, src_device_id)
tgt_device_id = rdf_vol_details['remoteDeviceID']
element_name = self.utils.get_volume_element_name(vol_id)
self.rest.rename_volume(remote_array, tgt_device_id, element_name)
rep_update = {'device_id': tgt_device_id, 'array': remote_array}
volume_model_update.update(
{'replication_driver_data': six.text_type(rep_update),
'replication_status': fields.ReplicationStatus.ENABLED})
return volumes_model_update
def enable_replication(self, context, group, volumes):
"""Enable replication for a group.
Replication is enabled on replication-enabled groups by default.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
if not group.is_replicated:
raise NotImplementedError()
model_update = {}
if not volumes:
# Return if empty group
return model_update, None
try:
vol_grp_name = None
extra_specs = self._initial_setup(volumes[0])
array = extra_specs[utils.ARRAY]
volume_group = self._find_volume_group(array, group)
if volume_group:
if 'name' in volume_group:
vol_grp_name = volume_group['name']
if vol_grp_name is None:
raise exception.GroupNotFound(group_id=group.id)
rdf_group_no, _ = self.get_rdf_details(array)
self.provision.enable_group_replication(
array, vol_grp_name, rdf_group_no, extra_specs)
model_update.update({
'replication_status': fields.ReplicationStatus.ENABLED})
except Exception as e:
model_update.update({
'replication_status': fields.ReplicationStatus.ERROR})
LOG.error("Error enabling replication on group %(group)s. "
"Exception received: %(e)s.",
{'group': group.id, 'e': e})
return model_update, None
def disable_replication(self, context, group, volumes):
"""Disable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
if not group.is_replicated:
raise NotImplementedError()
model_update = {}
if not volumes:
# Return if empty group
return model_update, None
try:
vol_grp_name = None
extra_specs = self._initial_setup(volumes[0])
array = extra_specs[utils.ARRAY]
volume_group = self._find_volume_group(array, group)
if volume_group:
if 'name' in volume_group:
vol_grp_name = volume_group['name']
if vol_grp_name is None:
raise exception.GroupNotFound(group_id=group.id)
rdf_group_no, _ = self.get_rdf_details(array)
self.provision.disable_group_replication(
array, vol_grp_name, rdf_group_no, extra_specs)
model_update.update({
'replication_status': fields.ReplicationStatus.DISABLED})
except Exception as e:
model_update.update({
'replication_status': fields.ReplicationStatus.ERROR})
LOG.error("Error disabling replication on group %(group)s. "
"Exception received: %(e)s.",
{'group': group.id, 'e': e})
return model_update, None
def failover_replication(self, context, group, volumes,
secondary_backend_id=None, host=False):
"""Failover replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:param secondary_backend_id: the secondary backend id - default None
:param host: flag to indicate if whole host is being failed over
:returns: model_update, None
"""
if not group.is_replicated:
raise NotImplementedError()
model_update = {}
vol_model_updates = []
if not volumes:
# Return if empty group
return model_update, vol_model_updates
try:
vol_grp_name = None
extra_specs = self._initial_setup(volumes[0])
array = extra_specs[utils.ARRAY]
volume_group = self._find_volume_group(array, group)
if volume_group:
if 'name' in volume_group:
vol_grp_name = volume_group['name']
if vol_grp_name is None:
raise exception.GroupNotFound(group_id=group.id)
rdf_group_no, _ = self.get_rdf_details(array)
# As we only support a single replication target, ignore
# any secondary_backend_id which is not 'default'
failover = False if secondary_backend_id == 'default' else True
self.provision.failover_group(
array, vol_grp_name, rdf_group_no, extra_specs, failover)
if failover:
model_update.update({
'replication_status':
fields.ReplicationStatus.FAILED_OVER})
vol_rep_status = fields.ReplicationStatus.FAILED_OVER
else:
model_update.update({
'replication_status': fields.ReplicationStatus.ENABLED})
vol_rep_status = fields.ReplicationStatus.ENABLED
except Exception as e:
model_update.update({
'replication_status': fields.ReplicationStatus.ERROR})
vol_rep_status = fields.ReplicationStatus.ERROR
LOG.error("Error failover replication on group %(group)s. "
"Exception received: %(e)s.",
{'group': group.id, 'e': e})
for vol in volumes:
loc = vol.provider_location
rep_data = vol.replication_driver_data
if vol_rep_status != fields.ReplicationStatus.ERROR:
loc = vol.replication_driver_data
rep_data = vol.provider_location
update = {'id': vol.id,
'replication_status': vol_rep_status,
'provider_location': loc,
'replication_driver_data': rep_data}
if host:
update = {'volume_id': vol.id, 'updates': update}
vol_model_updates.append(update)
return model_update, vol_model_updates
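As an aside, here is a small sketch, with assumed placeholder values throughout, of the per-volume update built above when a group failover succeeds: provider_location and replication_driver_data are swapped so the volume record points at the remote copy.

# Sketch with assumed values - not part of the patch. On a successful
# failover the local and remote locations are swapped in the update.
local_loc = "{'array': '000197800123', 'device_id': '00001'}"   # assumed
remote_loc = "{'array': '000197800124', 'device_id': '00002'}"  # assumed

update = {'id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',  # assumed volume id
          'replication_status': 'failed-over',  # fields.ReplicationStatus.FAILED_OVER
          'provider_location': remote_loc,       # now points at the remote copy
          'replication_driver_data': local_loc}  # keeps the original local location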


@ -82,9 +82,10 @@ class VMAXFCDriver(driver.FibreChannelDriver):
- Support for volume replication
- Support for live migration
- Support for Generic Volume Group
3.1.0 - Support for replication groups (Tiramisu)
"""
VERSION = "3.0.0"
VERSION = "3.1.0"
# ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI"
@ -465,8 +466,9 @@ class VMAXFCDriver(driver.FibreChannelDriver):
:param context: the context
:param group: the group object
:returns: model_update
"""
self.common.create_group(context, group)
return self.common.create_group(context, group)
def delete_group(self, context, group, volumes):
"""Deletes a generic volume group.
@ -526,3 +528,36 @@ class VMAXFCDriver(driver.FibreChannelDriver):
return self.common.create_group_from_src(
context, group, volumes, group_snapshot, snapshots, source_group,
source_vols)
def enable_replication(self, context, group, volumes):
"""Enable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
return self.common.enable_replication(context, group, volumes)
def disable_replication(self, context, group, volumes):
"""Disable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
return self.common.disable_replication(context, group, volumes)
def failover_replication(self, context, group, volumes,
secondary_backend_id=None):
"""Failover replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:param secondary_backend_id: the secondary backend id - default None
:returns: model_update, vol_model_updates
"""
return self.common.failover_replication(
context, group, volumes, secondary_backend_id)


@ -87,9 +87,10 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
- Support for volume replication
- Support for live migration
- Support for Generic Volume Group
3.1.0 - Support for replication groups (Tiramisu)
"""
VERSION = "3.0.0"
VERSION = "3.1.0"
# ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI"
@ -412,8 +413,9 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
:param context: the context
:param group: the group object
:returns: model_update
"""
self.common.create_group(context, group)
return self.common.create_group(context, group)
def delete_group(self, context, group, volumes):
"""Deletes a generic volume group.
@ -473,3 +475,36 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
return self.common.create_group_from_src(
context, group, volumes, group_snapshot, snapshots, source_group,
source_vols)
def enable_replication(self, context, group, volumes):
"""Enable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
return self.common.enable_replication(context, group, volumes)
def disable_replication(self, context, group, volumes):
"""Disable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
return self.common.disable_replication(context, group, volumes)
def failover_replication(self, context, group, volumes,
secondary_backend_id=None):
"""Failover replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:param secondary_backend_id: the secondary backend id - default None
:returns: model_update, vol_model_updates
"""
return self.common.failover_replication(
context, group, volumes, secondary_backend_id)


@ -596,3 +596,67 @@ class VMAXProvision(object):
timer = loopingcall.FixedIntervalLoopingCall(_unlink_grp)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
def enable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Resume rdf replication on a storage group.
Replication is enabled by default. This allows resuming
replication on a suspended group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Resume"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def disable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Suspend rdf replication on a storage group.
This does not delete the rdf pairs; that can only be done
by deleting the group. This method suspends all I/O activity
on the rdf links.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Suspend"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def failover_group(self, array, storagegroup_name,
rdf_group_num, extra_specs, failover=True):
"""Failover or failback replication on a storage group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param failover: flag to indicate failover or failback
"""
action = "Failover" if failover else "Failback"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def delete_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Split replication for a group and delete the pairs.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Split"
LOG.debug("Splitting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
LOG.debug("Deleting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.delete_storagegroup_rdf(
array, storagegroup_name, rdf_group_num)

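To summarise the provision layer added above, here is a short illustrative snippet (not part of the patch) mapping each helper to the SRDF action it passes to modify_storagegroup_rdf; as the code shows, delete_group_replication additionally removes the RDF pairs after the Split.

# Each provision helper above issues a single SRDF action via
# modify_storagegroup_rdf; delete_group_replication follows the Split
# with delete_storagegroup_rdf to remove the pairs.
GROUP_REPLICATION_ACTIONS = {
    'enable_group_replication': 'Resume',
    'disable_group_replication': 'Suspend',
    'failover_group(failover=True)': 'Failover',
    'failover_group(failover=False)': 'Failback',
    'delete_group_replication': 'Split',
}

for helper, action in sorted(GROUP_REPLICATION_ACTIONS.items()):
    print('%s -> %s' % (helper, action))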
View File

@ -697,14 +697,15 @@ class VMAXRest(object):
volume_dict = {'array': array, 'device_id': device_id}
return volume_dict
def check_volume_device_id(self, array, device_id, element_name):
def check_volume_device_id(self, array, device_id, volume_id):
"""Check if the identifiers match for a given volume.
:param array: the array serial number
:param device_id: the device id
:param element_name: name associated with cinder, e.g.OS-<cinderUUID>
:return: found_device_id
:param volume_id: cinder volume id
:returns: found_device_id
"""
element_name = self.utils.get_volume_element_name(volume_id)
found_device_id = None
vol_details = self.get_volume(array, device_id)
if vol_details:
@ -860,6 +861,22 @@ class VMAXRest(object):
data=exception_message)
return property_dict
def set_storagegroup_srp(
self, array, storagegroup_name, srp_name, extra_specs):
"""Modify a storage group's srp value.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param srp_name: the srp pool name
:param extra_specs: the extra specifications
"""
payload = {"editStorageGroupActionParam": {
"editStorageGroupSRPParam": {"srpId": srp_name}}}
status_code, job = self.modify_storage_group(
array, storagegroup_name, payload)
self.wait_for_job("Set storage group srp", status_code,
job, extra_specs)
def get_vmax_default_storage_group(
self, array, srp, slo, workload,
do_disable_compression=False, is_re=False):
@ -1796,41 +1813,43 @@ class VMAXRest(object):
"""
return self.get_resource(array, REPLICATION, 'rdf_group')
def get_rdf_group_volume(self, array, rdf_number, device_id):
"""Get specific volume details, from an RDF group.
def get_rdf_group_volume(self, array, src_device_id):
"""Get the RDF details for a volume.
:param array: the array serial number
:param rdf_number: the rdf group number
:param device_id: the device id
:param src_device_id: the source device id
:returns: rdf_session
"""
resource_name = "%(rdf)s/volume/%(dev)s" % {
'rdf': rdf_number, 'dev': device_id}
return self.get_resource(array, REPLICATION, 'rdf_group',
resource_name)
rdf_session = None
volume = self._get_private_volume(array, src_device_id)
try:
rdf_session = volume['rdfInfo']['RDFSession'][0]
except (KeyError, TypeError, IndexError):
LOG.warning("Cannot locate source RDF volume %s", src_device_id)
return rdf_session
def are_vols_rdf_paired(self, array, remote_array, device_id,
target_device, rdf_group):
def are_vols_rdf_paired(self, array, remote_array,
device_id, target_device):
"""Check if a pair of volumes are RDF paired.
:param array: the array serial number
:param remote_array: the remote array serial number
:param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group
:returns: paired -- bool, state -- string
:returns: paired -- bool, local_vol_state, rdf_pair_state
"""
paired, local_vol_state, rdf_pair_state = False, '', ''
volume = self.get_rdf_group_volume(array, rdf_group, device_id)
if volume:
remote_volume = volume['remoteVolumeName']
remote_symm = volume['remoteSymmetrixId']
rdf_session = self.get_rdf_group_volume(array, device_id)
if rdf_session:
remote_volume = rdf_session['remoteDeviceID']
remote_symm = rdf_session['remoteSymmetrixID']
if (remote_volume == target_device
and remote_array == remote_symm):
paired = True
local_vol_state = volume['localVolumeState']
rdf_pair_state = volume['rdfpairState']
local_vol_state = rdf_session['SRDFStatus']
rdf_pair_state = rdf_session['pairState']
else:
LOG.warning("Cannot locate source RDF volume %s", device_id)
LOG.warning("Cannot locate RDF session for volume %s", device_id)
return paired, local_vol_state, rdf_pair_state
def get_rdf_group_number(self, array, rdf_group_label):
@ -1843,8 +1862,9 @@ class VMAXRest(object):
number = None
rdf_list = self.get_rdf_group_list(array)
if rdf_list and rdf_list.get('rdfGroupID'):
number = [rdf['rdfgNumber'] for rdf in rdf_list['rdfGroupID']
if rdf['label'] == rdf_group_label][0]
number_list = [rdf['rdfgNumber'] for rdf in rdf_list['rdfGroupID']
if rdf['label'] == rdf_group_label]
number = number_list[0] if len(number_list) > 0 else None
if number:
rdf_group = self.get_rdf_group(array, number)
if not rdf_group:
@ -2023,3 +2043,105 @@ class VMAXRest(object):
% {'sg_name': source_sg_id, 'snap_id': snap_name})
return self.delete_resource(
array, REPLICATION, 'storagegroup', resource_name)
def get_storagegroup_rdf_details(self, array, storagegroup_name,
rdf_group_num):
"""Get the remote replication details of a storage group.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
"""
resource_name = ("%(sg_name)s/rdf_group/%(rdf_num)s"
% {'sg_name': storagegroup_name,
'rdf_num': rdf_group_num})
return self.get_resource(array, REPLICATION, 'storagegroup',
resource_name=resource_name)
def replicate_group(self, array, storagegroup_name,
rdf_group_num, remote_array, extra_specs):
"""Create a target group on the remote array and enable replication.
:param array: the array serial number
:param storagegroup_name: the name of the group
:param rdf_group_num: the rdf group number
:param remote_array: the remote array serial number
:param extra_specs: the extra specifications
"""
resource_name = ("storagegroup/%(sg_name)s/rdf_group"
% {'sg_name': storagegroup_name})
payload = {"executionOption": "ASYNCHRONOUS",
"replicationMode": "Synchronous",
"remoteSymmId": remote_array,
"remoteStorageGroupName": storagegroup_name,
"rdfgNumber": rdf_group_num, "establish": 'true'}
status_code, job = self.create_resource(
array, REPLICATION, resource_name, payload)
self.wait_for_job('Create storage group rdf', status_code,
job, extra_specs)
def _verify_rdf_state(self, array, storagegroup_name,
rdf_group_num, action):
"""Verify if a storage group requires the requested state change.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
:param action: the requested action
:returns: bool
"""
mod_rqd = False
sg_rdf_details = self.get_storagegroup_rdf_details(
array, storagegroup_name, rdf_group_num)
if sg_rdf_details:
state_list = sg_rdf_details['states']
for state in state_list:
if (action.lower() in ["establish", "failback", "resume"] and
state.lower() in ["suspended", "failed over"]):
mod_rqd = True
break
elif (action.lower() in ["split", "failover", "suspend"] and
state.lower() in ["synchronized", "syncinprog"]):
mod_rqd = True
break
return mod_rqd
def modify_storagegroup_rdf(self, array, storagegroup_name,
rdf_group_num, action, extra_specs):
"""Modify the rdf state of a storage group.
:param array: the array serial number
:param storagegroup_name: the name of the storage group
:param rdf_group_num: the number of the rdf group
:param action: the required action
:param extra_specs: the extra specifications
"""
# Check if group is in valid state for desired action
mod_reqd = self._verify_rdf_state(array, storagegroup_name,
rdf_group_num, action)
if mod_reqd:
payload = {"executionOption": "ASYNCHRONOUS", "action": action}
resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'
% {'sg_name': storagegroup_name,
'rdf_num': rdf_group_num})
status_code, job = self.modify_resource(
array, REPLICATION, 'storagegroup', payload,
resource_name=resource_name)
self.wait_for_job('Modify storagegroup rdf',
status_code, job, extra_specs)
def delete_storagegroup_rdf(self, array, storagegroup_name,
rdf_group_num):
"""Delete the rdf pairs for a storage group.
:param array: the array serial number
:param storagegroup_name: the name of the storage group
:param rdf_group_num: the number of the rdf group
"""
resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'
% {'sg_name': storagegroup_name,
'rdf_num': rdf_group_num})
self.delete_resource(
array, REPLICATION, 'storagegroup', resource_name=resource_name)

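As an aside, the following hedged sketch reconstructs the resource path and payload that modify_storagegroup_rdf assembles; the storage group name, RDF group number and action are made-up values, and the snippet mirrors the code above rather than adding behaviour.

# Hypothetical values for illustration only.
sg_name, rdf_num, action = 'Grp_source_sg', 70, 'Suspend'

# Resource path and payload as assembled in modify_storagegroup_rdf.
resource_name = '%(sg_name)s/rdf_group/%(rdf_num)s' % {
    'sg_name': sg_name, 'rdf_num': rdf_num}
payload = {"executionOption": "ASYNCHRONOUS", "action": action}

# The method hands these to modify_resource and then waits on the
# returned asynchronous job before declaring the action complete.
print(resource_name)
print(payload)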
View File

@ -559,22 +559,6 @@ class VMAXUtils(object):
default_dict[RETRIES] = retries
return default_dict
@staticmethod
def update_admin_metadata(volumes_model_update, key, values):
"""Update the volume_model_updates with admin metadata.
:param volumes_model_update: List of volume model updates
:param key: Key to be updated in the admin_metadata
:param values: Dictionary of values per volume id
"""
for volume_model_update in volumes_model_update:
volume_id = volume_model_update['id']
if volume_id in values:
admin_metadata = {}
admin_metadata.update({key: values[volume_id]})
volume_model_update.update(
{'admin_metadata': admin_metadata})
def get_volume_group_utils(self, group, interval, retries):
"""Standard utility for generic volume groups.
@ -639,7 +623,7 @@ class VMAXUtils(object):
:returns: group_name -- formatted name + id
"""
group_name = ""
if group.name is not None:
if group.name is not None and group.name != group.id:
group_name = (
self.truncate_string(
group.name, TRUNCATE_27) + "_")
@ -685,3 +669,44 @@ class VMAXUtils(object):
new_pool['pool_name'] = new_pool_name
pools.append(new_pool)
return pools
def check_replication_matched(self, volume, extra_specs):
"""Check volume type and group type.
This will make sure they do not conflict with each other.
:param volume: volume to be checked
:param extra_specs: the extra specifications
:raises: InvalidInput
"""
# If the volume is not a member of a group, this check does not apply.
if not volume.group:
return
vol_is_re = self.is_replication_enabled(extra_specs)
group_is_re = volume.group.is_replicated
if vol_is_re != group_is_re:
msg = _('Replication must be enabled or disabled for both '
'the volume and its group. Volume replication status: '
'%(vol_status)s, group replication status: '
'%(group_status)s') % {
'vol_status': vol_is_re, 'group_status': group_is_re}
raise exception.InvalidInput(reason=msg)
@staticmethod
def check_rep_status_enabled(group):
"""Check replication status for group.
Group status must be enabled before proceeding with certain
operations.
:param group: the group object
:raises: InvalidInput
"""
if group.is_replicated:
if group.replication_status != fields.ReplicationStatus.ENABLED:
msg = (_('Replication status should be %s for '
'a replication-enabled group.')
% fields.ReplicationStatus.ENABLED)
raise exception.InvalidInput(reason=msg)
else:
LOG.debug('Replication is not enabled on group %s, '
'skipping status check.', group.id)

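For clarity, a minimal standalone sketch of the two guard checks above; the Group class, boolean flags and ValueError used here are hypothetical stand-ins for the Cinder objects and the InvalidInput exception, not the driver's actual code.

class Group(object):
    """Hypothetical stand-in for the Cinder group object."""
    def __init__(self, is_replicated, replication_status):
        self.is_replicated = is_replicated
        self.replication_status = replication_status


def check_replication_matched(vol_is_re, group):
    # A replicated volume may only live in a replicated group.
    if group is not None and vol_is_re != group.is_replicated:
        raise ValueError('volume and group replication settings conflict')


def check_rep_status_enabled(group):
    # Replicated groups must report 'enabled' before group operations.
    if group.is_replicated and group.replication_status != 'enabled':
        raise ValueError('group replication status must be enabled')


grp = Group(is_replicated=True, replication_status='enabled')
check_replication_matched(True, grp)
check_rep_status_enabled(grp)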
View File

@ -0,0 +1,5 @@
---
features:
- |
Add consistent replication group support to the Dell EMC VMAX cinder driver.