Merge "[Unity] Support consistency group replication"

This commit is contained in:
Zuul 2020-04-10 20:15:23 +00:00 committed by Gerrit Code Review
commit a039f5e7a5
11 changed files with 715 additions and 34 deletions

View File

@ -297,6 +297,29 @@ class MockClient(object):
if rep_session.name != 'rep_session_name_1':
raise client.ClientReplicationError()
def is_cg_replicated(self, cg_id):
    """Fake check: a cg id counts as replicated iff it contains the tag."""
    # Preserve the original short-circuit: falsy ids are returned as-is.
    if not cg_id:
        return cg_id
    return 'is_replicated' in cg_id
def get_cg(self, name):
    """Return a mock CG resource whose id echoes the requested name."""
    mock_cg = test_client.MockResource(_id=name)
    return mock_cg
def create_cg_replication(self, group_id, pool_id, remote_system,
                          max_time):
    """Pretend to create a cg replication; ids tagged 'error' blow up."""
    tagged_as_error = bool(group_id) and 'error' in group_id
    if tagged_as_error:
        raise Exception('has issue when creating cg replication session.')
def delete_cg_rep_session(self, group_id):
    """Pretend to delete a cg replication; ids tagged 'error' blow up."""
    tagged_as_error = bool(group_id) and 'error' in group_id
    if tagged_as_error:
        raise Exception('has issue when deleting cg replication session.')
def failover_cg_rep_session(self, group_id, need_sync):
    """Pretend to fail over a cg replication; ids tagged 'error' blow up."""
    tagged_as_error = bool(group_id) and 'error' in group_id
    if tagged_as_error:
        raise Exception('has issue when failover cg replication session.')
def failback_cg_rep_session(self, group_id):
    """Pretend to fail back a cg replication; ids tagged 'error' blow up."""
    tagged_as_error = bool(group_id) and 'error' in group_id
    if tagged_as_error:
        raise Exception('has issue when failback cg replication session.')
class MockLookupService(object):
@staticmethod
@ -399,13 +422,29 @@ def get_volume_type_extra_specs(type_id):
return {}
def get_group_type_specs(group_type_id):
    """Fake group-type specs: only the empty id is treated as a cg type."""
    if group_type_id != '':
        return {}
    return {'consistent_group_snapshot_enabled': '<is> True',
            'group_type_id': group_type_id}
def group_is_cg(group):
    """Fake cg check: every mock group except id 'not_cg' counts as a cg."""
    return not (group.id == 'not_cg')
def patch_for_unity_adapter(func):
@functools.wraps(func)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs',
new=get_volume_type_extra_specs)
@mock.patch('cinder.volume.group_types.get_group_type_specs',
new=get_group_type_specs)
@mock.patch('cinder.volume.drivers.dell_emc.unity.utils.'
'get_backend_qos_specs',
new=get_backend_qos_specs)
@mock.patch('cinder.volume.drivers.dell_emc.unity.utils.'
'group_is_cg',
new=group_is_cg)
@mock.patch('cinder.utils.brick_get_connector_properties',
new=get_connector_properties)
def func_wrapper(*args, **kwargs):
@ -1428,6 +1467,133 @@ class CommonAdapterTest(test.TestCase):
self.assertFalse(
self.adapter.replication_manager.is_service_failed_over)
@patch_for_unity_adapter
def test_failed_enable_replication(self):
    """enable_replication on a generic (non-cg) group must raise."""
    # 'not_cg' makes the mocked group_is_cg() return False.
    cg = MockOSResource(id='not_cg', name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    self.assertRaises(exception.InvalidGroupType,
                      self.adapter.enable_replication, None,
                      cg, volumes)
@patch_for_unity_adapter
def test_enable_replication(self):
    """Enabling replication on a cg reports status 'enabled'."""
    cg = MockOSResource(id='test_cg_1', name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    # A replication device must be configured for enable to succeed.
    secondary_device = mock_replication_device()
    self.adapter.replication_manager.replication_devices = {
        'secondary_unity': secondary_device
    }
    result = self.adapter.enable_replication(None, cg, volumes)
    self.assertEqual(({'replication_status': 'enabled'}, None), result)
@patch_for_unity_adapter
def test_cannot_disable_replication_on_generic_group(self):
    """disable_replication on a generic (non-cg) group must raise."""
    cg = MockOSResource(id='not_cg', name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    self.assertRaises(exception.InvalidGroupType,
                      self.adapter.disable_replication, None,
                      cg, volumes)
@patch_for_unity_adapter
def test_disable_replication(self):
    """Disabling replication on a replicated cg reports 'disabled'."""
    # 'cg_is_replicated' makes the mocked is_cg_replicated() return True.
    cg = MockOSResource(id='cg_is_replicated', name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    result = self.adapter.disable_replication(None, cg, volumes)
    self.assertEqual(({'replication_status': 'disabled'}, None), result)
@patch_for_unity_adapter
def test_failover_replication(self):
    """Failover marks the group and every member volume 'failed-over'."""
    cg = MockOSResource(id='cg_is_replicated', name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    real_secondary_id = 'secondary_unity'
    secondary_device = mock_replication_device()
    self.adapter.replication_manager.replication_devices = {
        real_secondary_id: secondary_device
    }
    result = self.adapter.failover_replication(None, cg, volumes,
                                               real_secondary_id)
    self.assertEqual(({'replication_status': 'failed-over'},
                      [{'id': 'volume-3',
                        'replication_status': 'failed-over'},
                       {'id': 'volume-4',
                        'replication_status': 'failed-over'}]), result)
@patch_for_unity_adapter
def test_failback_replication(self):
    """Failback (secondary id 'default') re-enables replication.

    Also verifies that a client error during a later failover flips
    the group and volumes to 'error'.
    """
    cg = MockOSResource(id='cg_is_replicated', name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    # 'default' as secondary id requests a failback, not a failover.
    input_secondary_id = 'default'
    real_secondary_id = 'secondary_unity'
    secondary_device = mock_replication_device()
    self.adapter.replication_manager.replication_devices = {
        real_secondary_id: secondary_device
    }
    result = self.adapter.failover_replication(None, cg, volumes,
                                               input_secondary_id)
    self.assertEqual(({'replication_status': 'enabled'},
                      [{'id': 'volume-3',
                        'replication_status': 'enabled'},
                       {'id': 'volume-4',
                        'replication_status': 'enabled'}]),
                     result)
    # 'error' in the id makes the mocked client raise, so the adapter
    # must report replication_status 'error' instead of propagating.
    failed_cg = MockOSResource(id='cg_is_replicated_but_has_error',
                               name='cg_name',
                               description='cg_description')
    failed_result = self.adapter.failover_replication(
        None, failed_cg, volumes, real_secondary_id)
    self.assertEqual(({'replication_status': 'error'},
                      [{'id': 'volume-3',
                        'replication_status': 'error'},
                       {'id': 'volume-4',
                        'replication_status': 'error'}]), failed_result)
@patch_for_unity_adapter
def test_failover_replication_error(self):
    """A client failure during failover reports 'error' for group/volumes."""
    cg = MockOSResource(id='cg_is_replicated_but_has_error',
                        name='cg_name',
                        description='cg_description')
    volumes = [MockOSResource(id=vol_id,
                              provider_location=get_lun_pl(lun_id))
               for vol_id, lun_id in (('volume-3', 'sv_3'),
                                      ('volume-4', 'sv_4'))]
    real_secondary_id = 'default'
    secondary_device = mock_replication_device()
    self.adapter.replication_manager.replication_devices = {
        real_secondary_id: secondary_device
    }
    result = self.adapter.failover_replication(
        None, cg, volumes, real_secondary_id)
    self.assertEqual(({'replication_status': 'error'},
                      [{'id': 'volume-3',
                        'replication_status': 'error'},
                       {'id': 'volume-4',
                        'replication_status': 'error'}]), result)
class FCAdapterTest(test.TestCase):
def setUp(self):

View File

@ -218,6 +218,17 @@ class MockResource(object):
return False
return True
def replicate_cg_with_dst_resource_provisioning(self,
                                                max_time_out_of_sync,
                                                source_luns,
                                                dst_pool_id,
                                                remote_system=None,
                                                dst_cg_name=None):
    """Echo back the replication parameters (source_luns is not recorded)."""
    call_record = {
        'max_time_out_of_sync': max_time_out_of_sync,
        'dst_pool_id': dst_pool_id,
        'remote_system': remote_system,
        'dst_cg_name': dst_cg_name,
    }
    return call_record
def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync,
dst_pool_id,
remote_system=None,
@ -233,6 +244,11 @@ class MockResource(object):
def failback(self, force_full_copy=None):
    """Record the failback call, echoing the flag that was passed."""
    return dict(force_full_copy=force_full_copy)
def check_cg_is_replicated(self):
    """Only the mock resource named 'replicated_cg' reports as replicated."""
    return self.name == 'replicated_cg'
class MockResourceList(object):
def __init__(self, names=None, ids=None):
@ -310,6 +326,12 @@ class MockSystem(object):
raise ex.UnityResourceNotFoundError()
return MockResource(name)
@staticmethod
def get_cg(name):
    """Return a mock cg for a truthy name; not-found error otherwise."""
    if name:
        return MockResource(name, _id=name)
    raise ex.UnityResourceNotFoundError()
@staticmethod
def create_host(name):
    """Create a mock host resource carrying the given name."""
    host = MockResource(name)
    return host
@ -370,6 +392,12 @@ class MockSystem(object):
elif src_resource_id == 'lun_in_multiple_replications':
return [MockResource(_id='lun_rep_session_1'),
MockResource(_id='lun_rep_session_2')]
elif src_resource_id and ('is_in_replication'
in src_resource_id):
return [MockResource(name='rep_session')]
elif dst_resource_id and ('is_in_replication'
in dst_resource_id):
return [MockResource(name='rep_session')]
else:
return {'name': name,
'src_resource_id': src_resource_id,
@ -855,7 +883,7 @@ class ClientTest(unittest.TestCase):
def test_failover_replication(self):
    """failover_replication must call failover with sync=False."""
    rep_session = MockResource(_id='rep_id_1')
    called = self.client.failover_replication(rep_session)
    self.assertFalse(called['sync'])
def test_failover_replication_raise(self):
rep_session = MockResource(_id='rep_id_1')
@ -871,7 +899,7 @@ class ClientTest(unittest.TestCase):
def test_failback_replication(self):
    """failback_replication must call failback with force_full_copy=True."""
    rep_session = MockResource(_id='rep_id_1')
    called = self.client.failback_replication(rep_session)
    self.assertTrue(called['force_full_copy'])
def test_failback_replication_raise(self):
rep_session = MockResource(_id='rep_id_1')
@ -883,3 +911,38 @@ class ClientTest(unittest.TestCase):
self.assertRaises(client.ClientReplicationError,
self.client.failback_replication,
rep_session)
def test_create_cg_replication(self):
    """create_cg_replication forwards pool, timeout, name and system."""
    remote_system = MockResource(_id='RS_2')
    cg_name = 'test_cg'
    called = self.client.create_cg_replication(
        cg_name, 'pool_1', remote_system, 60)
    self.assertEqual(60, called['max_time_out_of_sync'])
    self.assertEqual('pool_1', called['dst_pool_id'])
    # The destination cg reuses the source cg's name.
    self.assertEqual('test_cg', called['dst_cg_name'])
    self.assertIs(remote_system, called['remote_system'])
def test_cg_in_replication(self):
    """A cg named 'replicated_cg' reports as replicated.

    Fix: corrected the misspelled method name ('replciation').
    """
    existing_cg = MockResource(_id='replicated_cg')
    result = self.client.is_cg_replicated(existing_cg.id)
    self.assertTrue(result)
def test_cg_not_in_replication(self):
    """A cg with any other name reports as not replicated.

    Fix: corrected the misspelled method name ('replciation').
    """
    existing_cg = MockResource(_id='test_cg')
    result = self.client.is_cg_replicated(existing_cg.id)
    self.assertFalse(result)
def test_delete_cg_rep_session(self):
    """Deleting the cg replication sessions completes without a result."""
    src_cg = MockResource(_id='cg_is_in_replication')
    result = self.client.delete_cg_rep_session(src_cg.id)
    self.assertIsNone(result)
def test_failover_cg_rep_session(self):
    """Failing over the cg replication sessions completes without a result."""
    src_cg = MockResource(_id='failover_cg_is_in_replication')
    result = self.client.failover_cg_rep_session(src_cg.id, True)
    self.assertIsNone(result)
def test_failback_cg_rep_session(self):
    """Failing back the cg replication sessions completes without a result."""
    src_cg = MockResource(_id='failback_cg_is_in_replication')
    result = self.client.failback_cg_rep_session(src_cg.id)
    self.assertIsNone(result)

View File

@ -17,6 +17,7 @@ import functools
import unittest
from unittest import mock
from cinder.objects import fields
from cinder.tests.unit.volume.drivers.dell_emc.unity \
import fake_exception as ex
from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter
@ -143,6 +144,37 @@ class MockAdapter(object):
'secondary_id': secondary_id,
'groups': groups}
@staticmethod
def enable_replication(context, group, volumes):
    """Fake enable: report ENABLED only when both group and volumes exist."""
    if not (volumes and group):
        return {}, None
    status = {'replication_status': fields.ReplicationStatus.ENABLED}
    return status, None
@staticmethod
def disable_replication(context, group, volumes):
    """Fake disable: report DISABLED only when both group and volumes exist."""
    if not (volumes and group):
        return {}, None
    status = {'replication_status': fields.ReplicationStatus.DISABLED}
    return status, None
@staticmethod
def failover_replication(context, group, volumes,
                         secondary_backend_id):
    """Fake failover: mark the group and each volume FAILED_OVER."""
    if not (volumes and group and secondary_backend_id):
        # Mirrors the real adapter: empty update, no per-volume list.
        return {}, None
    group_update = {'replication_status':
                    fields.ReplicationStatus.FAILED_OVER}
    volumes_update = [
        {'id': volume.id,
         'replication_status': fields.ReplicationStatus.FAILED_OVER}
        for volume in volumes]
    return group_update, volumes_update
class MockReplicationManager(object):
def __init__(self):
@ -171,6 +203,15 @@ class UnityDriverTest(unittest.TestCase):
return test_adapter.MockOSResource(provider_location='id^lun_43',
id='id_43')
@staticmethod
def get_volumes():
    """Build four mock volumes with ids id_50 .. id_53."""
    return [test_adapter.MockOSResource(
                provider_location='id^lun_' + suffix, id='id_' + suffix)
            for suffix in ('50', '51', '52', '53')]
@staticmethod
def get_generic_group():
return test_adapter.MockOSResource(name='group_name_generic',
@ -455,3 +496,36 @@ class UnityDriverTest(unittest.TestCase):
self.assertListEqual(called['volumes'], [volume])
self.assertEqual('secondary_unity', called['secondary_id'])
self.assertIsNone(called['groups'])
def test_enable_replication(self):
    """Driver delegates enable_replication and returns the adapter result."""
    cg = self.get_cg()
    volumes = self.get_volumes()
    result = self.driver.enable_replication(None, cg, volumes)
    self.assertEqual(result,
                     ({'replication_status':
                       fields.ReplicationStatus.ENABLED},
                      None))
def test_disable_replication(self):
    """Driver delegates disable_replication and returns the adapter result."""
    cg = self.get_cg()
    volumes = self.get_volumes()
    result = self.driver.disable_replication(None, cg, volumes)
    self.assertEqual(result,
                     ({'replication_status':
                       fields.ReplicationStatus.DISABLED},
                      None))
def test_failover_replication(self):
    """Driver delegates failover_replication, returning per-volume updates."""
    cg = self.get_cg()
    volumes = self.get_volumes()
    result = self.driver.failover_replication(
        None, cg, volumes, 'test_secondary_id')
    # Expected per-volume updates for the four mock volumes.
    volumes = [{'id': 'id_50', 'replication_status': 'failed-over'},
               {'id': 'id_51', 'replication_status': 'failed-over'},
               {'id': 'id_52', 'replication_status': 'failed-over'},
               {'id': 'id_53', 'replication_status': 'failed-over'}]
    self.assertEqual(result,
                     ({'replication_status':
                       fields.ReplicationStatus.FAILED_OVER},
                      volumes))

View File

@ -21,6 +21,7 @@ from oslo_utils import units
from cinder import exception
from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter
from cinder.tests.unit.volume.drivers.dell_emc.unity import test_driver
from cinder.volume.drivers.dell_emc.unity import utils
@ -28,6 +29,11 @@ def get_volume_type_extra_specs(volume_type):
return {'provisioning:type': volume_type}
def get_group_type_specs(group_type):
    """Fake group-type specs: always a cg-enabled spec echoing the type id."""
    specs = {'consistent_group_snapshot_enabled': '<is> True',
             'group_type_id': group_type}
    return specs
def get_volume_type_qos_specs(type_id):
if type_id == 'invalid_backend_qos_consumer':
ret = {'qos_specs': {'consumer': 'invalid'}}
@ -72,6 +78,17 @@ def patch_volume_types(func):
return func_wrapper
def patch_group_types(func):
    """Decorator replacing group_types.get_group_type_specs with the fake."""
    @functools.wraps(func)
    @mock.patch(target=('cinder.volume.group_types'
                        '.get_group_type_specs'),
                new=get_group_type_specs)
    def _wrapped(*args, **kwargs):
        return func(*args, **kwargs)
    return _wrapped
class UnityUtilsTest(unittest.TestCase):
def test_validate_pool_names_filter(self):
all_pools = list('acd')
@ -294,3 +311,21 @@ class UnityUtilsTest(unittest.TestCase):
ret = utils.remove_empty(option, value_list)
expected = None
self.assertEqual(expected, ret)
@patch_group_types
def test_group_is_cg(self):
    """A group whose type enables cg snapshots is recognized as a cg."""
    cg = test_driver.UnityDriverTest.get_cg()
    result = utils.group_is_cg(cg)
    self.assertTrue(result)
@patch_group_types
def test_get_group_specs_by_key(self):
    """get_group_specs returns the value for an existing spec key."""
    cg = test_driver.UnityDriverTest.get_cg()
    result = utils.get_group_specs(cg, 'consistent_group_snapshot_enabled')
    self.assertEqual('<is> True', result)
@patch_group_types
def test_no_group_specs_key(self):
    """get_group_specs returns None for a key absent from the specs."""
    cg = test_driver.UnityDriverTest.get_cg()
    result = utils.get_group_specs(cg, 'test_key')
    self.assertIsNone(result)

View File

@ -48,7 +48,7 @@ PROTOCOL_ISCSI = 'iSCSI'
class VolumeParams(object):
def __init__(self, adapter, volume):
def __init__(self, adapter, volume, group_specs=None):
self._adapter = adapter
self._volume = volume
@ -65,6 +65,7 @@ class VolumeParams(object):
self._is_in_cg = None
self._is_replication_enabled = None
self._tiering_policy = None
self.group_specs = group_specs if group_specs else {}
@property
def volume_id(self):
@ -411,14 +412,26 @@ class CommonAdapter(object):
is_compressed=params.is_compressed,
tiering_policy=params.tiering_policy)
if params.cg_id:
LOG.debug('Adding lun %(lun)s to cg %(cg)s.',
{'lun': lun.get_id(), 'cg': params.cg_id})
if self.client.is_cg_replicated(params.cg_id):
msg = (_('Consistency group %(cg_id)s is in '
'replication status, cannot add lun to it.')
% {'cg_id': params.cg_id})
raise exception.InvalidGroupStatus(reason=msg)
LOG.info('Adding lun %(lun)s to cg %(cg)s.',
{'lun': lun.get_id(), 'cg': params.cg_id})
self.client.update_cg(params.cg_id, [lun.get_id()], ())
model_update = self.makeup_model(lun.get_id())
if params.is_replication_enabled:
model_update = self.setup_replications(lun, model_update)
if not params.cg_id:
model_update = self.setup_replications(
lun, model_update)
else:
# Volume replication_status need be disabled
# And be controlled by group replication
model_update['replication_status'] = (
fields.ReplicationStatus.DISABLED)
return model_update
def delete_volume(self, volume):
@ -950,7 +963,10 @@ class CommonAdapter(object):
"""
# Deleting cg will also delete all the luns in it.
self.client.delete_cg(group.id)
group_id = group.id
if self.client.is_cg_replicated(group_id):
self.client.delete_cg_rep_session(group_id)
self.client.delete_cg(group_id)
return None, None
def update_group(self, group, add_volumes, remove_volumes):
@ -1018,6 +1034,173 @@ class CommonAdapter(object):
self.client.delete_snap(cg_snap)
return None, None
def enable_replication(self, context, group, volumes):
    """Enable replication on a consistency group.

    Creates a replication session from the local cg to each configured
    replication device, deleting any leftover remote cg first.

    :param context: request context (unused).
    :param group: the cinder group; must be a cg-typed group.
    :param volumes: the volumes belonging to the group.
    :returns: tuple of (group model update, None) — per-volume updates
        are not reported for enable.
    :raises exception.InvalidGroupType: if the group is a generic group.
    """
    @cinder_utils.retry(exception.InvalidGroup, interval=20, retries=6)
    def _wait_until_cg_not_replicated(_client, _cg_id):
        # A just-deleted source cg may briefly leave the remote cg
        # flagged as replicated; poll until the flag clears.
        cg = _client.get_cg(name=_cg_id)
        if cg.check_cg_is_replicated():
            msg = _('The remote cg (%s) is still in replication status, '
                    'maybe the source cg was just deleted, '
                    'retrying.') % group_id
            LOG.info(msg)
            raise exception.InvalidGroup(reason=msg)
        return cg

    group_update = {}
    if not volumes:
        # Fix: the '%s' placeholder previously had no argument passed.
        LOG.warning('There is no Volume in group: %s, cannot enable '
                    'group replication', group.id)
        return group_update, []

    group_id = group.id
    # Check whether the group was created as a cg on Unity.
    group_is_cg = utils.group_is_cg(group)
    if not group_is_cg:
        msg = (_('Cannot enable replication on generic group '
                 '%(group_id)s, need to use CG type instead '
                 '(need to enable consistent_group_snapshot_enabled in '
                 'the group type).')
               % {'group_id': group_id})
        raise exception.InvalidGroupType(reason=msg)

    cg = self.client.get_cg(name=group_id)
    try:
        if not cg.check_cg_is_replicated():
            rep_devices = self.replication_manager.replication_devices
            for backend_id, dst in rep_devices.items():
                remote_serial_number = dst.adapter.serial_number
                max_time = dst.max_time_out_of_sync
                pool_id = dst.destination_pool.get_id()
                _client = dst.adapter.client
                remote_system = self.client.get_remote_system(
                    remote_serial_number)
                # Check if a remote cg exists and delete it before
                # enabling replication.
                remote_cg = _wait_until_cg_not_replicated(_client,
                                                          group_id)
                remote_cg.delete()
                # Create the cg replication session.
                self.client.create_cg_replication(
                    group_id, pool_id, remote_system, max_time)
            group_update.update({
                'replication_status':
                    fields.ReplicationStatus.ENABLED})
        else:
            LOG.info('group: %s is already in replication, no need to '
                     'enable again.', group_id)
    except Exception as e:
        group_update.update({
            'replication_status': fields.ReplicationStatus.ERROR})
        LOG.error("Error enabling replication on group %(group)s. "
                  "Exception received: %(e)s.",
                  {'group': group.id, 'e': e})
    return group_update, None
def disable_replication(self, context, group, volumes):
    """Disable replication on a consistency group.

    :param context: request context (unused).
    :param group: the cinder group; must be a cg-typed group.
    :param volumes: the volumes belonging to the group.
    :returns: tuple of (group model update, None).
    :raises exception.InvalidGroupType: if the group is a generic group.
    """
    group_update = {}
    if not volumes:
        # Return early for an empty group.
        # Fix: the '%s' placeholder previously had no argument passed.
        LOG.warning('There is no Volume in group: %s, cannot disable '
                    'group replication', group.id)
        return group_update, []

    group_id = group.id
    group_is_cg = utils.group_is_cg(group)
    if not group_is_cg:
        msg = (_('Cannot disable replication on generic group '
                 '%(group_id)s, need use CG type instead of '
                 'that (need enable '
                 'consistent_group_snapshot_enabled in '
                 'group type).')
               % {'group_id': group_id})
        raise exception.InvalidGroupType(reason=msg)

    try:
        if self.client.is_cg_replicated(group_id):
            # Delete the replication session if one exists.
            self.client.delete_cg_rep_session(group_id)
        if not self.client.is_cg_replicated(group_id):
            # Fix: corrected 'not need' to 'no need' in the log message.
            LOG.info('Group is not in replication, '
                     'no need to disable replication again.')
        group_update.update({
            'replication_status': fields.ReplicationStatus.DISABLED})
    except Exception as e:
        group_update.update({
            'replication_status': fields.ReplicationStatus.ERROR})
        LOG.error("Error disabling replication on group %(group)s. "
                  "Exception received: %(e)s.",
                  {'group': group.id, 'e': e})
    return group_update, None
def failover_replication(self, context, group, volumes,
                         secondary_id):
    """Fail over (or fail back) the replicated consistency group.

    A secondary_id of 'default' requests a failback; any other value
    requests a failover to the configured replication device.

    :returns: tuple of (group model update, list of per-volume updates).
    :raises exception.InvalidGroupType: if the group is a generic group.
    """
    group_update = {}
    volume_update_list = []
    if not volumes:
        # Return early for an empty group.
        return group_update, volume_update_list

    group_is_cg = utils.group_is_cg(group)
    group_id = group.id
    if not group_is_cg:
        msg = (_('Cannot failover replication on generic group '
                 '%(group_id)s, need use CG type instead of '
                 'that (need enable '
                 'consistent_group_snapshot_enabled in '
                 'group type).')
               % {'group_id': group_id})
        raise exception.InvalidGroupType(reason=msg)

    # Only one replication device can be configured per backend, so any
    # choice picks the single configured secondary.
    real_secondary_id = random.choice(
        list(self.replication_manager.replication_devices))

    group_update = {'replication_status': group.replication_status}
    if self.client.is_cg_replicated(group_id):
        try:
            if secondary_id != 'default':
                try:
                    # Planned failover: sync data first while the
                    # source unity is healthy.
                    self.client.failover_cg_rep_session(group_id, True)
                except Exception as ex:
                    LOG.warning('ERROR happened when failover from source '
                                'unity, issue details: %s. Try failover '
                                'from target unity', ex)
                    # Source unity is unhealthy: fail over from the
                    # target unity without syncing data.
                    # Fix: the subscript was previously split into two
                    # statements ('[real_secondary_id].adapter' stood
                    # alone), which raised AttributeError at runtime.
                    _adapter = self.replication_manager.replication_devices[
                        real_secondary_id].adapter
                    _client = _adapter.client
                    _client.failover_cg_rep_session(group_id, False)
                rep_status = fields.ReplicationStatus.FAILED_OVER
            else:
                # Start a failback when secondary_id is 'default';
                # failback is driven from the target system's session.
                _adapter = self.replication_manager.replication_devices[
                    real_secondary_id].adapter
                _client = _adapter.client
                _client.failback_cg_rep_session(group_id)
                rep_status = fields.ReplicationStatus.ENABLED
        except Exception as ex:
            rep_status = fields.ReplicationStatus.ERROR
            LOG.error("Error failover replication on group %(group)s. "
                      "Exception received: %(e)s.",
                      {'group': group_id, 'e': ex})
        group_update['replication_status'] = rep_status
        for volume in volumes:
            volume_update = {
                'id': volume.id,
                'replication_status': rep_status}
            volume_update_list.append(volume_update)
    return group_update, volume_update_list
def get_replication_error_status(self, context, groups):
    """The failover only happens manually, no need to update the status."""
    # Empty lists: no group updates, no volume updates.
    return [], []
@cinder_utils.trace
def failover(self, volumes, secondary_id=None, groups=None):
# TODO(ryan) support group failover after group bp merges

View File

@ -428,6 +428,38 @@ class UnityClient(object):
def filter_snaps_in_cg_snap(self, cg_snap_id):
return self.system.get_snap(snap_group=cg_snap_id).list
def create_cg_replication(self, cg_name, pool_id,
                          remote_system, max_time_out_of_sync):
    """Create a remote cg and set up replication from the local cg.

    The destination cg is provisioned on the remote system under the
    same name, in the given pool.
    """
    local_cg = self.get_cg(cg_name)
    member_luns = local_cg.luns
    return local_cg.replicate_cg_with_dst_resource_provisioning(
        max_time_out_of_sync, member_luns, pool_id,
        dst_cg_name=cg_name, remote_system=remote_system)
def is_cg_replicated(self, cg_name):
    """Report whether the named cg currently participates in replication."""
    return self.get_cg(cg_name).check_cg_is_replicated()
def delete_cg_rep_session(self, cg_name):
    """Delete every replication session originating from the named cg."""
    source_cg = self.get_cg(cg_name)
    sessions = self.get_replication_session(src_resource_id=source_cg.id)
    for session in sessions:
        session.delete()
def failover_cg_rep_session(self, cg_name, sync):
    """Fail over every replication session originating from the named cg."""
    source_cg = self.get_cg(cg_name)
    sessions = self.get_replication_session(src_resource_id=source_cg.id)
    for session in sessions:
        session.failover(sync=sync)
def failback_cg_rep_session(self, cg_name):
    """Fail back the named cg.

    Failback is driven from the remote (destination) side, so sessions
    are looked up by destination resource id.
    """
    local_cg = self.get_cg(cg_name)
    sessions = self.get_replication_session(dst_resource_id=local_cg.id)
    for session in sessions:
        session.failback(force_full_copy=True)
@staticmethod
def create_replication(src_lun, max_time_out_of_sync,
dst_pool_id, remote_system):

View File

@ -82,9 +82,10 @@ class UnityDriver(driver.ManageableVD,
6.0.0 - Support generic group and consistent group
6.1.0 - Support volume replication
7.0.0 - Support tiering policy
7.1.0 - Support consistency group replication
"""
VERSION = '07.00.00'
VERSION = '07.01.00'
VENDOR = 'Dell EMC'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "EMC_UNITY_CI"
@ -329,3 +330,17 @@ class UnityDriver(driver.ManageableVD,
"""Failovers volumes to secondary backend."""
return self.adapter.failover(volumes,
secondary_id=secondary_id, groups=groups)
def enable_replication(self, context, group, volumes):
    """Enables replication for a group; delegated to the adapter."""
    return self.adapter.enable_replication(context, group, volumes)
def disable_replication(self, context, group, volumes):
    """Disables replication for a group; delegated to the adapter."""
    return self.adapter.disable_replication(context, group, volumes)
def failover_replication(self, context, group, volumes,
                         secondary_backend_id=None):
    """Fails over (or back) group replication; delegated to the adapter."""
    return self.adapter.failover_replication(
        context, group, volumes, secondary_backend_id)
def get_replication_error_status(self, context, groups):
    """Returns replication error status; delegated to the adapter."""
    return self.adapter.get_replication_error_status(context, groups)

View File

@ -29,6 +29,7 @@ from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import group_types
from cinder.volume import volume_types
from cinder.volume import volume_utils
from cinder.zonemanager import utils as zm_utils
@ -204,6 +205,20 @@ def get_extra_spec(volume, spec_key):
return spec_value
def group_is_cg(group):
    """A group is a consistency group iff its type enables cg snapshots."""
    cg_flag = get_group_specs(group, 'consistent_group_snapshot_enabled')
    return cg_flag == '<is> True'
def get_group_specs(group, spec_key):
    """Look up spec_key in the group's type specs; None when absent."""
    if not group.group_type_id:
        return None
    specs = group_types.get_group_type_specs(group.group_type_id)
    # dict.get() returns None for a missing key, matching the original.
    return specs.get(spec_key)
def ignore_exception(func, *args, **kwargs):
try:
func(*args, **kwargs)
@ -326,7 +341,8 @@ def append_capabilities(func):
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'consistent_group_snapshot_enabled': True,
'fast_support': True
'fast_support': True,
'consistent_group_replication_enabled': True
}
@six.wraps(func)

View File

@ -15,7 +15,7 @@ Prerequisites
+===================+=================+
| Unity OE | 4.1.X or newer |
+-------------------+-----------------+
| storops | 1.1.0 or newer |
| storops | 1.2.3 or newer |
+-------------------+-----------------+
@ -43,6 +43,7 @@ Supported operations
- Create a consistent group from a snapshot.
- Attach a volume to multiple servers simultaneously (multiattach).
- Volume replications.
- Consistency group replications.
Driver configuration
~~~~~~~~~~~~~~~~~~~~
@ -459,47 +460,138 @@ for more detail.
2. Add `replication_device` to storage backend settings in `cinder.conf`, then
restart Cinder Volume service.
Example of `cinder.conf` for volume replications:
Example of `cinder.conf` for volume replications:
.. code-block:: ini
.. code-block:: ini
[unity-primary]
san_ip = xxx.xxx.xxx.xxx
...
replication_device = backend_id:unity-secondary,san_ip:yyy.yyy.yyy.yyy,san_password:****,max_time_out_of_sync:60
[unity-primary]
san_ip = xxx.xxx.xxx.xxx
...
replication_device = backend_id:unity-secondary,san_ip:yyy.yyy.yyy.yyy,san_login:username,san_password:****,max_time_out_of_sync:60
- Only one `replication_device` can be configured for each primary backend.
- Keys `backend_id`, `san_ip`, `san_password`, and `max_time_out_of_sync`
are supported in `replication_device`, while `backend_id` and `san_ip`
are required.
- `san_password` uses the same one as primary backend's if it is omitted.
- `max_time_out_of_sync` is the max time in minutes replications are out of
sync. It must be equal or greater than `0`. `0` means sync replications
of volumes will be created. Note that remote systems for sync replications
need to be created on Unity first. `60` will be used if it is omitted.
- Only one `replication_device` can be configured for each primary backend.
- Keys `backend_id`, `san_ip`, `san_password`, and `max_time_out_of_sync`
are supported in `replication_device`, while `backend_id` and `san_ip`
are required.
- `san_password` uses the same one as primary backend's if it is omitted.
- `max_time_out_of_sync` is the max time in minutes replications are out of
sync. It must be equal or greater than `0`. `0` means sync replications
of volumes will be created. Note that remote systems for sync replications
need to be created on Unity first. `60` will be used if it is omitted.
#. Create a volume type with property `replication_enabled='<is> True'`.
.. code-block:: console
.. code-block:: console
$ openstack volume type create --property replication_enabled='<is> True' type-replication
$ openstack volume type create --property replication_enabled='<is> True' type-replication
#. Any volumes with volume type of step #3 will failover to secondary backend
after `failover_host` is executed.
.. code-block:: console
.. code-block:: console
$ cinder failover-host --backend_id unity-secondary stein@unity-primary
$ cinder failover-host --backend_id unity-secondary stein@unity-primary
#. Later, they could be failed back.
.. code-block:: console
.. code-block:: console
$ cinder failover-host --backend_id default stein@unity-primary
$ cinder failover-host --backend_id default stein@unity-primary
.. note:: The volume can be deleted even when it is participating in a
replication. The replication session will be deleted from Unity before the
LUN is deleted.
replication. The replication session will be deleted from Unity before the
LUN is deleted.
Consistency group replications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To enable consistency group replication, follow the steps below:
1. On Unisphere, configure remote system and interfaces for replications.
The way could be different depending on the type of replications - sync or async.
Refer to `Unity Replication White Paper
<https://www.emc.com/collateral/white-papers/h15088-dell-emc-unity-replication-technologies.pdf>`_
for more detail.
2. Add `replication_device` to storage backend settings in `cinder.conf`, then
restart Cinder Volume service.
Example of `cinder.conf` for consistency group replications:
.. code-block:: ini
[unity-primary]
san_ip = xxx.xxx.xxx.xxx
...
replication_device = backend_id:unity-secondary,san_ip:yyy.yyy.yyy.yyy,san_login:username,san_password:****,max_time_out_of_sync:60
- Only one `replication_device` can be configured for each primary backend.
- Keys `backend_id`, `san_ip`, `san_password`, and `max_time_out_of_sync`
are supported in `replication_device`, while `backend_id` and `san_ip`
are required.
- `san_password` uses the same one as primary backend's if it is omitted.
- `max_time_out_of_sync` is the max time in minutes replications are out of
sync. It must be equal or greater than `0`. `0` means sync replications
of volumes will be created. Note that remote systems for sync replications
need to be created on Unity first. `60` will be used if it is omitted.
3. Create a volume type with property `replication_enabled='<is> True'`.
.. code-block:: console
$ openstack volume type create --property replication_enabled='<is> True' type-replication
4. Create a consistency group type with properties
`consistent_group_snapshot_enabled='<is> True'`
and `consistent_group_replication_enabled='<is> True'`.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 group-type-create type-cg-replication
$ cinder --os-volume-api-version 3.38 group-type-key type-cg-replication set
consistent_group_snapshot_enabled='<is> True' consistent_group_replication_enabled='<is> True'
5. Create a consistency group using the group type from step 4 and the replication-enabled volume type from step 3.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 group-create --name test-cg {type-cg-replication-id} type-replication
6. Create volume in the consistency group.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 create --volume-type type-replication --group-id {test-cg-id}
--name {volume-name} {size}
7. Enable consistency group replication.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 group-enable-replication test-cg
8. Disable consistency group replication.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 group-disable-replication test-cg
9. Failover consistency group replication.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 group-failover-replication test-cg
10. Failback consistency group replication.
.. code-block:: console
$ cinder --os-volume-api-version 3.38 group-failover-replication test-cg --secondary-backend-id default
.. note:: Group replication is only supported for consistency groups;
   see steps 4 and 5 for how to create a consistency group with replication support.
Troubleshooting
~~~~~~~~~~~~~~~

View File

@ -28,7 +28,7 @@ rados # LGPLv2.1
rbd # LGPLv2.1
# Dell EMC VNX and Unity
storops>=1.1.0 # Apache-2.0
storops>=1.2.3 # Apache-2.0
# INFINIDAT
infinisdk # BSD-3

View File

@ -0,0 +1,5 @@
---
features:
- |
Dell EMC Unity Driver: Added consistency group replication support.
The storops library version 1.2.3 or newer is required.