Merge "ibm_storage - fix enable replication after disable"

Authored by Jenkins on 2017-10-08 16:33:37 +00:00; committed by Gerrit Code Review
commit 1170154c97
3 changed files with 78 additions and 2 deletions


@@ -418,6 +418,47 @@ class XIVProxyTest(test.TestCase):
            [{'id': vol['id'],
              'replication_status': fields.ReplicationStatus.ENABLED}]), ret)

    @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                "xiv_replication.VolumeReplication.create_replication",
                mock.MagicMock())
    @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                "xiv_replication.GroupReplication.create_replication",
                mock.MagicMock())
    @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                "xiv_proxy.XIVProxy._get_target_params",
                mock.MagicMock(return_value=REPLICA_PARAMS))
    @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                "xiv_proxy.XIVProxy._get_target",
                mock.MagicMock(return_value="BLABLA"))
    @mock.patch("cinder.volume.group_types.get_group_type_specs",
                mock.MagicMock(return_value=TEST_GROUP_SPECS))
    def test_enable_replication_remote_cg_exists(self):
        """Test enable_replication"""
        driver = mock.MagicMock()
        driver.VERSION = "VERSION"
        p = self.proxy(
            self.default_storage_info,
            mock.MagicMock(),
            test_mock.cinder.exception,
            driver)
        p.ibm_storage_cli = mock.MagicMock()
        p._call_remote_xiv_xcli = mock.MagicMock()
        p._update_consistencygroup = mock.MagicMock()
        p.targets = {'tgt1': 'info1'}
        error = errors.CgNameExistsError('bla', 'bla',
                                         ElementTree.Element('bla'))
        p._call_remote_xiv_xcli.cmd.cg_create.side_effect = error
        group = self._create_test_group('WTF')
        vol = testutils.create_volume(self.ctxt)
        ret = p.enable_replication(self.ctxt, group, [vol])

        self.assertEqual((
            {'replication_status': fields.ReplicationStatus.ENABLED},
            [{'id': vol['id'],
              'replication_status': fields.ReplicationStatus.ENABLED}]), ret)

    @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                "xiv_replication.VolumeReplication.delete_replication",
                mock.MagicMock())


@@ -105,7 +105,7 @@ class XIVProxy(proxy.IBMStorageProxy):
    Supports IBM XIV, Spectrum Accelerate, A9000, A9000R
    Version: 2.1.0
    Required pyxcli version: 1.1.4
    Required pyxcli version: 1.1.5

    .. code:: text
@@ -600,7 +600,11 @@ class XIVProxy(proxy.IBMStorageProxy):
            # mirror entire group
            group_name = self._cg_name_from_group(group)
            self._create_consistencygroup_on_remote(context, group_name)
            try:
                self._create_consistencygroup_on_remote(context, group_name)
            except errors.CgNameExistsError:
                LOG.debug("CG name %(cg)s exists, no need to open it on "
                          "secondary backend.", {'cg': group_name})
            repl.GroupReplication(self).create_replication(group_name,
                                                           replication_info)
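
This try/except is the heart of the fix: when replication is re-enabled after a disable, the consistency group can still exist on the secondary backend, and the already-exists error is now treated as benign instead of failing the whole operation. A minimal standalone sketch of that create-if-missing pattern (CgNameExistsError and create_cg_on_remote below are illustrative stand-ins, not the driver's actual API):

import logging

LOG = logging.getLogger(__name__)


class CgNameExistsError(Exception):
    """Stand-in for pyxcli's 'CG name already exists' error."""


def ensure_remote_cg(create_cg_on_remote, group_name):
    """Create the CG on the secondary backend, tolerating a leftover CG."""
    try:
        create_cg_on_remote(group_name)
    except CgNameExistsError:
        # A CG left over from a previous enable/disable cycle is reused as-is.
        LOG.debug("CG %s already exists on the secondary backend.", group_name)

The same idea applies to any idempotent re-enable path: creation failures that only mean "already there" are logged and ignored.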
@@ -652,6 +656,13 @@ class XIVProxy(proxy.IBMStorageProxy):
                # we need to unlock it for further use.
                try:
                    self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name)
                    self.ibm_storage_remote_cli.cmd.vol_unlock(
                        vol=volume.name)
                    self.ibm_storage_remote_cli.cmd.cg_remove_vol(
                        vol=volume.name)
                except errors.VolumeBadNameError:
                    LOG.debug("Failed to delete vol %(vol)s - "
                              "ignoring.", {'vol': volume.name})
                except errors.XCLIError as e:
                    details = self._get_code_and_status_or_message(e)
                    msg = ('Failed to unlock volumes %(details)s' %
@@ -671,6 +682,17 @@ class XIVProxy(proxy.IBMStorageProxy):
            # update status
            for volume in volumes:
                try:
                    self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name)
                    self.ibm_storage_remote_cli.cmd.vol_unlock(
                        vol=volume.name)
                except errors.XCLIError as e:
                    details = self._get_code_and_status_or_message(e)
                    msg = (_('Failed to unlock volumes %(details)s'),
                           {'details': details})
                    LOG.error(msg)
                    raise self.meta['exception'].VolumeBackendAPIException(
                        data=msg)
                updated_volumes.append(
                    {'id': volume['id'],
                     'replication_status': fields.ReplicationStatus.DISABLED})
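
In both disable paths above, the volumes are unlocked on the primary and on the secondary backend before being reported as DISABLED, because the array locks a volume once its mirror is removed. A compact sketch of that shared unlock step, assuming xcli-style client objects that expose a vol_unlock command (the helper and exception names below are assumptions, not the driver's exact interface):

import logging

LOG = logging.getLogger(__name__)


class VolumeBackendAPIException(Exception):
    """Stand-in for the Cinder backend exception raised on unlock failure."""


def unlock_on_both_backends(primary_cli, remote_cli, volume_name):
    """Unlock a volume on the primary and the secondary backend."""
    try:
        primary_cli.cmd.vol_unlock(vol=volume_name)
        remote_cli.cmd.vol_unlock(vol=volume_name)
    except Exception as err:
        msg = 'Failed to unlock volume %s: %s' % (volume_name, err)
        LOG.error(msg)
        raise VolumeBackendAPIException(msg)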


@@ -265,6 +265,19 @@ class VolumeReplication(Replication):
                rpo=replication_info['rpo'],
                schedule=schedule,
                activate_mirror='yes')
        except errors.RemoteVolumeExists:
            # if volume exists (same ID), don't create slave
            # This only happens when vol is a part of a cg
            recovery_mgr.create_mirror(
                resource_name=resource_name,
                target_name=target,
                mirror_type=replication_info['mode'],
                slave_resource_name=resource_name,
                create_slave='no',
                remote_pool=pool,
                rpo=replication_info['rpo'],
                schedule=schedule,
                activate_mirror='yes')
        except errors.VolumeMasterError:
            LOG.debug('Volume %(vol)s has been already mirrored',
                      {'vol': resource_name})
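
The new except branch retries the mirror with create_slave='no' when the slave volume already exists on the target, which happens when the volume was created remotely as part of its consistency group. A self-contained sketch of that fallback, with RemoteVolumeExists and the recovery-manager argument used purely as stand-ins rather than the real pyxcli objects:

class RemoteVolumeExists(Exception):
    """Stand-in for pyxcli's 'remote volume already exists' error."""


def mirror_with_fallback(recovery_mgr, resource_name, target, mode, pool,
                         rpo, schedule):
    """Create a mirror, reusing an existing slave volume when there is one."""
    kwargs = dict(resource_name=resource_name,
                  target_name=target,
                  mirror_type=mode,
                  slave_resource_name=resource_name,
                  remote_pool=pool,
                  rpo=rpo,
                  schedule=schedule,
                  activate_mirror='yes')
    try:
        # First attempt: let the target array create the slave volume.
        recovery_mgr.create_mirror(create_slave='yes', **kwargs)
    except RemoteVolumeExists:
        # The slave already exists (it was created with its consistency
        # group), so mirror onto the existing volume instead.
        recovery_mgr.create_mirror(create_slave='no', **kwargs)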