Dell SC: Support Replication V2.1
Updated Dell SC replication support to version 2.1. Also removed direct DB calls in the consistency group code. Added ManageableSnapshotsVD support. Bumped the driver version to 2.5.0.

Change-Id: I525bba93a04cc01db92af8711e6b2917e80a93d0
parent 077b8593ae
commit 87b9380e20
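Context for the diff below: whether a volume participates in replication is decided per volume from its volume-type extra specs, and this change additionally gates that decision on the failed-over state (see the _do_repl hunk in dell_storagecenter_common.py). A minimal standalone sketch of that gating, with the spec keys taken from the diff; the function name and plain-dict inputs are illustrative only, not driver API:

    # Sketch of the replication gating exercised by this change. The
    # extra-spec keys and values come from the diff; everything else here
    # is a stand-in, not driver API.

    def do_repl_sketch(extra_specs, failed_over=False, is_direct_connect=False):
        # Replication is skipped entirely once failed over or when using
        # direct connect.
        if failed_over or is_direct_connect:
            return False, False
        do_repl = extra_specs.get('replication_enabled') == '<is> True'
        sync = extra_specs.get('replication_type') == '<in> sync'
        return do_repl, sync

    print(do_repl_sketch({'replication_enabled': '<is> True'}))  # (True, False)
    print(do_repl_sketch({}, failed_over=True))                  # (False, False)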
@@ -1636,16 +1636,13 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        self.driver.db = mock.Mock()
        mock_volume = mock.MagicMock()
        expected_volumes = [mock_volume]
        self.driver.db.volume_get_all_by_group.return_value = expected_volumes
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'status': fields.ConsistencyGroupStatus.DELETED}
        model_update, volumes = self.driver.delete_consistencygroup(context,
                                                                    group,
                                                                    [])
        model_update, volumes = self.driver.delete_consistencygroup(
            context, group, [mock_volume])
        mock_find_replay_profile.assert_called_once_with(group['id'])
        mock_delete_replay_profile.assert_called_once_with(self.SCRPLAYPROFILE)
        mock_delete_volume.assert_called_once_with(mock_volume)
@@ -1666,10 +1663,6 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        self.driver.db = mock.Mock()
        mock_volume = mock.MagicMock()
        expected_volumes = [mock_volume]
        self.driver.db.volume_get_all_by_group.return_value = expected_volumes
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'status': fields.ConsistencyGroupStatus.DELETED}
@@ -1678,9 +1671,9 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                                                    [])
        mock_find_replay_profile.assert_called_once_with(group['id'])
        self.assertFalse(mock_delete_replay_profile.called)
        mock_delete_volume.assert_called_once_with(mock_volume)
        self.assertFalse(mock_delete_volume.called)
        self.assertEqual(group['status'], model_update['status'])
        self.assertEqual(expected_volumes, volumes)
        self.assertEqual([], volumes)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes',
@@ -1763,9 +1756,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot(self,
                               mock_get_all_for_cgsnapshot,
                               mock_find_replay_profile,
                               mock_snap_cg_replay,
                               mock_close_connection,
@@ -1773,13 +1764,12 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                               mock_init):
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)

        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        model_update, snapshots = self.driver.create_cgsnapshot(context, cggrp,
                                                                [])
        model_update, snapshots = self.driver.create_cgsnapshot(
            context, cggrp, [mock_snapshot])
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])
        mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
@@ -1839,9 +1829,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot(self,
                               mock_get_all_for_cgsnapshot,
                               mock_find_replay_profile,
                               mock_delete_cg_replay,
                               mock_close_connection,
@@ -1849,15 +1837,13 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                               mock_init):
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'deleted'}
        model_update, snapshots = self.driver.delete_cgsnapshot(context,
                                                                cgsnap,
                                                                [])
        model_update, snapshots = self.driver.delete_cgsnapshot(
            context, cgsnap, [mock_snapshot])
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
@@ -1870,9 +1856,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot_profile_not_found(self,
                                                 mock_get_all_for_cgsnapshot,
                                                 mock_find_replay_profile,
                                                 mock_delete_cg_replay,
                                                 mock_close_connection,
@@ -1880,15 +1864,13 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                                 mock_init):
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'deleted'}
        model_update, snapshots = self.driver.delete_cgsnapshot(context,
                                                                cgsnap,
                                                                [])
        model_update, snapshots = self.driver.delete_cgsnapshot(
            context, cgsnap, [mock_snapshot])
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
@@ -2213,197 +2195,25 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                              self.VOLUME, 'B')
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'resume_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_do_repl')
    def test_replication_enable(self,
                                mock_do_repl,
                                mock_find_volume,
                                mock_resume_replication,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        # Note that since we do nothing with sync or async here
        # at all we do not bother testing it.
        mock_do_repl.side_effect = [(False, False),  # No run.
                                    (True, False),   # Good run.
                                    (True, False),   # Bad run.
                                    (True, False),   # Multiple replications.
                                    (True, False)]   # Multiple fail.
        mock_resume_replication.side_effect = [True,   # Good run.
                                               False,  # Bad run.
                                               True,   # Multiple replications.
                                               True,
                                               False]  # Multiple fail.
        vref = {'replication_driver_data': '',
                'id': 'guid'}
        model_update = {}
        # No run
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # we didn't try to resume, right?
        self.assertEqual(0, mock_resume_replication.call_count)
        # Good run
        vref = {'replication_driver_data': '12345',
                'id': 'guid'}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # Hard to distinguish good from bad. Make sure we tried.
        self.assertEqual(1, mock_resume_replication.call_count)
        # Bad run
        model_update = {'replication_status': 'error'}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # Make sure we actually sent this down.
        self.assertEqual(2, mock_resume_replication.call_count)
        mock_resume_replication.assert_called_with(self.VOLUME, 12345)
        # Multiple replications.
        vref = {'replication_driver_data': '12345,67890',
                'id': 'guid'}
        model_update = {}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # Should be called two more times.
        self.assertEqual(4, mock_resume_replication.call_count)
        # This checks the last call
        mock_resume_replication.assert_called_with(self.VOLUME, 67890)
        # Multiple fail.
        model_update = {'replication_status': 'error'}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # We are set to fail on the first call so one more.
        self.assertEqual(5, mock_resume_replication.call_count)
        # This checks the last call.
        mock_resume_replication.assert_called_with(self.VOLUME, 12345)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'pause_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_do_repl')
    def test_replication_disable(self,
                                 mock_do_repl,
                                 mock_find_volume,
                                 mock_pause_replication,
                                 mock_close_connection,
                                 mock_open_connection,
                                 mock_init):
        # Note that since we do nothing with sync or async here
        # at all we do not bother testing it.
        mock_do_repl.side_effect = [(False, False),  # No run.
                                    (True, False),   # Good run.
                                    (True, False),   # Bad run.
                                    (True, False),   # Multiple replications.
                                    (True, False)]   # Multiple fail.
        mock_pause_replication.side_effect = [True,   # Good run.
                                              False,  # Bad run.
                                              True,   # Multiple replications.
                                              True,
                                              False]  # Multiple fail.
        vref = {'replication_driver_data': '',
                'id': 'guid'}
        model_update = {}
        # No run
        ret = self.driver.replication_disable({}, vref)
        self.assertEqual(model_update, ret)
        # we didn't try to resume, right?
        self.assertEqual(0, mock_pause_replication.call_count)
        # Good run
        vref = {'replication_driver_data': '12345',
                'id': 'guid'}
        ret = self.driver.replication_disable({}, vref)
        self.assertEqual(model_update, ret)
        # Hard to distinguish good from bad. Make sure we tried.
        self.assertEqual(1, mock_pause_replication.call_count)
        # Bad run
        model_update = {'replication_status': 'error'}
        ret = self.driver.replication_disable({}, vref)
        self.assertEqual(model_update, ret)
        # Make sure we actually sent this down.
        self.assertEqual(2, mock_pause_replication.call_count)
        mock_pause_replication.assert_called_with(self.VOLUME, 12345)
        # Multiple replications.
        vref = {'replication_driver_data': '12345,67890',
                'id': 'guid'}
        model_update = {}
        ret = self.driver.replication_disable({}, vref)
        self.assertEqual(model_update, ret)
        # Should be called two more times.
        self.assertEqual(4, mock_pause_replication.call_count)
        # This checks the last call
        mock_pause_replication.assert_called_with(self.VOLUME, 67890)
        # Multiple fail.
        model_update = {'replication_status': 'error'}
        ret = self.driver.replication_disable({}, vref)
        self.assertEqual(model_update, ret)
        # We are set to fail on the first call so one more.
        self.assertEqual(5, mock_pause_replication.call_count)
        # This checks the last call.
        mock_pause_replication.assert_called_with(self.VOLUME, 12345)

    def test__find_host(self,
                        mock_close_connection,
                        mock_open_connection,
                        mock_init):
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'managed_backend_name': 'host@dell1',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'managed_backend_name': 'host@dell2',
                                 'qosnode': 'cinderqos'}]
        # Just make sure we are turning the correct bit..
        # Good run
        expected = 'host@dell2'
        ret = self.driver._find_host('67890')
        self.assertEqual(expected, ret)
        # Bad run
        ret = self.driver._find_host('54321')
        self.assertIsNone(ret)
        self.driver.backends = backends

    def test__parse_secondary(self,
                              mock_close_connection,
                              mock_open_connection,
                              mock_init):
        backends = self.driver.backends
        vref = {'id': 'guid', 'replication_driver_data': '67890'}
        self.driver.backends = [{'target_device_id': '12345',
                                 'managed_backend_name': 'host@dell1',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'managed_backend_name': 'host@dell2',
                                 'qosnode': 'cinderqos'}]
        mock_api = mock.MagicMock()
        # Good run. Secondary in replication_driver_data and backend. sc up.
        destssn, host = self.driver._parse_secondary(mock_api, vref, '67890')
        destssn = self.driver._parse_secondary(mock_api, '67890')
        self.assertEqual(67890, destssn)
        self.assertEqual('host@dell2', host)
        # Bad run. Secondary not in replication_driver_data
        destssn, host = self.driver._parse_secondary(mock_api, vref, '12345')
        self.assertIsNone(destssn)
        self.assertIsNone(host)
        # Bad run. Secondary not in backend.
        vref['replication_driver_data'] = '67891'
        destssn, host = self.driver._parse_secondary(mock_api, vref, '67890')
        destssn = self.driver._parse_secondary(mock_api, '99999')
        self.assertIsNone(destssn)
        self.assertIsNone(host)
        # Bad run. no driver data
        vref['replication_driver_data'] = ''
        destssn, host = self.driver._parse_secondary(mock_api, vref, '67890')
        self.assertIsNone(destssn)
        self.assertIsNone(host)
        # Good run. No secondary selected.
        vref['replication_driver_data'] = '12345'
        destssn, host = self.driver._parse_secondary(mock_api, vref, '12345')
        # Good run.
        destssn = self.driver._parse_secondary(mock_api, '12345')
        self.assertEqual(12345, destssn)
        self.assertEqual('host@dell1', host)
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
@@ -2414,118 +2224,214 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                       mock_open_connection,
                       mock_init):
        backends = self.driver.backends
        vref = {'id': 'guid', 'replication_driver_data': '12345'}
        self.driver.backends = [{'target_device_id': '12345',
                                 'managed_backend_name': 'host@dell1',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'managed_backend_name': 'host@dell2',
                                 'qosnode': 'cinderqos'}]
        mock_api = mock.MagicMock()
        # Bad run. Good selection. SC down.
        vref['replication_driver_data'] = '12345'
        mock_api.find_sc = mock.MagicMock(
            side_effect=exception.VolumeBackendAPIException(data='1234'))
        destssn, host = self.driver._parse_secondary(mock_api, vref, '12345')
        destssn = self.driver._parse_secondary(mock_api, '12345')
        self.assertIsNone(destssn)
        self.assertIsNone(host)
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'break_replication')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_parse_secondary')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_do_repl')
    def test_replication_failover(self,
                                  mock_do_repl,
                                  mock_parse_secondary,
                                  mock_break_replication,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        mock_parse_secondary.side_effect = [(12345, 'host@host#be'),  # Good.
                                            (12345, 'host@host#be'),  # Bad.
                                            (None, None)]  # Not found.
        mock_break_replication.side_effect = [True,   # Good run.
                                              False]  # Bad run.
        mock_do_repl.side_effect = [(False, False),  # No run.
                                    (True, False),   # Good run.
                                    (True, False),   # Bad run.
                                    (True, False)]   # Secondary not found.
        vref = {'id': 'guid'}
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'remove_mappings')
    def test_failover_host(self,
                           mock_remove_mappings,
                           mock_find_volume,
                           mock_parse_secondary,
                           mock_break_replication,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        self.driver.replication_enabled = False
        self.driver.failed_over = False
        volumes = [{'id': 'guid1', 'replication_driver_data': '12345'},
                   {'id': 'guid2', 'replication_driver_data': '12345'}]
        # No run. Not doing repl. Should raise.
        self.assertRaises(exception.ReplicationError,
                          self.driver.replication_failover,
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.failover_host,
                          {},
                          vref,
                          volumes,
                          '12345')
        # Good run
        expected = {'host': 'host@host#be',
                    'replication_driver_data': None}
        ret = self.driver.replication_failover({}, vref, '12345')
        self.assertEqual(expected, ret)
        # Bad run. (break_replication fails)
        self.assertRaises(exception.ReplicationError,
                          self.driver.replication_failover,
                          {},
                          vref,
                          '12345')
        self.driver.replication_enabled = True
        mock_parse_secondary.return_value = 12345
        expected_destssn = 12345
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'replication_status': 'failed-over'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'replication_status': 'failed-over'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Good run. Not all volumes replicated.
        volumes = [{'id': 'guid1', 'replication_driver_data': '12345'},
                   {'id': 'guid2', 'replication_driver_data': ''}]
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'replication_status': 'failed-over'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'status': 'error'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Good run. Not all volumes replicated. No replication_driver_data.
        volumes = [{'id': 'guid1', 'replication_driver_data': '12345'},
                   {'id': 'guid2'}]
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'replication_status': 'failed-over'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'status': 'error'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Good run. No volumes replicated. No replication_driver_data.
        volumes = [{'id': 'guid1'},
                   {'id': 'guid2'}]
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'status': 'error'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'status': 'error'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Secondary not found.
        self.assertRaises(exception.ReplicationError,
                          self.driver.replication_failover,
        mock_parse_secondary.return_value = None
        self.assertRaises(exception.InvalidInput,
                          self.driver.failover_host,
                          {},
                          vref,
                          volumes,
                          '54321')
        # Already failed over.
        self.driver.failed_over = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.failover_host,
                          {},
                          volumes,
                          '12345')
        self.driver.replication_enabled = False
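The test above pins down the failover_host contract that replaces the old per-volume replication_failover call: the driver returns the id of the backend it failed over to plus a model update per volume. A rough sketch of just that return shape, using plain dicts; the real driver also breaks replication and imports volumes on the destination SC:

    # Illustrative restatement of the volume_update shape asserted above.

    def failover_updates_sketch(volumes, secondary_id):
        volume_updates = []
        for volume in volumes:
            if volume.get('replication_driver_data'):
                # Replicated volume: mark it failed over.
                volume_updates.append(
                    {'volume_id': volume['id'],
                     'updates': {'replication_status': 'failed-over'}})
            else:
                # Nothing to fail over to: error the volume out.
                volume_updates.append(
                    {'volume_id': volume['id'],
                     'updates': {'status': 'error'}})
        return secondary_id, volume_updates

    vols = [{'id': 'guid1', 'replication_driver_data': '12345'},
            {'id': 'guid2'}]
    print(failover_updates_sketch(vols, '12345'))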
    def test__get_unmanaged_replay(self,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        mock_api = mock.MagicMock()
        existing_ref = None
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        existing_ref = {'source-id': 'Not a source-name'}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        existing_ref = {'source-name': 'name'}
        mock_api.find_volume = mock.MagicMock(return_value=None)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        mock_api.find_volume.return_value = {'instanceId': '1'}
        mock_api.find_replay = mock.MagicMock(return_value=None)
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        mock_api.find_replay.return_value = {'instanceId': 2}
        ret = self.driver._get_unmanaged_replay(mock_api, 'guid', existing_ref)
        self.assertEqual({'instanceId': 2}, ret)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_do_repl')
    def test_list_replication_targets(self,
                                      mock_do_repl,
                       '_get_unmanaged_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'manage_replay')
    def test_manage_existing_snapshot(self,
                                      mock_manage_replay,
                                      mock_get_unmanaged_replay,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        mock_do_repl.side_effect = [(False, False),  # No repl.
                                    (True, False),   # Good run.
                                    (True, False)]   # Target not found.
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'managed_backend_name': 'host@dell1',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'managed_backend_name': 'host@dell2',
                                 'qosnode': 'cinderqos'}]
        # No repl.
        expected = {'volume_id': 'guid',
                    'targets': []}
        vref = {'replication_driver_data': '',
                'id': 'guid'}
        ret = self.driver.list_replication_targets({}, vref)
        self.assertEqual(expected, ret)
        # Good run.
        expected = {'volume_id': 'guid',
                    'targets': [{'type': 'managed',
                                 'target_device_id': '12345',
                                 'backend_name': 'host@dell1'},
                                {'type': 'managed',
                                 'target_device_id': '67890',
                                 'backend_name': 'host@dell2'}]}
        vref = {'replication_driver_data': '12345,67890',
                'id': 'guid'}
        ret = self.driver.list_replication_targets({}, vref)
        self.assertEqual(expected, ret)
        # Target not found.
        # We find one target but not another. This could happen for a variety
        # of reasons most of them administrator negligence. But the main one
        # is that someone reconfigured their backends without taking into
        # account how this would affect the children.
        expected = {'volume_id': 'guid',
                    'targets': [{'type': 'managed',
                                 'target_device_id': '12345',
                                 'backend_name': 'host@dell1'}]}
        vref = {'replication_driver_data': '12345,99999',
                'id': 'guid'}
        ret = self.driver.list_replication_targets({}, vref)
        self.assertEqual(expected, ret)
        snapshot = {'volume_id': 'guida',
                    'id': 'guidb'}
        existing_ref = {'source-name': 'name'}
        screplay = {'description': 'name'}
        mock_get_unmanaged_replay.return_value = screplay
        mock_manage_replay.return_value = True
        self.driver.manage_existing_snapshot(snapshot, existing_ref)
        self.assertEqual(1, mock_get_unmanaged_replay.call_count)
        mock_manage_replay.assert_called_once_with(screplay, 'guidb')
        mock_manage_replay.return_value = False
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_snapshot,
                          snapshot,
                          existing_ref)

        self.driver.backends = backends
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_unmanaged_replay')
    def test_manage_existing_snapshot_get_size(self,
                                               mock_get_unmanaged_replay,
                                               mock_close_connection,
                                               mock_open_connection,
                                               mock_init):
        snapshot = {'volume_id': 'a',
                    'id': 'b'}
        existing_ref = {'source-name'}
        # Good size.
        mock_get_unmanaged_replay.return_value = {'size':
                                                  '1.073741824E9 Bytes'}
        ret = self.driver.manage_existing_snapshot_get_size(snapshot,
                                                            existing_ref)
        self.assertEqual(1, ret)
        # Not on 1GB boundries.
        mock_get_unmanaged_replay.return_value = {'size':
                                                  '2.073741824E9 Bytes'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_snapshot_get_size,
                          snapshot,
                          existing_ref)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmanage_replay')
    def test_unmanage_snapshot(self,
                               mock_unmanage_replay,
                               mock_find_replay,
                               mock_find_volume,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        snapshot = {'volume_id': 'guida',
                    'id': 'guidb'}
        screplay = {'description': 'guidb'}
        mock_find_volume.return_value = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.unmanage_snapshot,
                          snapshot)
        mock_find_volume.return_value = {'name': 'guida'}
        mock_find_replay.return_value = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.unmanage_snapshot,
                          snapshot)
        mock_find_replay.return_value = screplay
        self.driver.unmanage_snapshot(snapshot)
        mock_unmanage_replay.assert_called_once_with(screplay)
@@ -1675,6 +1675,8 @@ class DellSCSanAPITestCase(test.TestCase):
        self.scapi.ssn = self.configuration.dell_sc_ssn
        self.scapi.sfname = self.configuration.dell_sc_server_folder
        self.scapi.vfname = self.configuration.dell_sc_volume_folder
        # Note that we set this to True (or not) on the replication tests.
        self.scapi.failed_over = False

        self.volid = str(uuid.uuid4())
        self.volume_name = "volume" + self.volid
@@ -2305,7 +2307,7 @@ class DellSCSanAPITestCase(test.TestCase):
                         mock_open_connection,
                         mock_init):
        # Test calling find_volume with result of no volume found
        mock_get_volume_list.side_effect = [[], [], []]
        mock_get_volume_list.side_effect = [[], []]
        res = self.scapi.find_volume(self.volume_name)
        self.assertIsNone(res, 'None expected')

@@ -2320,9 +2322,11 @@ class DellSCSanAPITestCase(test.TestCase):
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        self.scapi.failed_over = True
        mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST]
        res = self.scapi.find_volume(self.volume_name)
        self.assertEqual(self.VOLUME, res, 'Unexpected volume')
        self.scapi.failed_over = False

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_import_one',
@@ -2335,9 +2339,11 @@ class DellSCSanAPITestCase(test.TestCase):
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        self.scapi.failed_over = True
        mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST]
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_volume, self.volume_name)
        self.scapi.failed_over = False

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_volume_list')
@@ -2350,8 +2356,10 @@ class DellSCSanAPITestCase(test.TestCase):
        mock_get_volume_list.side_effect = [[],
                                            [],
                                            self.VOLUME_LIST_MULTI_VOLS]
        self.scapi.failed_over = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_volume, self.volume_name)
        self.scapi.failed_over = False

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_volume_list',
@@ -5472,13 +5480,13 @@ class DellSCSanAPITestCase(test.TestCase):
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        gb, rem = self.scapi._size_to_gb('1.073741824E9 Byte')
        gb, rem = self.scapi.size_to_gb('1.073741824E9 Byte')
        self.assertEqual(1, gb)
        self.assertEqual(0, rem)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi._size_to_gb,
                          self.scapi.size_to_gb,
                          'banana')
        gb, rem = self.scapi._size_to_gb('1.073741924E9 Byte')
        gb, rem = self.scapi.size_to_gb('1.073741924E9 Byte')
        self.assertEqual(1, gb)
        self.assertEqual(100, rem)

@@ -5514,7 +5522,7 @@ class DellSCSanAPITestCase(test.TestCase):
                       return_value=[{'configuredSize':
                                      '1.073741824E9 Bytes'}])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_size_to_gb',
                       'size_to_gb',
                       return_value=(1, 0))
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
@@ -5585,7 +5593,7 @@ class DellSCSanAPITestCase(test.TestCase):
                       return_value=[{'configuredSize':
                                      '1.073741924E9 Bytes'}])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_size_to_gb',
                       'size_to_gb',
                       return_value=(1, 100))
    def test_manage_existing_bad_size(self,
                                      mock_size_to_gb,
@@ -5612,7 +5620,7 @@ class DellSCSanAPITestCase(test.TestCase):
                       return_value=[{'configuredSize':
                                      '1.073741824E9 Bytes'}])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_size_to_gb',
                       'size_to_gb',
                       return_value=(1, 0))
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
@@ -5643,7 +5651,7 @@ class DellSCSanAPITestCase(test.TestCase):
                       return_value=[{'configuredSize':
                                      '1.073741824E9 Bytes'}])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_size_to_gb',
                       'size_to_gb',
                       return_value=(1, 0))
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
@@ -5678,7 +5686,7 @@ class DellSCSanAPITestCase(test.TestCase):
                       return_value=[{'configuredSize':
                                      '1.073741824E9 Bytes'}])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_size_to_gb',
                       'size_to_gb',
                       return_value=(1, 0))
    def test_get_unmanaged_volume_size(self,
                                       mock_size_to_gb,
@@ -5734,7 +5742,7 @@ class DellSCSanAPITestCase(test.TestCase):
                       return_value=[{'configuredSize':
                                      '1.073741924E9 Bytes'}])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_size_to_gb',
                       'size_to_gb',
                       return_value=(1, 100))
    def test_get_unmanaged_volume_size_bad_size(self,
                                                mock_size_to_gb,
@@ -6077,94 +6085,6 @@ class DellSCSanAPITestCase(test.TestCase):
        mock_post.assert_any_call('StorageCenter/ScReplication', payload)
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication',
                       return_value=SCREPL)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_pause_replication(self,
                               mock_post,
                               mock_get_screplication,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        # Not much to test here without an SC.
        ret = self.scapi.pause_replication(self.VOLUME, 65495)
        self.assertTrue(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication',
                       return_value=SCREPL)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_400)
    def test_pause_replication_error(self,
                                     mock_post,
                                     mock_get_screplication,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        # Not much to test here without an SC.
        ret = self.scapi.pause_replication(self.VOLUME, 65495)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication',
                       return_value=None)
    def test_pause_replication_not_found(self,
                                         mock_get_screplication,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        # Not much to test here without an SC.
        ret = self.scapi.pause_replication(self.VOLUME, 65495)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication',
                       return_value=SCREPL)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_resume_replication(self,
                                mock_post,
                                mock_get_screplication,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        # Not much to test here without an SC.
        ret = self.scapi.resume_replication(self.VOLUME, 65495)
        self.assertTrue(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication',
                       return_value=SCREPL)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_400)
    def test_resume_replication_error(self,
                                      mock_post,
                                      mock_get_screplication,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        # Not much to test here without an SC.
        ret = self.scapi.resume_replication(self.VOLUME, 65495)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication',
                       return_value=None)
    def test_resume_replication_not_found(self,
                                          mock_get_screplication,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        # Not much to test here without an SC.
        ret = self.scapi.resume_replication(self.VOLUME, 65495)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
@@ -6223,19 +6143,16 @@ class DellSCSanAPITestCase(test.TestCase):

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'rename_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_repl_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_remove_mappings')
                       'remove_mappings')
    def test_break_replication(self,
                               mock_remove_mappings,
                               mock_find_volume,
                               mock_find_repl_volume,
                               mock_rename_volume,
                               mock_get_screplication,
                               mock_close_connection,
                               mock_open_connection,
@@ -6245,54 +6162,37 @@ class DellSCSanAPITestCase(test.TestCase):
        mock_find_volume.side_effect = [self.VOLUME,  # 1
                                        self.VOLUME,  # 2
                                        None,         # 3
                                        None,         # 4
                                        None]         # 5
                                        None]         # 4
        # Much like find volume we do not gate on this.
        mock_get_screplication.side_effect = [self.SCREPL[0],  # 1
                                              None,            # 2
                                              None,            # 3
                                              None,            # 4
                                              None]            # 5
                                              None]            # 4
        # This
        mock_find_repl_volume.side_effect = [self.VOLUME,  # 1
                                             self.VOLUME,  # 2
                                             self.VOLUME,  # 3
                                             self.VOLUME,  # 4
                                             None]         # 5
                                             self.VOLUME]  # 4
        mock_remove_mappings.side_effect = [True,   # 1
                                            True,
                                            True,   # 2
                                            False,
                                            True,   # 3
                                            True,
                                            True,   # 4
                                            True,
                                            False]  # 5
                                            False]  # 4

        mock_rename_volume.side_effect = [True,   # 1
                                          True,   # 2
                                          True,   # 3
                                          False]  # 4
        # Good path.
        ret = self.scapi.break_replication('name', 65495)
        self.assertTrue(ret)
        self.assertEqual(1, mock_rename_volume.call_count)
        # Source found, screpl not found.
        ret = self.scapi.break_replication('name', 65495)
        self.assertTrue(ret)
        self.assertEqual(2, mock_rename_volume.call_count)
        # No source vol good path.
        ret = self.scapi.break_replication('name', 65495)
        self.assertTrue(ret)
        self.assertEqual(3, mock_rename_volume.call_count)
        # rename fail
        ret = self.scapi.break_replication('name', 65495)
        self.assertFalse(ret)
        self.assertEqual(4, mock_rename_volume.call_count)
        # fail remove mappings
        ret = self.scapi.break_replication('name', 65495)
        self.assertFalse(ret)
        self.assertEqual(4, mock_rename_volume.call_count)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_user_preferences')
@@ -6440,6 +6340,43 @@ class DellSCSanAPITestCase(test.TestCase):
                          scvol,
                          'a,b')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'put')
    def test_manage_replay(self,
                           mock_put,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        screplay = {'description': 'notguid',
                    'instanceId': 1}
        payload = {'description': 'guid',
                   'expireTime': 0}
        mock_put.return_value = self.RESPONSE_200
        ret = self.scapi.manage_replay(screplay, 'guid')
        self.assertTrue(ret)
        mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload)
        mock_put.return_value = self.RESPONSE_400
        ret = self.scapi.manage_replay(screplay, 'guid')
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'put')
    def test_unmanage_replay(self,
                             mock_put,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        screplay = {'description': 'guid',
                    'instanceId': 1}
        payload = {'expireTime': 1440}
        mock_put.return_value = self.RESPONSE_200
        ret = self.scapi.unmanage_replay(screplay)
        self.assertTrue(ret)
        mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload)
        mock_put.return_value = self.RESPONSE_400
        ret = self.scapi.unmanage_replay(screplay)
        self.assertFalse(ret)


class DellSCSanAPIConnectionTestCase(test.TestCase):
@@ -158,8 +158,12 @@ class StorageCenterApiHelper(object):
    connection to the Dell REST API.
    """

    def __init__(self, config):
    def __init__(self, config, active_backend_id):
        self.config = config
        # Now that active_backend_id is set on failover.
        # Use that if set. Mark the backend as failed over.
        self.active_backend_id = active_backend_id
        self.ssn = self.config.dell_sc_ssn

    def open_connection(self):
        """Creates the StorageCenterApi object.
@@ -168,11 +172,10 @@ class StorageCenterApiHelper(object):
        :raises: VolumeBackendAPIException
        """
        connection = None
        ssn = self.config.dell_sc_ssn
        LOG.info(_LI('open_connection to %(ssn)s at %(ip)s'),
                 {'ssn': ssn,
                 {'ssn': self.ssn,
                  'ip': self.config.san_ip})
        if ssn:
        if self.ssn:
            """Open connection to REST API."""
            connection = StorageCenterApi(self.config.san_ip,
                                          self.config.dell_sc_api_port,
@@ -182,9 +185,17 @@ class StorageCenterApiHelper(object):
            # This instance is for a single backend. That backend has a
            # few items of information we should save rather than passing them
            # about.
            connection.ssn = ssn
            connection.vfname = self.config.dell_sc_volume_folder
            connection.sfname = self.config.dell_sc_server_folder
            # Set appropriate ssn and failover state.
            if self.active_backend_id:
                connection.ssn = self.active_backend_id
                connection.failed_over = True
            else:
                connection.ssn = self.ssn
                connection.failed_over = False
            # Open connection.
            connection.open_connection()
        else:
            raise exception.VolumeBackendAPIException(
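The hunk above is the heart of the failover plumbing: when the helper is built with an active_backend_id (set by the manager after a failover), the connection is pointed at that SSN and flagged as failed over; otherwise it uses the configured primary. A condensed sketch of just that selection, with a stand-in connection object rather than the real StorageCenterApi:

    # Stand-in illustration of the ssn/failed_over selection above.

    class Connection(object):
        def __init__(self):
            self.ssn = None
            self.failed_over = False

    def configure(connection, primary_ssn, active_backend_id=None):
        if active_backend_id:
            # Failed over: talk to the secondary SC.
            connection.ssn = active_backend_id
            connection.failed_over = True
        else:
            connection.ssn = primary_ssn
            connection.failed_over = False
        return connection

    conn = configure(Connection(), 64702, active_backend_id='65495')
    print(conn.ssn, conn.failed_over)  # 65495 True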
@@ -208,8 +219,10 @@ class StorageCenterApi(object):
        2.3.0 - Added Legacy Port Mode Support
        2.3.1 - Updated error handling.
        2.4.0 - Added Replication V2 support.
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
    """

    APIVERSION = '2.4.0'
    APIVERSION = '2.5.0'

    def __init__(self, host, port, user, password, verify):
        """This creates a connection to Dell SC or EM.
@@ -223,8 +236,8 @@ class StorageCenterApi(object):
        """
        self.notes = 'Created by Dell Cinder Driver'
        self.repl_prefix = 'Cinder repl of '
        self.failover_prefix = 'Cinder failover '
        self.ssn = None
        self.failed_over = False
        self.vfname = 'openstack'
        self.sfname = 'openstack'
        self.legacypayloadfilters = False
@@ -877,6 +890,9 @@ class StorageCenterApi(object):
        for the volume first. If not found it searches the entire array for
        the volume.

        Remember that in the case of a failover we have already been switched
        to our new SSN. So the initial searches are valid.

        :param name: Name of the volume to search for. This is the cinder
            volume ID.
        :returns: Dell Volume object or None if not found.
@@ -899,19 +915,17 @@ class StorageCenterApi(object):
                  {'n': name,
                   'v': self.vfname})
        vollist = self._get_volume_list(name, None, False)
        # Failover Check.
        # If an empty list was returned then either there is no such volume
        # or we are in a failover state. Look for failover volume.
        if not vollist:
        # If we found nothing and are failed over then we might not have
        # completed our replication failover. Look for the replication
        # volume. We are already pointing at that SC.
        if not vollist and self.failed_over:
            LOG.debug('Unable to locate volume. Checking for failover.')
            # Get our failover name.
            fn = self._failover_name(name)
            # Get our replay name.
            fn = self._repl_name(name)
            vollist = self._get_volume_list(fn, None, False)
            # Same deal as the rest of these. If 0 not found. If greater than
            # one we have multiple copies and cannot return a valid result.
            if len(vollist) == 1:
                # So we are in failover. Rename the volume and move it to our
                # volume folder.
                LOG.info(_LI('Found failover volume. Completing failover.'))
                # Import our found volume. This completes our failover.
                scvolume = self._import_one(vollist[0], name)
@@ -920,7 +934,7 @@ class StorageCenterApi(object):
                             {'fail': fn,
                              'guid': name})
                    return scvolume
                msg = _('Unable to complete import of %s.') % fn
                msg = _('Unable to complete failover of %s.') % fn
                raise exception.VolumeBackendAPIException(data=msg)

        # If multiple volumes of the same name are found we need to error.
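In short, find_volume now falls back to the 'Cinder repl of ' name only when the backend is failed over, and completes the failover by importing (renaming) the volume it finds. A simplified sketch of that lookup order; the prefix comes from the diff, the helpers are stubbed, and the real code raises VolumeBackendAPIException rather than RuntimeError:

    # Sketch of the failed-over lookup order in find_volume above.
    # get_volume_list and import_one stand in for the driver's helpers.

    REPL_PREFIX = 'Cinder repl of '

    def find_volume_sketch(name, failed_over, get_volume_list, import_one):
        vollist = get_volume_list(name)
        if not vollist and failed_over:
            # Failover may not have completed: look for the replication
            # volume, which we are already pointing at on the new SC.
            vollist = get_volume_list(REPL_PREFIX + name)
            if len(vollist) == 1:
                # Rename/move it back to the cinder name.
                return import_one(vollist[0], name)
            raise RuntimeError('Unable to complete failover of %s.' % name)
        return vollist[0] if len(vollist) == 1 else None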
@@ -1097,8 +1111,8 @@ class StorageCenterApi(object):
        # 201 expected.
        if self._check_result(r):
            # Server was created
            LOG.info(_LI('SC server created %s'), scserver)
            scserver = self._first_result(r)
            LOG.info(_LI('SC server created %s'), scserver)

        # Add hba to our server
        if scserver is not None:
@@ -1731,6 +1745,44 @@ class StorageCenterApi(object):

        return None

    def manage_replay(self, screplay, replayid):
        """Basically renames the screplay and sets it to never expire.

        :param screplay: DellSC object.
        :param replayid: New name for replay.
        :return: True on success. False on fail.
        """
        if screplay and replayid:
            payload = {}
            payload['description'] = replayid
            payload['expireTime'] = 0
            r = self.client.put('StorageCenter/ScReplay/%s' %
                                self._get_id(screplay),
                                payload)
            if self._check_result(r):
                return True
            LOG.error(_LE('Error managing replay %s'),
                      screplay.get('description'))
        return False

    def unmanage_replay(self, screplay):
        """Basically sets the expireTime

        :param screplay: DellSC object.
        :return: True on success. False on fail.
        """
        if screplay:
            payload = {}
            payload['expireTime'] = 1440
            r = self.client.put('StorageCenter/ScReplay/%s' %
                                self._get_id(screplay),
                                payload)
            if self._check_result(r):
                return True
            LOG.error(_LE('Error unmanaging replay %s'),
                      screplay.get('description'))
        return False

    def delete_replay(self, scvolume, replayid):
        """Finds a Dell replay by replayid string and expires it.

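manage_replay and unmanage_replay above are each a single REST PUT against the replay: managing renames the replay to the cinder snapshot id and clears its expiration, unmanaging reinstates an expiry. The payloads, restated on their own (1440 reads like minutes, i.e. one day, though the diff does not say so explicitly):

    # Payloads used by manage_replay/unmanage_replay above.

    def manage_payload(snapshot_id):
        # Take ownership: rename to the snapshot id, never expire.
        return {'description': snapshot_id, 'expireTime': 0}

    def unmanage_payload():
        # Release ownership: expire after 1440 (presumably minutes).
        return {'expireTime': 1440}

    print(manage_payload('guid'))  # {'description': 'guid', 'expireTime': 0}
    print(unmanage_payload())      # {'expireTime': 1440}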
@@ -2264,7 +2316,8 @@ class StorageCenterApi(object):
                    ' for Consistency Group support')
            raise NotImplementedError(data=msg)

    def _size_to_gb(self, spacestring):
    @staticmethod
    def size_to_gb(spacestring):
        """Splits a SC size string into GB and a remainder.

        Space is returned in a string like ...
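size_to_gb, now a staticmethod so it can be shared without touching instance state, parses SC size strings such as '1.073741824E9 Bytes' into whole GiB plus a remainder; the manage paths treat a nonzero remainder as "not on a 1 GB boundary" and refuse the volume. A sketch of the arithmetic consistent with the tests earlier in this change (the real method raises VolumeBackendAPIException on bad input):

    # Arithmetic behind size_to_gb, matching the test expectations:
    # '1.073741824E9 Byte' -> (1, 0), '1.073741924E9 Byte' -> (1, 100).

    def size_to_gb_sketch(spacestring):
        try:
            nbytes = int(float(spacestring.split(' ', 1)[0]))
        except ValueError:
            raise ValueError('Bad size string: %r' % spacestring)
        gib = 1073741824  # 2 ** 30
        return nbytes // gib, nbytes % gib

    print(size_to_gb_sketch('1.073741824E9 Bytes'))  # (1, 0)
    print(size_to_gb_sketch('1.073741924E9 Bytes'))  # (1, 100)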
@@ -2332,7 +2385,7 @@ class StorageCenterApi(object):
        if count == 1:
            # First thing to check is if the size is something we can
            # work with.
            sz, rem = self._size_to_gb(vollist[0]['configuredSize'])
            sz, rem = self.size_to_gb(vollist[0]['configuredSize'])
            if rem > 0:
                raise exception.VolumeBackendAPIException(
                    data=_('Volume size must multiple of 1 GB.'))
@@ -2368,7 +2421,7 @@ class StorageCenterApi(object):
        count = len(vollist)
        # If we found one volume with that name we can work with it.
        if count == 1:
            sz, rem = self._size_to_gb(vollist[0]['configuredSize'])
            sz, rem = self.size_to_gb(vollist[0]['configuredSize'])
            if rem > 0:
                raise exception.VolumeBackendAPIException(
                    data=_('Volume size must multiple of 1 GB.'))
@@ -2512,9 +2565,6 @@ class StorageCenterApi(object):
    def _repl_name(self, name):
        return self.repl_prefix + name

    def _failover_name(self, name):
        return self.failover_prefix + name

    def _get_disk_folder(self, ssn, foldername):
        # TODO(tswanson): Harden this.
        diskfolder = None
@@ -2586,27 +2636,14 @@ class StorageCenterApi(object):
                      'destsc': destssn})
        return screpl

    def pause_replication(self, scvolume, destssn):
        # destssn should probably be part of the object.
        replication = self.get_screplication(scvolume, destssn)
        if replication:
            r = self.client.post('StorageCenter/ScReplication/%s/Pause' %
                                 self._get_id(replication), {})
            if self._check_result(r):
                return True
        return False

    def resume_replication(self, scvolume, destssn):
        # destssn should probably be part of the object.
        replication = self.get_screplication(scvolume, destssn)
        if replication:
            r = self.client.post('StorageCenter/ScReplication/%s/Resume' %
                                 self._get_id(replication), {})
            if self._check_result(r):
                return True
        return False

    def find_repl_volume(self, guid, destssn, instance_id=None):
        """Find our replay destination volume on the destssn.

        :param guid: Volume ID.
        :param destssn: Where to look for the volume.
        :param instance_id: If we know our exact volume ID use that.
        :return: SC Volume object or None
        """
        # Do a normal volume search.
        pf = self._get_payload_filter()
        pf.append('scSerialNumber', destssn)
@@ -2616,7 +2653,7 @@ class StorageCenterApi(object):
            pf.append('instanceId', instance_id)
        else:
            # Try the name.
            pf.append('Name', self.repl_prefix + guid)
            pf.append('Name', self._repl_name(guid))
        r = self.client.post('StorageCenter/ScVolume/GetList',
                             pf.payload)
        if self._check_result(r):
@@ -2625,7 +2662,7 @@ class StorageCenterApi(object):
                return volumes[0]
        return None

    def _remove_mappings(self, scvol):
    def remove_mappings(self, scvol):
        """Peels all the mappings off of scvol.

        :param scvol:
@@ -2636,7 +2673,7 @@ class StorageCenterApi(object):
                                 self._get_id(scvol),
                                 {})
            return self._check_result(r)
        return None
        return False

    def break_replication(self, volumename, destssn):
        """This just breaks the replication.
@@ -2646,8 +2683,7 @@ class StorageCenterApi(object):
        every time this goes south.

        :param volumename:
        :param destssn:
        :return: True False
        :return:
        """
        ret = False
        replid = None
@@ -2661,14 +2697,11 @@ class StorageCenterApi(object):
        # stuffing it into the recycle bin.
        # Instead we try to unmap the destination volume which will break
        # the replication but leave the replication object on the SC.
        ret = self._remove_mappings(screplvol)
        ret = self.remove_mappings(screplvol)
        # If the volume is free of replication.
        if ret:
            # Move and rename it.
            ret = self.rename_volume(screplvol,
                                     self._failover_name(volumename))
        # Try to kill mappings on the source.
        # We don't care that this succeeded or failed. Just move on.
        self._remove_mappings(scvolume)
        self.remove_mappings(scvolume)

        return ret

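break_replication above now goes through the public remove_mappings: it unmaps the destination volume, which severs the replication while leaving the ScReplication object on the SC (deleting it would recycle the destination volume), then unmaps the source on a best-effort basis. A linearized sketch of the sequence, condensed from the hunk and its surrounding context and omitting the rename handling; the api argument stands in for the StorageCenterApi instance:

    # Linear sketch of the break_replication sequence above.

    def break_replication_sketch(api, volumename, destssn):
        scvolume = api.find_volume(volumename)      # not gated on success
        screplvol = api.find_repl_volume(volumename, destssn)
        # Unmap the destination: this breaks the replication but leaves
        # the replication object on the SC.
        ret = api.remove_mappings(screplvol)
        # Best effort on the source; the outcome is ignored.
        api.remove_mappings(scvolume)
        return ret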
@@ -17,7 +17,6 @@ from oslo_log import log as logging
from oslo_utils import excutils

from cinder import exception
from cinder import objects
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
@@ -50,7 +49,7 @@ CONF.register_opts(common_opts)


class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                       driver.ExtendVD,
                       driver.ExtendVD, driver.ManageableSnapshotsVD,
                       driver.SnapshotVD, driver.BaseVD):

    def __init__(self, *args, **kwargs):
@@ -62,6 +61,8 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
        self.backends = self.configuration.safe_get('replication_device')
        self.replication_enabled = True if self.backends else False
        self.is_direct_connect = False
        self.active_backend_id = kwargs.get('active_backend_id', None)
        self.failed_over = (self.active_backend_id is not None)

    def _bytes_to_gb(self, spacestring):
        """Space is returned in a string like ...
@@ -89,7 +90,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
        specific helpers.
        """
        self._client = dell_storagecenter_api.StorageCenterApiHelper(
            self.configuration)
            self.configuration, self.active_backend_id)

    def check_for_setup_error(self):
        """Validates the configuration information."""
@@ -101,11 +102,10 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                   'not supported with direct connect.')
            raise exception.InvalidHost(reason=msg)

        if self.replication_enabled:
            # If we are a healthy replicated system make sure our backend
            # is alive.
        if self.replication_enabled and not self.failed_over:
            # Check that our replication destinations are available.
            # TODO(tswanson): Check if we need a diskfolder. (Or not.)
            # TODO(tswanson): Can we check that the backend specifies
            # TODO(tswanson): the same ssn as target_device_id.
            for backend in self.backends:
                replssn = backend['target_device_id']
                try:
@@ -151,7 +151,8 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
        """
        do_repl = False
        sync = False
        if not self.is_direct_connect:
        # Repl does not work with direct connect.
        if not self.failed_over and not self.is_direct_connect:
            specs = self._get_volume_extra_specs(volume)
            do_repl = specs.get('replication_enabled') == '<is> True'
            sync = specs.get('replication_type') == '<in> sync'
@@ -255,12 +256,22 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,

        return model_update

    def _split(self, replication_driver_data):
    def _split_driver_data(self, replication_driver_data):
        """Splits the replication_driver_data into an array of ssn strings.

        :param replication_driver_data: A string of comma separated SSNs.
        :returns: SSNs in an array of strings.
        """
        ssnstrings = []
        # We have any replication_driver_data.
        if replication_driver_data:
            # Split the array and wiffle through the entries.
            for str in replication_driver_data.split(','):
                # Strip any junk from the string.
                ssnstring = str.strip()
                # Anything left?
                if ssnstring:
                    # Add it to our array.
                    ssnstrings.append(ssnstring)
        return ssnstrings

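_split_driver_data, renamed from _split, is the one place the comma-separated SSN string gets picked apart: blanks and stray whitespace are dropped, and the SSNs stay strings until a caller converts them. A standalone restatement for illustration:

    # Standalone restatement of _split_driver_data above.

    def split_driver_data(replication_driver_data):
        ssnstrings = []
        if replication_driver_data:
            for chunk in replication_driver_data.split(','):
                ssnstring = chunk.strip()
                if ssnstring:
                    ssnstrings.append(ssnstring)
        return ssnstrings

    print(split_driver_data('12345, 67890,'))  # ['12345', '67890']
    print(split_driver_data(''))               # []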
@ -281,7 +292,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
|
||||
scvol = api.find_volume(volume_name)
|
||||
replication_driver_data = volume.get('replication_driver_data')
|
||||
# This is just a string of ssns separated by commas.
|
||||
ssnstrings = self._split(replication_driver_data)
|
||||
ssnstrings = self._split_driver_data(replication_driver_data)
|
||||
# Trundle through these and delete them all.
|
||||
for ssnstring in ssnstrings:
|
||||
ssn = int(ssnstring)
|
||||
@ -571,6 +582,13 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
|
||||
if self.replication_enabled:
|
||||
data['replication_type'] = ['async', 'sync']
|
||||
data['replication_count'] = len(self.backends)
|
||||
replication_targets = []
|
||||
# Trundle through our backends.
|
||||
for backend in self.backends:
|
||||
target_device_id = backend.get('target_device_id')
|
||||
if target_device_id:
|
||||
replication_targets.append(target_device_id)
|
||||
data['replication_targets'] = replication_targets
|
||||
|
||||
self._stats = data
|
||||
LOG.debug('Total cap %(total)s Free cap %(free)s',
|
||||
@ -645,8 +663,6 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
|
||||
# If we are here because we found no profile that should be fine
|
||||
# as we are trying to delete it anyway.
|
||||
|
||||
# Now whack the volumes. So get our list.
|
||||
volumes = self.db.volume_get_all_by_group(context, gid)
|
||||
# Trundle through the list deleting the volumes.
|
||||
for volume in volumes:
|
||||
self.delete_volume(volume)
|
||||
@ -702,6 +718,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
|
||||
|
||||
:param context: the context of the caller.
|
||||
:param cgsnapshot: Information about the snapshot to take.
|
||||
:param snapshots: List of snapshots for this cgsnapshot.
|
||||
:returns: Updated model_update, snapshots.
|
||||
:raises: VolumeBackendAPIException.
|
||||
"""
|
||||
@ -713,8 +730,6 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
|
||||
if profile:
|
||||
LOG.debug('profile %s replayid %s', profile, snapshotid)
|
||||
if api.snap_cg_replay(profile, snapshotid, 0):
|
||||
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
|
||||
context, snapshotid)
|
||||
for snapshot in snapshots:
|
||||
snapshot.status = 'available'
|
||||
|
||||
@@ -755,8 +770,6 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                           % snapshotid)
                    raise exception.VolumeBackendAPIException(data=msg)

            snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
                context, snapshotid)
            for snapshot in snapshots:
                snapshot.status = 'deleted'

@@ -967,98 +980,18 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
            return model_update
        return True

    def replication_enable(self, context, vref):
        """Re-enable replication on vref.

        :param context: NA
        :param vref: Cinder volume reference.
        :return: model_update.
        """
        volumename = vref.get('id')
        LOG.info(_LI('Enabling replication on %s'), volumename)
        model_update = {}
        with self._client.open_connection() as api:
            replication_driver_data = vref.get('replication_driver_data')
            destssns = self._split(replication_driver_data)
            do_repl, sync = self._do_repl(api, vref)
            if destssns and do_repl:
                scvolume = api.find_volume(volumename)
                if scvolume:
                    for destssn in destssns:
                        if not api.resume_replication(scvolume, int(destssn)):
                            LOG.error(_LE('Unable to resume replication on '
                                          'volume %(vol)s to SC %(ssn)s'),
                                      {'vol': volumename,
                                       'ssn': destssn})
                            model_update['replication_status'] = 'error'
                            break
                else:
                    LOG.error(_LE('Volume %s not found'), volumename)
            else:
                LOG.error(_LE('Replication not enabled or no replication '
                              'destinations found. %s'),
                          volumename)
        return model_update

    def replication_disable(self, context, vref):
        """Disable replication on vref.

        :param context: NA
        :param vref: Cinder volume reference.
        :return: model_update.
        """
        volumename = vref.get('id')
        LOG.info(_LI('Disabling replication on %s'), volumename)
        model_update = {}
        with self._client.open_connection() as api:
            replication_driver_data = vref.get('replication_driver_data')
            destssns = self._split(replication_driver_data)
            do_repl, sync = self._do_repl(api, vref)
            if destssns and do_repl:
                scvolume = api.find_volume(volumename)
                if scvolume:
                    for destssn in destssns:
                        if not api.pause_replication(scvolume, int(destssn)):
                            LOG.error(_LE('Unable to pause replication on '
                                          'volume %(vol)s to SC %(ssn)s'),
                                      {'vol': volumename,
                                       'ssn': destssn})
                            model_update['replication_status'] = 'error'
                            break
                else:
                    LOG.error(_LE('Volume %s not found'), volumename)
            else:
                LOG.error(_LE('Replication not enabled or no replication '
                              'destinations found. %s'),
                          volumename)
        return model_update

    def _find_host(self, ssnstring):
        """Find the backend associated with this ssnstring.

        :param ssnstring: The ssn of the storage center we are looking for.
        :return: The managed_backend_name associated with said storage center.
        """
        for backend in self.backends:
            if ssnstring == backend['target_device_id']:
                return backend['managed_backend_name']
        return None

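For context, a sketch (values hypothetical) of the replication_device entries as parsed into self.backends; _find_host matches on target_device_id and returns managed_backend_name:

    backends = [{'target_device_id': '64703',
                 'managed_backend_name': 'host@dell_sc_2#pool'},
                {'target_device_id': '64704',
                 'managed_backend_name': 'host@dell_sc_3#pool'}]
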
    def _parse_secondary(self, api, vref, secondary):
    def _parse_secondary(self, api, secondary):
        """Find the replication destination associated with secondary.

        :param api: Dell StorageCenterApi
        :param vref: Cinder Volume
        :param secondary: String indicating the secondary to failover to.
        :return: Destination SSN and the host string for the given secondary.
        :return: Destination SSN for the given secondary.
        """
        LOG.debug('_parse_secondary. Looking for %s.', secondary)
        replication_driver_data = vref['replication_driver_data']
        destssn = None
        host = None
        ssnstrings = self._split(replication_driver_data)
        # Trundle through these and delete them all.
        for ssnstring in ssnstrings:
        # Trundle through these looking for our secondary.
        for backend in self.backends:
            ssnstring = backend['target_device_id']
            # If they list a secondary it has to match.
            # If they do not list a secondary we return the first
            # replication on a working system.
@@ -1069,142 +1002,197 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                # way to pick a destination to failover to. So just
                # look for one that is just up.
                try:
                    # If the SC ssn exists check if we are configured to
                    # use it.
                    # If the SC ssn exists use it.
                    if api.find_sc(ssn):
                        host = self._find_host(ssnstring)
                        # If host then we are configured.
                        if host:
                            # Save our ssn and get out of here.
                            destssn = ssn
                            break
                        destssn = ssn
                        break
                except exception.VolumeBackendAPIException:
                    LOG.warning(_LW('SSN %s appears to be down.'), ssn)
        LOG.info(_LI('replication failover secondary is %(ssn)s %(host)s'),
                 {'ssn': destssn,
                  'host': host})
        return destssn, host
        LOG.info(_LI('replication failover secondary is %(ssn)s'),
                 {'ssn': destssn})
        return destssn

    def replication_failover(self, context, vref, secondary):
    def failover_host(self, context, volumes, secondary_id=None):
        """Failover to secondary.

        The flow is as follows.
        1. The user explicitly requests a failover of a replicated volume.
        2. Driver breaks replication.
           a. Neatly by deleting the SCReplication object if the
              primary is still up.
           b. Brutally by unmapping the replication volume if it isn't.
        3. We rename the volume to "Cinder failover <Volume GUID>".
        4. Change the Cinder DB entry for which backend controls the volume
           to the backend listed in the replication_device.
        5. That's it.
        :param context: security context
        :param secondary_id: Specifies rep target to fail over to
        :param volumes: List of volumes serviced by this backend.
        :returns: destssn, volume_updates data structure

        Completion of the failover is done on first use on the new backend.
        We do this by modifying the find_volume function.

        Find volume searches the following places in order:
        1. "<Volume GUID>" in the backend's volume folder.
        2. "<Volume GUID>" outside of the volume folder.
        3. "Cinder failover <Volume GUID>" anywhere on the system.

        If "Cinder failover <Volume GUID>" was found:
        1. Volume is renamed to "<Volume GUID>".
        2. Volume is moved to the new backend's volume folder.
        3. The volume is now available on the secondary backend.

        :param context:
        :param vref: Cinder volume reference.
        :param secondary: SSN of the destination Storage Center
        :return: model_update on failover.
        Example volume_updates data structure:
        [{'volume_id': <cinder-uuid>,
          'updates': {'provider_id': 8,
                      'replication_status': 'failed-over',
                      'replication_extended_status': 'whatever',...}},]
        """
        LOG.info(_LI('Failing replication %(vol)s to %(sec)s'),
                 {'vol': vref.get('id'),
                  'sec': secondary})
        # If we fall through this is our error.
        msg = _('Unable to failover replication.')
        with self._client.open_connection() as api:
            # Basic check. We should never get here.
            do_repl, sync = self._do_repl(api, vref)
            if not do_repl:
                # If we did get here then there is a disconnect. Set our
                # message and raise (below).
                msg = _('Unable to failover unreplicated volume.')
            else:

        # We do not allow failback. Dragons be there.
        if self.failed_over:
            raise exception.VolumeBackendAPIException(message=_(
                'Backend has already been failed over. Unable to fail back.'))

        LOG.info(_LI('Failing backend to %s'), secondary_id)
        # Basic check.
        if self.replication_enabled:
            with self._client.open_connection() as api:
                # Look for the specified secondary.
                destssn, host = self._parse_secondary(api, vref, secondary)
                if destssn and host:
                    volumename = vref.get('id')
                    # This will break the replication on the SC side. At the
                    # conclusion of this the destination volume will be
                    # renamed to indicate failover is in progress. We will
                    # pick the volume up on the destination backend later.
                    if api.break_replication(volumename, destssn):
                destssn = self._parse_secondary(api, secondary_id)
                if destssn:
                    # We roll through trying to break replications.
                    # Is failing here a complete failure of failover?
                    volume_updates = []
                    for volume in volumes:
                        model_update = {}
                        model_update['host'] = host
                        model_update['replication_driver_data'] = None
                        return model_update
                    # We are here. Nothing went well.
                    LOG.error(_LE('Unable to break replication from '
                                  '%(from)s to %(to)d.'),
                              {'from': volumename,
                               'to': destssn})
                        if volume.get('replication_driver_data'):
                            ret = api.break_replication(volume['id'], destssn)
                            LOG.info(_LI('Failing over volume %(id)s '
                                         'replication: %(res)s.'),
                                     {'id': volume['id'],
                                      'res': ('FAILED', 'SUCCESS')[ret]})
                            # We should note that we are now failed over.
                            model_update = {
                                'replication_status': 'failed-over'}
                        else:
                            # Not a replicated volume. Try to unmap it.
                            scvolume = api.find_volume(volume['id'])
                            api.remove_mappings(scvolume)
                            model_update = {'status': 'error'}
                        # Either we are failed over or our status is now error.
                        volume_updates.append({'volume_id': volume['id'],
                                               'updates': model_update})

                    # This is it.
                    return destssn, volume_updates
                else:
                    LOG.error(_LE('Unable to find valid destination.'))
                    raise exception.InvalidInput(message=(
                        _('replication_failover failed. %s not found.') %
                        secondary_id))
        # I don't think we should ever get here.
        raise exception.VolumeBackendAPIException(message=(
            _('replication_failover failed. '
              'Backend not configured for failover')))

        # We raise to indicate something bad happened.
        raise exception.ReplicationError(volume_id=vref.get('id'),
                                         reason=msg)

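A caller-side sketch of the new entry point (driver, context and volumes assumed in scope): the first return value is the SSN now serving the volumes, the second is the per-volume updates structure shown in the docstring above.

    destssn, volume_updates = driver.failover_host(
        context, volumes, secondary_id='64703')  # hypothetical SSN
    # e.g. {'<uuid>': {'replication_status': 'failed-over'}, ...}
    statuses = {u['volume_id']: u['updates'] for u in volume_updates}
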
    def _get_unmanaged_replay(self, api, volume_name, existing_ref):
        replay_name = None
        if existing_ref:
            replay_name = existing_ref.get('source-name')
        if not replay_name:
            msg = _('_get_unmanaged_replay: Must specify source-name.')
            LOG.error(msg)
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=msg)
        # Find our volume.
        scvolume = api.find_volume(volume_name)
        if not scvolume:
            # Didn't find it.
            msg = (_('_get_unmanaged_replay: Cannot find volume id %s')
                   % volume_name)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        # Find our replay.
        screplay = api.find_replay(scvolume, replay_name)
        if not screplay:
            # Didn't find it. Reference must be invalid.
            msg = (_('_get_unmanaged_replay: Cannot '
                     'find snapshot named %s') % replay_name)
            LOG.error(msg)
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=msg)
        return screplay

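The helper honors only the 'source-name' key of the reference; a minimal sketch (replay name hypothetical):

    existing_ref = {'source-name': 'daily-replay-01'}
    screplay = self._get_unmanaged_replay(api, volume_name, existing_ref)
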
    def list_replication_targets(self, context, vref):
        """Lists replication targets for the given vref.
    def manage_existing_snapshot(self, snapshot, existing_ref):
        """Brings an existing backend storage object under Cinder management.

        We return targets the volume has been setup to replicate to and that
        are configured on this backend.
        existing_ref is passed straight through from the API request's
        manage_existing_ref value, and it is up to the driver how this should
        be interpreted. It should be sufficient to identify a storage object
        that the driver should somehow associate with the newly-created cinder
        snapshot structure.

        :param context: NA
        :param vref: Cinder volume object.
        :return: A dict of the form {'volume_id': id,
                                     'targets': [{'type': xxx,
                                                  'target_device_id': xxx,
                                                  'backend_name': xxx}]}
        There are two ways to do this:

        1. Rename the backend storage object so that it matches the
           snapshot['name'] which is how drivers traditionally map between a
           cinder snapshot and the associated backend storage object.

        2. Place some metadata on the snapshot, or somewhere in the backend,
           that allows other driver requests (e.g. delete) to locate the
           backend storage object when required.

        If the existing_ref doesn't make sense, or doesn't refer to an existing
        backend storage object, raise a ManageExistingInvalidReference
        exception.
        """
        LOG.debug('list_replication_targets for volume %s', vref.get('id'))
        targets = []
        volume_name = snapshot.get('volume_id')
        snapshot_id = snapshot.get('id')
        with self._client.open_connection() as api:
            do_repl, sync = self._do_repl(api, vref)
            # If we have no replication_driver_data then we have no
            # replication targets.
            replication_driver_data = vref.get('replication_driver_data')
            ssnstrings = self._split(replication_driver_data)
            # If we have data.
            if ssnstrings:
                # Trundle through our backends.
                for backend in self.backends:
                    # If we find a backend then we report it.
                    if ssnstrings.count(backend['target_device_id']):
                        target = {}
                        target['type'] = 'managed'
                        target['target_device_id'] = (
                            backend['target_device_id'])
                        target['backend_name'] = (
                            backend['managed_backend_name'])
                        targets.append(target)
                    else:
                        # We note if the source is not replicated to a
                        # configured destination for the backend.
                        LOG.info(_LI('Volume %(guid)s not replicated to '
                                     'backend %(name)s'),
                                 {'guid': vref['id'],
                                  'name': backend['managed_backend_name']})
                # At this point we note that what we found and what we
                # expected to find were two different things.
                if len(ssnstrings) != len(targets):
                    LOG.warning(_LW('Expected replication count %(rdd)d does '
                                    'not match configured replication count '
                                    '%(tgt)d.'),
                                {'rdd': len(ssnstrings),
                                 'tgt': len(targets)})
        # Format response.
        replication_targets = {'volume_id': vref.get('id'), 'targets': targets}
        LOG.info(_LI('list_replication_targets: %s'), replication_targets)
        return replication_targets
            # Find our unmanaged snapshot. This will raise on error.
            screplay = self._get_unmanaged_replay(api, volume_name,
                                                  existing_ref)
            # Manage means update description and update expiration.
            if not api.manage_replay(screplay, snapshot_id):
                # That didn't work. Error.
                msg = (_('manage_existing_snapshot: Error managing '
                         'existing replay %(ss)s on volume %(vol)s') %
                       {'ss': screplay.get('description'),
                        'vol': volume_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            # Life is good. Let the world know what we've done.
            LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on '
                         'volume %(volume)s has been renamed to %(id)s and is '
                         'now managed by Cinder.'),
                     {'exist': screplay.get('description'),
                      'volume': volume_name,
                      'id': snapshot_id})

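A minimal managing sketch (IDs and replay name hypothetical): the snapshot's volume_id locates the SC volume, and its id becomes the replay's Cinder-managed name.

    snapshot = {'volume_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                'id': '0a1b2c3d-4e5f-6789-abcd-ef0123456789'}
    driver.manage_existing_snapshot(snapshot,
                                    {'source-name': 'daily-replay-01'})
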
    # NOTE: Can't use abstractmethod before all drivers implement it
    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        """Return size of snapshot to be managed by manage_existing.

        When calculating the size, round up to the next GB.
        """
        volume_name = snapshot.get('volume_id')
        with self._client.open_connection() as api:
            screplay = self._get_unmanaged_replay(api, volume_name,
                                                  existing_ref)
            sz, rem = dell_storagecenter_api.StorageCenterApi.size_to_gb(
                screplay['size'])
            if rem > 0:
                raise exception.VolumeBackendAPIException(
                    data=_('Volume size must be a multiple of 1 GB.'))
            return sz

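A sketch of the size-check semantics (size_to_gb is assumed to return whole GB plus a byte remainder; divmod stands in for it here):

    def size_to_gb(size_bytes):          # stand-in, assumed semantics
        return divmod(size_bytes, 1024 ** 3)

    size_to_gb(10 * 1024 ** 3)      # -> (10, 0): manageable
    size_to_gb(10 * 1024 ** 3 + 1)  # -> (10, 1): rem > 0, rejected
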
    # NOTE: Can't use abstractmethod before all drivers implement it
    def unmanage_snapshot(self, snapshot):
        """Removes the specified snapshot from Cinder management.

        Does not delete the underlying backend storage object.

        NOTE: We do set the expire countdown to 1 day. Once a snapshot is
        unmanaged it will expire 24 hours later.
        """
        volume_name = snapshot.get('volume_id')
        snapshot_id = snapshot.get('id')
        with self._client.open_connection() as api:
            # Find our volume.
            scvolume = api.find_volume(volume_name)
            if not scvolume:
                # Didn't find it.
                msg = (_('unmanage_snapshot: Cannot find volume id %s')
                       % volume_name)
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            # Find our replay.
            screplay = api.find_replay(scvolume, snapshot_id)
            if not screplay:
                # Didn't find it. Reference must be invalid.
                msg = (_('unmanage_snapshot: Cannot find snapshot named %s')
                       % snapshot_id)
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            # Free our snapshot.
            api.unmanage_replay(screplay)
            # Do not check our result.

@@ -45,9 +45,11 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
        2.3.0 - Added Legacy Port Mode Support
        2.3.1 - Updated error handling.
        2.4.0 - Added Replication V2 support.
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
    """

    VERSION = '2.4.0'
    VERSION = '2.5.0'

    def __init__(self, *args, **kwargs):
        super(DellStorageCenterFCDriver, self).__init__(*args, **kwargs)

@@ -44,15 +44,16 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
        2.3.0 - Added Legacy Port Mode Support
        2.3.1 - Updated error handling.
        2.4.0 - Added Replication V2 support.
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
    """

    VERSION = '2.4.0'
    VERSION = '2.5.0'

    def __init__(self, *args, **kwargs):
        super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs)
        self.backend_name = (
            self.configuration.safe_get('volume_backend_name')
            or 'Dell-iSCSI')
            self.configuration.safe_get('volume_backend_name') or 'Dell-iSCSI')

    def initialize_connection(self, volume, connector):
        # Initialize_connection will find or create a server identified by the

@@ -1,3 +0,0 @@
---
features:
  - Added replication v2 support to the Dell Storage Center drivers.

@@ -0,0 +1,3 @@
---
features:
  - Added replication v2.1 support to the Dell Storage Center drivers.