PowerMax Driver - SRDF Enhancement
This enhancement updates the PowerMax Cinder driver so that every method requiring replication-specific functionality uses public Unisphere REST API endpoints.

Change-Id: Ic49a133b5ceb9013b4d53298e58ea2b21838870b
Implements: blueprint powermax-srdf-enhancement
This commit is contained in:
parent
fcbfa927a3
commit
1870d2ac0c
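The diff below is largely unit-test coverage for the new public-endpoint calls. As a reading aid, here is a minimal sketch (not driver code; the base URL, API version and auth are assumptions following the usual Unisphere 9.x REST layout) of the storage-group suspend request whose resource and payload the srdf_modify_group tests assert:

    import requests

    # Hypothetical endpoint root; adjust host, port and API version.
    BASE = 'https://unisphere.example.com:8443/univmax/restapi/91/replication'

    def srdf_suspend(array_id, sg_name, rdf_group_no, auth):
        # Suspend SRDF replication for one storage group's RDF group.
        url = ('%(base)s/symmetrix/%(array)s/storagegroup/%(sg)s'
               '/rdf_group/%(rdfg)s' % {'base': BASE, 'array': array_id,
                                        'sg': sg_name, 'rdfg': rdf_group_no})
        payload = {'executionOption': 'ASYNCHRONOUS', 'action': 'Suspend',
                   'suspend': {'force': 'true'}}
        return requests.put(url, json=payload, auth=auth, verify=False)

The same resource takes 'Resume', 'Establish', 'Failover' and 'Failback' actions; srdf_modify_group is the single wrapper the new suspend/resume/failover helpers call.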
@@ -40,6 +40,8 @@ class PowerMaxData(object):
srp = 'SRP_1'
srp2 = 'SRP_2'
slo = 'Diamond'
slo_diamond = 'Diamond'
slo_silver = 'Silver'
workload = 'DSS'
port_group_name_f = 'OS-fibre-PG'
port_group_name_i = 'OS-iscsi-PG'
@@ -57,6 +59,7 @@ class PowerMaxData(object):
default_sg_no_slo = 'OS-no_SLO-SG'
default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG'
default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG'
default_sg_no_slo_re_enabled = 'OS-SRP_1-Diamond-NONE-RE-SG'
failed_resource = 'OS-failed-resource'
fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123'
new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123'
@@ -173,6 +176,11 @@ class PowerMaxData(object):
provider_location5 = {'array': remote_array,
'device_id': device_id}

replication_update = (
{'replication_status': 'enabled',
'replication_driver_data': six.text_type(
{'array': remote_array, 'device_id': device_id2})})

legacy_provider_location = {
'classname': 'Symm_StorageVolume',
'keybindings': {'CreationClassName': u'Symm_StorageVolume',
@@ -296,6 +304,10 @@ class PowerMaxData(object):
rep_extra_specs['retries'] = 1
rep_extra_specs['srp'] = srp2
rep_extra_specs['rep_mode'] = 'Synchronous'
rep_extra_specs['sync_interval'] = 3
rep_extra_specs['sync_retries'] = 200
rep_extra_specs['rdf_group_label'] = rdf_group_name
rep_extra_specs['rdf_group_no'] = rdf_group_no
rep_extra_specs2 = deepcopy(rep_extra_specs)
rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f
rep_extra_specs3 = deepcopy(rep_extra_specs)
@@ -319,6 +331,36 @@ class PowerMaxData(object):
extra_specs_tags = deepcopy(extra_specs)
extra_specs_tags.update({utils.STORAGE_GROUP_TAGS: sg_tags})

rep_extra_specs_mgmt = deepcopy(rep_extra_specs)
rep_extra_specs_mgmt['srp'] = srp
rep_extra_specs_mgmt['mgmt_sg_name'] = rdf_managed_async_grp
rep_extra_specs_mgmt['sg_name'] = default_sg_no_slo_re_enabled
rep_extra_specs_mgmt['rdf_group_no'] = rdf_group_no
rep_extra_specs_mgmt['rdf_group_label'] = rdf_group_name
rep_extra_specs_mgmt['target_array_model'] = array_model
rep_extra_specs_mgmt['slo'] = 'Diamond'
rep_extra_specs_mgmt['workload'] = 'NONE'
rep_extra_specs_mgmt['sync_interval'] = 2
rep_extra_specs_mgmt['sync_retries'] = 200

rep_extra_specs_metro = deepcopy(rep_extra_specs)
rep_extra_specs_metro[utils.REP_MODE] = utils.REP_METRO
rep_extra_specs_metro[utils.METROBIAS] = True
rep_extra_specs_metro['replication_enabled'] = '<is> True'

rep_config = {
'array': remote_array, 'srp': srp, 'portgroup': port_group_name_i,
'rdf_group_no': rdf_group_no, 'sync_retries': 200,
'sync_interval': 1, 'rdf_group_label': rdf_group_name,
'allow_extend': True, 'mode': utils.REP_METRO}

ex_specs_rep_config = deepcopy(rep_extra_specs_metro)
ex_specs_rep_config['array'] = array
ex_specs_rep_config['rep_config'] = rep_config

ex_specs_rep_config_no_extend = deepcopy(ex_specs_rep_config)
ex_specs_rep_config_no_extend['rep_config']['allow_extend'] = False

test_volume_type_1 = volume_type.VolumeType(
id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc',
extra_specs=extra_specs)
@@ -559,6 +601,16 @@ class PowerMaxData(object):
'rdfGroupNumber': rdf_group_no,
'states': ['Failed Over']}]

sg_rdf_group_details = {
"storageGroupName": test_vol_grp_name,
"symmetrixId": array,
"volumeRdfTypes": ["R1"],
"modes": ["Asynchronous"],
"totalTracks": 8205,
"largerRdfSides": ["Equal"],
"rdfGroupNumber": 1,
"states": ["suspended"]}

sg_list = {'storageGroupId': [storagegroup_name_f,
defaultstoragegroup_name]}

@@ -781,6 +833,14 @@ class PowerMaxData(object):
'remoteVolumeState': 'Write Disabled',
'remoteSymmetrixId': remote_array}

rdf_group_vol_details_not_synced = {
'remoteRdfGroupNumber': rdf_group_no, 'localSymmetrixId': array,
'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no,
'localVolumeName': device_id, 'rdfpairState': 'syncinprog',
'remoteVolumeName': device_id2, 'localVolumeState': 'Ready',
'rdfMode': 'Synchronous', 'remoteVolumeState': 'Write Disabled',
'remoteSymmetrixId': remote_array}

# system
job_list = [{'status': 'SUCCEEDED',
'jobId': '12345',
@@ -1262,3 +1322,33 @@ class PowerMaxData(object):
'default_sg_name': 'default-sg',
'service_level': 'Diamond'
}

rep_info_dict = {
'device_id': device_id,
'local_array': array, 'remote_array': remote_array,
'target_device_id': device_id2, 'target_name': 'test_vol',
'rdf_group_no': rdf_group_no, 'rep_mode': 'Metro',
'replication_status': 'Enabled', 'rdf_group_label': rdf_group_name,
'target_array_model': array_model,
'rdf_mgmt_grp': rdf_managed_async_grp}

create_vol_with_replication_payload = {
'executionOption': 'ASYNCHRONOUS',
'editStorageGroupActionParam': {
'expandStorageGroupParam': {
'addVolumeParam': {
'emulation': 'FBA',
'create_new_volumes': 'False',
'volumeAttributes': [
{'num_of_vols': 1,
'volumeIdentifier': {
'identifier_name': (
volume_details[0]['volume_identifier']),
'volumeIdentifierChoice': 'identifier_name'},
'volume_size': test_volume.size,
'capacityUnit': 'GB'}],
'remoteSymmSGInfoParam': {
'force': 'true',
'remote_symmetrix_1_id': remote_array,
'remote_symmetrix_1_sgs': [
defaultstoragegroup_name]}}}}}
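Note how create_vol_with_replication_payload above creates and pairs the volume in one storage-group edit: expandStorageGroupParam adds the R1 device while remoteSymmSGInfoParam mirrors it into the remote array's storage group. A sketch of issuing that payload directly (hypothetical helper; it assumes the same Unisphere layout as the sketch above, with modify-storage-group as a PUT on the sloprovisioning storage-group resource):

    import requests

    def create_replicated_volume(array_id, sg_name, payload, auth):
        # One PUT both expands the local storage group and creates the
        # paired volume on the remote array via remoteSymmSGInfoParam.
        base = 'https://unisphere.example.com:8443/univmax/restapi/91'
        url = ('%(base)s/sloprovisioning/symmetrix/%(array)s'
               '/storagegroup/%(sg)s' % {'base': base, 'array': array_id,
                                         'sg': sg_name})
        return requests.put(url, json=payload, auth=auth, verify=False)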
File diff suppressed because it is too large
@@ -162,6 +162,25 @@ class PowerMaxMaskingTest(test.TestCase):
self.data.extra_specs)
mock_rm.assert_called_once()

@mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg')
@mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child',
side_effect=[None, tpd.PowerMaxData.parent_sg_f])
@mock.patch.object(rest.PowerMaxRest, 'move_volume_between_storage_groups')
@mock.patch.object(
rest.PowerMaxRest, 'get_num_vols_in_sg', return_value=1)
def test_force_move_volume_between_storage_groups(
self, mock_num, mock_move, mock_parent, mock_rm):

self.driver.masking.move_volume_between_storage_groups(
self.data.array, self.data.device_id,
self.data.storagegroup_name_i, self.data.storagegroup_name_f,
self.data.extra_specs, force=True)

mock_move.assert_called_once_with(
self.data.array, self.data.device_id,
self.data.storagegroup_name_i, self.data.storagegroup_name_f,
self.data.extra_specs, True)

@mock.patch.object(rest.PowerMaxRest, 'get_masking_view',
side_effect=[tpd.PowerMaxData.maskingview,
tpd.PowerMaxData.maskingview, None])
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

from copy import deepcopy
from unittest import mock

from cinder import exception
@@ -26,7 +25,6 @@ from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
from cinder.volume.drivers.dell_emc.powermax import iscsi
from cinder.volume.drivers.dell_emc.powermax import provision
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
from cinder.volume import volume_utils


@@ -78,6 +76,23 @@ class PowerMaxProvisionTest(test.TestCase):
array, volume_name, storagegroup_name, volume_size, extra_specs)
self.assertEqual(ref_dict, volume_dict)

@mock.patch.object(rest.PowerMaxRest, 'create_volume_from_sg')
def test_create_volume_from_sg_with_rep_info(self, mck_create):
array = self.data.array
storagegroup_name = self.data.storagegroup_name_f
volume_id = self.data.test_volume.id
volume_name = self.utils.get_volume_element_name(volume_id)
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
rep_info_dict = self.data.rep_info_dict

self.provision.create_volume_from_sg(
array, volume_name, storagegroup_name, volume_size, extra_specs,
rep_info=rep_info_dict)
mck_create.assert_called_once_with(
array, volume_name, storagegroup_name, volume_size, extra_specs,
rep_info_dict)

def test_delete_volume_from_srp(self):
array = self.data.array
device_id = self.data.device_id
@@ -117,7 +132,7 @@ class PowerMaxProvisionTest(test.TestCase):
snap_name, extra_specs, create_snap=True)
mock_modify.assert_called_once_with(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True, copy_mode=False)
extra_specs, link=True, copy=False)
mock_create_snapvx.assert_called_once_with(
array, source_device_id, snap_name, extra_specs, ttl=ttl)

@@ -136,10 +151,10 @@ class PowerMaxProvisionTest(test.TestCase):
snap_name, extra_specs, create_snap=False, copy_mode=True)
mock_modify.assert_called_once_with(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True, copy_mode=True)
extra_specs, link=True, copy=True)
mock_create_snapvx.assert_not_called()

def test_break_replication_relationship(self):
def test_unlink_snapvx_tgt_volume(self):
array = self.data.array
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
@@ -148,7 +163,7 @@ class PowerMaxProvisionTest(test.TestCase):

with mock.patch.object(
self.provision, '_unlink_volume') as mock_unlink:
self.provision.break_replication_relationship(
self.provision.unlink_snapvx_tgt_volume(
array, target_device_id, source_device_id, snap_name,
extra_specs, generation=6, loop=True)
mock_unlink.assert_called_once_with(
@@ -374,56 +389,38 @@ class PowerMaxProvisionTest(test.TestCase):
self.data.array, self.data.defaultstoragegroup_name))
self.assertEqual(ref_settings3, sg_slo_settings3)

@mock.patch.object(rest.PowerMaxRest, 'wait_for_rdf_consistent_state')
@mock.patch.object(rest.PowerMaxRest, 'delete_rdf_pair')
@mock.patch.object(rest.PowerMaxRest, 'modify_rdf_device_pair')
def test_break_rdf_relationship(self, mock_mod, mock_del, mock_wait):
@mock.patch.object(rest.PowerMaxRest, 'srdf_delete_device_pair')
@mock.patch.object(rest.PowerMaxRest, 'srdf_suspend_replication')
@mock.patch.object(rest.PowerMaxRest, 'wait_for_rdf_pair_sync')
def test_break_rdf_relationship(self, mock_wait, mock_suspend, mock_del):
array = self.data.array
device_id = self.data.device_id
target_device = self.data.device_id2
rdf_group_name = self.data.rdf_group_name
rep_extra_specs = self.data.rep_extra_specs
# State is suspended
self.provision.break_rdf_relationship(
array, device_id, target_device,
rdf_group_name, rep_extra_specs, 'Suspended')
mock_mod.assert_not_called()
mock_del.assert_called_once_with(
array, device_id, rdf_group_name)
mock_del.reset_mock()
# State is synchronized
self.provision.break_rdf_relationship(
array, device_id, target_device,
rdf_group_name, rep_extra_specs, 'Synchronized')
mock_mod.assert_called_once_with(
array, device_id, rdf_group_name, rep_extra_specs,
suspend=True)
mock_del.assert_called_once_with(
array, device_id, rdf_group_name)
sg_name = self.data.storagegroup_name_f
rdf_group = self.data.rdf_group_no
extra_specs = self.data.rep_extra_specs

# sync still in progress
self.provision.break_rdf_relationship(
array, device_id, target_device,
rdf_group_name, rep_extra_specs, 'SyncInProg')
mock_wait.assert_called_once()
array, device_id, sg_name, rdf_group, extra_specs, 'SyncInProg')
mock_wait.assert_called_once_with(array, rdf_group, device_id,
extra_specs)
mock_del.assert_called_once_with(array, rdf_group, device_id)
mock_wait.reset_mock()
mock_suspend.reset_mock()
mock_del.reset_mock()

@mock.patch.object(provision.PowerMaxProvision,
'disable_group_replication')
@mock.patch.object(provision.PowerMaxProvision, 'delete_rdf_pair')
def test_break_metro_rdf_pair(self, mock_del, mock_disable):
self.provision.break_metro_rdf_pair(
self.data.array, self.data.device_id, self.data.device_id2,
self.data.rdf_group_no, self.data.rep_extra_specs, 'metro_grp')
mock_del.assert_called_once()
# State is Consistent, need to suspend
self.provision.break_rdf_relationship(
array, device_id, sg_name, rdf_group, extra_specs, 'Consistent')
mock_suspend.assert_called_once_with(array, sg_name, rdf_group,
extra_specs)
mock_del.assert_called_once_with(array, rdf_group, device_id)
mock_del.reset_mock()

def test_delete_rdf_pair_async(self):
with mock.patch.object(
self.provision.rest, 'delete_rdf_pair') as mock_del_rdf:
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_ASYNC
self.provision.delete_rdf_pair(
self.data.array, self.data.device_id,
self.data.rdf_group_no, self.data.device_id2, extra_specs)
mock_del_rdf.assert_called_once()
# State is synchronized
self.provision.break_rdf_relationship(
array, device_id, sg_name, rdf_group, extra_specs, 'Synchronized')
mock_del.assert_called_once_with(array, rdf_group, device_id)

@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
return_value=None)
@@ -526,50 +523,6 @@ class PowerMaxProvisionTest(test.TestCase):
self.data.extra_specs)
mock_create.assert_called_once()

def test_enable_group_replication(self):
with mock.patch.object(self.rest,
'modify_storagegroup_rdf') as mock_mod:
self.provision.enable_group_replication(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_mod.assert_called_once()

def test_disable_group_replication(self):
with mock.patch.object(self.rest,
'modify_storagegroup_rdf') as mock_mod:
self.provision.disable_group_replication(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_mod.assert_called_once()

def test_failover_group(self):
with mock.patch.object(self.rest,
'modify_storagegroup_rdf') as mock_fo:
# Failover
self.provision.failover_group(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_fo.assert_called_once_with(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failover', self.data.extra_specs)
mock_fo.reset_mock()
# Failback
self.provision.failover_group(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs, False)
mock_fo.assert_called_once_with(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failback', self.data.extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'modify_storagegroup_rdf')
@mock.patch.object(rest.PowerMaxRest, 'delete_storagegroup_rdf')
def test_delete_group_replication(self, mock_mod, mock_del):
self.provision.delete_group_replication(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, self.data.extra_specs)
mock_mod.assert_called_once()
mock_del.assert_called_once()

@mock.patch.object(
rest.PowerMaxRest, 'get_snap_linked_device_list',
side_effect=[[{'targetDevice': tpd.PowerMaxData.device_id2}],
File diff suppressed because it is too large
@@ -16,12 +16,12 @@
from copy import deepcopy
import time
from unittest import mock
from unittest.mock import call

import requests

from cinder import exception
from cinder import test
from cinder.tests.unit import utils as test_utils
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_data as tpd)
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
@@ -428,6 +428,45 @@ class PowerMaxRestTest(test.TestCase):
volume_name, self.data.failed_resource,
self.data.test_volume.size, self.data.extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'rename_volume')
@mock.patch.object(rest.PowerMaxRest, 'get_volume_list',
return_value=['00001', '00002', '00003', '00004'])
@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'modify_storage_group',
return_value=(200, 'job'))
def test_create_volume_from_sg_rep_info(
self, mck_modify, mck_wait, mck_get_vol, mck_rename):
volume_name = self.data.volume_details[0]['volume_identifier']
sg_name = self.data.defaultstoragegroup_name
rep_info = self.data.rep_info_dict
rep_info['initial_device_list'] = ['00001', '00002', '00003']
ref_payload = self.data.create_vol_with_replication_payload
ref_volume_dict = {utils.ARRAY: self.data.array,
utils.DEVICE_ID: '00004'}

volume_dict = self.rest.create_volume_from_sg(
self.data.array, volume_name, sg_name,
self.data.test_volume.size, self.data.extra_specs, rep_info)
mck_modify.assert_called_once_with(
self.data.array, self.data.defaultstoragegroup_name, ref_payload)
self.assertEqual(ref_volume_dict, volume_dict)

@mock.patch.object(rest.PowerMaxRest, 'get_volume_list',
return_value=['00001', '00002', '00003', '00004'])
@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'modify_storage_group',
return_value=(200, 'job'))
def test_create_volume_from_sg_rep_info_vol_cnt_exception(
self, mck_modify, mck_wait, mck_get_vol):
volume_name = self.data.volume_details[0]['volume_identifier']
sg_name = self.data.defaultstoragegroup_name
rep_info = self.data.rep_info_dict
rep_info['initial_device_list'] = ['00001', '00002']
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.create_volume_from_sg, self.data.array,
volume_name, sg_name, self.data.test_volume.size,
self.data.extra_specs, rep_info)

def test_add_vol_to_sg_success(self):
operation = 'Add volume to sg'
status_code = 202
@@ -1151,6 +1190,14 @@ class PowerMaxRestTest(test.TestCase):
self.rest.modify_volume_snap(array, source_id, target_id,
snap_name, extra_specs)
mock_modify.assert_not_called()
# copy mode is True
payload['copy'] = 'true'
self.rest.modify_volume_snap(
array, source_id, target_id, snap_name, extra_specs, link=True,
copy=True)
mock_modify.assert_called_once_with(
array, 'replication', 'snapshot', payload,
resource_name=snap_name, private='/private')

def test_delete_volume_snap(self):
array = self.data.array
@@ -1283,7 +1330,7 @@ class PowerMaxRestTest(test.TestCase):
'host_io_limit_mb_sec': '4000'}}
self.data.sg_details.append(sg_qos)
array = self.data.array
extra_specs = self.data.extra_specs
extra_specs = deepcopy(self.data.extra_specs)
extra_specs['qos'] = {'total_iops_sec': '4000',
'DistributionType': 'Always'}
return_value = self.rest.update_storagegroup_qos(
@@ -1298,7 +1345,7 @@ class PowerMaxRestTest(test.TestCase):
def test_update_storagegroup_qos_exception(self):
array = self.data.array
storage_group = self.data.defaultstoragegroup_name
extra_specs = self.data.extra_specs
extra_specs = deepcopy(self.data.extra_specs)
extra_specs['qos'] = {'total_iops_sec': '4000',
'DistributionType': 'Wrong',
'total_bytes_sec': '4194304000'}
@@ -1371,152 +1418,40 @@ class PowerMaxRestTest(test.TestCase):
self.data.array, self.data.rdf_group_name)
self.assertIsNone(rdfg_num3)

def test_create_rdf_device_pair(self):
ref_dict = {'array': self.data.remote_array,
'device_id': self.data.device_id2}
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_SYNC
rdf_dict = self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, extra_specs)
self.assertEqual(ref_dict, rdf_dict)

def test_create_rdf_device_pair_async(self):
ref_dict = {'array': self.data.remote_array,
'device_id': self.data.device_id2}
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_ASYNC
rdf_dict = self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, extra_specs)
self.assertEqual(ref_dict, rdf_dict)

def test_create_rdf_device_pair_metro(self):
ref_dict = {'array': self.data.remote_array,
'device_id': self.data.device_id2}
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_METRO
extra_specs[utils.METROBIAS] = True
rdf_dict = self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, extra_specs)
self.assertEqual(ref_dict, rdf_dict)

@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'create_resource',
return_value=(200, 'job'))
@mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array',
side_effect=[True, True, False, False])
def test_test_create_rdf_device_pair_metro_cons_exempt(
self, mck_nxt_gen, mck_create, mck_wait):
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_METRO
extra_specs[utils.METROBIAS] = True

ref_payload = ({
"deviceNameListSource": [{"name": self.data.device_id}],
"deviceNameListTarget": [{"name": self.data.device_id2}],
"replicationMode": 'Active',
"establish": 'true',
"rdfType": 'RDF1'})

get_payload_true = {'rdfType': 'RDF1', 'consExempt': 'true'}
get_payload_false = {'rdfType': 'RDF1', 'consExempt': 'false'}

with mock.patch.object(
self.rest, 'get_metro_payload_info',
side_effect=[get_payload_true,
get_payload_false]) as mock_payload:
ref_extra_specs = deepcopy(extra_specs)

ref_extra_specs[utils.RDF_CONS_EXEMPT] = True
self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, extra_specs)
mock_payload.assert_called_once_with(
self.data.array, ref_payload, self.data.rdf_group_no,
ref_extra_specs)

mock_payload.reset_mock()

ref_extra_specs[utils.RDF_CONS_EXEMPT] = False
self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, extra_specs)
mock_payload.assert_called_once_with(
self.data.array, ref_payload, self.data.rdf_group_no,
ref_extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'get_rdf_group',
side_effect=[{'numDevices': 0}, {'numDevices': 0},
{'numDevices': 1}, {'numDevices': 1}])
def test_get_metro_payload_info(self, mock_rdfg):
payload_in = {'establish': 'true', 'rdfType': 'RDF1'}
payload_in = {'establish': 'true', 'rdfMode': 'Active',
'rdfType': 'RDF1'}

# First volume out, Metro use bias not set
act_payload_1 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no, {})
self.data.array, payload_in.copy(), self.data.rdf_group_no, {},
True)
self.assertEqual(payload_in, act_payload_1)

# First volume out, Metro use bias set
act_payload_2 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no,
{'metro_bias': True})
{'metro_bias': True}, True)
self.assertEqual('true', act_payload_2['metroBias'])

# Not first vol in RDFG, consistency exempt not set
act_payload_3 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no,
{'consExempt': False})
ref_payload_3 = {'rdfType': 'NA', 'format': 'true'}
{'exempt': False}, False)
ref_payload_3 = {'rdfMode': 'Active', 'rdfType': 'RDF1'}
self.assertEqual(ref_payload_3, act_payload_3)

# Not first vol in RDFG, consistency exempt set
act_payload_4 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no,
{'consExempt': True})
ref_payload_4 = {'rdfType': 'RDF1', 'consExempt': 'true'}
{'exempt': True}, True)
ref_payload_4 = {'rdfType': 'RDF1', 'exempt': 'true',
'rdfMode': 'Active'}
self.assertEqual(ref_payload_4, act_payload_4)

def test_modify_rdf_device_pair(self):
resource_name = '70/volume/00001'
common_opts = {'force': 'false', 'symForce': 'false', 'star': 'false',
'hop2': 'false', 'bypass': 'false'}
suspend_payload = {'action': 'SUSPEND',
'executionOption': 'ASYNCHRONOUS',
'suspend': common_opts}

failover_opts = deepcopy(common_opts)
failover_opts.update({'establish': 'true', 'restore': 'false',
'remote': 'false', 'immediate': 'false'})
failover_payload = {'action': 'Failover',
'executionOption': 'ASYNCHRONOUS',
'failover': failover_opts}
with mock.patch.object(
self.rest, 'modify_resource', return_value=(
200, self.data.job_list[0])) as mock_mod:
self.rest.modify_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.extra_specs, suspend=True)
mock_mod.assert_called_once_with(
self.data.array, 'replication', 'rdf_group',
suspend_payload, resource_name=resource_name,
private='/private')
mock_mod.reset_mock()
self.rest.modify_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.extra_specs, suspend=False)
mock_mod.assert_called_once_with(
self.data.array, 'replication', 'rdf_group',
failover_payload, resource_name=resource_name,
private='/private')

@mock.patch.object(rest.PowerMaxRest, 'delete_resource')
def test_delete_rdf_pair(self, mock_del):
self.rest.delete_rdf_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no)
mock_del.assert_called_once()

def test_get_storage_group_rep(self):
array = self.data.array
source_group_name = self.data.storagegroup_name_source
@@ -1582,16 +1517,6 @@ class PowerMaxRestTest(test.TestCase):
self.data.rdf_group_no, 'Establish')
self.assertTrue(verify2)

def test_modify_storagegroup_rdf(self):
with mock.patch.object(
self.rest, 'modify_resource',
return_value=(202, self.data.job_list[0])) as mock_mod:
self.rest.modify_storagegroup_rdf(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failover',
self.data.extra_specs)
mock_mod.assert_called_once()

def test_delete_storagegroup_rdf(self):
with mock.patch.object(
self.rest, 'delete_resource') as mock_del:
@@ -1616,24 +1541,6 @@ class PowerMaxRestTest(test.TestCase):
self.assertTrue(is_next_gen2)
self.assertEqual('PowerMax 2000', array_model_powermax)

@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=test_utils.ZeroIntervalLoopingCall)
@mock.patch.object(rest.PowerMaxRest, 'are_vols_rdf_paired',
side_effect=[('', '', 'syncinprog'),
('', '', 'consistent'),
exception.CinderException])
def test_wait_for_rdf_consistent_state(self, mock_paired):
self.rest.wait_for_rdf_consistent_state(
self.data.array, self.data.remote_array,
self.data.device_id, self.data.device_id2,
self.data.extra_specs)
self.assertEqual(2, mock_paired.call_count)
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.wait_for_rdf_consistent_state,
self.data.array, self.data.remote_array,
self.data.device_id, self.data.device_id2,
self.data.extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'modify_resource',
return_value=('200', 'JobComplete'))
def test_modify_volume_snap_rename(self, mock_modify):
@@ -1819,3 +1726,372 @@ class PowerMaxRestTest(test.TestCase):
self.assertFalse(valid_version)
request_count = mock_req.call_count
self.assertEqual(2, request_count)

@mock.patch.object(rest.PowerMaxRest, 'get_resource',
return_value=tpd.PowerMaxData.sg_rdf_group_details)
def test_get_storage_group_rdf_group_state(self, mck_get):
ref_get_resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % {
'sg': self.data.test_vol_grp_name, 'rdfg': self.data.rdf_group_no})
states = self.rest.get_storage_group_rdf_group_state(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no)
mck_get.assert_called_once_with(
self.data.array, 'replication', ref_get_resource)
self.assertEqual(states, [utils.RDF_SUSPENDED_STATE])

@mock.patch.object(rest.PowerMaxRest, 'get_resource')
def test_get_rdf_pair_volume(self, mck_get):
rdf_grp_no = self.data.rdf_group_no
device_id = self.data.device_id
array = self.data.array
ref_get_resource = ('rdf_group/%(rdf_group)s/volume/%(device)s' % {
'rdf_group': rdf_grp_no, 'device': device_id})
self.rest.get_rdf_pair_volume(array, rdf_grp_no, device_id)
mck_get.assert_called_once_with(array, 'replication', ref_get_resource)

@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'create_resource',
return_value=(200, 'job'))
def test_srdf_protect_storage_group(self, mck_create, mck_wait):
array_id = self.data.array
remote_array_id = self.data.remote_array
rdf_group_no = self.data.rdf_group_no
replication_mode = utils.REP_METRO
sg_name = self.data.default_sg_re_enabled
service_level = 'Diamond'
extra_specs = self.data.rep_extra_specs_metro
remote_sg = self.data.rdf_managed_async_grp

ref_payload = {
'executionOption': 'ASYNCHRONOUS', 'metroBias': 'true',
'replicationMode': 'Active', 'remoteSLO': service_level,
'remoteSymmId': remote_array_id, 'rdfgNumber': rdf_group_no,
'remoteStorageGroupName': remote_sg, 'establish': 'true'}
ref_resource = ('storagegroup/%(sg_name)s/rdf_group' % {
'sg_name': sg_name})

self.rest.srdf_protect_storage_group(
array_id, remote_array_id, rdf_group_no, replication_mode,
sg_name, service_level, extra_specs, target_sg=remote_sg)
mck_create.assert_called_once_with(
array_id, 'replication', ref_resource, ref_payload)

@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'modify_resource',
return_value=(200, 'job'))
def test_srdf_modify_group(self, mck_modify, mck_wait):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
payload = {'executionOption': 'ASYNCHRONOUS', 'action': 'Suspend'}
extra_specs = self.data.rep_extra_specs
msg = 'test'
resource = ('storagegroup/%(sg_name)s/rdf_group/%(rdf_group_no)s' % {
'sg_name': sg_name, 'rdf_group_no': rdf_group_no})

self.rest.srdf_modify_group(
array_id, rdf_group_no, sg_name, payload, extra_specs, msg)
mck_modify.assert_called_once_with(
array_id, 'replication', resource, payload)
mck_wait.assert_called_once_with(msg, 200, 'job', extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'modify_resource',
return_value=(200, 'job'))
def test_srdf_modify_group_async_call_false(self, mck_modify, mck_wait):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
payload = {'action': 'Suspend'}
extra_specs = self.data.rep_extra_specs
msg = 'test'
resource = ('storagegroup/%(sg_name)s/rdf_group/%(rdf_group_no)s' % {
'sg_name': sg_name, 'rdf_group_no': rdf_group_no})

self.rest.srdf_modify_group(
array_id, rdf_group_no, sg_name, payload, extra_specs, msg, False)
mck_modify.assert_called_once_with(
array_id, 'replication', resource, payload)
mck_wait.assert_not_called()

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_CONSISTENT_STATE])
def test_srdf_suspend_replication(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

self.rest.srdf_suspend_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_called_once_with(
array_id, rdf_group_no, sg_name,
{'suspend': {'force': 'true'}, 'action': 'Suspend'},
rep_extra_specs, 'Suspend SRDF Group Replication')

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_SUSPENDED_STATE])
def test_srdf_suspend_replication_already_suspended(self, mck_get,
mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

self.rest.srdf_suspend_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_not_called()

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_SUSPENDED_STATE])
def test_srdf_resume_replication(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

self.rest.srdf_resume_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_called_once_with(
array_id, rdf_group_no, sg_name, {'action': 'Resume'},
rep_extra_specs, 'Resume SRDF Group Replication', True)

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_SUSPENDED_STATE])
def test_srdf_resume_replication_metro(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs_metro

self.rest.srdf_resume_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_called_once_with(
array_id, rdf_group_no, sg_name,
{"action": "Establish", "establish": {"metroBias": "true"}},
rep_extra_specs, 'Resume SRDF Group Replication', True)

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_CONSISTENT_STATE])
def test_srdf_resume_replication_already_resumed(self, mck_get,
mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

self.rest.srdf_resume_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_not_called()

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_CONSISTENT_STATE])
def test_srdf_establish_replication(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

first_call = call(array_id, rdf_group_no, sg_name,
{'action': 'Suspend'}, rep_extra_specs,
'Suspend SRDF Group Replication')
second_call = call(array_id, rdf_group_no, sg_name,
{'action': 'Establish'}, rep_extra_specs,
'Incremental Establish SRDF Group Replication')

self.rest.srdf_establish_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_has_calls([first_call, second_call], any_order=False)

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
def test_srdf_failover_group(self, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

self.rest.srdf_failover_group(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_called_once_with(
array_id, rdf_group_no, sg_name, {'action': 'Failover'},
rep_extra_specs, 'Failing over SRDF group replication')

@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
def test_srdf_failback_group(self, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs

self.rest.srdf_failback_group(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_modify.assert_called_once_with(
array_id, rdf_group_no, sg_name, {'action': 'Failback'},
rep_extra_specs, 'Failing back SRDF group replication')

@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'modify_storage_group',
return_value=(200, 'job'))
def test_srdf_remove_device_pair_from_storage_group(self, mck_modify,
mck_wait):
array_id = self.data.array
sg_name = self.data.default_sg_re_enabled
remote_array_id = self.data.remote_array
device_id = self.data.device_id
rep_extra_specs = self.data.rep_extra_specs
ref_payload = {
'editStorageGroupActionParam': {
'removeVolumeParam': {
'volumeId': [device_id],
'remoteSymmSGInfoParam': {
'remote_symmetrix_1_id': remote_array_id,
'remote_symmetrix_1_sgs': [sg_name]}}}}

self.rest.srdf_remove_device_pair_from_storage_group(
array_id, sg_name, remote_array_id, device_id, rep_extra_specs)
mck_modify.assert_called_once_with(
array_id, sg_name, ref_payload)

@mock.patch.object(rest.PowerMaxRest, 'delete_resource')
def test_srdf_delete_device_pair(self, mck_del):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
device_id = self.data.device_id
ref_resource = ('%(rdfg)s/volume/%(dev)s' % {
'rdfg': rdf_group_no, 'dev': device_id})

self.rest.srdf_delete_device_pair(
array_id, rdf_group_no, device_id)
mck_del.assert_called_once_with(
array_id, 'replication', 'rdf_group', ref_resource)

@mock.patch.object(
rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'create_resource',
return_value=(200, 'job'))
def test_srdf_create_device_pair_async(
self, mck_create, mck_wait, mck_get):
array_id = self.data.array
remote_array = self.data.remote_array
rdf_group_no = self.data.rdf_group_no
mode = utils.REP_ASYNC
device_id = self.data.device_id
tgt_device_id = self.data.device_id2
rep_extra_specs = self.data.rep_extra_specs

ref_payload = {
'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode,
'localDeviceListCriteriaParam': {'localDeviceList': [device_id]},
'rdfType': 'RDF1', 'invalidateR2': 'true', 'exempt': 'true'}
ref_resource = 'rdf_group/%(rdfg)s/volume' % {'rdfg': rdf_group_no}
ref_response = {
'array': array_id, 'remote_array': remote_array,
'src_device': device_id, 'tgt_device': tgt_device_id,
'session_info': self.data.rdf_group_vol_details}

create_response = self.rest.srdf_create_device_pair(
array_id, rdf_group_no, mode, device_id, rep_extra_specs, True)
mck_create.assert_called_once_with(
array_id, 'replication', ref_resource, ref_payload)
mck_get.assert_called_once_with(
array_id, rdf_group_no, device_id)
self.assertEqual(ref_response, create_response)

@mock.patch.object(
rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'create_resource',
return_value=(200, 'job'))
def test_srdf_create_device_pair_sync(
self, mck_create, mck_wait, mck_get):
array_id = self.data.array
remote_array = self.data.remote_array
rdf_group_no = self.data.rdf_group_no
mode = utils.REP_SYNC
device_id = self.data.device_id
tgt_device_id = self.data.device_id2
rep_extra_specs = self.data.rep_extra_specs

ref_payload = {
'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode,
'localDeviceListCriteriaParam': {'localDeviceList': [device_id]},
'rdfType': 'RDF1', 'establish': 'true'}
ref_resource = 'rdf_group/%(rdfg)s/volume' % {'rdfg': rdf_group_no}
ref_response = {
'array': array_id, 'remote_array': remote_array,
'src_device': device_id, 'tgt_device': tgt_device_id,
'session_info': self.data.rdf_group_vol_details}

create_response = self.rest.srdf_create_device_pair(
array_id, rdf_group_no, mode, device_id, rep_extra_specs, True)
mck_create.assert_called_once_with(
array_id, 'replication', ref_resource, ref_payload)
mck_get.assert_called_once_with(
array_id, rdf_group_no, device_id)
self.assertEqual(ref_response, create_response)

@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_CONSISTENT_STATE])
def test_wait_for_rdf_group_sync(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 2
rep_extra_specs['sync_interval'] = 1

self.rest.wait_for_rdf_group_sync(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_get.assert_called_once_with(array_id, sg_name, rdf_group_no)

@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=[utils.RDF_SYNCINPROG_STATE])
def test_wait_for_rdf_group_sync_fail(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 1
rep_extra_specs['sync_interval'] = 1

self.assertRaises(exception.VolumeBackendAPIException,
self.rest.wait_for_rdf_group_sync,
array_id, sg_name, rdf_group_no, rep_extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
def test_wait_for_rdf_pair_sync(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 2
rep_extra_specs['sync_interval'] = 1

self.rest.wait_for_rdf_pair_sync(
array_id, sg_name, rdf_group_no, rep_extra_specs)
mck_get.assert_called_once_with(array_id, sg_name, rdf_group_no)

@mock.patch.object(
rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details_not_synced)
def test_wait_for_rdf_pair_sync_fail(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 1
rep_extra_specs['sync_interval'] = 1

self.assertRaises(exception.VolumeBackendAPIException,
self.rest.wait_for_rdf_pair_sync,
array_id, sg_name, rdf_group_no, rep_extra_specs)
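The wait_for_rdf_pair_sync tests above drive polling entirely through the sync_retries and sync_interval extra specs, and the failure case expects an exception once retries are exhausted. A minimal sketch of that loop (hypothetical helper, not the driver's implementation; the 'syncinprog' state name comes from the rdf_group_vol_details_not_synced test data):

    import time

    def wait_for_pair_sync(get_pair_state, sync_retries, sync_interval):
        # Poll the RDF pair state until it leaves 'syncinprog', sleeping
        # sync_interval seconds between attempts; fail after sync_retries.
        for _ in range(sync_retries):
            if get_pair_state() != 'syncinprog':
                return
            time.sleep(sync_interval)
        raise RuntimeError('RDF pair did not synchronize in time')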
@@ -221,7 +221,7 @@ class PowerMaxUtilsTest(test.TestCase):
extra_specs)
self.assertTrue(do_disable_compression)
# Compression disabled by no SL/WL combination
extra_specs2 = self.data.extra_specs
extra_specs2 = deepcopy(self.data.extra_specs)
extra_specs2[utils.SLO] = None
do_disable_compression2 = self.utils.is_compression_disabled(
extra_specs2)
@@ -295,14 +295,29 @@ class PowerMaxUtilsTest(test.TestCase):
rep_device_list6[0]['mode'] = 'metro'
rep_config6 = self.utils.get_replication_config(rep_device_list6)
self.assertFalse(rep_config6['metro_bias'])
self.assertFalse(rep_config6['allow_delete_metro'])
# Success, mode is metro - metro options true
rep_device_list7 = rep_device_list6
rep_device_list6[0].update(
{'allow_delete_metro': 'true', 'metro_use_bias': 'true'})
rep_device_list6[0].update({'metro_use_bias': 'true'})
rep_config7 = self.utils.get_replication_config(rep_device_list7)
self.assertTrue(rep_config7['metro_bias'])
self.assertTrue(rep_config7['allow_delete_metro'])

def test_get_replication_config_sync_retries_intervals(self):
# Default sync interval & retry values
rep_device_list1 = [{'target_device_id': self.data.remote_array,
'remote_pool': self.data.srp,
'remote_port_group': self.data.port_group_name_f,
'rdf_group_label': self.data.rdf_group_name}]

rep_config1 = self.utils.get_replication_config(rep_device_list1)
self.assertEqual(200, rep_config1['sync_retries'])
self.assertEqual(3, rep_config1['sync_interval'])

# User set interval & retry values
rep_device_list2 = deepcopy(rep_device_list1)
rep_device_list2[0].update({'sync_retries': 300, 'sync_interval': 1})
rep_config2 = self.utils.get_replication_config(rep_device_list2)
self.assertEqual(300, rep_config2['sync_retries'])
self.assertEqual(1, rep_config2['sync_interval'])

def test_is_volume_failed_over(self):
vol = deepcopy(self.data.test_volume)
@@ -405,10 +420,10 @@ class PowerMaxUtilsTest(test.TestCase):
metro_prefix = self.utils.get_replication_prefix(utils.REP_METRO)
self.assertEqual('-RM', metro_prefix)

def test_get_async_rdf_managed_grp_name(self):
def test_get_rdf_management_group_name(self):
rep_config = {'rdf_group_label': self.data.rdf_group_name,
'mode': utils.REP_ASYNC}
grp_name = self.utils.get_async_rdf_managed_grp_name(rep_config)
grp_name = self.utils.get_rdf_management_group_name(rep_config)
self.assertEqual(self.data.rdf_managed_async_grp, grp_name)

def test_is_metro_device(self):
@@ -422,9 +437,9 @@ class PowerMaxUtilsTest(test.TestCase):
self.assertFalse(is_metro2)

def test_does_vol_need_rdf_management_group(self):
self.assertFalse(self.utils.does_vol_need_rdf_management_group(
self.data.rep_extra_specs))
extra_specs = deepcopy(self.data.rep_extra_specs)
self.assertFalse(self.utils.does_vol_need_rdf_management_group(
extra_specs))
extra_specs[utils.REP_MODE] = utils.REP_ASYNC
self.assertTrue(self.utils.does_vol_need_rdf_management_group(
extra_specs))
@@ -1139,3 +1154,64 @@ class PowerMaxUtilsTest(test.TestCase):
data_dict = self.utils.update_values_in_dict(
update_dict, update_list)
self.assertEqual(ret_dict, data_dict)

def test_get_unique_device_ids_from_lists(self):
list_a = ['00001', '00002', '00003']
list_b = ['00002', '00003', '00004']
unique_ids = self.utils.get_unique_device_ids_from_lists(list_a,
list_b)
self.assertEqual(['00004'], unique_ids)

def test_update_payload_for_rdf_vol_create(self):
payload = {
'array': self.data.array,
'editStorageGroupActionParam': {
'expandStorageGroupParam': {
'addVolumeParam': {}}}}

updated_payload = self.utils.update_payload_for_rdf_vol_create(
payload, self.data.remote_array, self.data.storagegroup_name_f)
expected_payload = {
'array': self.data.array,
'editStorageGroupActionParam': {
'expandStorageGroupParam': {
'addVolumeParam': {
'remoteSymmSGInfoParam': {
'force': 'true',
'remote_symmetrix_1_id': self.data.remote_array,
'remote_symmetrix_1_sgs': [
self.data.storagegroup_name_f]}}}}}
self.assertEqual(expected_payload, updated_payload)

def test_is_retype_supported(self):
# Volume source type not replicated, target type Metro replicated,
# volume is detached, host-assisted retype supported
volume = self.data.test_volume
volume.attach_status = 'detached'

src_extra_specs = deepcopy(self.data.extra_specs)
src_extra_specs['rep_mode'] = None

tgt_extra_specs = deepcopy(self.data.rep_extra_specs)
tgt_extra_specs['rep_mode'] = utils.REP_METRO

self.assertTrue(self.utils.is_retype_supported(volume, src_extra_specs,
tgt_extra_specs))

# Volume source type not replicated, target type Metro replicated,
# volume is attached, host-assisted retype not supported
volume.attach_status = 'attached'
self.assertFalse(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs))

# Volume source type Async replicated, target type Metro replicated,
# volume is attached, host-assisted retype not supported
src_extra_specs['rep_mode'] = utils.REP_ASYNC
self.assertFalse(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs))

# Volume source type Metro replicated, target type Metro replicated,
# volume is attached, host-assisted retype supported
src_extra_specs['rep_mode'] = utils.REP_METRO
self.assertTrue(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs))
File diff suppressed because it is too large
@@ -119,6 +119,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
4.2.0 - Support of Unisphere storage group and array tags
- User defined override for short host name and port group name
(bp powermax-user-defined-hostname-portgroup)
- Switch to Unisphere REST API public replication endpoints
"""

VERSION = "4.2.0"
@@ -571,7 +572,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
:param offset: Number of volumes to skip after marker.
:param sort_keys: Results sort key. Valid keys: size, reference.
:param sort_dirs: Results sort direction. Valid dirs: asc, desc.
:return: List of dicts containing all manageable volumes.
:returns: List of dicts containing all manageable volumes.
"""
return self.common.get_manageable_volumes(marker, limit, offset,
sort_keys, sort_dirs)
@@ -588,7 +589,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
:param offset: Number of snapshots to skip after marker.
:param sort_keys: Results sort key. Valid keys: size, reference.
:param sort_dirs: Results sort direction. Valid dirs: asc, desc.
:return: List of dicts containing all manageable snapshots.
:returns: List of dicts containing all manageable snapshots.
"""
return self.common.get_manageable_snapshots(marker, limit, offset,
sort_keys, sort_dirs)

@@ -124,6 +124,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
4.2.0 - Support of Unisphere storage group and array tags
- User defined override for short host name and port group name
(bp powermax-user-defined-hostname-portgroup)
- Switch to Unisphere REST API public replication endpoints
"""

VERSION = "4.2.0"
@@ -483,7 +484,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
:param offset: Number of volumes to skip after marker.
:param sort_keys: Results sort key. Valid keys: size, reference.
:param sort_dirs: Results sort direction. Valid dirs: asc, desc.
:return: List of dicts containing all manageable volumes.
:returns: List of dicts containing all manageable volumes.
"""
return self.common.get_manageable_volumes(marker, limit, offset,
sort_keys, sort_dirs)
@@ -500,7 +501,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
:param offset: Number of snapshots to skip after marker.
:param sort_keys: Results sort key. Valid keys: size, reference.
:param sort_dirs: Results sort direction. Valid dirs: asc, desc.
:return: List of dicts containing all manageable snapshots.
:returns: List of dicts containing all manageable snapshots.
"""
return self.common.get_manageable_snapshots(marker, limit, offset,
sort_keys, sort_dirs)
@ -531,7 +531,7 @@ class PowerMaxMasking(object):
"emc-sg-{target_storagegroup_name}-{serial_number}")
def move_volume_between_storage_groups(
self, serial_number, device_id, source_storagegroup_name,
target_storagegroup_name, extra_specs):
target_storagegroup_name, extra_specs, force=False):
"""Move a volume between storage groups.

:param serial_number: the array serial number
@ -539,6 +539,7 @@ class PowerMaxMasking(object):
:param source_storagegroup_name: the source sg
:param target_storagegroup_name: the target sg
:param extra_specs: the extra specifications
:param force: optional Force flag required for replicated vols
"""
num_vol_in_sg = self.rest.get_num_vols_in_sg(
serial_number, source_storagegroup_name)
@ -548,7 +549,7 @@ class PowerMaxMasking(object):
'sg_name': source_storagegroup_name})
self.rest.move_volume_between_storage_groups(
serial_number, device_id, source_storagegroup_name,
target_storagegroup_name, extra_specs)
target_storagegroup_name, extra_specs, force)
if num_vol_in_sg == 1:
# Check if storage group is a child sg
parent_sg_name = self.get_parent_sg_from_child(
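# NOTE: illustrative sketch only, not part of this patch. It shows how
# a caller is expected to use the new force flag when the volume being
# moved is SRDF protected; the serial, device id and group names below
# are placeholder values.
masking.move_volume_between_storage_groups(
    serial_number='000197800123', device_id='00123',
    source_storagegroup_name='OS-SRP_1-Diamond-DSS-RE-SG',
    target_storagegroup_name='OS-SRP_1-Silver-OLTP-RE-SG',
    extra_specs=extra_specs, force=True)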
@ -79,15 +79,16 @@ class PowerMaxProvision(object):
return do_create_storage_group(storagegroup_name, array)

def create_volume_from_sg(self, array, volume_name, storagegroup_name,
volume_size, extra_specs):
volume_size, extra_specs, rep_info=None):
"""Create a new volume in the given storage group.

:param array: the array serial number
:param volume_name: the volume name (String)
:param volume_name: the volume name -- string
:param storagegroup_name: the storage group name
:param volume_size: volume size (String)
:param extra_specs: the extra specifications
:returns: dict -- volume_dict - the volume dict
:param volume_size: volume size -- string
:param extra_specs: extra specifications
:param rep_info: replication session info dict -- optional
:returns: volume info -- dict
"""
@coordination.synchronized("emc-sg-{storage_group}-{array}")
def do_create_volume_from_sg(storage_group, array):
@ -95,7 +96,7 @@ class PowerMaxProvision(object):

volume_dict = self.rest.create_volume_from_sg(
array, volume_name, storage_group,
volume_size, extra_specs)
volume_size, extra_specs, rep_info)

LOG.debug("Create volume from storage group "
"took: %(delta)s H:MM:SS.",
@ -154,6 +155,7 @@ class PowerMaxProvision(object):
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
:param create_snap: Flag for create snapvx
:param copy_mode: If copy mode should be used for SnapVX target links
"""
start_time = time.time()
if create_snap:
@ -166,7 +168,7 @@ class PowerMaxProvision(object):
def do_modify_volume_snap(src_device_id):
self.rest.modify_volume_snap(
array, src_device_id, target_device_id, snap_name,
extra_specs, link=True, copy_mode=copy_mode)
extra_specs, link=True, copy=copy_mode)

do_modify_volume_snap(source_device_id)

@ -174,7 +176,7 @@ class PowerMaxProvision(object):
{'delta': self.utils.get_time_delta(start_time,
time.time())})

def break_replication_relationship(
def unlink_snapvx_tgt_volume(
self, array, target_device_id, source_device_id, snap_name,
extra_specs, generation=0, loop=True):
"""Unlink a snapshot from its target volume.
@ -213,7 +215,7 @@ class PowerMaxProvision(object):
:param list_volume_pairs: list of volume pairs, optional
:param generation: the generation number of the snapshot
:param loop: if looping call is required for handling retries
:return: return code
:returns: return code
"""
def _unlink_vol():
"""Called at an interval until the synchronization is finished.
@ -548,105 +550,32 @@ class PowerMaxProvision(object):
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}

@coordination.synchronized('emc-rg-{rdf_group}')
def break_rdf_relationship(self, array, device_id, target_device,
def break_rdf_relationship(self, array, device_id, sg_name,
rdf_group, rep_extra_specs, state):
"""Break the rdf relationship between a pair of devices.

Resuming replication after suspending is necessary where this function
is called from. Doing so in here will disrupt the ability to perform
further actions on the RDFG without suspending again.

:param array: the array serial number
:param device_id: the source device id
:param target_device: target device id
:param sg_name: the storage group name
:param rdf_group: the rdf group number
:param rep_extra_specs: replication extra specs
:param state: the state of the rdf pair
"""
LOG.info("Suspending rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
LOG.info("Suspending RDF group %(rdf)s to delete source device "
"%(dev)s RDF pair.", {'rdf': rdf_group, 'dev': device_id})

if state.lower() == utils.RDF_SYNCINPROG_STATE:
self.rest.wait_for_rdf_consistent_state(
array, device_id, target_device,
rep_extra_specs, state)
if state.lower() == utils.RDF_SUSPENDED_STATE:
LOG.info("RDF pair is already suspended")
else:
self.rest.modify_rdf_device_pair(
array, device_id, rdf_group, rep_extra_specs, suspend=True)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
self.rest.wait_for_rdf_pair_sync(
array, rdf_group, device_id, rep_extra_specs)
if state.lower() != utils.RDF_SUSPENDED_STATE:
self.rest.srdf_suspend_replication(
array, sg_name, rdf_group, rep_extra_specs)

def break_metro_rdf_pair(self, array, device_id, target_device,
rdf_group, rep_extra_specs, metro_grp):
"""Delete replication for a Metro device pair.

Need to suspend the entire group before we can delete a single pair.
:param array: the array serial number
:param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: the replication extra specifications
:param metro_grp: the metro storage group name
"""
# Suspend I/O on the RDF links...
LOG.info("Suspending I/O for all volumes in the RDF group: %(rdfg)s",
{'rdfg': rdf_group})
self.disable_group_replication(
array, metro_grp, rdf_group, rep_extra_specs)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)

def delete_rdf_pair(
self, array, device_id, rdf_group, target_device, extra_specs):
"""Delete an rdf pairing.

If the replication mode is synchronous, only one attempt is required
to delete the pair. Otherwise, we need to wait until all the tracks
are cleared before the delete will be successful. As there is
currently no way to track this information, we keep attempting the
operation until it is successful.

:param array: the array serial number
:param device_id: source volume device id
:param rdf_group: the rdf group number
:param target_device: the target device
:param extra_specs: extra specifications
"""
LOG.info("Deleting rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if (extra_specs.get(utils.REP_MODE) and
extra_specs.get(utils.REP_MODE) == utils.REP_SYNC):
return self.rest.delete_rdf_pair(array, device_id, rdf_group)

def _delete_pair():
"""Delete a rdf volume pair.

Called at an interval until all the tracks are cleared
and the operation is successful.

:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['delete_pair_success']:
self.rest.delete_rdf_pair(
array, device_id, rdf_group)
kwargs['delete_pair_success'] = True
except exception.VolumeBackendAPIException:
pass

if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("Delete volume pair failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['delete_pair_success']:
raise loopingcall.LoopingCallDone()

kwargs = {'retries': 0,
'delete_pair_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_delete_pair)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
self.rest.srdf_delete_device_pair(array, rdf_group, device_id)

def get_or_create_volume_group(self, array, group, extra_specs):
"""Get or create a volume group.
@ -657,7 +586,7 @@ class PowerMaxProvision(object):
:param array: the array serial number
:param group: the group object
:param extra_specs: the extra specifications
:return: group name
:returns: group name
"""
vol_grp_name = self.utils.update_volume_group_name(group)
return self.get_or_create_group(array, vol_grp_name, extra_specs)
@ -668,7 +597,7 @@ class PowerMaxProvision(object):
:param array: the array serial number
:param group_name: the group name
:param extra_specs: the extra specifications
:return: group name
:returns: group name
"""
storage_group = self.rest.get_storage_group(array, group_name)
if not storage_group:
@ -708,8 +637,6 @@ class PowerMaxProvision(object):
:param array: the array serial number
:param snap_name: the name for the snap shot
:param source_group_name: the source group name
:param src_dev_ids: the list of source device ids
:param extra_specs: extra specifications
"""
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
@ -762,75 +689,6 @@ class PowerMaxProvision(object):
source_devices = [a for a, b in list_volume_pairs]
self.delete_volume_snap(array, snap_name, source_devices)

def enable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs, establish=False):
"""Resume rdf replication on a storage group.

Replication is enabled by default. This allows resuming
replication on a suspended group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param establish: flag to indicate 'establish' instead of 'resume'
"""
action = "Establish" if establish is True else "Resume"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)

def disable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Suspend rdf replication on a storage group.

This does not delete the rdf pairs, that can only be done
by deleting the group. This method suspends all i/o activity
on the rdf links.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Suspend"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)

def failover_group(self, array, storagegroup_name,
rdf_group_num, extra_specs, failover=True):
"""Failover or failback replication on a storage group.

:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param failover: flag to indicate failover/failback
"""
action = "Failover" if failover else "Failback"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)

def delete_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Split replication for a group and delete the pairs.

:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
group_details = self.rest.get_storage_group_rep(
array, storagegroup_name)
if (group_details and group_details.get('rdf')
and group_details['rdf'] is True):
action = "Split"
LOG.debug("Splitting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
LOG.debug("Deleting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.delete_storagegroup_rdf(
array, storagegroup_name, rdf_group_num)

def revert_volume_snapshot(self, array, source_device_id,
snap_name, extra_specs):
"""Revert a volume snapshot
@ -26,7 +26,6 @@ import requests.exceptions as r_exc
import requests.packages.urllib3.util.retry as requests_retry
import six

from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.utils import retry
@ -99,7 +98,6 @@ class PowerMaxRest(object):
"""Set the environment failover Unisphere targets and configuration.

:param failover_info: failover target record
:return:
"""
self.u4p_failover_enabled = True
self.primary_u4p = failover_info['u4p_primary']
@ -283,8 +281,8 @@ class PowerMaxRest(object):
except Exception as e:
msg = _("The %(method)s request to URL %(url)s failed with "
"exception %(e)s")
LOG.exception(msg, {'method': method, 'url': url,
'e': six.text_type(e)})
LOG.error(msg, {'method': method, 'url': url,
'e': six.text_type(e)})
raise exception.VolumeBackendAPIException(
message=(msg, {'method': method, 'url': url,
'e': six.text_type(e)}))
@ -323,7 +321,7 @@ class PowerMaxRest(object):
kwargs['result'], kwargs['task'] = result, task
except Exception:
exception_message = (_("Issue encountered waiting for job."))
LOG.exception(exception_message)
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)

@ -579,7 +577,7 @@ class PowerMaxRest(object):
def get_uni_version(self):
"""Get the unisphere version from the server.

:return: version and major_version(e.g. ("V8.4.0.16", "84"))
:returns: version and major_version(e.g. ("V8.4.0.16", "84"))
"""
version, major_version = None, None
response = self.get_unisphere_version()
@ -661,7 +659,7 @@ class PowerMaxRest(object):
"""Get the PowerMax/VMAX model.

:param array: the array serial number
:return: the PowerMax/VMAX model
:returns: the PowerMax/VMAX model
"""
vmax_version = None
system_info = self.get_array_detail(array)
@ -673,7 +671,7 @@ class PowerMaxRest(object):
"""Get the PowerMax/VMAX model.

:param array: the array serial number
:return: the PowerMax/VMAX model
:returns: the PowerMax/VMAX model
"""
array_model = None
is_next_gen = False
@ -690,7 +688,7 @@ class PowerMaxRest(object):
"""Get the PowerMax/VMAX uCode version.

:param array: the array serial number
:return: the PowerMax/VMAX uCode version
:returns: the PowerMax/VMAX uCode version
"""
ucode_version = None
system_info = self.get_array_detail(array)
@ -882,7 +880,7 @@ class PowerMaxRest(object):
return status_code, message

def create_volume_from_sg(self, array, volume_name, storagegroup_name,
volume_size, extra_specs):
volume_size, extra_specs, rep_info=None):
"""Create a new volume in the given storage group.

:param array: the array serial number
@ -890,6 +888,7 @@ class PowerMaxRest(object):
:param storagegroup_name: the storage group name
:param volume_size: volume size (String)
:param extra_specs: the extra specifications
:param rep_info: replication info dict if volume is replication enabled
:returns: dict -- volume_dict - the volume dict
:raises: VolumeBackendAPIException
"""
@ -910,6 +909,11 @@ class PowerMaxRest(object):
},
"volume_size": volume_size,
"capacityUnit": "GB"}]}}}})

if rep_info:
payload = self.utils.update_payload_for_rdf_vol_create(
payload, rep_info[utils.REMOTE_ARRAY], storagegroup_name)

status_code, job = self.modify_storage_group(
array, storagegroup_name, payload)

@ -920,9 +924,27 @@ class PowerMaxRest(object):
task = self.wait_for_job('Create volume', status_code,
job, extra_specs)

# Find the newly created volume.
device_id = None
if task:
if rep_info:
updated_device_list = self.get_volume_list(
array, {'storageGroupId': storagegroup_name,
'rdf_group_number': rep_info['rdf_group_no']})
unique_devices = self.utils.get_unique_device_ids_from_lists(
rep_info['initial_device_list'], updated_device_list)

if 0 < len(unique_devices) < 2:
device_id = unique_devices[0]
self.rename_volume(array, device_id, volume_name)
else:
raise exception.VolumeBackendAPIException(_(
"There has been more than one volume created in the "
"SRDF protected Storage Group since the current create "
"volume process began. Not possible to discern what "
"volume has been created by PowerMax Cinder driver."))

# Find the newly created volume if not located as part of replication
# OPT workaround
if not device_id and task:
for t in task:
try:
desc = t["description"]
@ -936,13 +958,12 @@ class PowerMaxRest(object):
except Exception as e:
LOG.info("Could not retrieve device id from job. "
"Exception received was %(e)s. Attempting "
"retrieval by volume_identifier.",
{'e': e})
"retrieval by volume_identifier.", {'e': e})

if not device_id:
device_id = self.find_volume_device_id(array, volume_name)

volume_dict = {'array': array, 'device_id': device_id}
volume_dict = {utils.ARRAY: array, utils.DEVICE_ID: device_id}
return volume_dict

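# NOTE: illustrative sketch only, not part of this patch. The
# replication branch above cannot rely on the job description to find
# the new device, so it snapshots the group contents before the create
# and diffs them afterwards; device ids here are placeholders.
rep_info = {'rdf_group_no': '70',
            'initial_device_list': ['00120', '00121']}
updated_device_list = ['00120', '00121', '00123']  # queried post-job
unique_devices = utils.get_unique_device_ids_from_lists(
    rep_info['initial_device_list'], updated_device_list)
if 0 < len(unique_devices) < 2:
    device_id = unique_devices[0]  # exactly one new device: ours
# any other count is ambiguous and raises VolumeBackendAPIException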
def add_storage_group_tag(self, array, storagegroup_name,
@ -1065,8 +1086,8 @@ class PowerMaxRest(object):
:param extra_specs: the extra specifications
"""

force_vol_remove = ("true" if "force_vol_remove" in extra_specs
else "false")
force_vol_remove = (
"true" if utils.FORCE_VOL_REMOVE in extra_specs else "false")
if not isinstance(device_id, list):
device_id = [device_id]
payload = ({"executionOption": "ASYNCHRONOUS",
@ -1393,7 +1414,7 @@ class PowerMaxRest(object):
array, SLOPROVISIONING, 'maskingview',
resource_name=resource_name, params=params)
if not connection_info:
LOG.error('Cannot retrive masking view connection information '
LOG.error('Cannot retrieve masking view connection information '
'for %(mv)s.', {'mv': maskingview})
else:
try:
@ -1850,8 +1871,7 @@ class PowerMaxRest(object):
def modify_volume_snap(self, array, source_id, target_id, snap_name,
extra_specs, link=False, unlink=False,
rename=False, new_snap_name=None, restore=False,
list_volume_pairs=None, generation=0,
copy_mode=False):
list_volume_pairs=None, generation=0, copy=False):
"""Modify a snapvx snapshot

:param array: the array serial number
@ -1866,8 +1886,11 @@ class PowerMaxRest(object):
:param restore: Flag to indicate action = Restore
:param list_volume_pairs: list of volume pairs to link, optional
:param generation: the generation number of the snapshot
:param copy: If copy mode should be used for SnapVX target links
"""
action, operation, payload = '', '', {}
copy = 'true' if copy else 'false'

if link:
action = "Link"
elif unlink:
@ -1877,8 +1900,6 @@ class PowerMaxRest(object):
elif restore:
action = "Restore"

copy = 'true' if copy_mode else 'false'

payload = {}
if action == "Restore":
operation = 'Restore snapVx snapshot'
@ -2001,7 +2022,6 @@ class PowerMaxRest(object):
rdf_grp = None
volume_details = self.get_volume(array, device_id)
if volume_details:
LOG.debug("Vol details: %(vol)s", {'vol': volume_details})
if volume_details.get('snapvx_target'):
snapvx_tgt = volume_details['snapvx_target']
if volume_details.get('snapvx_source'):
@ -2039,7 +2059,7 @@ class PowerMaxRest(object):
except Exception:
exception_message = (_("Issue encountered waiting for "
"synchronization."))
LOG.exception(exception_message)
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)

@ -2143,7 +2163,7 @@ class PowerMaxRest(object):
:param source_device_id: source device id
:param snap_name: the snapshot name
:param state: filter for state of the link
:return: list of dict of generations with linked devices
:returns: list of dict of generations with linked devices
"""
snap_dict_list = []
snap_list = self._find_snap_vx_source_sessions(
@ -2231,6 +2251,22 @@ class PowerMaxRest(object):
return self.get_resource(array, REPLICATION, 'rdf_group',
rdf_number)

def get_storage_group_rdf_group_state(self, array, storage_group,
rdf_group_no):
"""Get the RDF group state from a replication enabled Storage Group.

:param array: the array serial number
:param storage_group: the storage group name
:param rdf_group_no: the RDF group number
:returns: storage group RDF group state
"""
resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % {
'sg': storage_group, 'rdfg': rdf_group_no})

rdf_group = self.get_resource(array, REPLICATION, resource)

return rdf_group.get('states', list()) if rdf_group else dict()

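# NOTE: illustrative sketch only, not part of this patch. Callers
# lower-case the states returned by the public endpoint before
# comparing them with the utils.RDF_* constants; the serial, group
# name and RDF group number below are placeholders.
states = rest.get_storage_group_rdf_group_state(
    '000197800123', 'OS-SRP_1-Diamond-DSS-RE-SG', '70')
if utils.RDF_SUSPENDED_STATE in [state.lower() for state in states]:
    LOG.debug('RDF group is suspended')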
def get_rdf_group_list(self, array):
"""Get rdf group list from array.

@ -2253,6 +2289,19 @@ class PowerMaxRest(object):
LOG.warning("Cannot locate source RDF volume %s", src_device_id)
return rdf_session

def get_rdf_pair_volume(self, array, rdf_group_no, device_id):
"""Get information on an RDF pair from the source volume.

:param array: the array serial number
:param rdf_group_no: the RDF group number
:param device_id: the source device ID
:returns: RDF pair information -- dict
"""
resource = ('rdf_group/%(rdf_group)s/volume/%(device)s' % {
'rdf_group': rdf_group_no, 'device': device_id})

return self.get_resource(array, REPLICATION, resource)

def are_vols_rdf_paired(self, array, remote_array,
device_id, target_device):
"""Check if a pair of volumes are RDF paired.
@ -2277,53 +2326,140 @@ class PowerMaxRest(object):
LOG.warning("Cannot locate RDF session for volume %s", device_id)
return paired, local_vol_state, rdf_pair_state

def wait_for_rdf_consistent_state(
self, array, remote_array, device_id, target_device, extra_specs):
"""Wait for async pair to be in a consistent state before suspending.
def wait_for_rdf_group_sync(self, array, storage_group, rdf_group_no,
rep_extra_specs):
"""Wait for an RDF group to reach 'Synchronised' state.

:param array: the array serial number
:param remote_array: the remote array serial number
:param device_id: the device id
:param target_device: the target device id
:param extra_specs: the extra specifications
:param storage_group: the storage group name
:param rdf_group_no: the RDF group number
:param rep_extra_specs: replication extra specifications
:raises: exception.VolumeBackendAPIException
"""
def _wait_for_synced_state():
try:
kwargs['retries'] -= 1
if not kwargs['synced']:
rdf_group_state = self.get_storage_group_rdf_group_state(
array, storage_group, rdf_group_no)
if rdf_group_state:
kwargs['state'] = rdf_group_state[0]
if kwargs['state'].lower() in utils.RDF_SYNCED_STATES:
kwargs['synced'] = True
kwargs['rc'] = 0
except Exception as e_msg:
ex_msg = _("Issue encountered waiting for job: %(e)s" % {
'e': e_msg})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(message=ex_msg)
if kwargs['retries'] == 0:
ex_msg = _("Wait for RDF Sync State failed after %(r)d "
"tries." % {'r': rep_extra_specs['sync_retries']})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(message=ex_msg)

if kwargs['synced']:
raise loopingcall.LoopingCallDone()

kwargs = {'retries': rep_extra_specs['sync_retries'],
'synced': False, 'rc': 0, 'state': 'syncinprog'}

timer = loopingcall.FixedIntervalLoopingCall(_wait_for_synced_state)
timer.start(interval=rep_extra_specs['sync_interval']).wait()
LOG.debug("Return code is: %(rc)lu. State is %(state)s",
{'rc': kwargs['rc'], 'state': kwargs['state']})

def wait_for_rdf_pair_sync(self, array, rdf_group_no, device_id,
rep_extra_specs):
"""Wait for an RDF device pair to reach 'Synchronised' state.

:param array: the array serial number
:param rdf_group_no: the RDF group number
:param device_id: the source device ID
:param rep_extra_specs: replication extra specifications
:raises: exception.VolumeBackendAPIException
"""
def _wait_for_synced_state():
try:
kwargs['retries'] -= 1
if not kwargs['synced']:
rdf_pair = self.get_rdf_pair_volume(array, rdf_group_no,
device_id)
kwargs['state'] = rdf_pair['rdfpairState']

if kwargs['state'].lower() in utils.RDF_SYNCED_STATES:
kwargs['synced'] = True
kwargs['rc'] = 0

except Exception as e_msg:
ex_msg = _("Issue encountered waiting for job: %(e)s" % {
'e': e_msg})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(message=ex_msg)

if kwargs['retries'] == 0:
ex_msg = _("Wait for RDF Sync State failed after %(r)d "
"tries." % {'r': rep_extra_specs['sync_retries']})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(message=ex_msg)

if kwargs['synced']:
raise loopingcall.LoopingCallDone()

kwargs = {'retries': rep_extra_specs['sync_retries'],
'synced': False, 'rc': 0, 'state': 'syncinprog'}

timer = loopingcall.FixedIntervalLoopingCall(_wait_for_synced_state)
timer.start(interval=rep_extra_specs['sync_interval']).wait()
LOG.debug("Return code is: %(rc)lu. State is %(state)s",
{'rc': kwargs['rc'], 'state': kwargs['state']})

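# NOTE: illustrative sketch only, not part of this patch. Both wait
# helpers above use the same oslo.service polling idiom, reduced here
# to its skeleton; the check callable and the default numbers are
# assumptions for illustration.
from oslo_service import loopingcall

def poll_until(check, interval=3, retries=200):
    state = {'left': retries}

    def _tick():
        state['left'] -= 1
        if check():  # e.g. RDF pair reports a state in RDF_SYNCED_STATES
            raise loopingcall.LoopingCallDone()
        if state['left'] == 0:
            raise loopingcall.LoopingCallDone(retvalue=False)

    timer = loopingcall.FixedIntervalLoopingCall(_tick)
    return timer.start(interval=interval).wait()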
def rdf_resume_with_retries(self, array, rep_extra_specs):
"""Resume RDF on a RDF group with retry operator included.

The retry operator is required here because, on occasion, when we are
waiting on a snap copy session to complete, we have no way of
determining if the copy is complete; the operation is retried until
either the copy completes or the max interval/retries has been met.

:param array: the array serial number
:param rep_extra_specs: replication extra specifications
:raises: exception.VolumeBackendAPIException
"""

def _wait_for_consistent_state():
# Called at an interval until the state of the
# rdf pair is 'consistent'.
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['consistent_state']:
__, __, state = (
self.are_vols_rdf_paired(
array, remote_array, device_id, target_device))
kwargs['state'] = state
if state.lower() == utils.RDF_CONSISTENT_STATE:
kwargs['consistent_state'] = True
kwargs['rc'] = 0
except Exception:
exception_message = _("Issue encountered waiting for job.")
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
def wait_for_copy_complete():
kwargs['retries'] -= 1
if not kwargs['copied']:
try:
self.srdf_resume_replication(
array, rep_extra_specs['sg_name'],
rep_extra_specs['rdf_group_no'], rep_extra_specs,
async_call=False)
kwargs['copied'] = True
kwargs['state'] = 'copy_complete'
kwargs['rc'] = 0
raise loopingcall.LoopingCallDone()

if retries > int(extra_specs[utils.RETRIES]):
LOG.error("_wait_for_consistent_state failed after "
"%(retries)d tries.", {'retries': retries})
kwargs['rc'] = -1
except exception.VolumeBackendAPIException:
LOG.debug('Snapshot copy process still ongoing, Cinder '
'will retry again in %(interval)s seconds. '
'There are %(retries)s remaining.', {
'interval': rep_extra_specs['sync_interval'],
'retries': kwargs['retries']})

raise loopingcall.LoopingCallDone()
if kwargs['consistent_state']:
raise loopingcall.LoopingCallDone()
if kwargs['retries'] == 0:
ex_msg = _("Wait for snapshot copy complete failed after "
"%(r)d tries." % {
'r': rep_extra_specs['sync_retries']})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(message=ex_msg)

kwargs = {'retries': 0, 'consistent_state': False,
'rc': 0, 'state': 'syncinprog'}
kwargs = {'retries': rep_extra_specs['sync_retries'],
'copied': False, 'rc': 0, 'state': 'copy_in_progress'}

timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_consistent_state)
timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
LOG.debug("Return code is: %(rc)lu. State is %(state)s",
timer = loopingcall.FixedIntervalLoopingCall(wait_for_copy_complete)
timer.start(interval=rep_extra_specs['sync_interval']).wait()
LOG.debug("Return code: %(rc)lu. State: %(state)s",
{'rc': kwargs['rc'], 'state': kwargs['state']})

def get_rdf_group_number(self, array, rdf_group_label):
@ -2345,156 +2481,300 @@ class PowerMaxRest(object):
number = None
return number

@coordination.synchronized('emc-rg-{rdf_group_no}')
def create_rdf_device_pair(self, array, device_id, rdf_group_no,
target_device, remote_array, extra_specs):
"""Create an RDF pairing.

Create a remote replication relationship between source and target
devices.
:param array: the array serial number
:param device_id: the device id
:param rdf_group_no: the rdf group number
:param target_device: the target device id
:param remote_array: the remote array serial
:param extra_specs: the extra specs
:returns: rdf_dict
"""
rep_mode = extra_specs[utils.REP_MODE]
if rep_mode == utils.REP_METRO:
rep_mode = 'Active'
payload = ({"deviceNameListSource": [{"name": device_id}],
"deviceNameListTarget": [{"name": target_device}],
"replicationMode": rep_mode,
"establish": 'true',
"rdfType": 'RDF1'})
if rep_mode == utils.REP_ASYNC:
payload_update = self._get_async_payload_info(array, rdf_group_no)
payload.update(payload_update)
elif rep_mode == 'Active':
# Check if arrays are next gen to support add data vol to existing
# metro enabled rdfg, else format drive before adding
r1_nxt_gen = self.is_next_gen_array(array)
r2_nxt_gen = self.is_next_gen_array(remote_array)
if r1_nxt_gen and r2_nxt_gen:
extra_specs[utils.RDF_CONS_EXEMPT] = True
else:
extra_specs[utils.RDF_CONS_EXEMPT] = False
payload = self.get_metro_payload_info(
array, payload, rdf_group_no, extra_specs)
resource_type = ("rdf_group/%(rdf_num)s/volume"
% {'rdf_num': rdf_group_no})
status_code, job = self.create_resource(array, REPLICATION,
resource_type, payload,
private="/private")
self.wait_for_job('Create rdf pair', status_code,
job, extra_specs)
rdf_dict = {'array': remote_array, 'device_id': target_device}
return rdf_dict

def _get_async_payload_info(self, array, rdf_group_no):
"""Get the payload details for an async create pair.

:param array: the array serial number
:param rdf_group_no: the rdf group number
:return: payload_update
:returns: payload_update
"""
num_vols, payload_update = 0, {}
rdfg_details = self.get_rdf_group(array, rdf_group_no)
if rdfg_details is not None and rdfg_details.get('numDevices'):
num_vols = int(rdfg_details['numDevices'])
if num_vols > 0:
payload_update = {'consExempt': 'true'}
payload_update = {'exempt': 'true'}
return payload_update

def get_metro_payload_info(self, array, payload,
rdf_group_no, extra_specs):
rdf_group_no, extra_specs, next_gen):
"""Get the payload details for a metro active create pair.

:param array: the array serial number
:param payload: the payload
:param rdf_group_no: the rdf group number
:param extra_specs: the replication configuration
:return: updated payload
:param next_gen: if the array is next gen uCode
:returns: updated payload
"""
num_vols = 0
payload["rdfMode"] = "Active"
payload['rdfType'] = "RDF1"

rdfg_details = self.get_rdf_group(array, rdf_group_no)
if rdfg_details is not None and rdfg_details.get('numDevices'):
num_vols = int(rdfg_details['numDevices'])

if num_vols == 0:
# First volume - set bias if required
if (extra_specs.get(utils.METROBIAS)
and extra_specs[utils.METROBIAS] is True):
payload.update({'metroBias': 'true'})
if extra_specs.get(utils.METROBIAS):
payload.update({"metroBias": "true"})
else:
if (extra_specs.get(utils.RDF_CONS_EXEMPT)
and extra_specs[utils.RDF_CONS_EXEMPT] is True):
payload['consExempt'] = 'true'
payload['rdfType'] = 'RDF1'
else:
LOG.warning("Adding HyperMax OS volumes to an existing RDFG "
"requires the volumes to be formatted in advance, "
"please upgrade to PowerMax OS to bypass this "
"restriction.")
payload['format'] = 'true'
payload['rdfType'] = 'NA'

payload.pop('establish')
if next_gen:
payload["exempt"] = "true"
if payload.get('establish'):
payload.pop('establish')
return payload

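# NOTE: illustrative sketch only, not part of this patch. For the
# common first-volume case on next gen arrays with bias requested,
# the method above reshapes the create-pair payload roughly as
# follows; serials, group number and device id are placeholders.
payload = {"executionOption": "ASYNCHRONOUS", "rdfMode": "Active",
           "localDeviceListCriteriaParam": {
               "localDeviceList": ["00123"]},
           "rdfType": "RDF1", "establish": "true"}
payload = rest.get_metro_payload_info(
    '000197800123', payload, '70', {'metro_bias': True}, True)
# assuming the RDF group is empty, the result gains
# 'metroBias': 'true' and 'exempt': 'true', and 'establish' is
# removed before the request is posted.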
def modify_rdf_device_pair(
self, array, device_id, rdf_group, extra_specs, suspend=False):
"""Modify an rdf device pair.
def srdf_protect_storage_group(
self, array_id, remote_array_id, rdf_group_no, replication_mode,
sg_name, service_level, extra_specs, target_sg=None):
"""SRDF protect a storage group.

:param array: the array serial number
:param device_id: the device id
:param rdf_group: the rdf group
:param extra_specs: the extra specs
:param suspend: flag to indicate "suspend" action
:param array_id: local array serial number
:param remote_array_id: remote array serial number
:param rdf_group_no: RDF group number
:param replication_mode: replication mode
:param sg_name: storage group name
:param service_level: service level
:param extra_specs: extra specifications
:param target_sg: target storage group -- optional
"""
common_opts = {"force": 'false',
"symForce": 'false',
"star": 'false',
"hop2": 'false',
"bypass": 'false'}
if suspend:
if (extra_specs.get(utils.REP_MODE)
and extra_specs[utils.REP_MODE] == utils.REP_ASYNC):
common_opts.update({"immediate": 'false',
"consExempt": 'true'})
payload = {"action": "SUSPEND",
"executionOption": "ASYNCHRONOUS",
"suspend": common_opts}
remote_sg = target_sg if target_sg else sg_name

else:
common_opts.update({"establish": 'true',
"restore": 'false',
"remote": 'false',
"immediate": 'false'})
payload = {"action": "Failover",
"executionOption": "ASYNCHRONOUS",
"failover": common_opts}
resource_name = ("%(rdf_num)s/volume/%(device_id)s"
% {'rdf_num': rdf_group, 'device_id': device_id})
sc, job = self.modify_resource(
array, REPLICATION, 'rdf_group',
payload, resource_name=resource_name, private="/private")
self.wait_for_job('Modify device pair', sc,
payload = {
"executionOption": "ASYNCHRONOUS",
"replicationMode": replication_mode, "remoteSLO": service_level,
"remoteSymmId": remote_array_id, "rdfgNumber": rdf_group_no,
"remoteStorageGroupName": remote_sg, "establish": "true"}

# Metro specific configuration
if replication_mode == utils.REP_METRO:
bias = "true" if extra_specs[utils.METROBIAS] else "false"
payload.update({
"replicationMode": "Active", "metroBias": bias})

LOG.debug('SRDF Protect Payload: %(pay)s', {'pay': payload})
resource = 'storagegroup/%(sg_name)s/rdf_group' % {'sg_name': sg_name}
status_code, job = self.create_resource(array_id, REPLICATION,
resource, payload)
self.wait_for_job('SRDF Protect Storage Group', status_code,
job, extra_specs)

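# NOTE: illustrative sketch only, not part of this patch. Protecting
# a storage group in Synchronous mode with the method above posts a
# payload of roughly this shape to the storagegroup/{sg}/rdf_group
# replication resource; all identifiers below are placeholders.
rest.srdf_protect_storage_group(
    array_id='000197800123', remote_array_id='000197800124',
    rdf_group_no='70', replication_mode='Synchronous',
    sg_name='OS-SRP_1-Diamond-DSS-RE-SG', service_level='Diamond',
    extra_specs=rep_extra_specs)
# -> {"executionOption": "ASYNCHRONOUS",
#     "replicationMode": "Synchronous", "remoteSLO": "Diamond",
#     "remoteSymmId": "000197800124", "rdfgNumber": "70",
#     "remoteStorageGroupName": "OS-SRP_1-Diamond-DSS-RE-SG",
#     "establish": "true"}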
def delete_rdf_pair(self, array, device_id, rdf_group):
"""Delete an rdf pair.
def srdf_modify_group(self, array, rdf_group_no, storage_group, payload,
extra_specs, msg, async_call=True):
"""Modify RDF enabled storage group replication options.

:param array: the array serial number
:param device_id: the device id
:param rdf_group: the rdf group
:param array: array serial number
:param rdf_group_no: RDF group number
:param storage_group: storage group name
:param payload: REST request payload dict
:param extra_specs: extra specifications
:param msg: message to use for logs when waiting on job to complete
:param async_call: if the REST call should be run asynchronously; this
only comes into effect when trying to resume
replication and interval/retries are a factor.
"""
params = {'half': 'false', 'force': 'true', 'symforce': 'false',
'star': 'false', 'bypass': 'false'}
resource_name = ("%(rdf_num)s/volume/%(device_id)s"
% {'rdf_num': rdf_group, 'device_id': device_id})
self.delete_resource(array, REPLICATION, 'rdf_group', resource_name,
params=params)
resource = ('storagegroup/%(sg_name)s/rdf_group/%(rdf_group_no)s' % {
'sg_name': storage_group, 'rdf_group_no': rdf_group_no})

if async_call:
payload.update({"executionOption": "ASYNCHRONOUS"})
status_code, job = self.modify_resource(array, REPLICATION,
resource, payload)
self.wait_for_job(msg, status_code, job, extra_specs)
else:
self.modify_resource(array, REPLICATION, resource, payload)

def srdf_suspend_replication(self, array_id, storage_group, rdf_group_no,
rep_extra_specs):
"""Suspend replication on a RDF group.

:param array_id: array serial number
:param storage_group: storage group name
:param rdf_group_no: RDF group number
:param rep_extra_specs: replication extra specifications
"""
group_state = self.get_storage_group_rdf_group_state(
array_id, storage_group, rdf_group_no)

if group_state:
group_state = [x.lower() for x in group_state]

if utils.RDF_SUSPENDED_STATE not in group_state:
self.srdf_modify_group(
array_id, rdf_group_no, storage_group,
{"suspend": {"force": "true"}, "action": "Suspend"},
rep_extra_specs, 'Suspend SRDF Group Replication')
else:
LOG.info('SRDF Group %(grp_num)s is already in a suspended state.',
{'grp_num': rdf_group_no})

def srdf_resume_replication(self, array_id, storage_group, rdf_group_no,
rep_extra_specs, async_call=True):
"""Resume replication on a RDF group.

:param array_id: array serial number
:param storage_group: storage group name
:param rdf_group_no: RDF group number
:param rep_extra_specs: replication extra specifications
:param async_call: if the REST call should be run asynchronously; this
only comes into effect when trying to resume
replication and interval/retries are a factor.
"""
if self.get_storage_group(array_id, storage_group):
group_state = self.get_storage_group_rdf_group_state(
array_id, storage_group, rdf_group_no)
if group_state:
group_state = [x.lower() for x in group_state]
if utils.RDF_SUSPENDED_STATE in group_state:
payload = {"action": "Resume"}
if rep_extra_specs['rep_mode'] == utils.REP_METRO:
payload = {"action": "Establish"}
if rep_extra_specs.get(utils.METROBIAS):
payload.update({"establish": {"metroBias": "true"}})

self.srdf_modify_group(
array_id, rdf_group_no, storage_group, payload,
rep_extra_specs, 'Resume SRDF Group Replication',
async_call)
else:
LOG.debug('SRDF Group %(grp_num)s is already in a resumed '
'state.', {'grp_num': rdf_group_no})
else:
LOG.debug('Storage Group %(sg)s not present on array '
'%(array)s, no resume required.', {
'sg': storage_group, 'array': array_id})

def srdf_establish_replication(self, array_id, storage_group, rdf_group_no,
rep_extra_specs):
"""Establish replication on a RDF group.

:param array_id: array serial number
:param storage_group: storage group name
:param rdf_group_no: RDF group number
:param rep_extra_specs: replication extra specifications
"""
group_state = self.get_storage_group_rdf_group_state(
array_id, storage_group, rdf_group_no)

if utils.RDF_SUSPENDED_STATE not in group_state:
LOG.info('Suspending SRDF Group %(grp_num)s', {
'grp_num': rdf_group_no})
self.srdf_modify_group(
array_id, rdf_group_no, storage_group, {"action": "Suspend"},
rep_extra_specs, 'Suspend SRDF Group Replication')

wait_msg = 'Incremental Establish SRDF Group Replication'
LOG.info('Initiating incremental establish on SRDF Group %(grp_num)s',
{'grp_num': rdf_group_no})
self.srdf_modify_group(
array_id, rdf_group_no, storage_group, {"action": "Establish"},
rep_extra_specs, wait_msg)

def srdf_failover_group(self, array_id, storage_group, rdf_group_no,
rep_extra_specs):
"""Failover a RDFG/SG volume group to replication target.

:param array_id: array serial number
:param storage_group: storage group name
:param rdf_group_no: RDF group number
:param rep_extra_specs: replication extra specifications
"""
self.srdf_modify_group(
array_id, rdf_group_no, storage_group, {"action": "Failover"},
rep_extra_specs, 'Failing over SRDF group replication')

def srdf_failback_group(self, array_id, storage_group, rdf_group_no,
rep_extra_specs):
"""Failback a RDFG/SG volume group from replication target.

:param array_id: array serial number
:param storage_group: storage group name
:param rdf_group_no: RDF group number
:param rep_extra_specs: replication extra specifications
"""
self.srdf_modify_group(
array_id, rdf_group_no, storage_group, {"action": "Failback"},
rep_extra_specs, 'Failing back SRDF group replication')

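# NOTE: illustrative sketch only, not part of this patch. Failover and
# failback are symmetric wrappers around srdf_modify_group; the array
# serial, group name and RDF group number below are placeholders.
rest.srdf_failover_group(
    '000197800123', 'OS-SRP_1-Diamond-DSS-RE-SG', '70', rep_extra_specs)
# ... after the outage, return replication to the R1 side:
rest.srdf_failback_group(
    '000197800123', 'OS-SRP_1-Diamond-DSS-RE-SG', '70', rep_extra_specs)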
def srdf_remove_device_pair_from_storage_group(
self, array_id, storage_group, remote_array_id, device_id,
rep_extra_specs):
"""Remove a volume from local and remote storage groups simultaneously.

:param array_id: local array serial number
:param storage_group: storage group name
:param remote_array_id: remote array serial number
:param device_id: source device id
:param rep_extra_specs: replication extra specifications
"""
payload = {
"editStorageGroupActionParam": {
"removeVolumeParam": {
"volumeId": [device_id],
"remoteSymmSGInfoParam": {
"remote_symmetrix_1_id": remote_array_id,
"remote_symmetrix_1_sgs": [storage_group]}}}}

status_code, job = self.modify_storage_group(array_id, storage_group,
payload)
self.wait_for_job('SRDF Group remove device pair', status_code,
job, rep_extra_specs)

def srdf_delete_device_pair(self, array, rdf_group_no, local_device_id):
"""Delete a RDF device pair.

:param array: array serial number
:param rdf_group_no: RDF group number
:param local_device_id: local device id
"""
resource = ('%(rdfg)s/volume/%(dev)s' % {
'rdfg': rdf_group_no, 'dev': local_device_id})

self.delete_resource(array, REPLICATION, 'rdf_group', resource)
LOG.debug("Device Pair successfully deleted.")

def srdf_create_device_pair(self, array, rdf_group_no, mode, device_id,
rep_extra_specs, next_gen):
"""Create a RDF device pair in an existing RDF group.

:param array: array serial number
:param rdf_group_no: RDF group number
:param mode: replication mode
:param device_id: local device ID
:param rep_extra_specs: replication extra specifications
:param next_gen: if the array is next gen uCode
:returns: replication session info -- dict
"""
payload = {
"executionOption": "ASYNCHRONOUS", "rdfMode": mode,
"localDeviceListCriteriaParam": {"localDeviceList": [device_id]},
"rdfType": "RDF1"}

if mode == utils.REP_SYNC:
payload.update({"establish": "true"})
elif mode == utils.REP_ASYNC:
payload.update({"invalidateR2": "true", "exempt": "true"})
elif mode.lower() in [utils.REP_METRO.lower(),
utils.RDF_ACTIVE.lower()]:
payload = self.get_metro_payload_info(
array, payload, rdf_group_no, rep_extra_specs, next_gen)

LOG.debug('Create Pair Payload: %(pay)s', {'pay': payload})
resource = 'rdf_group/%(rdfg)s/volume' % {'rdfg': rdf_group_no}
status_code, job = self.create_resource(
array, REPLICATION, resource, payload)
self.wait_for_job('SRDF Create Device Pair', status_code,
job, rep_extra_specs)

session_info = self.get_rdf_pair_volume(array, rdf_group_no, device_id)
r2_device_id = session_info['remoteVolumeName']

return {'array': session_info['localSymmetrixId'],
'remote_array': session_info['remoteSymmetrixId'],
'src_device': device_id, 'tgt_device': r2_device_id,
'session_info': session_info}

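# NOTE: illustrative sketch only, not part of this patch. A typical
# call into the new pair-create path and the keys of the dict it
# returns; all identifiers below are placeholders.
rep_session = rest.srdf_create_device_pair(
    array='000197800123', rdf_group_no='70', mode='Synchronous',
    device_id='00123', rep_extra_specs=rep_extra_specs, next_gen=True)
remote_device = rep_session['tgt_device']
# rep_session also carries 'array', 'remote_array', 'src_device' and
# the raw 'session_info' returned by get_rdf_pair_volume().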
@ -2646,40 +2926,6 @@ class PowerMaxRest(object):
break
return mod_rqd

def modify_storagegroup_rdf(self, array, storagegroup_name,
rdf_group_num, action, extra_specs):
"""Modify the rdf state of a storage group.

:param array: the array serial number
:param storagegroup_name: the name of the storage group
:param rdf_group_num: the number of the rdf group
:param action: the required action
:param extra_specs: the extra specifications
"""
# Check if group is in valid state for desired action
mod_reqd = self._verify_rdf_state(array, storagegroup_name,
rdf_group_num, action)
if mod_reqd:
payload = {"executionOption": "ASYNCHRONOUS", "action": action}
if action.lower() == 'suspend':
payload['suspend'] = {"force": "true"}
elif action.lower() == 'establish':
metro_bias = (
True if extra_specs.get(utils.METROBIAS) and extra_specs[
utils.METROBIAS] is True else False)
payload['establish'] = {"metroBias": metro_bias,
"full": 'false'}
resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'
% {'sg_name': storagegroup_name,
'rdf_num': rdf_group_num})

status_code, job = self.modify_resource(
array, REPLICATION, 'storagegroup', payload,
resource_name=resource_name)

self.wait_for_job('Modify storagegroup rdf',
status_code, job, extra_specs)

def delete_storagegroup_rdf(self, array, storagegroup_name,
rdf_group_num):
"""Delete the rdf pairs for a storage group.
@ -2698,7 +2944,7 @@ class PowerMaxRest(object):
"""Process lists under or over the maxPageSize

:param list_info: the object list information
:return: the result list
:returns: the result list
"""
result_list = []
try:
@ -2735,7 +2981,7 @@ class PowerMaxRest(object):
:param start_position: position to begin iterator from
:param end_position: position to stop iterator
:param max_page_size: the max page size
:return: list -- merged results from multiple pages
:returns: list -- merged results from multiple pages
"""
iterator_result = []
has_more_entries = True
@ -48,6 +48,7 @@ UPPER_HOST_CHARS = 16
UPPER_PORT_GROUP_CHARS = 12

ARRAY = 'array'
REMOTE_ARRAY = 'remote_array'
SLO = 'slo'
WORKLOAD = 'workload'
SRP = 'srp'
@ -55,6 +56,7 @@ PORTGROUPNAME = 'storagetype:portgroupname'
DEVICE_ID = 'device_id'
INITIATOR_CHECK = 'initiator_check'
SG_NAME = 'storagegroup_name'
SG_ID = 'storageGroupId'
MV_NAME = 'maskingview_name'
IG_NAME = 'init_group_name'
PARENT_SG_NAME = 'parent_sg_name'
@ -76,13 +78,18 @@ RDF_FAILEDOVER_STATE = 'failed over'
RDF_ACTIVE = 'active'
RDF_ACTIVEACTIVE = 'activeactive'
RDF_ACTIVEBIAS = 'activebias'
RDF_CONS_EXEMPT = 'consExempt'
RDF_CONS_EXEMPT = 'exempt'
RDF_ALLOW_METRO_DELETE = 'allow_delete_metro'
RDF_GROUP_NO = 'rdf_group_number'
METROBIAS = 'metro_bias'
DEFAULT_PORT = 8443
CLONE_SNAPSHOT_NAME = "snapshot_for_clone"
STORAGE_GROUP_TAGS = 'storagetype:storagegrouptags'
TAG_LIST = 'tag_list'
USED_HOST_NAME = "used_host_name"
RDF_SYNCED_STATES = [RDF_SYNC_STATE, RDF_CONSISTENT_STATE,
RDF_ACTIVEACTIVE, RDF_ACTIVEBIAS]
FORCE_VOL_REMOVE = 'force_vol_remove'

# Multiattach constants
IS_MULTIATTACH = 'multiattach'
@ -285,7 +292,7 @@ class PowerMaxUtils(object):
:param snapshot_name: the old snapshot backend display name
:param manage: (bool) if the operation is managing a snapshot
:param unmanage: (bool) if the operation is unmanaging a snapshot
:return: snapshot name ready for backend PowerMax/VMAX assignment
:returns: snapshot name ready for backend PowerMax/VMAX assignment
"""
new_snap_name = None
if manage:
@ -361,7 +368,7 @@ class PowerMaxUtils(object):
"""Construct a temporary snapshot name for clone operation

:param source_device_id: the source device id
:return: snap_name
:returns: snap_name
"""
snap_name = ("temp-%(device)s-%(snap_name)s"
% {'device': source_device_id,
@ -434,7 +441,7 @@ class PowerMaxUtils(object):

:param vol_is_replicated: from source
:param new_type: from target
:return: bool
:returns: bool
"""
is_tgt_rep = self.is_replication_enabled(new_type['extra_specs'])
return vol_is_replicated != is_tgt_rep
@ -477,6 +484,17 @@ class PowerMaxUtils(object):
raise exception.VolumeBackendAPIException(
message=error_message)

try:
rep_config['sync_retries'] = int(target['sync_retries'])
rep_config['sync_interval'] = int(target['sync_interval'])
except (KeyError, ValueError) as ke:
LOG.debug("SRDF Sync wait/retries options not set or set "
"incorrectly, defaulting to 200 retries with a 3 "
"second wait. Configuration load warning: %(ke)s.",
{'ke': six.text_type(ke)})
rep_config['sync_retries'] = 200
rep_config['sync_interval'] = 3

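# NOTE: illustrative sketch only, not part of this patch. The two new
# options arrive as strings in the parsed replication_device target;
# the dict below is a hypothetical example, and anything missing or
# non-numeric falls back to 200 retries with a 3 second interval.
target = {'target_device_id': '000197800124',
          'remote_port_group': 'OS-fibre-PG',
          'remote_pool': 'SRP_1',
          'rdf_group_label': '23_24_007',
          'sync_interval': '3', 'sync_retries': '200'}
# -> rep_config['sync_interval'] == 3, rep_config['sync_retries'] == 200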
allow_extend = target.get('allow_extend', 'false')
|
||||
if strutils.bool_from_string(allow_extend):
|
||||
rep_config['allow_extend'] = True
|
||||
@ -493,11 +511,6 @@ class PowerMaxUtils(object):
|
||||
rep_config[METROBIAS] = True
|
||||
else:
|
||||
rep_config[METROBIAS] = False
|
||||
allow_delete_metro = target.get('allow_delete_metro', 'false')
|
||||
if strutils.bool_from_string(allow_delete_metro):
|
||||
rep_config['allow_delete_metro'] = True
|
||||
else:
|
||||
rep_config['allow_delete_metro'] = False
|
||||
else:
|
||||
rep_config['mode'] = REP_SYNC
|
||||
|
||||
@ -633,7 +646,7 @@ class PowerMaxUtils(object):
|
||||
"""Add legacy pools to allow extending a volume after upgrade.
|
||||
|
||||
:param pools: the pool list
|
||||
:return: pools - the updated pool list
|
||||
:returns: pools - the updated pool list
|
||||
"""
|
||||
extra_pools = []
|
||||
for pool in pools:
|
||||
@ -707,7 +720,7 @@ class PowerMaxUtils(object):
|
||||
synchronous, asynchronous, or metro replication mode.
|
||||
|
||||
:param rep_mode: flag to indicate if replication is async
|
||||
:return: prefix
|
||||
:returns: prefix
|
||||
"""
|
||||
if rep_mode == REP_ASYNC:
|
||||
prefix = "-RA"
|
||||
@ -718,11 +731,11 @@ class PowerMaxUtils(object):
|
||||
return prefix
|
||||
|
||||
@staticmethod
|
||||
def get_async_rdf_managed_grp_name(rep_config):
|
||||
def get_rdf_management_group_name(rep_config):
|
||||
"""Get the name of the group used for async replication management.
|
||||
|
||||
:param rep_config: the replication configuration
|
||||
:return: group name
|
||||
:returns: group name
|
||||
"""
|
||||
async_grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg"
|
||||
% {'rdf': rep_config['rdf_group_label'],
|
||||
@ -736,7 +749,7 @@ class PowerMaxUtils(object):

        :param rep_config: the replication configuration
        :param extra_specs: the extra specifications
        :return: bool
        :returns: bool
        """
        is_metro = (True if self.is_replication_enabled(extra_specs)
                    and rep_config is not None
@ -747,7 +760,7 @@ class PowerMaxUtils(object):
        """Determine if a volume is a Metro or Async.

        :param extra_specs: the extra specifications
        :return: bool
        :returns: bool
        """
        if (self.is_replication_enabled(extra_specs) and
                extra_specs.get(REP_MODE, None) in
@ -973,7 +986,7 @@ class PowerMaxUtils(object):
        """Get the production storage group

        :param device_info: the device info dict
        :return: str -- the storage group id
        :returns: str -- the storage group id
                  dict -- storage group details
        """
        try:
@ -1082,21 +1095,6 @@ class PowerMaxUtils(object):
        my_list1 = sorted(list_str1.split(","))
        return [x for x in my_list1 if x.lower() not in common_list]

    def _get_intersection(self, list_str1, list_str2):
        """Get the common values between 2 comma separated list

        :param list_str1: list one
        :param list_str2: list two
        :returns: sorted list
        """
        list_str1 = re.sub(r"\s+", "", list_str1).lower()
        list_str2 = re.sub(r"\s+", "", list_str2).lower()
        my_list1 = sorted(list_str1.split(","))
        my_list2 = sorted(list_str2.split(","))
        sorted_common_list = (
            sorted(list(set(my_list1).intersection(set(my_list2)))))
        return sorted_common_list

    def verify_tag_list(self, tag_list):
        """Verify that the tag list has allowable character

@ -1509,3 +1507,74 @@ class PowerMaxUtils(object):
            datadict.update({tuple[1]: datadict.get(tuple[0])})
            del datadict[tuple[0]]
        return datadict

    @staticmethod
    def _get_intersection(list_str1, list_str2):
        """Get the common values between 2 comma separated list

        :param list_str1: list one
        :param list_str2: list two
        :returns: sorted list
        """
        list_str1 = re.sub(r"\s+", "", list_str1).lower()
        list_str2 = re.sub(r"\s+", "", list_str2).lower()
        my_list1 = sorted(list_str1.split(","))
        my_list2 = sorted(list_str2.split(","))
        sorted_common_list = (
            sorted(list(set(my_list1).intersection(set(my_list2)))))
        return sorted_common_list

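Because both inputs are stripped of whitespace and lower-cased before the set intersection, the comparison is case- and spacing-insensitive. For example:

print(PowerMaxUtils._get_intersection(
    'Diamond, Silver ,Bronze', 'SILVER,diamond'))
# ['diamond', 'silver']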
    @staticmethod
    def get_unique_device_ids_from_lists(list_a, list_b):
        """Get the unique values from list B that don't appear in list A.

        :param list_a: list A
        :param list_b: list B
        :returns: values unique between two lists -- list
        """
        set_a = set(list_a)
        return [dev_id for dev_id in list_b if dev_id not in set_a]

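This preserves list_b's ordering while using a set for O(1) membership tests against list_a. For example:

print(PowerMaxUtils.get_unique_device_ids_from_lists(
    ['00001', '00002'], ['00002', '00003', '00004']))
# ['00003', '00004']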
    @staticmethod
    def update_payload_for_rdf_vol_create(payload, remote_array_id,
                                          storage_group_name):
        """Construct the REST payload for creating RDF enabled volumes.

        :param payload: the existing payload -- dict
        :param remote_array_id: the remote array serial number -- str
        :param storage_group_name: the storage group name -- str
        :returns: updated payload -- dict
        """
        remote_dict = {"remoteSymmSGInfoParam": {
            "remote_symmetrix_1_id": remote_array_id,
            "remote_symmetrix_1_sgs": [storage_group_name],
            "force": "true"}}

        payload["editStorageGroupActionParam"]["expandStorageGroupParam"][
            "addVolumeParam"].update(remote_dict)

        return payload

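Given a standard expandStorageGroupParam volume-create payload, the helper grafts the remote array details into addVolumeParam so a single Unisphere REST call creates the device on both arrays. A before/after sketch (the num_of_vols/volumeAttribute keys, serial number, and group name are illustrative):

payload = {
    "editStorageGroupActionParam": {
        "expandStorageGroupParam": {
            "addVolumeParam": {
                "num_of_vols": 1,
                "volumeAttribute": {
                    "volume_size": "1", "capacityUnit": "GB"}}}}}
payload = PowerMaxUtils.update_payload_for_rdf_vol_create(
    payload, '000197800124', 'OS-SRP_1-Diamond-DSS-RE-SG')
# addVolumeParam now additionally carries:
# {"remoteSymmSGInfoParam": {
#      "remote_symmetrix_1_id": "000197800124",
#      "remote_symmetrix_1_sgs": ["OS-SRP_1-Diamond-DSS-RE-SG"],
#      "force": "true"}}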
    @staticmethod
    def is_retype_supported(volume, src_extra_specs, tgt_extra_specs):
        """Determine if a retype operation involving Metro is supported.

        :param volume: the volume object -- obj
        :param src_extra_specs: the source extra specs -- dict
        :param tgt_extra_specs: the target extra specs -- dict
        :returns: is supported -- bool
        """
        if volume.attach_status == 'detached':
            return True

        src_rep_mode = src_extra_specs.get('rep_mode', None)
        tgt_rep_mode = tgt_extra_specs.get('rep_mode', None)

        if tgt_rep_mode != REP_METRO:
            return True
        else:
            if src_rep_mode == REP_METRO:
                return True
            else:
                if not src_rep_mode or src_rep_mode in [REP_SYNC, REP_ASYNC]:
                    return False

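In short: detached volumes can always retype, and attached volumes can retype unless the target is Metro while the source is not. A usage sketch (the volume stub and the 'Metro'/'Synchronous' rep_mode strings are assumptions matching the constants used above):

class FakeVolume(object):
    # Hypothetical stand-in for a Cinder volume object.
    attach_status = 'attached'

# Attached volume moving from Synchronous to Metro: not supported.
print(PowerMaxUtils.is_retype_supported(
    FakeVolume(), {'rep_mode': 'Synchronous'}, {'rep_mode': 'Metro'}))
# False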
@ -0,0 +1,8 @@
---
other:
  - |
    PowerMax Driver - Two new replication-specific configuration options,
    sync_interval and sync_retries, have been added to the PowerMax cinder
    configuration. These options determine how many times to retry checks
    that a SnapVX copy mode session has completed on a replication-enabled
    volume, and how long to wait between retries.
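
For example, the new keys would be appended to the backend's existing replication_device entry in cinder.conf (all values here are illustrative):

[powermax_backend]
replication_device = target_device_id:000197800124, remote_port_group:OS-iscsi-PG, remote_pool:SRP_1, rdf_group_label:23_24_007, sync_interval:3, sync_retries:200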