PowerMax Driver - Fix for deleting replication group

Fix to suspend the storage group before it is deleted, and to
add a force flag when deleting the volume pairs within the
storage group.

Change-Id: I66387dbf516109a31821b368f67491dc4b854c8a
Author: Helen Walsh 2021-03-05 14:10:21 +00:00
parent 1a267bf6ff
commit fe55090f9f
5 changed files with 29 additions and 12 deletions
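
In outline, the fix reorders the group-delete path: replication on the
storage group is suspended first, for every replication mode, and the RDF
volume pairs are then removed with a force flag. A minimal sketch of that
ordering, assuming a hypothetical rest_client whose method names mirror the
driver helpers changed below:

    # Sketch only: rest_client is a hypothetical stand-in for the
    # PowerMax REST helper layer touched in this commit.
    def cleanup_group_replication_sketch(rest_client, array, sg_name,
                                         rdf_group_no, extra_specs,
                                         volume_device_ids):
        # Suspend first, regardless of replication mode (previously only
        # async and metro groups were suspended).
        rest_client.srdf_suspend_replication(
            array, sg_name, rdf_group_no, extra_specs)
        if volume_device_ids:
            # With the group suspended, force-delete the RDF pairs.
            rest_client.delete_storagegroup_rdf(array, sg_name, rdf_group_no)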


@@ -2350,6 +2350,8 @@ class PowerMaxCommonTest(test.TestCase):
             array_id, volume, device_id, srp, target_slo, target_workload,
             volume_name, new_type, extra_specs)
 
+    @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
+                       return_value=['activebias'])
     @mock.patch.object(common.PowerMaxCommon,
                        '_post_retype_srdf_protect_storage_group',
                        return_value=(True, True, True))
@@ -2369,7 +2371,7 @@ class PowerMaxCommonTest(test.TestCase):
                        return_value=True)
     def test_cleanup_on_migrate_failure(
             self, mck_rep_enabled, mck_retype_remote, mck_break, mck_resume,
-            mck_retype, mck_configure, mck_get_vname, mck_protect):
+            mck_retype, mck_configure, mck_get_vname, mck_protect, mck_states):
         rdf_pair_broken = True
         rdf_pair_created = True
         vol_retyped = True
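
The new mck_states argument lands at the end of the test signature because
stacked mock.patch.object decorators are applied bottom-up: the decorator
nearest the function supplies the first mock parameter, so a decorator added
at the top of the stack supplies the last one. A small self-contained
illustration (all names here are illustrative, not from the driver):

    from unittest import mock

    class Client:
        def first(self):
            pass

        def second(self):
            pass

    @mock.patch.object(Client, 'second')  # added on top: its mock is last
    @mock.patch.object(Client, 'first')
    def demo(mck_first, mck_second):
        # The bottom decorator's mock arrives first, the top one's last.
        print(mck_first, mck_second)

    demo()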


@@ -1449,6 +1449,11 @@ class PowerMaxReplicationTest(test.TestCase):
             self.common.cleanup_rdf_device_pair, array, rdf_group_no,
             device_id, extra_specs)
 
+    @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
+                       return_value=['activebias'])
+    @mock.patch.object(
+        rest.PowerMaxRest, 'is_volume_in_storagegroup',
+        return_value=True)
     @mock.patch.object(
         rest.PowerMaxRest, 'srdf_resume_replication')
     @mock.patch.object(
@@ -1484,7 +1489,8 @@ class PowerMaxReplicationTest(test.TestCase):
     def test_cleanup_rdf_device_pair(
             self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list,
             mck_wait, mck_get_mgmt_grp, mck_get_num_vols, mck_suspend,
-            mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume):
+            mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume,
+            mock_is_vol, mock_states):
         array = self.data.array
         rdf_group_no = self.data.rdf_group_no_1
         device_id = self.data.device_id


@@ -5819,14 +5819,13 @@ class PowerMaxCommon(object):
         group_details = self.rest.get_storage_group_rep(
             array, vol_grp_name)
         if group_details and group_details.get('rdf', False):
-            if extra_specs['rep_mode'] in [utils.REP_ASYNC, utils.REP_METRO]:
-                self.rest.srdf_suspend_replication(
-                    array, vol_grp_name, rdf_group_no, extra_specs)
-            if volume_device_ids:
-                LOG.debug("Deleting remote replication for group %(sg)s", {
-                    'sg': vol_grp_name})
-                self.rest.delete_storagegroup_rdf(array, vol_grp_name,
-                                                  rdf_group_no)
+            self.rest.srdf_suspend_replication(
+                array, vol_grp_name, rdf_group_no, extra_specs)
+            if volume_device_ids:
+                LOG.debug("Deleting remote replication for group %(sg)s", {
+                    'sg': vol_grp_name})
+                self.rest.delete_storagegroup_rdf(array, vol_grp_name,
+                                                  rdf_group_no)
         remote_device_ids = self._get_members_of_volume_group(
             remote_array, vol_grp_name)
         # Remove volumes from remote replication group
@@ -5841,7 +5840,8 @@ class PowerMaxCommon(object):
                 self._delete_from_srp(
                     remote_array, device_id, "group vol", extra_specs)
         # Once all volumes are deleted then delete the SG
-        self.rest.delete_storage_group(remote_array, vol_grp_name)
+        if self.rest.get_storage_group(remote_array, vol_grp_name):
+            self.rest.delete_storage_group(remote_array, vol_grp_name)
 
     def create_group_snapshot(self, context, group_snapshot, snapshots):
         """Creates a generic volume group snapshot.


@@ -3231,8 +3231,10 @@ class PowerMaxRest(object):
         resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'
                         % {'sg_name': storagegroup_name,
                            'rdf_num': rdf_group_num})
+        query_params = {'force': 'true'}
         self.delete_resource(
-            array, REPLICATION, 'storagegroup', resource_name=resource_name)
+            array, REPLICATION, 'storagegroup', resource_name=resource_name,
+            params=query_params)
 
     def list_pagination(self, list_info):
         """Process lists under or over the maxPageSize


@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    PowerMax driver:
+    Fix to suspend the storage group before it is deleted, and
+    to add a force flag when deleting the volume pairs within
+    the storage group.