From fe55090f9f9740ac732bbff3eaf7d0770fcff402 Mon Sep 17 00:00:00 2001
From: Helen Walsh
Date: Fri, 5 Mar 2021 14:10:21 +0000
Subject: [PATCH] PowerMax Driver - Fix for deleting replication group

Fix to suspend the storage group you are about to delete
and then add a force flag to delete the volume pairs within
the storage group.

Change-Id: I66387dbf516109a31821b368f67491dc4b854c8a
---
 .../dell_emc/powermax/test_powermax_common.py  |  4 +++-
 .../powermax/test_powermax_replication.py      |  8 +++++++-
 .../volume/drivers/dell_emc/powermax/common.py | 18 +++++++++---------
 .../volume/drivers/dell_emc/powermax/rest.py   |  4 +++-
 ...ete-replication-group-76656e96262201d5.yaml |  7 +++++++
 5 files changed, 29 insertions(+), 12 deletions(-)
 create mode 100644 releasenotes/notes/powermax-delete-replication-group-76656e96262201d5.yaml

diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py
index c480048976c..3ca7f0aa742 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py
@@ -2350,6 +2350,8 @@ class PowerMaxCommonTest(test.TestCase):
             array_id, volume, device_id, srp, target_slo, target_workload,
             volume_name, new_type, extra_specs)
 
+    @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
+                       return_value=['activebias'])
     @mock.patch.object(common.PowerMaxCommon,
                        '_post_retype_srdf_protect_storage_group',
                        return_value=(True, True, True))
@@ -2369,7 +2371,7 @@ class PowerMaxCommonTest(test.TestCase):
                        return_value=True)
     def test_cleanup_on_migrate_failure(
             self, mck_rep_enabled, mck_retype_remote, mck_break, mck_resume,
-            mck_retype, mck_configure, mck_get_vname, mck_protect):
+            mck_retype, mck_configure, mck_get_vname, mck_protect, mck_states):
         rdf_pair_broken = True
         rdf_pair_created = True
         vol_retyped = True
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py
index aa999e62cc4..db1b95642dc 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py
@@ -1449,6 +1449,11 @@ class PowerMaxReplicationTest(test.TestCase):
             self.common.cleanup_rdf_device_pair, array, rdf_group_no,
             device_id, extra_specs)
 
+    @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
+                       return_value=['activebias'])
+    @mock.patch.object(
+        rest.PowerMaxRest, 'is_volume_in_storagegroup',
+        return_value=True)
     @mock.patch.object(
         rest.PowerMaxRest, 'srdf_resume_replication')
     @mock.patch.object(
@@ -1484,7 +1489,8 @@ class PowerMaxReplicationTest(test.TestCase):
     def test_cleanup_rdf_device_pair(
             self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list,
             mck_wait, mck_get_mgmt_grp, mck_get_num_vols, mck_suspend,
-            mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume):
+            mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume,
+            mock_is_vol, mock_states):
         array = self.data.array
         rdf_group_no = self.data.rdf_group_no_1
         device_id = self.data.device_id
diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py
index 07ab99deebe..adfa998a0a6 100644
--- a/cinder/volume/drivers/dell_emc/powermax/common.py
+++ b/cinder/volume/drivers/dell_emc/powermax/common.py
@@ -5819,14 +5819,13 @@ class PowerMaxCommon(object):
         group_details = self.rest.get_storage_group_rep(
             array, vol_grp_name)
         if group_details and group_details.get('rdf', False):
-            if extra_specs['rep_mode'] in [utils.REP_ASYNC, utils.REP_METRO]:
-                self.rest.srdf_suspend_replication(
-                    array, vol_grp_name, rdf_group_no, extra_specs)
-            if volume_device_ids:
-                LOG.debug("Deleting remote replication for group %(sg)s", {
-                    'sg': vol_grp_name})
-                self.rest.delete_storagegroup_rdf(array, vol_grp_name,
-                                                  rdf_group_no)
+            self.rest.srdf_suspend_replication(
+                array, vol_grp_name, rdf_group_no, extra_specs)
+            if volume_device_ids:
+                LOG.debug("Deleting remote replication for group %(sg)s", {
+                    'sg': vol_grp_name})
+                self.rest.delete_storagegroup_rdf(array, vol_grp_name,
+                                                  rdf_group_no)
         remote_device_ids = self._get_members_of_volume_group(
             remote_array, vol_grp_name)
         # Remove volumes from remote replication group
@@ -5841,7 +5840,8 @@ class PowerMaxCommon(object):
             self._delete_from_srp(
                 remote_array, device_id, "group vol", extra_specs)
         # Once all volumes are deleted then delete the SG
-        self.rest.delete_storage_group(remote_array, vol_grp_name)
+        if self.rest.get_storage_group(remote_array, vol_grp_name):
+            self.rest.delete_storage_group(remote_array, vol_grp_name)
 
     def create_group_snapshot(self, context, group_snapshot, snapshots):
         """Creates a generic volume group snapshot.
diff --git a/cinder/volume/drivers/dell_emc/powermax/rest.py b/cinder/volume/drivers/dell_emc/powermax/rest.py
index 0585939d68f..26692b59e2a 100644
--- a/cinder/volume/drivers/dell_emc/powermax/rest.py
+++ b/cinder/volume/drivers/dell_emc/powermax/rest.py
@@ -3231,8 +3231,10 @@ class PowerMaxRest(object):
         resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'
                          % {'sg_name': storagegroup_name,
                             'rdf_num': rdf_group_num})
+        query_params = {'force': 'true'}
         self.delete_resource(
-            array, REPLICATION, 'storagegroup', resource_name=resource_name)
+            array, REPLICATION, 'storagegroup', resource_name=resource_name,
+            params=query_params)
 
     def list_pagination(self, list_info):
         """Process lists under or over the maxPageSize
diff --git a/releasenotes/notes/powermax-delete-replication-group-76656e96262201d5.yaml b/releasenotes/notes/powermax-delete-replication-group-76656e96262201d5.yaml
new file mode 100644
index 00000000000..304fb132b22
--- /dev/null
+++ b/releasenotes/notes/powermax-delete-replication-group-76656e96262201d5.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    PowerMax driver:
+    Fix to suspend the storage group you are about to delete
+    and then add a force flag to delete the volume pairs within
+    the storage group.
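
For reference, below is a minimal standalone sketch (not part of the patch) of the Unisphere REST call that delete_storagegroup_rdf issues after this change, showing where the new 'force' query parameter ends up. The base URL, API version, credentials and array/group names are hypothetical placeholders; the endpoint path is assumed from the resource_name built in rest.py.

# Illustrative sketch only -- not part of the patch above. Hypothetical
# base URL, API version and credentials; the endpoint path mirrors the
# resource_name built in PowerMaxRest.delete_storagegroup_rdf().
import requests

UNISPHERE_BASE = 'https://unisphere.example.com:8443/univmax/restapi/91'
AUTH = ('smc', 'smc')  # placeholder credentials


def delete_storage_group_rdf(array_id, sg_name, rdf_group_num):
    """Delete the SRDF pairings of a storage group, forcing pair removal."""
    url = ('%(base)s/replication/symmetrix/%(array)s/storagegroup/'
           '%(sg)s/rdf_group/%(rdf)s'
           % {'base': UNISPHERE_BASE, 'array': array_id,
              'sg': sg_name, 'rdf': rdf_group_num})
    # 'force': 'true' matches the query_params added in rest.py so the
    # volume pairs are removed even when their pair state would otherwise
    # block deletion of the replicated storage group.
    response = requests.delete(url, params={'force': 'true'}, auth=AUTH,
                               verify=False)
    response.raise_for_status()


if __name__ == '__main__':
    # Hypothetical array ID, storage group name and RDF group number.
    delete_storage_group_rdf('000123456789', 'my-replicated-sg', '10')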