diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py
index 3f50238ff62..ed21f1960fa 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py
@@ -8434,16 +8434,29 @@ class PowerMaxCommonReplicationTest(test.TestCase):
         rep_extra_specs2 = self.common._get_replication_extra_specs(
             extra_specs1, rep_config)
         self.assertEqual(ref_specs2, rep_extra_specs2)
-        # Path three - slo not valid
-        extra_specs3 = deepcopy(self.extra_specs)
-        ref_specs3 = deepcopy(ref_specs1)
-        ref_specs3['slo'] = None
-        ref_specs3['workload'] = None
+
+    def test_get_replication_extra_specs_powermax(self):
+        rep_config = self.utils.get_replication_config(
+            [self.replication_device])
+        rep_specs = deepcopy(self.data.rep_extra_specs2)
+        extra_specs = deepcopy(self.extra_specs)
+
+        # SLO not valid, both SLO and Workload set to NONE
+        rep_specs['slo'] = None
+        rep_specs['workload'] = None
         with mock.patch.object(self.provision, 'verify_slo_workload',
                                return_value=(False, False)):
-            rep_extra_specs3 = self.common._get_replication_extra_specs(
-                extra_specs3, rep_config)
-        self.assertEqual(ref_specs3, rep_extra_specs3)
+            rep_extra_specs = self.common._get_replication_extra_specs(
+                extra_specs, rep_config)
+        self.assertEqual(rep_specs, rep_extra_specs)
+        # SLO valid, workload invalid, only workload set to NONE
+        rep_specs['slo'] = 'Diamond'
+        rep_specs['workload'] = None
+        with mock.patch.object(self.provision, 'verify_slo_workload',
+                               return_value=(True, False)):
+            rep_extra_specs = self.common._get_replication_extra_specs(
+                extra_specs, rep_config)
+        self.assertEqual(rep_specs, rep_extra_specs)

     def test_get_secondary_stats(self):
         rep_config = self.utils.get_replication_config(
diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py
index 95762cfd1a8..ad320bc074d 100644
--- a/cinder/volume/drivers/dell_emc/powermax/common.py
+++ b/cinder/volume/drivers/dell_emc/powermax/common.py
@@ -184,9 +184,9 @@ class PowerMaxCommon(object):
         self.failover = False
         self._get_replication_info()
         self._get_u4p_failover_info()
+        self.nextGen = False
         self._gather_info()
         self.version_dict = {}
-        self.nextGen = False

     def _gather_info(self):
         """Gather the relevant information for update_volume_stats."""
@@ -1651,8 +1651,8 @@ class PowerMaxCommon(object):
         :raises: VolumeBackendAPIException:
         """
         array = extra_specs[utils.ARRAY]
-        self.nextGen = self.rest.is_next_gen_array(array)
-        if self.nextGen:
+        nextGen = self.rest.is_next_gen_array(array)
+        if nextGen:
             extra_specs[utils.WORKLOAD] = 'NONE'
         is_valid_slo, is_valid_workload = self.provision.verify_slo_workload(
             array, extra_specs[utils.SLO],
@@ -3852,16 +3852,18 @@ class PowerMaxCommon(object):
                     extra_specs[utils.SLO],
                     rep_extra_specs[utils.WORKLOAD],
                     rep_extra_specs[utils.SRP]))
-            if not is_valid_slo or not is_valid_workload:
-                LOG.warning("The target array does not support the storage "
-                            "pool setting for SLO %(slo)s or workload "
-                            "%(workload)s. Not assigning any SLO or "
-                            "workload.",
-                            {'slo': extra_specs[utils.SLO],
-                             'workload': extra_specs[utils.WORKLOAD]})
+            if not is_valid_slo:
+                LOG.warning("The target array does not support the "
+                            "storage pool setting for SLO %(slo)s, "
+                            "setting to NONE.",
+                            {'slo': extra_specs[utils.SLO]})
                 rep_extra_specs[utils.SLO] = None
-            if extra_specs[utils.WORKLOAD]:
-                rep_extra_specs[utils.WORKLOAD] = None
+            if not is_valid_workload:
+                LOG.warning("The target array does not support the "
+                            "storage pool setting for workload "
+                            "%(workload)s, setting to NONE.",
+                            {'workload': extra_specs[utils.WORKLOAD]})
+                rep_extra_specs[utils.WORKLOAD] = None

         return rep_extra_specs
diff --git a/cinder/volume/drivers/dell_emc/powermax/fc.py b/cinder/volume/drivers/dell_emc/powermax/fc.py
index d997863f018..80a94a3d7d3 100644
--- a/cinder/volume/drivers/dell_emc/powermax/fc.py
+++ b/cinder/volume/drivers/dell_emc/powermax/fc.py
@@ -105,6 +105,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
               (bp/vmax-unisphere-failover)
             - Rebrand from VMAX to PowerMax(bp/vmax-powermax-rebrand)
             - Change from 84 to 90 REST endpoints (bug #1808539)
+            - Fix for PowerMax OS replication settings (bug #1812685)
     """

     VERSION = "4.0.0"
diff --git a/cinder/volume/drivers/dell_emc/powermax/iscsi.py b/cinder/volume/drivers/dell_emc/powermax/iscsi.py
index bcb9b9e8f05..89c242bb8c3 100644
--- a/cinder/volume/drivers/dell_emc/powermax/iscsi.py
+++ b/cinder/volume/drivers/dell_emc/powermax/iscsi.py
@@ -110,6 +110,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
              (bp/vmax-unisphere-failover)
            - Rebrand from VMAX to PowerMax(bp/vmax-powermax-rebrand)
            - Change from 84 to 90 REST endpoints (bug #1808539)
+           - Fix for PowerMax OS replication settings (bug #1812685)
     """

     VERSION = "4.0.0"
diff --git a/releasenotes/notes/bug-1812685-powermax-replication-specs-fix-aa6b13b93b4059d6.yaml b/releasenotes/notes/bug-1812685-powermax-replication-specs-fix-aa6b13b93b4059d6.yaml
new file mode 100644
index 00000000000..6fdc7cae424
--- /dev/null
+++ b/releasenotes/notes/bug-1812685-powermax-replication-specs-fix-aa6b13b93b4059d6.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+  - When using a PowerMax OS array as a replication target, where the source is
+    an All-Flash/Hybrid array running HyperMax OS, service level and workload
+    settings are not correctly applied for devices on the replication target if
+    a workload is specified. Instead of setting only the workload to None, both
+    service level and workload are set to None. This fix corrects the
+    application of service level and workload settings for replication sessions
+    where the source is HyperMax OS and the target is PowerMax OS.
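
For reviewers who want the corrected behaviour in isolation, below is a minimal
standalone sketch of the target-array SLO/workload handling that the common.py
hunk above introduces. The helper name and the bare 'slo'/'workload' keys are
illustrative stand-ins rather than the driver's real plumbing; only the two
independent validity checks mirror the patch.

    # Sketch only: clear just the setting the replication target cannot honour,
    # instead of clearing both when either one is invalid (the pre-fix behaviour).
    def _apply_target_slo_workload(rep_extra_specs, is_valid_slo,
                                   is_valid_workload):
        if not is_valid_slo:
            # Target cannot honour the service level: drop it.
            rep_extra_specs['slo'] = None
        if not is_valid_workload:
            # Target cannot honour the workload: drop only the workload,
            # leaving a valid service level (e.g. 'Diamond') in place.
            rep_extra_specs['workload'] = None
        return rep_extra_specs

    if __name__ == '__main__':
        # HyperMax OS source -> PowerMax OS target: the workload is invalid on
        # the target but the service level is fine, so only workload becomes None.
        specs = {'slo': 'Diamond', 'workload': 'OLTP'}
        print(_apply_target_slo_workload(specs, True, False))
        # {'slo': 'Diamond', 'workload': None}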