diff --git a/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py b/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py index 2544b498a7d..6e5fa140b1e 100644 --- a/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py +++ b/cinder/tests/unit/volume/drivers/emc/test_emc_vmax.py @@ -102,6 +102,10 @@ class CIM_IPProtocolEndpoint(dict): pass +class Symm_ArrayChassis(dict): + pass + + class SE_ReplicationSettingData(dict): def __init__(self, *args, **kwargs): self['DefaultInstance'] = self.createInstance() @@ -598,7 +602,7 @@ class EMCVMAXCommonData(object): 'controller': {'host': '10.00.00.00'}, 'hostlunid': 3} test_ctxt = {} - new_type = {} + new_type = {'extra_specs': {}} diff = {} extra_specs = {'storagetype:pool': u'SRP_1', 'volume_backend_name': 'V3_BE', @@ -781,6 +785,8 @@ class FakeEcomConnection(object): result = self._enum_storageSyncSvSv() elif name == 'Symm_SRPStoragePool': result = self._enum_srpstoragepool() + elif name == 'Symm_ArrayChassis': + result = self._enum_arraychassis() else: result = self._default_enum() return result @@ -1858,6 +1864,20 @@ class FakeEcomConnection(object): ipprotocolendpoints.append(iqnprotocolendpoint) return ipprotocolendpoints + def _enum_arraychassis(self): + arraychassiss = [] + arraychassis = Symm_ArrayChassis() + arraychassis['CreationClassName'] = ( + 'Symm_ArrayChassis') + arraychassis['SystemName'] = self.data.storage_system_v3 + arraychassis['Tag'] = self.data.storage_system_v3 + cimproperty = Fake_CIMProperty() + cimproperty.value = 'VMAX250F' + properties = {u'Model': cimproperty} + arraychassis.properties = properties + arraychassiss.append(arraychassis) + return arraychassiss + def _default_enum(self): names = [] name = {} @@ -7271,7 +7291,8 @@ class EMCV3MultiPoolDriverTestCase(test.TestCase): self.driver.create_snapshot(self.data.test_snapshot_1_v3) utils = self.driver.common.provisionv3.utils utils.get_v3_default_sg_instance_name.assert_called_once_with( - self.conn, u'SRP_1', u'Bronze', u'DSS', 
u'SYMMETRIX+000195900551') + self.conn, u'SRP_1', u'Bronze', u'DSS', u'SYMMETRIX+000195900551', + False) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, @@ -7518,7 +7539,7 @@ class EMCVMAXProvisionV3Test(test.TestCase): return_value=self.data.default_sg_instance_name) newstoragegroup = provisionv3.create_storage_group_v3( conn, controllerConfigService, groupName, srp, slo, workload, - extraSpecs) + extraSpecs, False) self.assertEqual(self.data.default_sg_instance_name, newstoragegroup) def test_create_element_replica(self): @@ -8514,6 +8535,24 @@ class EMCVMAXUtilsTest(test.TestCase): issynched = self.driver.common.utils._is_sync_complete(conn, syncname) self.assertFalse(issynched) + def test_get_v3_storage_group_name_compression_disabled(self): + poolName = 'SRP_1' + slo = 'Diamond' + workload = 'DSS' + isCompressionDisabled = True + storageGroupName = self.driver.utils.get_v3_storage_group_name( + poolName, slo, workload, isCompressionDisabled) + self.assertEqual("OS-SRP_1-Diamond-DSS-CD-SG", storageGroupName) + + @mock.patch.object( + emc_vmax_utils.EMCVMAXUtils, + 'get_smi_version', + return_value=831) + def test_is_all_flash(self, mock_version): + conn = FakeEcomConnection() + array = '000197200056' + self.assertTrue(self.driver.utils.is_all_flash(conn, array)) + class EMCVMAXCommonTest(test.TestCase): def setUp(self): @@ -8878,6 +8917,140 @@ class EMCVMAXCommonTest(test.TestCase): self.assertEqual('OS-fakehost-SRP_1-Diamond-DSS-I-MV', maskingViewDict['maskingViewName']) + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_find_lun', + return_value=( + {'SystemName': EMCVMAXCommonData.storage_system})) + def test_populate_masking_dict_v3_compression(self, mock_find_lun): + extraSpecs = {'storagetype:pool': u'SRP_1', + 'volume_backend_name': 'COMPRESSION_BE', + 'storagetype:array': u'1234567891011', + 'isV3': True, + 'portgroupname': u'OS-portgroup-PG', + 'storagetype:slo': u'Diamond', + 'storagetype:workload': u'DSS', + 
'storagetype:disablecompression': 'True'} + connector = self.data.connector + maskingViewDict = self.driver.common._populate_masking_dict( + self.data.test_volume, connector, extraSpecs) + self.assertEqual( + 'OS-fakehost-SRP_1-Diamond-DSS-I-CD-SG', + maskingViewDict['sgGroupName']) + self.assertEqual( + 'OS-fakehost-SRP_1-Diamond-DSS-I-CD-MV', + maskingViewDict['maskingViewName']) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_find_lun', + return_value=( + {'SystemName': EMCVMAXCommonData.storage_system})) + def test_populate_masking_dict_v3_compression_no_slo(self, mock_find_lun): + # Compression is no applicable when there is no slo + extraSpecs = {'storagetype:pool': u'SRP_1', + 'volume_backend_name': 'COMPRESSION_BE', + 'storagetype:array': u'1234567891011', + 'isV3': True, + 'portgroupname': u'OS-portgroup-PG', + 'storagetype:slo': None, + 'storagetype:workload': None, + 'storagetype:disablecompression': 'True'} + connector = self.data.connector + maskingViewDict = self.driver.common._populate_masking_dict( + self.data.test_volume, connector, extraSpecs) + self.assertEqual( + 'OS-fakehost-No_SLO-I-SG', maskingViewDict['sgGroupName']) + self.assertEqual( + 'OS-fakehost-No_SLO-I-MV', maskingViewDict['maskingViewName']) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_migrate_volume_v3', + return_value=True) + def test_slo_workload_migration_compression_enabled(self, mock_migrate): + extraSpecs = {'storagetype:pool': u'SRP_1', + 'volume_backend_name': 'COMPRESSION_BE', + 'storagetype:array': u'1234567891011', + 'isV3': True, + 'portgroupname': u'OS-portgroup-PG', + 'storagetype:slo': u'Diamond', + 'storagetype:workload': u'DSS', + 'storagetype:disablecompression': 'True'} + new_type_extra_specs = extraSpecs.copy() + new_type_extra_specs.pop('storagetype:disablecompression', None) + new_type = {'extra_specs': new_type_extra_specs} + common = self.driver.common + common.conn = FakeEcomConnection() + volumeName = 'retype_compression' + + 
volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + volumeInstance = common.conn.GetInstance(volumeInstanceName) + + self.assertTrue(self.driver.common._slo_workload_migration( + volumeInstance, self.data.test_source_volume_1_v3, + self.data.test_host_1_v3, volumeName, 'retyping', new_type, + extraSpecs)) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_migrate_volume_v3', + return_value=True) + def test_slo_workload_migration_compression_disabled(self, mock_migrate): + extraSpecs = {'storagetype:pool': u'SRP_1', + 'volume_backend_name': 'COMPRESSION_BE', + 'storagetype:array': u'1234567891011', + 'isV3': True, + 'portgroupname': u'OS-portgroup-PG', + 'storagetype:slo': u'Diamond', + 'storagetype:workload': u'DSS'} + new_type_extra_specs = extraSpecs.copy() + new_type_extra_specs['storagetype:disablecompression'] = 'True' + new_type = {'extra_specs': new_type_extra_specs} + common = self.driver.common + common.conn = FakeEcomConnection() + volumeName = 'retype_compression' + + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + volumeInstance = common.conn.GetInstance(volumeInstanceName) + + self.assertTrue(self.driver.common._slo_workload_migration( + volumeInstance, self.data.test_source_volume_1_v3, + self.data.test_host_1_v3, volumeName, 'retyping', new_type, + extraSpecs)) + + @mock.patch.object( + emc_vmax_common.EMCVMAXCommon, + '_migrate_volume_v3', + return_value=True) + def test_slo_workload_migration_compression_false(self, mock_migrate): + # Cannot retype because both volume types have the same slo/workload + # and both are false for disable compression, one by omission + extraSpecs = {'storagetype:pool': u'SRP_1', + 'volume_backend_name': 'COMPRESSION_BE', + 'storagetype:array': u'1234567891011', + 'isV3': True, + 'portgroupname': u'OS-portgroup-PG', + 'storagetype:slo': u'Diamond', + 'storagetype:workload': u'DSS'} + new_type_extra_specs = extraSpecs.copy() + 
new_type_extra_specs['storagetype:disablecompression'] = 'false' + new_type = {'extra_specs': new_type_extra_specs} + common = self.driver.common + common.conn = FakeEcomConnection() + volumeName = 'retype_compression' + + volumeInstanceName = ( + common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) + volumeInstance = common.conn.GetInstance(volumeInstanceName) + + self.assertFalse(self.driver.common._slo_workload_migration( + volumeInstance, self.data.test_source_volume_1_v3, + self.data.test_host_1_v3, volumeName, 'retyping', new_type, + extraSpecs)) + class EMCVMAXProvisionTest(test.TestCase): def setUp(self): diff --git a/cinder/volume/drivers/emc/emc_vmax_common.py b/cinder/volume/drivers/emc/emc_vmax_common.py index 1134e06dca4..1020d226932 100644 --- a/cinder/volume/drivers/emc/emc_vmax_common.py +++ b/cinder/volume/drivers/emc/emc_vmax_common.py @@ -1320,7 +1320,8 @@ class EMCVMAXCommon(object): def _is_valid_for_storage_assisted_migration_v3( self, volumeInstanceName, host, sourceArraySerialNumber, - sourcePoolName, volumeName, volumeStatus, sgName): + sourcePoolName, volumeName, volumeStatus, sgName, + doChangeCompression): """Check if volume is suitable for storage assisted (pool) migration. :param volumeInstanceName: the volume instance id @@ -1331,6 +1332,7 @@ class EMCVMAXCommon(object): :param volumeName: the name of the volume to be migrated :param volumeStatus: the status of the volume :param sgName: storage group name + :param doChangeCompression: do change compression :returns: boolean -- True/False :returns: string -- targetSlo :returns: string -- targetWorkload @@ -1390,13 +1392,16 @@ class EMCVMAXCommon(object): % {'targetSlo': targetSlo, 'targetWorkload': targetWorkload}) if targetCombination in emcFastSetting: - LOG.error(_LE( - "No action required. 
Volume: %(volumeName)s is " - "already part of slo/workload combination: " - "%(targetCombination)s."), - {'volumeName': volumeName, - 'targetCombination': targetCombination}) - return falseRet + # Check if migration is from compression to non compression + # of vice versa + if not doChangeCompression: + LOG.error(_LE( + "No action required. Volume: %(volumeName)s is " + "already part of slo/workload combination: " + "%(targetCombination)s."), + {'volumeName': volumeName, + 'targetCombination': targetCombination}) + return falseRet return (True, targetSlo, targetWorkload) @@ -2036,6 +2041,7 @@ class EMCVMAXCommon(object): protocol = self.utils.get_short_protocol_type(self.protocol) shortHostName = self.utils.get_host_short_name(hostName) if isV3: + maskingViewDict['isCompressionDisabled'] = False slo = extraSpecs[SLO] workload = extraSpecs[WORKLOAD] maskingViewDict['slo'] = slo @@ -2050,6 +2056,12 @@ class EMCVMAXCommon(object): 'slo': slo, 'workload': workload, 'protocol': protocol})) + doDisableCompression = self.utils.is_compression_disabled( + extraSpecs) + if doDisableCompression: + prefix = ("%(prefix)s-CD" + % {'prefix': prefix}) + maskingViewDict['isCompressionDisabled'] = True else: prefix = ( ("OS-%(shortHostName)s-No_SLO-%(protocol)s" @@ -3148,13 +3160,15 @@ class EMCVMAXCommon(object): storageConfigService = self.utils.find_storage_configuration_service( self.conn, storageSystemName) + doDisableCompression = self.utils.is_compression_disabled(extraSpecs) # A volume created without specifying a storage group during # creation time is allocated from the default SRP pool and # assigned the optimized SLO. 
sgInstanceName = self._get_or_create_storage_group_v3( extraSpecs[POOL], extraSpecs[SLO], - extraSpecs[WORKLOAD], storageSystemName, extraSpecs) + extraSpecs[WORKLOAD], doDisableCompression, + storageSystemName, extraSpecs) volumeDict, rc = self.provisionv3.create_volume_from_sg( self.conn, storageConfigService, volumeName, sgInstanceName, volumeSize, extraSpecs) @@ -3162,23 +3176,40 @@ class EMCVMAXCommon(object): return rc, volumeDict, storageSystemName def _get_or_create_storage_group_v3( - self, poolName, slo, workload, storageSystemName, extraSpecs): + self, poolName, slo, workload, doDisableCompression, + storageSystemName, extraSpecs): """Get or create storage group_v3 (V3). :param poolName: the SRP pool nsmr :param slo: the SLO :param workload: the workload + :param doDisableCompression: flag for compression :param storageSystemName: storage system name :param extraSpecs: extra specifications :returns: sgInstanceName """ storageGroupName, controllerConfigService, sgInstanceName = ( self.utils.get_v3_default_sg_instance_name( - self.conn, poolName, slo, workload, storageSystemName)) + self.conn, poolName, slo, workload, storageSystemName, + doDisableCompression)) if sgInstanceName is None: sgInstanceName = self.provisionv3.create_storage_group_v3( self.conn, controllerConfigService, storageGroupName, - poolName, slo, workload, extraSpecs) + poolName, slo, workload, extraSpecs, doDisableCompression) + else: + # Check that SG is not part of a masking view + mvInstanceName = self.masking.get_masking_view_from_storage_group( + self.conn, sgInstanceName) + if mvInstanceName: + exceptionMessage = (_( + "Default storage group %(storageGroupName)s is part of " + "masking view %(mvInstanceName)s. 
Please remove it " + "from this and all masking views") + % {'storageGroupName': storageGroupName, + 'mvInstanceName': mvInstanceName}) + LOG.error(exceptionMessage) + raise exception.VolumeBackendAPIException( + data=exceptionMessage) # If qos exists, update storage group to reflect qos parameters if 'qos' in extraSpecs: self.utils.update_storagegroup_qos( @@ -3273,14 +3304,20 @@ class EMCVMAXCommon(object): :param extraSpecs: extra specifications :returns: boolean -- True if migration succeeded, False if error. """ + isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) storageGroupName = self.utils.get_v3_storage_group_name( - extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD]) + extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD], + isCompressionDisabled) + # Check if old type and new type have different compression types + doChangeCompression = ( + self.utils.change_compression_type( + isCompressionDisabled, newType)) volumeInstanceName = volumeInstance.path isValid, targetSlo, targetWorkload = ( self._is_valid_for_storage_assisted_migration_v3( volumeInstanceName, host, extraSpecs[ARRAY], extraSpecs[POOL], volumeName, volumeStatus, - storageGroupName)) + storageGroupName, doChangeCompression)) storageSystemName = volumeInstance['SystemName'] if not isValid: @@ -3289,13 +3326,14 @@ class EMCVMAXCommon(object): "assisted migration using retype."), {'name': volumeName}) return False - if volume['host'] != host['host']: + if volume['host'] != host['host'] or doChangeCompression: LOG.debug( "Retype Volume %(name)s from source host %(sourceHost)s " - "to target host %(targetHost)s.", + "to target host %(targetHost)s. 
Compression change is %(cc)r.", {'name': volumeName, 'sourceHost': volume['host'], - 'targetHost': host['host']}) + 'targetHost': host['host'], + 'cc': doChangeCompression}) return self._migrate_volume_v3( volume, volumeInstance, extraSpecs[POOL], targetSlo, targetWorkload, storageSystemName, newType, extraSpecs) @@ -3325,10 +3363,10 @@ class EMCVMAXCommon(object): controllerConfigService = ( self.utils.find_controller_configuration_service( self.conn, storageSystemName)) - + isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) defaultSgName = self.utils.get_v3_storage_group_name( - extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD]) - + extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD], + isCompressionDisabled) foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( self.conn, volumeInstance.path, defaultSgName)) @@ -3342,12 +3380,16 @@ class EMCVMAXCommon(object): self.conn, controllerConfigService, volumeInstance, volumeName, extraSpecs, None, False) + targetExtraSpecs = newType['extra_specs'] + isCompressionDisabled = self.utils.is_compression_disabled( + targetExtraSpecs) + storageGroupName = self.utils.get_v3_storage_group_name( - poolName, targetSlo, targetWorkload) + poolName, targetSlo, targetWorkload, isCompressionDisabled) targetSgInstanceName = self._get_or_create_storage_group_v3( - poolName, targetSlo, targetWorkload, storageSystemName, - extraSpecs) + poolName, targetSlo, targetWorkload, isCompressionDisabled, + storageSystemName, extraSpecs) if targetSgInstanceName is None: LOG.error(_LE( "Failed to get or create storage group %(storageGroupName)s."), @@ -3561,6 +3603,17 @@ class EMCVMAXCommon(object): extraSpecs[ISV3] = True extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord) + if self.utils.is_all_flash(self.conn, extraSpecs[ARRAY]): + try: + extraSpecs[self.utils.DISABLECOMPRESSION] + # If not True remove it. 
+ if not self.utils.str2bool( + extraSpecs[self.utils.DISABLECOMPRESSION]): + extraSpecs.pop(self.utils.DISABLECOMPRESSION, None) + except KeyError: + pass + else: + extraSpecs.pop(self.utils.DISABLECOMPRESSION, None) LOG.debug("Pool is: %(pool)s " "Array is: %(array)s " "SLO is: %(slo)s " diff --git a/cinder/volume/drivers/emc/emc_vmax_fc.py b/cinder/volume/drivers/emc/emc_vmax_fc.py index 698bdadd68d..a678df3ef34 100644 --- a/cinder/volume/drivers/emc/emc_vmax_fc.py +++ b/cinder/volume/drivers/emc/emc_vmax_fc.py @@ -74,6 +74,7 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver): - MVs and SGs not reflecting correct protocol (bug #1640222) - Storage assisted volume migration via retype (bp vmax-volume-migration) + - Support for compression on All Flash """ diff --git a/cinder/volume/drivers/emc/emc_vmax_iscsi.py b/cinder/volume/drivers/emc/emc_vmax_iscsi.py index 952a56818fb..9403e2a8bc4 100644 --- a/cinder/volume/drivers/emc/emc_vmax_iscsi.py +++ b/cinder/volume/drivers/emc/emc_vmax_iscsi.py @@ -80,6 +80,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): - MVs and SGs not reflecting correct protocol (bug #1640222) - Storage assisted volume migration via retype (bp vmax-volume-migration) + - Support for compression on All Flash """ diff --git a/cinder/volume/drivers/emc/emc_vmax_masking.py b/cinder/volume/drivers/emc/emc_vmax_masking.py index 5c0854a7a3b..cb0f253a2f6 100644 --- a/cinder/volume/drivers/emc/emc_vmax_masking.py +++ b/cinder/volume/drivers/emc/emc_vmax_masking.py @@ -203,7 +203,8 @@ class EMCVMAXMasking(object): defaultSgGroupName = self.utils.get_v3_storage_group_name( maskingviewdict['pool'], maskingviewdict['slo'], - maskingviewdict['workload']) + maskingviewdict['workload'], + maskingviewdict['isCompressionDisabled']) assocStorageGroupInstanceNames = ( self.utils.get_storage_groups_from_volume( conn, volumeinstance.path)) @@ -777,7 +778,8 @@ class EMCVMAXMasking(object): foundStorageGroupInstanceName = ( self.provisionv3.create_storage_group_v3( 
conn, controllerConfigService, storageGroupName, - pool, slo, workload, maskingViewDict['extraSpecs'])) + pool, slo, workload, maskingViewDict['extraSpecs'], + maskingViewDict['isCompressionDisabled'])) else: fastPolicyName = maskingViewDict['fastPolicy'] volumeInstance = maskingViewDict['volumeInstance'] @@ -2234,9 +2236,10 @@ class EMCVMAXMasking(object): :param extraSpecs: additional info :raises: VolumeBackendAPIException """ + isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) storageGroupName = self.utils.get_v3_storage_group_name( extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD]) + extraSpecs[self.utils.WORKLOAD], isCompressionDisabled) storageGroupInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigurationService, storageGroupName) @@ -2245,7 +2248,8 @@ class EMCVMAXMasking(object): self.provisionv3.create_storage_group_v3( conn, controllerConfigurationService, storageGroupName, extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD], extraSpecs)) + extraSpecs[self.utils.WORKLOAD], extraSpecs, + isCompressionDisabled)) if not storageGroupInstanceName: errorMessage = (_("Failed to create storage group " "%(storageGroupName)s.") % diff --git a/cinder/volume/drivers/emc/emc_vmax_provision_v3.py b/cinder/volume/drivers/emc/emc_vmax_provision_v3.py index c422e05e8de..e2fe4c58eb0 100644 --- a/cinder/volume/drivers/emc/emc_vmax_provision_v3.py +++ b/cinder/volume/drivers/emc/emc_vmax_provision_v3.py @@ -256,11 +256,13 @@ class EMCVMAXProvisionV3(object): 'syncType': syncType, 'source': sourceInstance.path}) storageSystemName = sourceInstance['SystemName'] + doDisableCompression = self.utils.is_compression_disabled(extraSpecs) __, __, sgInstanceName = ( self.utils.get_v3_default_sg_instance_name( conn, extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD], storageSystemName)) + extraSpecs[self.utils.WORKLOAD], 
storageSystemName, + doDisableCompression)) try: storageGroupInstance = conn.GetInstance(sgInstanceName) except Exception: @@ -377,7 +379,8 @@ class EMCVMAXProvisionV3(object): extraSpecs, force) def create_storage_group_v3(self, conn, controllerConfigService, - groupName, srp, slo, workload, extraSpecs): + groupName, srp, slo, workload, extraSpecs, + doDisableCompression): """Create the volume in the specified pool. :param conn: the connection information to the ecom server @@ -387,28 +390,40 @@ class EMCVMAXProvisionV3(object): :param slo: the SLO (String) :param workload: the workload (String) :param extraSpecs: additional info + :param doDisableCompression: disable compression flag :returns: storageGroupInstanceName - storage group instance name """ startTime = time.time() @lockutils.synchronized(groupName, "emc-sg-", True) def do_create_storage_group_v3(): - if slo and workload: - rc, job = conn.InvokeMethod( - 'CreateGroup', - controllerConfigService, - GroupName=groupName, - Type=self.utils.get_num(4, '16'), - EMCSRP=srp, - EMCSLO=slo, - EMCWorkload=workload) + if doDisableCompression: + if slo and workload: + rc, job = conn.InvokeMethod( + 'CreateGroup', + controllerConfigService, + GroupName=groupName, + Type=self.utils.get_num(4, '16'), + EMCSRP=srp, + EMCSLO=slo, + EMCWorkload=workload, + EMCDisableCompression=True) else: - rc, job = conn.InvokeMethod( - 'CreateGroup', - controllerConfigService, - GroupName=groupName, - Type=self.utils.get_num(4, '16')) - + if slo and workload: + rc, job = conn.InvokeMethod( + 'CreateGroup', + controllerConfigService, + GroupName=groupName, + Type=self.utils.get_num(4, '16'), + EMCSRP=srp, + EMCSLO=slo, + EMCWorkload=workload) + else: + rc, job = conn.InvokeMethod( + 'CreateGroup', + controllerConfigService, + GroupName=groupName, + Type=self.utils.get_num(4, '16')) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete( conn, job, extraSpecs) diff --git a/cinder/volume/drivers/emc/emc_vmax_utils.py 
b/cinder/volume/drivers/emc/emc_vmax_utils.py index ba574de0450..1d01f927911 100644 --- a/cinder/volume/drivers/emc/emc_vmax_utils.py +++ b/cinder/volume/drivers/emc/emc_vmax_utils.py @@ -63,6 +63,7 @@ RETRIES = 'storagetype:retries' CIM_ERR_NOT_FOUND = 6 VOLUME_ELEMENT_NAME_PREFIX = 'OS-' SYNCHRONIZED = 4 +SMI_VERSION_83 = 830 class EMCVMAXUtils(object): @@ -74,6 +75,7 @@ class EMCVMAXUtils(object): SLO = 'storagetype:slo' WORKLOAD = 'storagetype:workload' POOL = 'storagetype:pool' + DISABLECOMPRESSION = 'storagetype:disablecompression' def __init__(self, prtcl): if not pywbemAvailable: @@ -1571,21 +1573,31 @@ class EMCVMAXUtils(object): return isValidSLO, isValidWorkload - def get_v3_storage_group_name(self, poolName, slo, workload): + def get_v3_storage_group_name(self, poolName, slo, workload, + isCompressionDisabled): """Determine default v3 storage group from extraSpecs. :param poolName: the poolName :param slo: the SLO string e.g Bronze :param workload: the workload string e.g DSS + :param isCompressionDisabled: is compression disabled :returns: storageGroupName """ if slo and workload: - storageGroupName = ("OS-%(poolName)s-%(slo)s-%(workload)s-SG" - % {'poolName': poolName, - 'slo': slo, - 'workload': workload}) + if isCompressionDisabled: + postfix = 'CD-SG' + else: + postfix = 'SG' + + storageGroupName = ( + "OS-%(poolName)s-%(slo)s-%(workload)s-%(postfix)s" + % {'poolName': poolName, + 'slo': slo, + 'workload': workload, + 'postfix': postfix}) else: storageGroupName = ("OS-no_SLO-SG") + return storageGroupName def _get_fast_settings_from_storage_group(self, storageGroupInstance): @@ -2611,7 +2623,8 @@ class EMCVMAXUtils(object): return rsdInstance def get_v3_default_sg_instance_name( - self, conn, poolName, slo, workload, storageSystemName): + self, conn, poolName, slo, workload, storageSystemName, + isCompressionDisabled): """Get the V3 default instance name :param conn: the connection to the ecom server @@ -2619,10 +2632,11 @@ class 
EMCVMAXUtils(object): :param slo: the SLO :param workload: the workload :param storageSystemName: the storage system name + :param isCompressionDisabled: is compression disabled :returns: the storage group instance name """ storageGroupName = self.get_v3_storage_group_name( - poolName, slo, workload) + poolName, slo, workload, isCompressionDisabled) controllerConfigService = ( self.find_controller_configuration_service( conn, storageSystemName)) @@ -2823,3 +2837,74 @@ class EMCVMAXUtils(object): "%(igName)s.", {'igName': foundinitiatorGroupInstanceName}) return foundinitiatorGroupInstanceName + + def is_all_flash(self, conn, array): + """Check if array is all flash. + + :param conn: connection the ecom server + :param array: + :returns: True/False + """ + smi_version = self.get_smi_version(conn) + if smi_version >= SMI_VERSION_83: + return self._is_all_flash(conn, array) + else: + return False + + def _is_all_flash(self, conn, array): + """Check if array is all flash. + + :param conn: connection the ecom server + :param array: + :returns: True/False + """ + is_all_flash = False + arrayChassisInstanceNames = conn.EnumerateInstanceNames( + 'Symm_ArrayChassis') + for arrayChassisInstanceName in arrayChassisInstanceNames: + tag = arrayChassisInstanceName['Tag'] + if array in tag: + arrayChassisInstance = ( + conn.GetInstance(arrayChassisInstanceName)) + propertiesList = arrayChassisInstance.properties.items() + for properties in propertiesList: + if properties[0] == 'Model': + cimProperties = properties[1] + model = cimProperties.value + if re.search('^VMAX\s?[0-9]+FX?$', model): + is_all_flash = True + return is_all_flash + + def is_compression_disabled(self, extraSpecs): + """Check is compression is to be disabled. 
+
+        :param extraSpecs: extra specifications
+        :returns: boolean -- True if compression is to be disabled
+        """
+        doDisableCompression = False
+        if self.DISABLECOMPRESSION in extraSpecs:
+            if self.str2bool(extraSpecs[self.DISABLECOMPRESSION]):
+                doDisableCompression = True
+        return doDisableCompression
+
+    def change_compression_type(self, isSourceCompressionDisabled, newType):
+        """Check if the volume types have different compression types.
+
+        :param isSourceCompressionDisabled: from source
+        :param newType: from target
+        :returns: boolean
+        """
+        extraSpecs = newType['extra_specs']
+        isTargetCompressionDisabled = self.is_compression_disabled(extraSpecs)
+        if isTargetCompressionDisabled == isSourceCompressionDisabled:
+            return False
+        else:
+            return True
+
+    def str2bool(self, value):
+        """Check if value is yes or true.
+
+        :param value: string value
+        :returns: boolean
+        """
+        return value.lower() in ("yes", "true") diff --git a/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml b/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml new file mode 100644 index 00000000000..74481ec6468 --- /dev/null +++ b/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml @@ -0,0 +1,3 @@ +--- +features: +  - Support for compression on VMAX All Flash in the VMAX driver.