From 1f18d81829b0817863db464b6eb0206b7d9fcc13 Mon Sep 17 00:00:00 2001 From: Rich Hagarty Date: Fri, 13 Feb 2015 08:19:42 -0800 Subject: [PATCH] Add flash cache policy to 3PAR driver 3PAR now supports flash cache. Flash cache policy is set by the extra spec key "hp3par:flash_cache", with value being true or false. This feature requires SSD disks and 3PAR firmware version 3.2.1 MU2 or greater. Change-Id: I6d6fdab982ede3676289e587071520c798f4ce0f Implements: blueprint 3par-flash-cache --- cinder/tests/test_hp3par.py | 200 +++++++++++++----- .../volume/drivers/san/hp/hp_3par_common.py | 89 ++++++-- 2 files changed, 216 insertions(+), 73 deletions(-) diff --git a/cinder/tests/test_hp3par.py b/cinder/tests/test_hp3par.py index 848bceb00a2..97241b45fba 100644 --- a/cinder/tests/test_hp3par.py +++ b/cinder/tests/test_hp3par.py @@ -54,6 +54,9 @@ HP3PAR_SAN_SSH_PRIVATE = 'foobar' CHAP_USER_KEY = "HPQ-cinder-CHAP-name" CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" +FLASH_CACHE_ENABLED = 1 +FLASH_CACHE_DISABLED = 2 + class HP3PARBaseDriver(object): @@ -70,6 +73,7 @@ class HP3PARBaseDriver(object): VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000' VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111' + VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222' VOLUME_NAME = 'volume-' + VOLUME_ID VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw' SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' @@ -137,6 +141,14 @@ class HP3PARBaseDriver(object): 'volume_type': None, 'volume_type_id': 'gold'} + volume_flash_cache = {'name': VOLUME_NAME, + 'id': VOLUME_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': FAKE_HOST, + 'volume_type': None, + 'volume_type_id': VOLUME_TYPE_ID_FLASH_CACHE} + snapshot = {'name': SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'user_id': USER_ID, @@ -178,6 +190,16 @@ class HP3PARBaseDriver(object): 'deleted_at': None, 'id': VOLUME_TYPE_ID_DEDUP} + volume_type_flash_cache = {'name': 'flash-cache-on', + 'deleted': False, + 'updated_at': None, + 'extra_specs': {'cpg': HP3PAR_CPG2, + 'hp3par:flash_cache': 'true'}, + 'deleted_at': None, + 'id': VOLUME_TYPE_ID_FLASH_CACHE} + + flash_cache_3par_keys = {'flash_cache': 'true'} + cpgs = [ {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, 'incrementMiB': 8192}, @@ -411,16 +433,24 @@ class HP3PARBaseDriver(object): # intentionally removed to make _retype more usable for other use cases. 
RETYPE_DIFF = None - wsapi_version = {'major': 1, - 'build': 30201120, - 'minor': 4, - 'revision': 1} - wsapi_version_312 = {'major': 1, 'build': 30102422, 'minor': 3, 'revision': 1} + wsapi_version_for_dedup = {'major': 1, + 'build': 30201120, + 'minor': 4, + 'revision': 1} + + wsapi_version_for_flash_cache = {'major': 1, + 'build': 30201200, + 'minor': 4, + 'revision': 2} + + # Use this to point to latest version of wsapi + wsapi_version_latest = wsapi_version_for_flash_cache + standard_login = [ mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS), mock.call.setSSHOptions( @@ -635,7 +665,7 @@ class HP3PARBaseDriver(object): @mock.patch.object(volume_types, 'get_volume_type') def test_unsupported_dedup_volume_type(self, _mock_volume_types): - mock_client = self.setup_driver_312() + mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) _mock_volume_types.return_value = { 'name': 'dedup', 'extra_specs': { @@ -828,7 +858,6 @@ class HP3PARBaseDriver(object): 'tpvv': False, 'tdvv': True, 'snapCPG': HP3PAR_CPG_SNAP})] - mock_client.assert_has_calls( self.standard_login + expected + @@ -838,6 +867,97 @@ class HP3PARBaseDriver(object): self.FAKE_HOST, HP3PAR_CPG_QOS)}) + @mock.patch.object(volume_types, 'get_volume_type') + def test_create_volume_flash_cache(self, _mock_volume_types): + # Setup_mock_client drive with default configuration + # and return the mock HTTP 3PAR client + mock_client = self.setup_driver() + + _mock_volume_types.return_value = { + 'name': 'flash-cache-on', + 'extra_specs': { + 'cpg': HP3PAR_CPG2, + 'snap_cpg': HP3PAR_CPG_SNAP, + 'vvs_name': self.VVS_NAME, + 'qos': self.QOS, + 'tpvv': True, + 'tdvv': False, + 'hp3par:flash_cache': 'true', + 'volume_type': self.volume_type_flash_cache}} + + with mock.patch.object(hpcommon.HP3PARCommon, + '_create_client') as mock_create_client: + mock_create_client.return_value = mock_client + mock_client.getCPG.return_value = {'domain': None} + mock_client.FLASH_CACHE_ENABLED = FLASH_CACHE_ENABLED + mock_client.FLASH_CACHE_DISABLED = FLASH_CACHE_DISABLED + + return_model = self.driver.create_volume(self.volume_flash_cache) + comment = ( + '{"volume_type_name": "flash-cache-on", ' + '"display_name": "Foo Volume", ' + '"name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", ' + '"volume_type_id": "d03338a9-9115-48a3-8dfc-22222222222", ' + '"volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", ' + '"qos": {}, "type": "OpenStack"}') + + expected = [ + mock.call.getCPG(HP3PAR_CPG2), + mock.call.createVolume( + self.VOLUME_3PAR_NAME, + HP3PAR_CPG2, + 1907, { + 'comment': comment, + 'tpvv': True, + 'tdvv': False, + 'snapCPG': HP3PAR_CPG_SNAP}), + mock.call.getCPG(HP3PAR_CPG2), + mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None), + mock.call.createQoSRules( + 'vvs-0DM4qZEVSKON-DXN-NwVpw', + {'priority': 2} + ), + mock.call.modifyVolumeSet( + 'vvs-0DM4qZEVSKON-DXN-NwVpw', flashCachePolicy=1), + mock.call.addVolumeToVolumeSet( + 'vvs-0DM4qZEVSKON-DXN-NwVpw', + 'osv-0DM4qZEVSKON-DXN-NwVpw')] + + mock_client.assert_has_calls( + [mock.call.getWsApiVersion()] + + self.standard_login + + expected + + self.standard_logout) + self.assertEqual(return_model, + {'host': volume_utils.append_host( + self.FAKE_HOST, + HP3PAR_CPG2)}) + + @mock.patch.object(volume_types, 'get_volume_type') + def test_unsupported_flash_cache_volume(self, _mock_volume_types): + + mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) + _mock_volume_types.return_value = { + 'name': 'flash-cache-on', + 'extra_specs': { + 'cpg': HP3PAR_CPG2, + 
'snap_cpg': HP3PAR_CPG_SNAP, + 'vvs_name': self.VVS_NAME, + 'qos': self.QOS, + 'tpvv': True, + 'tdvv': False, + 'hp3par:flash_cache': 'true', + 'volume_type': self.volume_type_flash_cache}} + + with mock.patch.object(hpcommon.HP3PARCommon, + '_create_client') as mock_create_client: + mock_create_client.return_value = mock_client + common = self.driver._login() + + self.assertRaises(exception.InvalidInput, + common.get_flash_cache_policy, + self.flash_cache_3par_keys) + @mock.patch.object(volume_types, 'get_volume_type') def test_retype_not_3par(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 @@ -1160,6 +1280,7 @@ class HP3PARBaseDriver(object): None, cpg, cpg, snap_cpg, snap_cpg, True, False, False, True, None, None, self.QOS_SPECS, self.RETYPE_QOS_SPECS, + None, None, "{}") expected = [ @@ -1194,6 +1315,7 @@ class HP3PARBaseDriver(object): None, cpg, cpg, snap_cpg, snap_cpg, True, False, False, True, None, None, self.QOS_SPECS, self.RETYPE_QOS_SPECS, + None, None, "{}") expected = [ @@ -2845,7 +2967,7 @@ class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase): ['0987654321234', '123456789000987'], }}} - def setup_driver(self, config=None, mock_conf=None): + def setup_driver(self, config=None, mock_conf=None, wsapi_version=None): self.ctxt = context.get_admin_context() mock_client = self.setup_mock_client( @@ -2853,27 +2975,12 @@ class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase): m_conf=mock_conf, driver=hpfcdriver.HP3PARFCDriver) - mock_client.getWsApiVersion.return_value = self.wsapi_version - - expected = [ - mock.call.getCPG(HP3PAR_CPG), - mock.call.getCPG(HP3PAR_CPG2)] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - return mock_client - - def setup_driver_312(self, config=None, mock_conf=None): - - self.ctxt = context.get_admin_context() - mock_client = self.setup_mock_client( - conf=config, - m_conf=mock_conf, - driver=hpfcdriver.HP3PARFCDriver) - - mock_client.getWsApiVersion.return_value = self.wsapi_version_312 + if wsapi_version: + mock_client.getWsApiVersion.return_value = ( + wsapi_version) + else: + mock_client.getWsApiVersion.return_value = ( + self.wsapi_version_latest) expected = [ mock.call.getCPG(HP3PAR_CPG), @@ -3509,7 +3616,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): 'target_lun': TARGET_LUN, 'target_portal': '1.1.1.2:1234'}} - def setup_driver(self, config=None, mock_conf=None): + def setup_driver(self, config=None, mock_conf=None, wsapi_version=None): self.ctxt = context.get_admin_context() @@ -3518,33 +3625,12 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): m_conf=mock_conf, driver=hpdriver.HP3PARISCSIDriver) - mock_client.getWsApiVersion.return_value = self.wsapi_version - - expected_get_cpgs = [ - mock.call.getCPG(HP3PAR_CPG), - mock.call.getCPG(HP3PAR_CPG2)] - expected_get_ports = [mock.call.getPorts()] - mock_client.assert_has_calls( - self.standard_login + - expected_get_cpgs + - self.standard_logout + - self.standard_login + - expected_get_ports + - self.standard_logout) - mock_client.reset_mock() - - return mock_client - - def setup_driver_312(self, config=None, mock_conf=None): - - self.ctxt = context.get_admin_context() - - mock_client = self.setup_mock_client( - conf=config, - m_conf=mock_conf, - driver=hpdriver.HP3PARISCSIDriver) - - mock_client.getWsApiVersion.return_value = self.wsapi_version_312 + if wsapi_version: + mock_client.getWsApiVersion.return_value = ( + wsapi_version) + else: + 
mock_client.getWsApiVersion.return_value = ( + self.wsapi_version_latest) expected_get_cpgs = [ mock.call.getCPG(HP3PAR_CPG), diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py index 4d8d44967ad..00b645d0d51 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_common.py +++ b/cinder/volume/drivers/san/hp/hp_3par_common.py @@ -70,6 +70,7 @@ LOG = logging.getLogger(__name__) MIN_CLIENT_VERSION = '3.1.2' DEDUP_API_VERSION = 30201120 +FLASH_CACHE_API_VERSION = 30201200 hp3par_opts = [ cfg.StrOpt('hp3par_api_url', @@ -166,10 +167,11 @@ class HP3PARCommon(object): 2.0.34 - Fix log messages to match guidelines. bug #1411370 2.0.35 - Fix default snapCPG for manage_existing bug #1393609 2.0.36 - Added support for dedup provisioning + 2.0.37 - Added support for enabling Flash Cache """ - VERSION = "2.0.36" + VERSION = "2.0.37" stats = {} @@ -203,7 +205,8 @@ class HP3PARCommon(object): hp_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency', 'priority'] qos_priority_level = {'low': 1, 'normal': 2, 'high': 3} - hp3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs'] + hp3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs', + 'flash_cache'] def __init__(self, config): self.config = config @@ -888,8 +891,42 @@ class HP3PARCommon(object): with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating QOS rule %s"), qosRule) + def get_flash_cache_policy(self, hp3par_keys): + if hp3par_keys is not None: + # First check list of extra spec keys + val = self._get_key_value(hp3par_keys, 'flash_cache', None) + if val is not None: + # If requested, see if supported on back end + if self.API_VERSION < FLASH_CACHE_API_VERSION: + err = (_("Flash Cache Policy requires " + "WSAPI version '%(fcache_version)s' " + "version '%(version)s' is installed.") % + {'fcache_version': FLASH_CACHE_API_VERSION, + 'version': self.API_VERSION}) + LOG.error(err) + raise exception.InvalidInput(reason=err) + else: + if val.lower() == 'true': + return self.client.FLASH_CACHE_ENABLED + else: + return self.client.FLASH_CACHE_DISABLED + + return None + + def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name): + # Update virtual volume set + if flash_cache: + try: + self.client.modifyVolumeSet(vvs_name, + flashCachePolicy=flash_cache) + LOG.info(_LI("Flash Cache policy set to %s"), flash_cache) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error setting Flash Cache policy " + "to %s - exception"), flash_cache) + def _add_volume_to_volume_set(self, volume, volume_name, - cpg, vvs_name, qos): + cpg, vvs_name, qos, flash_cache): if vvs_name is not None: # Admin has set a volume set name to add the volume to try: @@ -904,10 +941,11 @@ class HP3PARCommon(object): self.client.createVolumeSet(vvs_name, domain) try: self._set_qos_rule(qos, vvs_name) + self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name) self.client.addVolumeToVolumeSet(vvs_name, volume_name) except Exception as ex: # Cleanup the volume set if unable to create the qos rule - # or add the volume to the volume set + # or flash cache policy or add the volume to the volume set self.client.deleteVolumeSet(vvs_name) raise exception.CinderException(ex) @@ -1103,6 +1141,7 @@ class HP3PARCommon(object): snap_cpg = type_info['snap_cpg'] tpvv = type_info['tpvv'] tdvv = type_info['tdvv'] + flash_cache = self.get_flash_cache_policy(type_info['hp3par_keys']) type_id = volume.get('volume_type_id', None) if type_id is not None: @@ 
-1124,10 +1163,11 @@ class HP3PARCommon(object): capacity = self._capacity_from_size(volume['size']) volume_name = self._get_3par_vol_name(volume['id']) self.client.createVolume(volume_name, cpg, capacity, extras) - if qos or vvs_name is not None: + if qos or vvs_name or flash_cache is not None: try: self._add_volume_to_volume_set(volume, volume_name, - cpg, vvs_name, qos) + cpg, vvs_name, qos, + flash_cache) except exception.InvalidInput as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) @@ -1364,12 +1404,16 @@ class HP3PARCommon(object): self.client.deleteVolume(volume_name) raise exception.CinderException(ex) - if qos or vvs_name is not None: + # Check for flash cache setting in extra specs + flash_cache = self.get_flash_cache_policy(hp3par_keys) + + if qos or vvs_name or flash_cache is not None: cpg_names = self._get_key_value(hp3par_keys, 'cpg', self.config.hp3par_cpg) try: self._add_volume_to_volume_set(volume, volume_name, - cpg_names[0], vvs_name, qos) + cpg_names[0], vvs_name, + qos, flash_cache) except Exception as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) @@ -1807,8 +1851,10 @@ class HP3PARCommon(object): def _retype(self, volume, volume_name, new_type_name, new_type_id, host, new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg, - old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_vvs, new_vvs, - old_qos, new_qos, old_comment): + old_tpvv, new_tpvv, old_tdvv, new_tdvv, + old_vvs, new_vvs, old_qos, new_qos, + old_flash_cache, new_flash_cache, + old_comment): action = "volume:retype" @@ -1836,6 +1882,8 @@ class HP3PARCommon(object): 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg, 'old_vvs': old_vvs, 'new_vvs': new_vvs, 'old_qos': old_qos, 'new_qos': new_qos, + 'old_flash_cache': old_flash_cache, + 'new_flash_cache': new_flash_cache, 'new_type_name': new_type_name, 'new_type_id': new_type_id, 'old_comment': old_comment }) @@ -1879,8 +1927,12 @@ class HP3PARCommon(object): new_hp3par_keys = new_volume_settings['hp3par_keys'] if 'persona' in new_hp3par_keys: new_persona = new_hp3par_keys['persona'] + new_flash_cache = self.get_flash_cache_policy(new_hp3par_keys) + old_qos = old_volume_settings['qos'] old_vvs = old_volume_settings['vvs_name'] + old_hp3par_keys = old_volume_settings['hp3par_keys'] + old_flash_cache = self.get_flash_cache_policy(old_hp3par_keys) # Get the current volume info because we can get in a bad state # if we trust that all the volume type settings are still the @@ -1901,8 +1953,9 @@ class HP3PARCommon(object): self._retype(volume, volume_name, new_type_name, new_type_id, host, new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv, - old_tdvv, new_tdvv, old_vvs, new_vvs, old_qos, - new_qos, old_comment) + old_tdvv, new_tdvv, old_vvs, new_vvs, + old_qos, new_qos, old_flash_cache, new_flash_cache, + old_comment) if host: return True, self._get_model_update(host['host'], new_cpg) @@ -2109,9 +2162,12 @@ class ModifySpecsTask(flow_utils.CinderTask): super(ModifySpecsTask, self).__init__(addons=[action]) def execute(self, common, volume_name, volume, old_cpg, new_cpg, - old_vvs, new_vvs, old_qos, new_qos): + old_vvs, new_vvs, old_qos, new_qos, + old_flash_cache, new_flash_cache): - if old_vvs != new_vvs or old_qos != new_qos: + if (old_vvs != new_vvs or + old_qos != new_qos or + old_flash_cache != new_flash_cache): # Remove VV from old VV Set. 
if old_vvs is not None and old_vvs != new_vvs: @@ -2133,9 +2189,10 @@ class ModifySpecsTask(flow_utils.CinderTask): "deleteVolumeSet(%s)"), vvs_name) raise ex - if new_vvs or new_qos: + if new_vvs or new_qos or new_flash_cache: common._add_volume_to_volume_set( - volume, volume_name, new_cpg, new_vvs, new_qos) + volume, volume_name, new_cpg, new_vvs, new_qos, + new_flash_cache) self.needs_revert = True def revert(self, common, volume_name, volume, old_vvs, new_vvs, old_qos,
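
For reference (not part of the patch): a minimal, self-contained sketch of how the new "hp3par:flash_cache" extra spec is expected to map to a 3PAR flash cache policy, following the get_flash_cache_policy() logic added to hp_3par_common.py above. The flash_cache_policy() helper and FakeClient constants below are illustrative stand-ins rather than driver code; FLASH_CACHE_API_VERSION is the WSAPI build value taken from the diff.

# Illustrative sketch only -- mirrors the driver's extra-spec handling,
# but is not the driver code itself.
FLASH_CACHE_API_VERSION = 30201200   # WSAPI build that adds flash cache support


class FakeClient(object):
    # Stand-ins for the hp3parclient flash cache policy constants.
    FLASH_CACHE_ENABLED = 1
    FLASH_CACHE_DISABLED = 2


def flash_cache_policy(extra_specs, api_version, client):
    """Translate volume type extra specs into a flash cache policy.

    Returns client.FLASH_CACHE_ENABLED / FLASH_CACHE_DISABLED, or None
    when the extra spec is absent.  Raises ValueError when the back end
    WSAPI is too old, analogous to the InvalidInput raised by the driver.
    """
    val = extra_specs.get('hp3par:flash_cache')
    if val is None:
        return None
    if api_version < FLASH_CACHE_API_VERSION:
        raise ValueError('Flash cache policy requires WSAPI build %d or '
                         'newer' % FLASH_CACHE_API_VERSION)
    if val.lower() == 'true':
        return client.FLASH_CACHE_ENABLED
    return client.FLASH_CACHE_DISABLED


if __name__ == '__main__':
    # Extra specs as an operator might set them, e.g.:
    #   cinder type-key <volume-type> set hp3par:flash_cache=true
    specs = {'cpg': 'CPG-FC1', 'hp3par:flash_cache': 'true'}
    policy = flash_cache_policy(specs, api_version=30201200,
                                client=FakeClient())
    print(policy)  # 1 == FLASH_CACHE_ENABLED

Note that, as in the patch, the policy is applied to the virtual volume set (modifyVolumeSet(..., flashCachePolicy=...)) rather than to the volume itself, which is why create_volume and retype now add the volume to a VV set whenever qos, vvs_name, or a flash cache setting is present.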