diff --git a/cinder/opts.py b/cinder/opts.py index 0bc4c47813b..9ef804512fe 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -114,6 +114,8 @@ from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import nimble as \ cinder_volume_drivers_hpe_nimble +from cinder.volume.drivers.hpe.xp import hpe_xp_rest as \ + cinder_volume_drivers_hpe_xp_hpexprest from cinder.volume.drivers.huawei import common as \ cinder_volume_drivers_huawei_common from cinder.volume.drivers.ibm import flashsystem_common as \ @@ -356,6 +358,9 @@ def list_opts(): cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_hpe_nimble.nimble_opts, + cinder_volume_drivers_hpe_xp_hpexprest.COMMON_VOLUME_OPTS, + cinder_volume_drivers_hpe_xp_hpexprest.REST_VOLUME_OPTS, + cinder_volume_drivers_hpe_xp_hpexprest.FC_VOLUME_OPTS, cinder_volume_drivers_huawei_common.huawei_opts, cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts, cinder_volume_drivers_ibm_flashsystemiscsi. diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py new file mode 100644 index 00000000000..1134cbdee04 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -0,0 +1,1150 @@ +# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +"""Unit tests for Hewlett Packard Enterprise Driver.""" + +import functools +from unittest import mock + +from oslo_config import cfg +import requests +from requests import models + +from cinder import context as cinder_context +from cinder.db.sqlalchemy import api as sqlalchemy_api +from cinder import exception +from cinder.objects import group_snapshot as obj_group_snap +from cinder.objects import snapshot as obj_snap +from cinder.tests.unit import fake_group +from cinder.tests.unit import fake_group_snapshot +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder.tests.unit import test +from cinder.volume import configuration as conf +from cinder.volume import driver +from cinder.volume.drivers.hitachi import hbsd_common +from cinder.volume.drivers.hitachi import hbsd_rest +from cinder.volume.drivers.hitachi import hbsd_rest_api +from cinder.volume.drivers.hpe.xp import hpe_xp_fc +from cinder.volume.drivers.hpe.xp import hpe_xp_rest +from cinder.volume import volume_types +from cinder.volume import volume_utils +from cinder.zonemanager import utils as fczm_utils + +# Configuration parameter values +CONFIG_MAP = { + 'serial': '886000123456', + 'my_ip': '127.0.0.1', + 'rest_server_ip_addr': '172.16.18.108', + 'rest_server_ip_port': '23451', + 'port_id': 'CL1-A', + 'host_grp_name': 'HPEXP-0123456789abcdef', + 'host_mode': 'LINUX/IRIX', + 'host_wwn': '0123456789abcdef', + 'target_wwn': '1111111123456789', + 'user_id': 'user', + 'user_pass': 'password', + 'pool_name': 'test_pool', + 'auth_user': 'auth_user', + 'auth_password': 'auth_password', +} + +# Dummy response for FC zoning device mapping +DEVICE_MAP = { + 'fabric_name': { + 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']], + 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}} + +DEFAULT_CONNECTOR = { + 'host': 'host', + 'ip': CONFIG_MAP['my_ip'], + 'wwpns': [CONFIG_MAP['host_wwn']], + 'multipath': False, +} + +CTXT = cinder_context.get_admin_context() + +TEST_VOLUME = [] +for i in range(4): + volume = {} + volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) + volume['name'] = 'test-volume{0:d}'.format(i) + if i == 3: + volume['provider_location'] = None + else: + volume['provider_location'] = '{0:d}'.format(i) + volume['size'] = 128 + if i == 2: + volume['status'] = 'in-use' + else: + volume['status'] = 'available' + volume = fake_volume.fake_volume_obj(CTXT, **volume) + TEST_VOLUME.append(volume) + + +def _volume_get(context, volume_id): + """Return predefined volume info.""" + return TEST_VOLUME[int(volume_id.replace("-", ""))] + + +TEST_SNAPSHOT = [] +snapshot = {} +snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) +snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) +snapshot['provider_location'] = '{0:d}'.format(1) +snapshot['status'] = 'available' +snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) +snapshot['volume'] = _volume_get(None, snapshot['volume_id']) +snapshot['volume_name'] = 'test-volume{0:d}'.format(0) +snapshot['volume_size'] = 128 +snapshot = obj_snap.Snapshot._from_db_object( + CTXT, obj_snap.Snapshot(), + fake_snapshot.fake_db_snapshot(**snapshot)) +TEST_SNAPSHOT.append(snapshot) + +TEST_GROUP = [] +for i in range(2): + group = {} + group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) + group['status'] = 'available' + group = fake_group.fake_group_obj(CTXT, **group) + TEST_GROUP.append(group) + +TEST_GROUP_SNAP = [] +group_snapshot = {} +group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) 
+group_snapshot['status'] = 'available' +group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( + CTXT, obj_group_snap.GroupSnapshot(), + fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) +TEST_GROUP_SNAP.append(group_snapshot) + +# Dummy response for REST API +POST_SESSIONS_RESULT = { + "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", + "sessionId": 0, +} + +GET_PORTS_RESULT = { + "data": [ + { + "portId": CONFIG_MAP['port_id'], + "portType": "FIBRE", + "portAttributes": [ + "TAR", + "MCU", + "RCU", + "ELUN" + ], + "fabricMode": True, + "portConnection": "PtoP", + "lunSecuritySetting": True, + "wwn": CONFIG_MAP['target_wwn'], + }, + ], +} + +GET_HOST_WWNS_RESULT = { + "data": [ + { + "hostGroupNumber": 0, + "hostWwn": CONFIG_MAP['host_wwn'], + }, + ], +} + +COMPLETED_SUCCEEDED_RESULT = { + "status": "Completed", + "state": "Succeeded", + "affectedResources": ('a/b/c/1',), +} + +COMPLETED_FAILED_RESULT_LU_DEFINED = { + "status": "Completed", + "state": "Failed", + "error": { + "errorCode": { + "SSB1": "B958", + "SSB2": "015A", + }, + }, +} + +GET_LDEV_RESULT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", +} + +GET_LDEV_RESULT_MAPPED = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "ports": [ + { + "portId": CONFIG_MAP['port_id'], + "hostGroupNumber": 0, + "hostGroupName": CONFIG_MAP['host_grp_name'], + "lun": 1 + }, + ], +} + +GET_LDEV_RESULT_PAIR = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP", "FS"], + "status": "NML", +} + +GET_POOL_RESULT = { + "availableVolumeCapacity": 480144, + "totalPoolCapacity": 507780, + "totalLocatedCapacity": 71453172, +} + +GET_SNAPSHOTS_RESULT = { + "data": [ + { + "primaryOrSecondary": "S-VOL", + "status": "PSUS", + "pvolLdevId": 0, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + +GET_SNAPSHOTS_RESULT_PAIR = { + "data": [ + { + "primaryOrSecondary": "S-VOL", + "status": "PAIR", + "pvolLdevId": 0, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + +GET_SNAPSHOTS_RESULT_BUSY = { + "data": [ + { + "primaryOrSecondary": "P-VOL", + "status": "PSUP", + "pvolLdevId": 0, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + +GET_POOLS_RESULT = { + "data": [ + { + "poolId": 30, + "poolName": CONFIG_MAP['pool_name'], + "availableVolumeCapacity": 480144, + "totalPoolCapacity": 507780, + "totalLocatedCapacity": 71453172, + "virtualVolumeCapacityRate": -1, + }, + ], +} + +GET_LUNS_RESULT = { + "data": [ + { + "ldevId": 0, + "lun": 1, + }, + ], +} + +GET_HOST_GROUP_RESULT = { + "hostGroupName": CONFIG_MAP['host_grp_name'], +} + +GET_HOST_GROUPS_RESULT = { + "data": [ + { + "hostGroupNumber": 0, + "portId": CONFIG_MAP['port_id'], + "hostGroupName": "HPEXP-test", + }, + ], +} + +GET_LDEVS_RESULT = { + "data": [ + { + "ldevId": 0, + "label": "15960cc738c94c5bb4f1365be5eeed44", + }, + { + "ldevId": 1, + "label": "15960cc738c94c5bb4f1365be5eeed45", + }, + ], +} + +NOTFOUND_RESULT = { + "data": [], +} + +ERROR_RESULT = { + "errorSource": "", + "message": "", + "solution": "", + "messageId": "", + "errorCode": { + "SSB1": "", + "SSB2": "", + } +} + + +def _brick_get_connector_properties(multipath=False, enforce_multipath=False): + """Return a predefined connector object.""" + return DEFAULT_CONNECTOR + + +def reduce_retrying_time(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + backup_lock_waittime = hbsd_rest_api._LOCK_TIMEOUT + backup_exec_max_waittime = 
hbsd_rest_api._REST_TIMEOUT + backup_job_api_response_timeout = ( + hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) + backup_get_api_response_timeout = ( + hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) + backup_extend_waittime = hbsd_rest_api._EXTEND_TIMEOUT + backup_exec_retry_interval = hbsd_rest_api._EXEC_RETRY_INTERVAL + backup_rest_server_restart_timeout = ( + hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT) + backup_state_transition_timeout = ( + hbsd_rest._STATE_TRANSITION_TIMEOUT) + hbsd_rest_api._LOCK_TIMEOUT = 0.01 + hbsd_rest_api._REST_TIMEOUT = 0.01 + hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = 0.01 + hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = 0.01 + hbsd_rest_api._EXTEND_TIMEOUT = 0.01 + hbsd_rest_api._EXEC_RETRY_INTERVAL = 0.004 + hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = 0.02 + hbsd_rest._STATE_TRANSITION_TIMEOUT = 0.01 + func(*args, **kwargs) + hbsd_rest_api._LOCK_TIMEOUT = backup_lock_waittime + hbsd_rest_api._REST_TIMEOUT = backup_exec_max_waittime + hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = ( + backup_job_api_response_timeout) + hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = ( + backup_get_api_response_timeout) + hbsd_rest_api._EXTEND_TIMEOUT = backup_extend_waittime + hbsd_rest_api._EXEC_RETRY_INTERVAL = backup_exec_retry_interval + hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = ( + backup_rest_server_restart_timeout) + hbsd_rest._STATE_TRANSITION_TIMEOUT = ( + backup_state_transition_timeout) + return wrapper + + +class FakeLookupService(): + """Dummy FC zoning mapping lookup service class.""" + + def get_device_mapping_from_network(self, initiator_wwns, target_wwns): + """Return predefined FC zoning mapping.""" + return DEVICE_MAP + + +class FakeResponse(): + + def __init__(self, status_code, data=None, headers=None): + self.status_code = status_code + self.data = data + self.text = data + self.content = data + self.headers = {'Content-Type': 'json'} if headers is None else headers + + def json(self): + return self.data + + +class HPEXPRESTFCDriverTest(test.TestCase): + """Unit test class for HPEXP REST interface fibre channel module.""" + + test_existing_ref = {'source-id': '1'} + test_existing_ref_name = { + 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} + + def setUp(self): + """Set up the test environment.""" + def _set_required(opts, required): + for opt in opts: + opt.required = required + + # Initialize Cinder and avoid checking driver options. 
+ rest_required_opts = [ + opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] + common_required_opts = [ + opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] + _set_required(rest_required_opts, False) + _set_required(common_required_opts, False) + super(HPEXPRESTFCDriverTest, self).setUp() + _set_required(rest_required_opts, True) + _set_required(common_required_opts, True) + + self.configuration = mock.Mock(conf.Configuration) + self.ctxt = cinder_context.get_admin_context() + self._setup_config() + self._setup_driver() + + def _setup_config(self): + """Set configuration parameter values.""" + self.configuration.config_group = "REST" + + self.configuration.volume_backend_name = "RESTFC" + self.configuration.volume_driver = ( + "cinder.volume.drivers.hpe.xp.hpe_xp_fc.HPEXPFCDriver") + self.configuration.reserved_percentage = "0" + self.configuration.use_multipath_for_image_xfer = False + self.configuration.enforce_multipath_for_image_xfer = False + self.configuration.max_over_subscription_ratio = 500.0 + self.configuration.driver_ssl_cert_verify = False + + self.configuration.hpexp_storage_id = CONFIG_MAP['serial'] + self.configuration.hpexp_pool = ["30"] + self.configuration.hpexp_snap_pool = None + self.configuration.hpexp_ldev_range = "0-1" + self.configuration.hpexp_target_ports = [CONFIG_MAP['port_id']] + self.configuration.hpexp_compute_target_ports = [ + CONFIG_MAP['port_id']] + self.configuration.hpexp_group_create = True + self.configuration.hpexp_group_delete = True + self.configuration.hpexp_copy_speed = 3 + self.configuration.hpexp_copy_check_interval = 3 + self.configuration.hpexp_async_copy_check_interval = 10 + + self.configuration.san_login = CONFIG_MAP['user_id'] + self.configuration.san_password = CONFIG_MAP['user_pass'] + self.configuration.san_ip = CONFIG_MAP[ + 'rest_server_ip_addr'] + self.configuration.san_api_port = CONFIG_MAP[ + 'rest_server_ip_port'] + self.configuration.hpexp_rest_disable_io_wait = True + self.configuration.hpexp_rest_tcp_keepalive = True + self.configuration.hpexp_discard_zero_page = True + self.configuration.hpexp_rest_number = "0" + self.configuration.hpexp_lun_timeout = hbsd_rest._LUN_TIMEOUT + self.configuration.hpexp_lun_retry_interval = ( + hbsd_rest._LUN_RETRY_INTERVAL) + self.configuration.hpexp_restore_timeout = hbsd_rest._RESTORE_TIMEOUT + self.configuration.hpexp_state_transition_timeout = ( + hbsd_rest._STATE_TRANSITION_TIMEOUT) + self.configuration.hpexp_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT + self.configuration.hpexp_rest_timeout = hbsd_rest_api._REST_TIMEOUT + self.configuration.hpexp_extend_timeout = ( + hbsd_rest_api._EXTEND_TIMEOUT) + self.configuration.hpexp_exec_retry_interval = ( + hbsd_rest_api._EXEC_RETRY_INTERVAL) + self.configuration.hpexp_rest_connect_timeout = ( + hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) + self.configuration.hpexp_rest_job_api_response_timeout = ( + hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) + self.configuration.hpexp_rest_get_api_response_timeout = ( + hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) + self.configuration.hpexp_rest_server_busy_timeout = ( + hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) + self.configuration.hpexp_rest_keep_session_loop_interval = ( + hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) + self.configuration.hpexp_rest_another_ldev_mapped_retry_timeout = ( + hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) + self.configuration.hpexp_rest_tcp_keepidle = ( + hbsd_rest_api._TCP_KEEPIDLE) + self.configuration.hpexp_rest_tcp_keepintvl = ( + hbsd_rest_api._TCP_KEEPINTVL) + 
self.configuration.hpexp_rest_tcp_keepcnt = ( + hbsd_rest_api._TCP_KEEPCNT) + self.configuration.hpexp_host_mode_options = [] + + self.configuration.hpexp_zoning_request = False + + self.configuration.san_thin_provision = True + self.configuration.san_private_key = '' + self.configuration.san_clustername = '' + self.configuration.san_ssh_port = '22' + self.configuration.san_is_local = False + self.configuration.ssh_conn_timeout = '30' + self.configuration.ssh_min_pool_conn = '1' + self.configuration.ssh_max_pool_conn = '5' + + self.configuration.use_chap_auth = True + self.configuration.chap_username = CONFIG_MAP['auth_user'] + self.configuration.chap_password = CONFIG_MAP['auth_password'] + + self.configuration.safe_get = self._fake_safe_get + + CONF = cfg.CONF + CONF.my_ip = CONFIG_MAP['my_ip'] + + def _fake_safe_get(self, value): + """Retrieve a configuration value avoiding throwing an exception.""" + try: + val = getattr(self.configuration, value) + except AttributeError: + val = None + return val + + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def _setup_driver( + self, brick_get_connector_properties=None, request=None): + """Set up the driver environment.""" + self.driver = hpe_xp_fc.HPEXPFCDriver( + configuration=self.configuration) + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, GET_HOST_WWNS_RESULT)] + self.driver.do_setup(None) + self.driver.check_for_setup_error() + self.driver.local_path(None) + self.driver.create_export(None, None, None) + self.driver.ensure_export(None, None) + self.driver.remove_export(None, None) + self.driver.create_export_snapshot(None, None, None) + self.driver.remove_export_snapshot(None, None) + # stop the Loopingcall within the do_setup treatment + self.driver.common.client.keep_session_loop.stop() + + def tearDown(self): + self.client = None + super(HPEXPRESTFCDriverTest, self).tearDown() + + # API test cases + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def test_do_setup(self, brick_get_connector_properties, request): + drv = hpe_xp_fc.HPEXPFCDriver( + configuration=self.configuration) + self._setup_config() + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, GET_HOST_WWNS_RESULT)] + drv.do_setup(None) + self.assertEqual( + {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, + drv.common.storage_info['wwns']) + self.assertEqual(1, brick_get_connector_properties.call_count) + self.assertEqual(3, request.call_count) + # stop the Loopingcall within the do_setup treatment + self.driver.common.client.keep_session_loop.stop() + self.driver.common.client.keep_session_loop.wait() + + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def test_do_setup_create_hg(self, brick_get_connector_properties, request): + """Normal case: The host group does not exist.""" + drv = hpe_xp_fc.HPEXPFCDriver( + configuration=self.configuration) + self._setup_config() + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, 
NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + drv.do_setup(None) + self.assertEqual( + {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, + drv.common.storage_info['wwns']) + self.assertEqual(1, brick_get_connector_properties.call_count) + self.assertEqual(8, request.call_count) + # stop the Loopingcall within the do_setup treatment + self.driver.common.client.keep_session_loop.stop() + self.driver.common.client.keep_session_loop.wait() + + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def test_do_setup_pool_name(self, brick_get_connector_properties, request): + """Normal case: Specify a pool name instead of pool id""" + drv = hpe_xp_fc.HPEXPFCDriver( + configuration=self.configuration) + self._setup_config() + tmp_pool = self.configuration.hitachi_pool + self.configuration.hitachi_pool = [CONFIG_MAP['pool_name']] + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_POOLS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, GET_HOST_WWNS_RESULT)] + drv.do_setup(None) + self.assertEqual( + {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, + drv.common.storage_info['wwns']) + self.assertEqual(1, brick_get_connector_properties.call_count) + self.assertEqual(4, request.call_count) + self.configuration.hitachi_pool = tmp_pool + # stop the Loopingcall within the do_setup treatment + self.driver.common.client.keep_session_loop.stop() + self.driver.common.client.keep_session_loop.wait() + + @mock.patch.object(requests.Session, "request") + def test_create_volume(self, request): + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(2, request.call_count) + + @reduce_retrying_time + @mock.patch.object(requests.Session, "request") + def test_create_volume_timeout(self, request): + request.return_value = FakeResponse( + 500, ERROR_RESULT, + headers={'Content-Type': 'json'}) + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + self.assertRaises(exception.VolumeDriverException, + self.driver.create_volume, + fake_volume.fake_volume_obj(self.ctxt)) + self.assertGreater(request.call_count, 1) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_temporary_busy(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(7, request.call_count) + + @reduce_retrying_time + 
@mock.patch.object(requests.Session, "request") + def test_delete_volume_busy_timeout(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_LDEV_RESULT_PAIR)] + self.assertRaises(exception.VolumeDriverException, + self.driver.delete_volume, + TEST_VOLUME[0]) + self.assertGreater(request.call_count, 2) + + @mock.patch.object(requests.Session, "request") + def test_extend_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.extend_volume(TEST_VOLUME[0], 256) + self.assertEqual(3, request.call_count) + + @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") + @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") + @mock.patch.object(requests.Session, "request") + def test__update_volume_stats( + self, request, get_filter_function, get_goodness_function): + request.return_value = FakeResponse(200, GET_POOLS_RESULT) + get_filter_function.return_value = None + get_goodness_function.return_value = None + self.driver._update_volume_stats() + self.assertEqual( + 'Hewlett Packard Enterprise', self.driver._stats['vendor_name']) + self.assertTrue(self.driver._stats["pools"][0]['multiattach']) + self.assertEqual(1, request.call_count) + self.assertEqual(1, get_filter_function.call_count) + self.assertEqual(1, get_goodness_function.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot(self, volume_get, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual(10, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_no_pair(self, request): + """Normal case: Delete a snapshot without pair.""" + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_create_cloned_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) + self.assertEqual('1', vol['provider_location']) + self.assertEqual(5, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_create_volume_from_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + vol = self.driver.create_volume_from_snapshot( + TEST_VOLUME[0], TEST_SNAPSHOT[0]) + self.assertEqual('1', vol['provider_location']) + self.assertEqual(5, request.call_count) + + @mock.patch.object(fczm_utils, "add_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_initialize_connection(self, request, add_fc_zone): + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(2, request.call_count) + self.assertEqual(1, add_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "add_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_initialize_connection_already_mapped(self, request, add_fc_zone): + """Normal case: The LDEV is already mapped.""" + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [ + FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(202, COMPLETED_FAILED_RESULT_LU_DEFINED), + FakeResponse(200, GET_LUNS_RESULT), + ] + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(3, request.call_count) + self.assertEqual(1, add_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "add_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_initialize_connection_shared_target(self, request, add_fc_zone): + """Normal case: A target shared with other systems.""" + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_HOST_GROUPS_RESULT), + FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(5, request.call_count) + 
self.assertEqual(1, add_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "remove_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_terminate_connection(self, request, remove_fc_zone): + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) + self.assertEqual(5, request.call_count) + self.assertEqual(1, remove_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "remove_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_terminate_connection_not_connector(self, request, remove_fc_zone): + """Normal case: Connector is None.""" + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(200, GET_HOST_GROUP_RESULT), + FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.terminate_connection(TEST_VOLUME[2], None) + self.assertEqual(8, request.call_count) + self.assertEqual(1, remove_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "remove_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_terminate_connection_not_lun(self, request, remove_fc_zone): + """Normal case: The LUN no longer exists.""" + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(200, GET_LDEV_RESULT)] + self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) + self.assertEqual(2, request.call_count) + self.assertEqual(1, remove_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "add_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_initialize_connection_snapshot(self, request, add_fc_zone): + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.initialize_connection_snapshot( + TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(2, request.call_count) + self.assertEqual(1, add_fc_zone.call_count) + + @mock.patch.object(fczm_utils, "remove_fc_zone") + @mock.patch.object(requests.Session, "request") + def test_terminate_connection_snapshot(self, request, remove_fc_zone): + self.driver.common.conf.hitachi_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), + FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.terminate_connection_snapshot( + 
TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) + self.assertEqual(5, request.call_count) + self.assertEqual(1, remove_fc_zone.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing_name(self, request): + request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref_name) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(3, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing_get_size(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT) + self.driver.manage_existing_get_size( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing_get_size_name(self, request): + request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), + FakeResponse(200, GET_LDEV_RESULT)] + self.driver.manage_existing_get_size( + TEST_VOLUME[0], self.test_existing_ref_name) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_unmanage(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT)] + self.driver.unmanage(TEST_VOLUME[0]) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_copy_image_to_volume(self, request): + image_service = 'fake_image_service' + image_id = 'fake_image_id' + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ + as mock_copy_image: + self.driver.copy_image_to_volume( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + mock_copy_image.assert_called_with( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_update_migrated_volume(self, request): + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[1], + "available") + self.assertEqual(1, request.call_count) + + def test_unmanage_snapshot(self): + """The driver does not support unmanage_snapshot.""" + self.assertRaises( + NotImplementedError, + self.driver.unmanage_snapshot, + TEST_SNAPSHOT[0]) + + def test_retype(self): + new_specs = {'hpe_xp:test': 'test'} + new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) + diff = {} + host = {} + ret = self.driver.retype( + self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host) + self.assertFalse(ret) + + def test_backup_use_temp_snapshot(self): + self.assertTrue(self.driver.backup_use_temp_snapshot()) + + @mock.patch.object(requests.Session, "request") + def test_revert_to_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.revert_to_snapshot( + self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) + self.assertEqual(5, request.call_count) + + def test_session___call__(self): + session = self.driver.common.client.Session('id', 'token') + req = models.Response() + ret = session.__call__(req) + self.assertEqual('Session token', ret.headers['Authorization']) + + def test_create_group(self): + ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) + self.assertIsNone(ret) + + @mock.patch.object(requests.Session, "request") + def test_delete_group(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.delete_group( + self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) + self.assertEqual(4, request.call_count) + actual = ( + {'status': TEST_GROUP[0]['status']}, + [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_create_group_from_src_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_from_src( + self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], + source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] + ) + self.assertEqual(5, request.call_count) + actual = ( + None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_create_group_from_src_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_from_src( + self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], + group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] + ) + self.assertEqual(5, request.call_count) + actual = ( + None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) + self.assertTupleEqual(actual, ret) + + def test_create_group_from_src_volume_error(self): + self.assertRaises( + exception.VolumeDriverException, self.driver.create_group_from_src, + self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], + source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] + ) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_update_group(self, is_group_a_cg_snapshot_type): + is_group_a_cg_snapshot_type.return_value = False + ret = self.driver.update_group( + self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) + self.assertTupleEqual((None, None, None), ret) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_update_group_error(self, is_group_a_cg_snapshot_type): + is_group_a_cg_snapshot_type.return_value = True + self.assertRaises( + exception.VolumeDriverException, self.driver.update_group, + self.ctxt, TEST_GROUP[0], 
add_volumes=[TEST_VOLUME[3]], + remove_volumes=[TEST_VOLUME[0]] + ) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_create_group_snapshot_non_cg( + self, is_group_a_cg_snapshot_type, volume_get, request): + is_group_a_cg_snapshot_type.return_value = False + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_snapshot( + self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] + ) + self.assertEqual(4, request.call_count) + actual = ( + {'status': 'available'}, + [{'id': TEST_SNAPSHOT[0]['id'], + 'provider_location': '1', + 'status': 'available'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_create_group_snapshot_cg( + self, is_group_a_cg_snapshot_type, volume_get, request): + is_group_a_cg_snapshot_type.return_value = True + request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_snapshot( + self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] + ) + self.assertEqual(5, request.call_count) + actual = ( + None, + [{'id': TEST_SNAPSHOT[0]['id'], + 'provider_location': '1', + 'status': 'available'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_delete_group_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.delete_group_snapshot( + self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) + self.assertEqual(10, request.call_count) + actual = ( + {'status': TEST_GROUP_SNAP[0]['status']}, + [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(hpe_xp_fc.HPEXPFCDriver, "_get_oslo_driver_opts") + def test_get_driver_options(self, _get_oslo_driver_opts): + _get_oslo_driver_opts.return_value = [] + ret = self.driver.get_driver_options() + actual = (hpe_xp_rest.COMMON_VOLUME_OPTS + + hpe_xp_rest.REST_VOLUME_OPTS + + hpe_xp_rest.FC_VOLUME_OPTS) + self.assertEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py new file mode 100644 index 00000000000..c8f3928d52a --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py @@ -0,0 +1,956 @@ +# 
Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""Unit tests for Hewlett Packard Enterprise Driver.""" + +from unittest import mock + +from oslo_config import cfg +import requests + +from cinder import context as cinder_context +from cinder.db.sqlalchemy import api as sqlalchemy_api +from cinder import exception +from cinder.objects import group_snapshot as obj_group_snap +from cinder.objects import snapshot as obj_snap +from cinder.tests.unit import fake_group +from cinder.tests.unit import fake_group_snapshot +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder.tests.unit import test +from cinder.volume import configuration as conf +from cinder.volume import driver +from cinder.volume.drivers.hitachi import hbsd_common +from cinder.volume.drivers.hitachi import hbsd_rest +from cinder.volume.drivers.hitachi import hbsd_rest_api +from cinder.volume.drivers.hpe.xp import hpe_xp_iscsi +from cinder.volume.drivers.hpe.xp import hpe_xp_rest +from cinder.volume import volume_types +from cinder.volume import volume_utils + +# Configuration parameter values +CONFIG_MAP = { + 'serial': '886000123456', + 'my_ip': '127.0.0.1', + 'rest_server_ip_addr': '172.16.18.108', + 'rest_server_ip_port': '23451', + 'port_id': 'CL1-A', + 'host_grp_name': 'HPEXP-127.0.0.1', + 'host_mode': 'LINUX/IRIX', + 'host_iscsi_name': 'iqn.hpexp-test-host', + 'target_iscsi_name': 'iqn.hpexp-test-target', + 'user_id': 'user', + 'user_pass': 'password', + 'pool_name': 'test_pool', + 'ipv4Address': '111.22.333.44', + 'tcpPort': '5555', + 'auth_user': 'auth_user', + 'auth_password': 'auth_password', +} + +DEFAULT_CONNECTOR = { + 'host': 'host', + 'ip': CONFIG_MAP['my_ip'], + 'initiator': CONFIG_MAP['host_iscsi_name'], + 'multipath': False, +} + +CTXT = cinder_context.get_admin_context() + +TEST_VOLUME = [] +for i in range(4): + volume = {} + volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) + volume['name'] = 'test-volume{0:d}'.format(i) + if i == 3: + volume['provider_location'] = None + else: + volume['provider_location'] = '{0:d}'.format(i) + volume['size'] = 128 + if i == 2: + volume['status'] = 'in-use' + else: + volume['status'] = 'available' + volume = fake_volume.fake_volume_obj(CTXT, **volume) + TEST_VOLUME.append(volume) + + +def _volume_get(context, volume_id): + """Return predefined volume info.""" + return TEST_VOLUME[int(volume_id.replace("-", ""))] + + +TEST_SNAPSHOT = [] +snapshot = {} +snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) +snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) +snapshot['provider_location'] = '{0:d}'.format(1) +snapshot['status'] = 'available' +snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) +snapshot['volume'] = _volume_get(None, snapshot['volume_id']) +snapshot['volume_name'] = 'test-volume{0:d}'.format(0) +snapshot['volume_size'] = 128 +snapshot = obj_snap.Snapshot._from_db_object( + CTXT, obj_snap.Snapshot(), + 
fake_snapshot.fake_db_snapshot(**snapshot)) +TEST_SNAPSHOT.append(snapshot) + +TEST_GROUP = [] +for i in range(2): + group = {} + group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) + group['status'] = 'available' + group = fake_group.fake_group_obj(CTXT, **group) + TEST_GROUP.append(group) + +TEST_GROUP_SNAP = [] +group_snapshot = {} +group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) +group_snapshot['status'] = 'available' +group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( + CTXT, obj_group_snap.GroupSnapshot(), + fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) +TEST_GROUP_SNAP.append(group_snapshot) + +# Dummy response for REST API +POST_SESSIONS_RESULT = { + "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", + "sessionId": 0, +} + +GET_PORTS_RESULT = { + "data": [ + { + "portId": CONFIG_MAP['port_id'], + "portType": "ISCSI", + "portAttributes": [ + "TAR", + "MCU", + "RCU", + "ELUN" + ], + "portSpeed": "AUT", + "loopId": "00", + "fabricMode": False, + "lunSecuritySetting": True, + }, + ], +} + +GET_PORT_RESULT = { + "ipv4Address": CONFIG_MAP['ipv4Address'], + "tcpPort": CONFIG_MAP['tcpPort'], +} + +GET_HOST_ISCSIS_RESULT = { + "data": [ + { + "hostGroupNumber": 0, + "iscsiName": CONFIG_MAP['host_iscsi_name'], + }, + ], +} + +GET_HOST_GROUP_RESULT = { + "hostGroupName": CONFIG_MAP['host_grp_name'], + "iscsiName": CONFIG_MAP['target_iscsi_name'], +} + +GET_HOST_GROUPS_RESULT = { + "data": [ + { + "hostGroupNumber": 0, + "portId": CONFIG_MAP['port_id'], + "hostGroupName": "HPEXP-test", + "iscsiName": CONFIG_MAP['target_iscsi_name'], + }, + ], +} + +COMPLETED_SUCCEEDED_RESULT = { + "status": "Completed", + "state": "Succeeded", + "affectedResources": ('a/b/c/1',), +} + +GET_LDEV_RESULT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", +} + +GET_LDEV_RESULT_MAPPED = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "ports": [ + { + "portId": CONFIG_MAP['port_id'], + "hostGroupNumber": 0, + "hostGroupName": CONFIG_MAP['host_grp_name'], + "lun": 1 + }, + ], +} + +GET_LDEV_RESULT_PAIR = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP", "FS"], + "status": "NML", +} + +GET_POOLS_RESULT = { + "data": [ + { + "poolId": 30, + "poolName": CONFIG_MAP['pool_name'], + "availableVolumeCapacity": 480144, + "totalPoolCapacity": 507780, + "totalLocatedCapacity": 71453172, + "virtualVolumeCapacityRate": -1, + }, + ], +} + +GET_SNAPSHOTS_RESULT = { + "data": [ + { + "primaryOrSecondary": "S-VOL", + "status": "PSUS", + "pvolLdevId": 0, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + +GET_SNAPSHOTS_RESULT_PAIR = { + "data": [ + { + "primaryOrSecondary": "S-VOL", + "status": "PAIR", + "pvolLdevId": 0, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + +GET_LDEVS_RESULT = { + "data": [ + { + "ldevId": 0, + "label": "15960cc738c94c5bb4f1365be5eeed44", + }, + { + "ldevId": 1, + "label": "15960cc738c94c5bb4f1365be5eeed45", + }, + ], +} + +NOTFOUND_RESULT = { + "data": [], +} + + +def _brick_get_connector_properties(multipath=False, enforce_multipath=False): + """Return a predefined connector object.""" + return DEFAULT_CONNECTOR + + +class FakeResponse(): + + def __init__(self, status_code, data=None, headers=None): + self.status_code = status_code + self.data = data + self.text = data + self.content = data + self.headers = {'Content-Type': 'json'} if headers is None else headers + + def 
json(self): + return self.data + + +class HPEXPRESTISCSIDriverTest(test.TestCase): + """Unit test class for HPEXP REST interface iSCSI module.""" + + test_existing_ref = {'source-id': '1'} + test_existing_ref_name = { + 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} + + def setUp(self): + """Set up the test environment.""" + def _set_required(opts, required): + for opt in opts: + opt.required = required + + # Initialize Cinder and avoid checking driver options. + rest_required_opts = [ + opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] + common_required_opts = [ + opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] + _set_required(rest_required_opts, False) + _set_required(common_required_opts, False) + super(HPEXPRESTISCSIDriverTest, self).setUp() + _set_required(rest_required_opts, True) + _set_required(common_required_opts, True) + + self.configuration = mock.Mock(conf.Configuration) + self.ctxt = cinder_context.get_admin_context() + self._setup_config() + self._setup_driver() + + def _setup_config(self): + """Set configuration parameter values.""" + self.configuration.config_group = "REST" + + self.configuration.volume_backend_name = "RESTISCSI" + self.configuration.volume_driver = ( + "cinder.volume.drivers.hpe.xp.hpe_xp_iscsi.HPEXPISCSIDriver") + self.configuration.reserved_percentage = "0" + self.configuration.use_multipath_for_image_xfer = False + self.configuration.enforce_multipath_for_image_xfer = False + self.configuration.max_over_subscription_ratio = 500.0 + self.configuration.driver_ssl_cert_verify = False + + self.configuration.hpexp_storage_id = CONFIG_MAP['serial'] + self.configuration.hpexp_pool = ["30"] + self.configuration.hpexp_snap_pool = None + self.configuration.hpexp_ldev_range = "0-1" + self.configuration.hpexp_target_ports = [CONFIG_MAP['port_id']] + self.configuration.hpexp_compute_target_ports = [ + CONFIG_MAP['port_id']] + self.configuration.hpexp_group_create = True + self.configuration.hpexp_group_delete = True + self.configuration.hpexp_copy_speed = 3 + self.configuration.hpexp_copy_check_interval = 3 + self.configuration.hpexp_async_copy_check_interval = 10 + + self.configuration.san_login = CONFIG_MAP['user_id'] + self.configuration.san_password = CONFIG_MAP['user_pass'] + self.configuration.san_ip = CONFIG_MAP[ + 'rest_server_ip_addr'] + self.configuration.san_api_port = CONFIG_MAP[ + 'rest_server_ip_port'] + self.configuration.hpexp_rest_disable_io_wait = True + self.configuration.hpexp_rest_tcp_keepalive = True + self.configuration.hpexp_discard_zero_page = True + self.configuration.hpexp_rest_number = "0" + self.configuration.hpexp_lun_timeout = hbsd_rest._LUN_TIMEOUT + self.configuration.hpexp_lun_retry_interval = ( + hbsd_rest._LUN_RETRY_INTERVAL) + self.configuration.hpexp_restore_timeout = hbsd_rest._RESTORE_TIMEOUT + self.configuration.hpexp_state_transition_timeout = ( + hbsd_rest._STATE_TRANSITION_TIMEOUT) + self.configuration.hpexp_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT + self.configuration.hpexp_rest_timeout = hbsd_rest_api._REST_TIMEOUT + self.configuration.hpexp_extend_timeout = ( + hbsd_rest_api._EXTEND_TIMEOUT) + self.configuration.hpexp_exec_retry_interval = ( + hbsd_rest_api._EXEC_RETRY_INTERVAL) + self.configuration.hpexp_rest_connect_timeout = ( + hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) + self.configuration.hpexp_rest_job_api_response_timeout = ( + hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) + self.configuration.hpexp_rest_get_api_response_timeout = ( + hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) + 
self.configuration.hpexp_rest_server_busy_timeout = ( + hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) + self.configuration.hpexp_rest_keep_session_loop_interval = ( + hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) + self.configuration.hpexp_rest_another_ldev_mapped_retry_timeout = ( + hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) + self.configuration.hpexp_rest_tcp_keepidle = ( + hbsd_rest_api._TCP_KEEPIDLE) + self.configuration.hpexp_rest_tcp_keepintvl = ( + hbsd_rest_api._TCP_KEEPINTVL) + self.configuration.hpexp_rest_tcp_keepcnt = ( + hbsd_rest_api._TCP_KEEPCNT) + self.configuration.hpexp_host_mode_options = [] + + self.configuration.use_chap_auth = True + self.configuration.chap_username = CONFIG_MAP['auth_user'] + self.configuration.chap_password = CONFIG_MAP['auth_password'] + + self.configuration.san_thin_provision = True + self.configuration.san_private_key = '' + self.configuration.san_clustername = '' + self.configuration.san_ssh_port = '22' + self.configuration.san_is_local = False + self.configuration.ssh_conn_timeout = '30' + self.configuration.ssh_min_pool_conn = '1' + self.configuration.ssh_max_pool_conn = '5' + + self.configuration.safe_get = self._fake_safe_get + + CONF = cfg.CONF + CONF.my_ip = CONFIG_MAP['my_ip'] + + def _fake_safe_get(self, value): + """Retrieve a configuration value avoiding throwing an exception.""" + try: + val = getattr(self.configuration, value) + except AttributeError: + val = None + return val + + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def _setup_driver( + self, brick_get_connector_properties=None, request=None): + """Set up the driver environment.""" + self.driver = hpe_xp_iscsi.HPEXPISCSIDriver( + configuration=self.configuration) + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, GET_PORT_RESULT), + FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUP_RESULT)] + self.driver.do_setup(None) + self.driver.check_for_setup_error() + self.driver.local_path(None) + self.driver.create_export(None, None, None) + self.driver.ensure_export(None, None) + self.driver.remove_export(None, None) + self.driver.create_export_snapshot(None, None, None) + self.driver.remove_export_snapshot(None, None) + # stop the Loopingcall within the do_setup treatment + self.driver.common.client.keep_session_loop.stop() + + def tearDown(self): + self.client = None + super(HPEXPRESTISCSIDriverTest, self).tearDown() + + # API test cases + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def test_do_setup(self, brick_get_connector_properties, request): + drv = hpe_xp_iscsi.HPEXPISCSIDriver( + configuration=self.configuration) + self._setup_config() + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, GET_PORT_RESULT), + FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUP_RESULT)] + drv.do_setup(None) + self.assertEqual( + {CONFIG_MAP['port_id']: + '%(ip)s:%(port)s' % { + 'ip': CONFIG_MAP['ipv4Address'], + 'port': CONFIG_MAP['tcpPort']}}, + drv.common.storage_info['portals']) + self.assertEqual(1, brick_get_connector_properties.call_count) + self.assertEqual(5, request.call_count) + # stop the Loopingcall within the do_setup treatment + 
self.driver.common.client.keep_session_loop.stop() + self.driver.common.client.keep_session_loop.wait() + + @mock.patch.object(requests.Session, "request") + @mock.patch.object( + volume_utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + def test_do_setup_create_hg(self, brick_get_connector_properties, request): + """Normal case: The host group not exists.""" + drv = hpe_xp_iscsi.HPEXPISCSIDriver( + configuration=self.configuration) + self._setup_config() + request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), + FakeResponse(200, GET_PORTS_RESULT), + FakeResponse(200, GET_PORT_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + drv.do_setup(None) + self.assertEqual( + {CONFIG_MAP['port_id']: + '%(ip)s:%(port)s' % { + 'ip': CONFIG_MAP['ipv4Address'], + 'port': CONFIG_MAP['tcpPort']}}, + drv.common.storage_info['portals']) + self.assertEqual(1, brick_get_connector_properties.call_count) + self.assertEqual(8, request.call_count) + # stop the Loopingcall within the do_setup treatment + self.driver.common.client.keep_session_loop.stop() + self.driver.common.client.keep_session_loop.wait() + + @mock.patch.object(requests.Session, "request") + def test_extend_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.extend_volume(TEST_VOLUME[0], 256) + self.assertEqual(3, request.call_count) + + @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") + @mock.patch.object(driver.ISCSIDriver, "get_filter_function") + @mock.patch.object(requests.Session, "request") + def test__update_volume_stats( + self, request, get_filter_function, get_goodness_function): + request.return_value = FakeResponse(200, GET_POOLS_RESULT) + get_filter_function.return_value = None + get_goodness_function.return_value = None + self.driver._update_volume_stats() + self.assertEqual( + 'Hewlett Packard Enterprise', self.driver._stats['vendor_name']) + self.assertTrue(self.driver._stats["pools"][0]['multiattach']) + self.assertEqual(1, request.call_count) + self.assertEqual(1, get_filter_function.call_count) + self.assertEqual(1, get_goodness_function.call_count) + + @mock.patch.object(requests.Session, "request") + def test_create_volume(self, request): + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot(self, volume_get, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_create_cloned_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) + self.assertEqual('1', vol['provider_location']) + self.assertEqual(5, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_create_volume_from_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + vol = self.driver.create_volume_from_snapshot( + TEST_VOLUME[0], TEST_SNAPSHOT[0]) + self.assertEqual('1', vol['provider_location']) + self.assertEqual(5, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_initialize_connection(self, request): + request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUP_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('iscsi', ret['driver_volume_type']) + self.assertEqual( + '%(ip)s:%(port)s' % { + 'ip': CONFIG_MAP['ipv4Address'], + 'port': CONFIG_MAP['tcpPort'], + }, + ret['data']['target_portal']) + self.assertEqual(CONFIG_MAP['target_iscsi_name'], + ret['data']['target_iqn']) + self.assertEqual('CHAP', ret['data']['auth_method']) + self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) + self.assertEqual( + CONFIG_MAP['auth_password'], ret['data']['auth_password']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(3, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_initialize_connection_shared_target(self, request): + """Normal case: A target shared with other systems.""" + request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_HOST_GROUPS_RESULT), + FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('iscsi', ret['driver_volume_type']) + self.assertEqual( + '%(ip)s:%(port)s' % { + 'ip': CONFIG_MAP['ipv4Address'], + 'port': CONFIG_MAP['tcpPort'], + }, + 
ret['data']['target_portal']) + self.assertEqual(CONFIG_MAP['target_iscsi_name'], + ret['data']['target_iqn']) + self.assertEqual('CHAP', ret['data']['auth_method']) + self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) + self.assertEqual( + CONFIG_MAP['auth_password'], ret['data']['auth_password']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_terminate_connection(self, request): + request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUP_RESULT), + FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) + self.assertEqual(6, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_terminate_connection_not_connector(self, request): + """Normal case: Connector is None.""" + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(200, GET_HOST_GROUP_RESULT), + FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUPS_RESULT), + FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.terminate_connection(TEST_VOLUME[2], None) + self.assertEqual(9, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_initialize_connection_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUP_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.initialize_connection_snapshot( + TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) + self.assertEqual('iscsi', ret['driver_volume_type']) + self.assertEqual( + '%(ip)s:%(port)s' % { + 'ip': CONFIG_MAP['ipv4Address'], + 'port': CONFIG_MAP['tcpPort'], + }, + ret['data']['target_portal']) + self.assertEqual(CONFIG_MAP['target_iscsi_name'], + ret['data']['target_iqn']) + self.assertEqual('CHAP', ret['data']['auth_method']) + self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) + self.assertEqual( + CONFIG_MAP['auth_password'], ret['data']['auth_password']) + self.assertEqual(1, ret['data']['target_lun']) + self.assertEqual(3, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_terminate_connection_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), + FakeResponse(200, GET_HOST_GROUP_RESULT), + FakeResponse(200, GET_LDEV_RESULT_MAPPED), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.terminate_connection_snapshot( + TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) + self.assertEqual(6, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing_name(self, request): + 
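"""Normal case: The existing LDEV is specified by source-name.""" + 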
request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref_name) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(3, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing_get_size(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT) + self.driver.manage_existing_get_size( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_manage_existing_get_size_name(self, request): + request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), + FakeResponse(200, GET_LDEV_RESULT)] + self.driver.manage_existing_get_size( + TEST_VOLUME[0], self.test_existing_ref_name) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_unmanage(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT)] + self.driver.unmanage(TEST_VOLUME[0]) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_copy_image_to_volume(self, request): + image_service = 'fake_image_service' + image_id = 'fake_image_id' + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ + as mock_copy_image: + self.driver.copy_image_to_volume( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + mock_copy_image.assert_called_with( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_update_migrated_volume(self, request): + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[1], + "available") + self.assertEqual(1, request.call_count) + + def test_unmanage_snapshot(self): + """The driver don't support unmange_snapshot.""" + self.assertRaises( + NotImplementedError, + self.driver.unmanage_snapshot, + TEST_SNAPSHOT[0]) + + def test_retype(self): + new_specs = {'hpe_xp:test': 'test'} + new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) + diff = {} + host = {} + ret = self.driver.retype( + self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host) + self.assertFalse(ret) + + def test_backup_use_temp_snapshot(self): + self.assertTrue(self.driver.backup_use_temp_snapshot()) + + @mock.patch.object(requests.Session, "request") + def test_revert_to_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.revert_to_snapshot( + self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) + self.assertEqual(5, request.call_count) + + def test_create_group(self): + ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) + self.assertIsNone(ret) + + @mock.patch.object(requests.Session, "request") + def test_delete_group(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = 
self.driver.delete_group( + self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) + self.assertEqual(4, request.call_count) + actual = ( + {'status': TEST_GROUP[0]['status']}, + [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_create_group_from_src_volume(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_from_src( + self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], + source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] + ) + self.assertEqual(5, request.call_count) + actual = ( + None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_create_group_from_src_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_from_src( + self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], + group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] + ) + self.assertEqual(5, request.call_count) + actual = ( + None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) + self.assertTupleEqual(actual, ret) + + def test_create_group_from_src_volume_error(self): + self.assertRaises( + exception.VolumeDriverException, self.driver.create_group_from_src, + self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], + source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] + ) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_update_group(self, is_group_a_cg_snapshot_type): + is_group_a_cg_snapshot_type.return_value = False + ret = self.driver.update_group( + self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) + self.assertTupleEqual((None, None, None), ret) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_update_group_error(self, is_group_a_cg_snapshot_type): + is_group_a_cg_snapshot_type.return_value = True + self.assertRaises( + exception.VolumeDriverException, self.driver.update_group, + self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], + remove_volumes=[TEST_VOLUME[0]] + ) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_create_group_snapshot_non_cg( + self, is_group_a_cg_snapshot_type, volume_get, request): + is_group_a_cg_snapshot_type.return_value = False + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_snapshot( + self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] + ) + self.assertEqual(4, 
request.call_count) + actual = ( + {'status': 'available'}, + [{'id': TEST_SNAPSHOT[0]['id'], + 'provider_location': '1', + 'status': 'available'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') + def test_create_group_snapshot_cg( + self, is_group_a_cg_snapshot_type, volume_get, request): + is_group_a_cg_snapshot_type.return_value = True + request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_group_snapshot( + self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] + ) + self.assertEqual(5, request.call_count) + actual = ( + None, + [{'id': TEST_SNAPSHOT[0]['id'], + 'provider_location': '1', + 'status': 'available'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_delete_group_snapshot(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.delete_group_snapshot( + self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) + self.assertEqual(10, request.call_count) + actual = ( + {'status': TEST_GROUP_SNAP[0]['status']}, + [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] + ) + self.assertTupleEqual(actual, ret) + + @mock.patch.object(hpe_xp_iscsi.HPEXPISCSIDriver, "_get_oslo_driver_opts") + def test_get_driver_options(self, _get_oslo_driver_opts): + _get_oslo_driver_opts.return_value = [] + ret = self.driver.get_driver_options() + actual = (hpe_xp_rest.COMMON_VOLUME_OPTS + + hpe_xp_rest.REST_VOLUME_OPTS) + self.assertEqual(actual, ret) diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_fc.py b/cinder/volume/drivers/hpe/xp/hpe_xp_fc.py new file mode 100644 index 00000000000..ccea832d442 --- /dev/null +++ b/cinder/volume/drivers/hpe/xp/hpe_xp_fc.py @@ -0,0 +1,88 @@ +# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +"""Fibre channel module for Hewlett Packard Enterprise Driver.""" + +from cinder import interface +from cinder.volume.drivers.hitachi import hbsd_common +from cinder.volume.drivers.hitachi import hbsd_fc +from cinder.volume.drivers.hitachi import hbsd_rest +from cinder.volume.drivers.hitachi import hbsd_utils +from cinder.volume.drivers.hpe.xp import hpe_xp_rest as rest +from cinder.volume.drivers.hpe.xp import hpe_xp_utils as utils + +MSG = hbsd_utils.HBSDMsg + +_DRIVER_INFO = { + 'version': utils.VERSION, + 'proto': 'FC', + 'hba_id': 'wwpns', + 'hba_id_type': 'World Wide Name', + 'msg_id': { + 'target': MSG.CREATE_HOST_GROUP_FAILED, + }, + 'volume_backend_name': '%(prefix)sFC' % { + 'prefix': utils.DRIVER_PREFIX, + }, + 'volume_type': 'fibre_channel', + 'param_prefix': utils.PARAM_PREFIX, + 'vendor_name': utils.VENDOR_NAME, + 'driver_prefix': utils.DRIVER_PREFIX, + 'driver_file_prefix': utils.DRIVER_FILE_PREFIX, + 'target_prefix': utils.TARGET_PREFIX, + 'hdp_vol_attr': utils.HDP_VOL_ATTR, + 'hdt_vol_attr': utils.HDT_VOL_ATTR, + 'nvol_ldev_type': utils.NVOL_LDEV_TYPE, + 'target_iqn_suffix': utils.TARGET_IQN_SUFFIX, + 'pair_attr': utils.PAIR_ATTR, +} + + +@interface.volumedriver +class HPEXPFCDriver(hbsd_fc.HBSDFCDriver): + """Fibre channel class for Hewlett Packard Enterprise Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. + + """ + + VERSION = utils.VERSION + + # ThirdPartySystems wiki page + CI_WIKI_NAME = utils.CI_WIKI_NAME + + def __init__(self, *args, **kwargs): + """Initialize instance variables.""" + super(HPEXPFCDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(rest.COMMON_VOLUME_OPTS) + self.configuration.append_config_values(rest.FC_VOLUME_OPTS) + + def _init_common(self, conf, db): + return rest.HPEXPRESTFC(conf, _DRIVER_INFO, db) + + @staticmethod + def get_driver_options(): + additional_opts = HPEXPFCDriver._get_oslo_driver_opts( + *(hbsd_common._INHERITED_VOLUME_OPTS + + hbsd_rest._REQUIRED_REST_OPTS + + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', + 'san_api_port', ])) + return (rest.COMMON_VOLUME_OPTS + + rest.REST_VOLUME_OPTS + + rest.FC_VOLUME_OPTS + + additional_opts) diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_iscsi.py b/cinder/volume/drivers/hpe/xp/hpe_xp_iscsi.py new file mode 100644 index 00000000000..0648f096fce --- /dev/null +++ b/cinder/volume/drivers/hpe/xp/hpe_xp_iscsi.py @@ -0,0 +1,86 @@ +# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +"""iSCSI channel module for Hewlett Packard Enterprise Driver.""" + +from cinder import interface +from cinder.volume.drivers.hitachi import hbsd_common +from cinder.volume.drivers.hitachi import hbsd_iscsi +from cinder.volume.drivers.hitachi import hbsd_rest +from cinder.volume.drivers.hitachi import hbsd_utils +from cinder.volume.drivers.hpe.xp import hpe_xp_rest as rest +from cinder.volume.drivers.hpe.xp import hpe_xp_utils as utils + +MSG = hbsd_utils.HBSDMsg + +_DRIVER_INFO = { + 'version': utils.VERSION, + 'proto': 'iSCSI', + 'hba_id': 'initiator', + 'hba_id_type': 'iSCSI initiator IQN', + 'msg_id': { + 'target': MSG.CREATE_ISCSI_TARGET_FAILED, + }, + 'volume_backend_name': '%(prefix)siSCSI' % { + 'prefix': utils.DRIVER_PREFIX, + }, + 'volume_type': 'iscsi', + 'param_prefix': utils.PARAM_PREFIX, + 'vendor_name': utils.VENDOR_NAME, + 'driver_prefix': utils.DRIVER_PREFIX, + 'driver_file_prefix': utils.DRIVER_FILE_PREFIX, + 'target_prefix': utils.TARGET_PREFIX, + 'hdp_vol_attr': utils.HDP_VOL_ATTR, + 'hdt_vol_attr': utils.HDT_VOL_ATTR, + 'nvol_ldev_type': utils.NVOL_LDEV_TYPE, + 'target_iqn_suffix': utils.TARGET_IQN_SUFFIX, + 'pair_attr': utils.PAIR_ATTR, +} + + +@interface.volumedriver +class HPEXPISCSIDriver(hbsd_iscsi.HBSDISCSIDriver): + """iSCSI class for Hewlett Packard Enterprise Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. + + """ + + VERSION = utils.VERSION + + # ThirdPartySystems wiki page + CI_WIKI_NAME = utils.CI_WIKI_NAME + + def __init__(self, *args, **kwargs): + """Initialize instance variables.""" + super(HPEXPISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(rest.COMMON_VOLUME_OPTS) + + def _init_common(self, conf, db): + return rest.HPEXPRESTISCSI(conf, _DRIVER_INFO, db) + + @staticmethod + def get_driver_options(): + additional_opts = HPEXPISCSIDriver._get_oslo_driver_opts( + *(hbsd_common._INHERITED_VOLUME_OPTS + + hbsd_rest._REQUIRED_REST_OPTS + + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', + 'san_api_port', ])) + return (rest.COMMON_VOLUME_OPTS + + rest.REST_VOLUME_OPTS + + additional_opts) diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py b/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py new file mode 100644 index 00000000000..15b82caad2c --- /dev/null +++ b/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py @@ -0,0 +1,337 @@ +# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +"""REST interface for Hewlett Packard Enterprise Driver.""" + +from oslo_config import cfg + +from cinder.volume import configuration +from cinder.volume.drivers.hitachi import hbsd_rest +from cinder.volume.drivers.hitachi import hbsd_rest_api +from cinder.volume.drivers.hitachi import hbsd_rest_fc +from cinder.volume.drivers.hitachi import hbsd_rest_iscsi + +COMMON_VOLUME_OPTS = [ + cfg.StrOpt( + 'hpexp_storage_id', + default=None, + help='Product number of the storage system.'), + cfg.ListOpt( + 'hpexp_pool', + default=[], + help='Pool number[s] or pool name[s] of the THP pool.'), + cfg.StrOpt( + 'hpexp_snap_pool', + default=None, + help='Pool number or pool name of the snapshot pool.'), + cfg.StrOpt( + 'hpexp_ldev_range', + default=None, + help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' + 'can be used by the driver. Values can be in decimal format ' + '(e.g. 1000) or in colon-separated hexadecimal format ' + '(e.g. 00:03:E8).'), + cfg.ListOpt( + 'hpexp_target_ports', + default=[], + help='IDs of the storage ports used to attach volumes to the ' + 'controller node. To specify multiple ports, connect them by ' + 'commas (e.g. CL1-A,CL2-A).'), + cfg.ListOpt( + 'hpexp_compute_target_ports', + default=[], + help='IDs of the storage ports used to attach volumes to compute ' + 'nodes. To specify multiple ports, connect them by commas ' + '(e.g. CL1-A,CL2-A).'), + cfg.BoolOpt( + 'hpexp_group_create', + default=False, + help='If True, the driver will create host groups or iSCSI targets on ' + 'storage ports as needed.'), + cfg.BoolOpt( + 'hpexp_group_delete', + default=False, + help='If True, the driver will delete host groups or iSCSI targets on ' + 'storage ports as needed.'), + cfg.IntOpt( + 'hpexp_copy_speed', + default=3, + min=1, max=15, + help='Copy speed of storage system. 1 or 2 indicates ' + 'low speed, 3 indicates middle speed, and a value between 4 and ' + '15 indicates high speed.'), + cfg.IntOpt( + 'hpexp_copy_check_interval', + default=3, + min=1, max=600, + help='Interval in seconds to check copy'), + cfg.IntOpt( + 'hpexp_async_copy_check_interval', + default=10, + min=1, max=600, + help='Interval in seconds to check copy asynchronously'), +] + +REST_VOLUME_OPTS = [ + cfg.BoolOpt( + 'hpexp_rest_disable_io_wait', + default=True, + help='It may take some time to detach volume after I/O. 
' + 'This option will allow detaching volume to complete ' + 'immediately.'), + cfg.BoolOpt( + 'hpexp_rest_tcp_keepalive', + default=True, + help='Enables or disables use of REST API tcp keepalive'), + cfg.BoolOpt( + 'hpexp_discard_zero_page', + default=True, + help='Enable or disable zero page reclamation in a THP V-VOL.'), + cfg.IntOpt( + 'hpexp_lun_timeout', + default=hbsd_rest._LUN_TIMEOUT, + help='Maximum wait time in seconds for adding a LUN to complete.'), + cfg.IntOpt( + 'hpexp_lun_retry_interval', + default=hbsd_rest._LUN_RETRY_INTERVAL, + help='Retry interval in seconds for REST API adding a LUN.'), + cfg.IntOpt( + 'hpexp_restore_timeout', + default=hbsd_rest._RESTORE_TIMEOUT, + help='Maximum wait time in seconds for the restore operation to ' + 'complete.'), + cfg.IntOpt( + 'hpexp_state_transition_timeout', + default=hbsd_rest._STATE_TRANSITION_TIMEOUT, + help='Maximum wait time in seconds for a volume transition to ' + 'complete.'), + cfg.IntOpt( + 'hpexp_lock_timeout', + default=hbsd_rest_api._LOCK_TIMEOUT, + help='Maximum wait time in seconds for storage to be unlocked.'), + cfg.IntOpt( + 'hpexp_rest_timeout', + default=hbsd_rest_api._REST_TIMEOUT, + help='Maximum wait time in seconds for REST API execution to ' + 'complete.'), + cfg.IntOpt( + 'hpexp_extend_timeout', + default=hbsd_rest_api._EXTEND_TIMEOUT, + help='Maximum wait time in seconds for a volume extention to ' + 'complete.'), + cfg.IntOpt( + 'hpexp_exec_retry_interval', + default=hbsd_rest_api._EXEC_RETRY_INTERVAL, + help='Retry interval in seconds for REST API execution.'), + cfg.IntOpt( + 'hpexp_rest_connect_timeout', + default=hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT, + help='Maximum wait time in seconds for REST API connection to ' + 'complete.'), + cfg.IntOpt( + 'hpexp_rest_job_api_response_timeout', + default=hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT, + help='Maximum wait time in seconds for a response from REST API.'), + cfg.IntOpt( + 'hpexp_rest_get_api_response_timeout', + default=hbsd_rest_api._GET_API_RESPONSE_TIMEOUT, + help='Maximum wait time in seconds for a response against GET method ' + 'of REST API.'), + cfg.IntOpt( + 'hpexp_rest_server_busy_timeout', + default=hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT, + help='Maximum wait time in seconds when REST API returns busy.'), + cfg.IntOpt( + 'hpexp_rest_keep_session_loop_interval', + default=hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL, + help='Loop interval in seconds for keeping REST API session.'), + cfg.IntOpt( + 'hpexp_rest_another_ldev_mapped_retry_timeout', + default=hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT, + help='Retry time in seconds when new LUN allocation request fails.'), + cfg.IntOpt( + 'hpexp_rest_tcp_keepidle', + default=hbsd_rest_api._TCP_KEEPIDLE, + help='Wait time in seconds for sending a first TCP keepalive packet.'), + cfg.IntOpt( + 'hpexp_rest_tcp_keepintvl', + default=hbsd_rest_api._TCP_KEEPINTVL, + help='Interval of transmissions in seconds for TCP keepalive packet.'), + cfg.IntOpt( + 'hpexp_rest_tcp_keepcnt', + default=hbsd_rest_api._TCP_KEEPCNT, + help='Maximum number of transmissions for TCP keepalive packet.'), + cfg.ListOpt( + 'hpexp_host_mode_options', + default=[], + help='Host mode option for host group or iSCSI target.'), +] + +FC_VOLUME_OPTS = [ + cfg.BoolOpt( + 'hpexp_zoning_request', + default=False, + help='If True, the driver will configure FC zoning between the server ' + 'and the storage system provided that FC zoning manager is ' + 'enabled.'), +] + +CONF = cfg.CONF +CONF.register_opts(COMMON_VOLUME_OPTS, 
group=configuration.SHARED_CONF_GROUP) +CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) +CONF.register_opts(FC_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) + + +class HPEXPRESTFC(hbsd_rest_fc.HBSDRESTFC): + """REST interface fibre channel class for + + Hewlett Packard Enterprise Driver. + + """ + + def __init__(self, conf, storage_protocol, db): + """Initialize instance variables.""" + conf.append_config_values(COMMON_VOLUME_OPTS) + conf.append_config_values(REST_VOLUME_OPTS) + conf.append_config_values(FC_VOLUME_OPTS) + super(HPEXPRESTFC, self).__init__(conf, storage_protocol, db) + self._update_conf() + + def _update_conf(self): + """Update configuration""" + # COMMON_VOLUME_OPTS + self.conf.hitachi_storage_id = self.conf.hpexp_storage_id + self.conf.hitachi_pool = self.conf.hpexp_pool + self.conf.hitachi_snap_pool = self.conf.hpexp_snap_pool + self.conf.hitachi_ldev_range = self.conf.hpexp_ldev_range + self.conf.hitachi_target_ports = self.conf.hpexp_target_ports + self.conf.hitachi_compute_target_ports = ( + self.conf.hpexp_compute_target_ports) + self.conf.hitachi_group_create = self.conf.hpexp_group_create + self.conf.hitachi_group_delete = self.conf.hpexp_group_delete + self.conf.hitachi_copy_speed = self.conf.hpexp_copy_speed + self.conf.hitachi_copy_check_interval = ( + self.conf.hpexp_copy_check_interval) + self.conf.hitachi_async_copy_check_interval = ( + self.conf.hpexp_async_copy_check_interval) + + # REST_VOLUME_OPTS + self.conf.hitachi_rest_disable_io_wait = ( + self.conf.hpexp_rest_disable_io_wait) + self.conf.hitachi_rest_tcp_keepalive = ( + self.conf.hpexp_rest_tcp_keepalive) + self.conf.hitachi_discard_zero_page = ( + self.conf.hpexp_discard_zero_page) + self.conf.hitachi_lun_timeout = self.conf.hpexp_lun_timeout + self.conf.hitachi_lun_retry_interval = ( + self.conf.hpexp_lun_retry_interval) + self.conf.hitachi_restore_timeout = self.conf.hpexp_restore_timeout + self.conf.hitachi_state_transition_timeout = ( + self.conf.hpexp_state_transition_timeout) + self.conf.hitachi_lock_timeout = self.conf.hpexp_lock_timeout + self.conf.hitachi_rest_timeout = self.conf.hpexp_rest_timeout + self.conf.hitachi_extend_timeout = self.conf.hpexp_extend_timeout + self.conf.hitachi_exec_retry_interval = ( + self.conf.hpexp_exec_retry_interval) + self.conf.hitachi_rest_connect_timeout = ( + self.conf.hpexp_rest_connect_timeout) + self.conf.hitachi_rest_job_api_response_timeout = ( + self.conf.hpexp_rest_job_api_response_timeout) + self.conf.hitachi_rest_get_api_response_timeout = ( + self.conf.hpexp_rest_get_api_response_timeout) + self.conf.hitachi_rest_server_busy_timeout = ( + self.conf.hpexp_rest_server_busy_timeout) + self.conf.hitachi_rest_keep_session_loop_interval = ( + self.conf.hpexp_rest_keep_session_loop_interval) + self.conf.hitachi_rest_another_ldev_mapped_retry_timeout = ( + self.conf.hpexp_rest_another_ldev_mapped_retry_timeout) + self.conf.hitachi_rest_tcp_keepidle = ( + self.conf.hpexp_rest_tcp_keepidle) + self.conf.hitachi_rest_tcp_keepintvl = ( + self.conf.hpexp_rest_tcp_keepintvl) + self.conf.hitachi_rest_tcp_keepcnt = ( + self.conf.hpexp_rest_tcp_keepcnt) + self.conf.hitachi_host_mode_options = ( + self.conf.hpexp_host_mode_options) + + # FC_VOLUME_OPTS + self.conf.hitachi_zoning_request = self.conf.hpexp_zoning_request + + +class HPEXPRESTISCSI(hbsd_rest_iscsi.HBSDRESTISCSI): + """REST interface iSCSI class for Hewlett Packard Enterprise Driver.""" + + def __init__(self, conf, storage_protocol, db): + """Initialize instance 
variables.""" + conf.append_config_values(COMMON_VOLUME_OPTS) + conf.append_config_values(REST_VOLUME_OPTS) + super(HPEXPRESTISCSI, self).__init__(conf, storage_protocol, db) + self._update_conf() + + def _update_conf(self): + """Update configuration""" + # COMMON_VOLUME_OPTS + self.conf.hitachi_storage_id = self.conf.hpexp_storage_id + self.conf.hitachi_pool = self.conf.hpexp_pool + self.conf.hitachi_snap_pool = self.conf.hpexp_snap_pool + self.conf.hitachi_ldev_range = self.conf.hpexp_ldev_range + self.conf.hitachi_target_ports = self.conf.hpexp_target_ports + self.conf.hitachi_compute_target_ports = ( + self.conf.hpexp_compute_target_ports) + self.conf.hitachi_group_create = self.conf.hpexp_group_create + self.conf.hitachi_group_delete = self.conf.hpexp_group_delete + self.conf.hitachi_copy_speed = self.conf.hpexp_copy_speed + self.conf.hitachi_copy_check_interval = ( + self.conf.hpexp_copy_check_interval) + self.conf.hitachi_async_copy_check_interval = ( + self.conf.hpexp_async_copy_check_interval) + + # REST_VOLUME_OPTS + self.conf.hitachi_rest_disable_io_wait = ( + self.conf.hpexp_rest_disable_io_wait) + self.conf.hitachi_rest_tcp_keepalive = ( + self.conf.hpexp_rest_tcp_keepalive) + self.conf.hitachi_discard_zero_page = ( + self.conf.hpexp_discard_zero_page) + self.conf.hitachi_lun_timeout = self.conf.hpexp_lun_timeout + self.conf.hitachi_lun_retry_interval = ( + self.conf.hpexp_lun_retry_interval) + self.conf.hitachi_restore_timeout = self.conf.hpexp_restore_timeout + self.conf.hitachi_state_transition_timeout = ( + self.conf.hpexp_state_transition_timeout) + self.conf.hitachi_lock_timeout = self.conf.hpexp_lock_timeout + self.conf.hitachi_rest_timeout = self.conf.hpexp_rest_timeout + self.conf.hitachi_extend_timeout = self.conf.hpexp_extend_timeout + self.conf.hitachi_exec_retry_interval = ( + self.conf.hpexp_exec_retry_interval) + self.conf.hitachi_rest_connect_timeout = ( + self.conf.hpexp_rest_connect_timeout) + self.conf.hitachi_rest_job_api_response_timeout = ( + self.conf.hpexp_rest_job_api_response_timeout) + self.conf.hitachi_rest_get_api_response_timeout = ( + self.conf.hpexp_rest_get_api_response_timeout) + self.conf.hitachi_rest_server_busy_timeout = ( + self.conf.hpexp_rest_server_busy_timeout) + self.conf.hitachi_rest_keep_session_loop_interval = ( + self.conf.hpexp_rest_keep_session_loop_interval) + self.conf.hitachi_rest_another_ldev_mapped_retry_timeout = ( + self.conf.hpexp_rest_another_ldev_mapped_retry_timeout) + self.conf.hitachi_rest_tcp_keepidle = ( + self.conf.hpexp_rest_tcp_keepidle) + self.conf.hitachi_rest_tcp_keepintvl = ( + self.conf.hpexp_rest_tcp_keepintvl) + self.conf.hitachi_rest_tcp_keepcnt = ( + self.conf.hpexp_rest_tcp_keepcnt) + self.conf.hitachi_host_mode_options = ( + self.conf.hpexp_host_mode_options) diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py b/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py new file mode 100644 index 00000000000..2a8477307f2 --- /dev/null +++ b/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py @@ -0,0 +1,28 @@ +# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""Utility module for Hewlett Packard Enterprise Driver.""" + +VERSION = '1.0.0' +CI_WIKI_NAME = 'HPE_XP_Storage_CI' +PARAM_PREFIX = 'hpexp' +VENDOR_NAME = 'Hewlett Packard Enterprise' +DRIVER_PREFIX = 'HPEXP' +DRIVER_FILE_PREFIX = 'hpe_xp' +TARGET_PREFIX = 'HPEXP-' +HDP_VOL_ATTR = 'THP' +HDT_VOL_ATTR = 'ST' +NVOL_LDEV_TYPE = 'THP V-VOL' +TARGET_IQN_SUFFIX = '.hpexp-target' +PAIR_ATTR = 'FS' diff --git a/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst b/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst new file mode 100644 index 00000000000..34443f6b43b --- /dev/null +++ b/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst @@ -0,0 +1,126 @@ +============================ +HPE XP block storage driver +============================ + +HPE XP block storage driver provides Fibre Channel and iSCSI support for +HPE XP storages. + +System requirements +~~~~~~~~~~~~~~~~~~~ + +Supported storages: + ++-----------------+------------------------+ +| Storage model | Firmware version | ++=================+========================+ +| XP8 | 90-01-41 or later | ++-----------------+------------------------+ +| XP7 | 80-05-43 or later | ++-----------------+------------------------+ + +Required storage licenses: + +* Thin Provisioning +* Fast Snap + +Supported operations +~~~~~~~~~~~~~~~~~~~~ + +* Create, delete, attach, and detach volumes. +* Create, list, and delete volume snapshots. +* Create a volume from a snapshot. +* Create, list, update, and delete consistency groups. +* Create, list, and delete consistency group snapshots. +* Copy a volume to an image. +* Copy an image to a volume. +* Clone a volume. +* Extend a volume. +* Migrate a volume. +* Get volume statistics. +* Efficient non-disruptive volume backup. +* Manage and unmanage a volume. +* Attach a volume to multiple instances at once (multi-attach). +* Revert a volume to a snapshot. + +.. note:: + + The volume having snapshots cannot be extended in this driver. + +Configuration +~~~~~~~~~~~~~ + +Set up HPE XP storage +---------------------- + +You need to specify settings as described below for storage systems. For +details about each setting, see the user's guide of the storage systems. + +#. User accounts + + Create a storage device account belonging to the Administrator User Group. + +#. THP pool + + Create a THP pool that is used by the driver. + +#. Ports + + Enable Port Security for the ports used by the driver. + +Set up HPE XP storage volume driver +------------------------------------ + +Set the volume driver to HPE XP block storage driver by setting the +volume_driver option in the cinder.conf file as follows: + +If you use Fibre Channel: + +.. code-block:: ini + + [hpe_xp] + volume_driver = cinder.volume.drivers.hpe.xp.hpe_xp_fc.HPEXPFCDriver + volume_backend_name = hpexp_fc + san_ip = 1.2.3.4 + san_login = hpexpuser + san_password = password + hpexp_storage_id = 123456789012 + hpexp_pool = pool0 + +If you use iSCSI: + +.. code-block:: ini + + [hpe_xp] + volume_driver = cinder.volume.drivers.hpe.xp.hpe_xp_iscsi.HPEXPISCSIDriver + volume_backend_name = hpexp_iscsi + san_ip = 1.2.3.4 + san_login = hpexpuser + san_password = password + hpexp_storage_id = 123456789012 + hpexp_pool = pool0 + +This table shows configuration options for HPE XP block storage driver. + +.. 
config-table:: + :config-target: HPE XP block storage driver + + cinder.volume.drivers.hpe.xp.hpe_xp_rest + +Required options +---------------- + +- ``san_ip`` + IP address of SAN controller + +- ``san_login`` + Username for SAN controller + +- ``san_password`` + Password for SAN controller + +- ``hpexp_storage_id`` + Product number of the storage system. + +- ``hpexp_pool`` + Pool number or pool name of the THP pool. + diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini index 1900f604038..96367a027df 100644 --- a/doc/source/reference/support-matrix.ini +++ b/doc/source/reference/support-matrix.ini @@ -72,6 +72,9 @@ title=HPE MSA Driver (iSCSI, FC) [driver.hpe_nimble] title=HPE Nimble Storage Driver (iSCSI, FC) +[driver.hpe_xp] +title=HPE XP Storage Driver (FC, iSCSI) + [driver.huawei_t_v1] title=Huawei T Series V1 Driver (iSCSI, FC) @@ -254,6 +257,7 @@ driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=complete driver.hpe_nimble=complete +driver.hpe_xp=complete driver.huawei_t_v1=complete driver.huawei_t_v2=complete driver.huawei_v3=complete @@ -330,6 +334,7 @@ driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=complete driver.hpe_nimble=complete +driver.hpe_xp=complete driver.huawei_t_v1=complete driver.huawei_t_v2=complete driver.huawei_v3=complete @@ -409,6 +414,7 @@ driver.hitachi_vsp=missing driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=missing +driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=complete driver.huawei_v3=complete @@ -487,6 +493,7 @@ driver.hitachi_vsp=missing driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=missing +driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete @@ -566,6 +573,7 @@ driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete +driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete @@ -644,6 +652,7 @@ driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete +driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete @@ -723,6 +732,7 @@ driver.hitachi_vsp=missing driver.hpe_3par=missing driver.hpe_msa=missing driver.hpe_nimble=missing +driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete @@ -802,6 +812,7 @@ driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=complete driver.hpe_nimble=complete +driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=missing @@ -878,6 +889,7 @@ driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete +driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=missing @@ -958,6 +970,7 @@ driver.hitachi_vsp=missing driver.hpe_3par=missing driver.hpe_msa=missing driver.hpe_nimble=missing +driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=missing diff --git a/releasenotes/notes/hpe-xp-fc-iscsi-cinder-driver-75e04febff42c9ba.yaml b/releasenotes/notes/hpe-xp-fc-iscsi-cinder-driver-75e04febff42c9ba.yaml new file mode 100644 index 00000000000..c631fa19f2f --- /dev/null +++ b/releasenotes/notes/hpe-xp-fc-iscsi-cinder-driver-75e04febff42c9ba.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added backend driver for HPE XP storage. +