Add Hitachi Block Storage Driver

This driver supports Hitachi VSP storages.
The Hitachi driver was removed in the Queens release because
we could not maintain third-party CI.
We have restarted the driver project and adopted the storage's renewed API.
We are submitting a Hitachi driver based on the REST API for the Victoria release.

* Supported Protocols
 - FC
 - iSCSI

* Supported Features
 - Volume Create/Delete
 - Volume Attach/Detach
 - Snapshot Create/Delete
 - Create Volume from Snapshot
 - Get Volume Stats
 - Copy Image to Volume
 - Copy Volume to Image
 - Clone Volume
 - Extend Volume
 - Manage Volume
 - Unmanage Volume

DocImpact
Implements: blueprint hitachi-storage-driver

Change-Id: I926407669a75a8c52a46e07c006aa699bae46006
This commit is contained in:
Kazumasa Nomura 2020-07-13 22:15:44 +09:00
parent cdfee5608b
commit 8aaf6a68b3
12 changed files with 5369 additions and 0 deletions

View File

@ -88,6 +88,12 @@ from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_common as \
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon
from cinder.volume.drivers.fusionstorage import dsware as \
cinder_volume_drivers_fusionstorage_dsware
from cinder.volume.drivers.hitachi import hbsd_common as \
cinder_volume_drivers_hitachi_hbsdcommon
from cinder.volume.drivers.hitachi import hbsd_fc as \
cinder_volume_drivers_hitachi_hbsdfc
from cinder.volume.drivers.hitachi import hbsd_rest as \
cinder_volume_drivers_hitachi_hbsdrest
from cinder.volume.drivers.hpe import hpe_3par_common as \
cinder_volume_drivers_hpe_hpe3parcommon
from cinder.volume.drivers.huawei import common as \
@ -298,6 +304,9 @@ def list_opts():
cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS,
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon.
FJ_ETERNUS_DX_OPT_opts,
cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdfc.FC_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS,
cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
cinder_volume_drivers_huawei_common.huawei_opts,
cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts,

View File

@ -0,0 +1,856 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for Hitachi HBSD Driver."""
import functools
from unittest import mock
from oslo_config import cfg
import requests
from requests import models
from cinder import context as cinder_context
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder.objects import snapshot as obj_snap
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
from cinder.volume.drivers.hitachi import hbsd_rest
from cinder.volume.drivers.hitachi import hbsd_rest_api
from cinder.volume.drivers.hitachi import hbsd_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
# Configuration parameter values shared by the fake driver configuration
# and the canned REST API responses below.
CONFIG_MAP = {
    'serial': '886000123456',
    'my_ip': '127.0.0.1',
    'rest_server_ip_addr': '172.16.18.108',
    'rest_server_ip_port': '23451',
    'port_id': 'CL1-A',
    'host_grp_name': 'HBSD-0123456789abcdef',
    'host_mode': 'LINUX/IRIX',
    'host_wwn': '0123456789abcdef',
    'target_wwn': '1111111123456789',
    'user_id': 'user',
    'user_pass': 'password',
    'pool_name': 'test_pool',
    'auth_user': 'auth_user',
    'auth_password': 'auth_password',
}

# Dummy response for FC zoning device mapping
DEVICE_MAP = {
    'fabric_name': {
        'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']],
        'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}}

# Connector properties returned by the fake brick helper below.
DEFAULT_CONNECTOR = {
    'host': 'host',
    'ip': CONFIG_MAP['my_ip'],
    'wwpns': [CONFIG_MAP['host_wwn']],
    'multipath': False,
}
# Admin context used to build the fake volume/snapshot objects.
CTXT = cinder_context.get_admin_context()

# Three fake volumes; their UUIDs encode the list index so _volume_get can
# recover it.  Volumes 0 and 1 are 'available', volume 2 is 'in-use'.
TEST_VOLUME = []
for i in range(3):
    volume = {}
    volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
    volume['name'] = 'test-volume{0:d}'.format(i)
    volume['provider_location'] = '{0:d}'.format(i)
    volume['size'] = 128
    if i == 2:
        volume['status'] = 'in-use'
    else:
        volume['status'] = 'available'
    volume = fake_volume.fake_volume_obj(CTXT, **volume)
    TEST_VOLUME.append(volume)
def _volume_get(context, volume_id):
    """Return the predefined volume whose UUID encodes its list index.

    The fake volume UUIDs are all zeros except the trailing digits, so
    stripping the hyphens yields the integer index into TEST_VOLUME.
    """
    index = int(volume_id.replace("-", ""))
    return TEST_VOLUME[index]
# One fake snapshot of TEST_VOLUME[0]; provider_location refers to LDEV 1.
TEST_SNAPSHOT = []
snapshot = {}
snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0)
snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0)
snapshot['provider_location'] = '{0:d}'.format(1)
snapshot['status'] = 'available'
snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0)
snapshot['volume'] = _volume_get(None, snapshot['volume_id'])
snapshot['volume_name'] = 'test-volume{0:d}'.format(0)
snapshot['volume_size'] = 128
snapshot = obj_snap.Snapshot._from_db_object(
    CTXT, obj_snap.Snapshot(),
    fake_snapshot.fake_db_snapshot(**snapshot))
TEST_SNAPSHOT.append(snapshot)
# Dummy response for REST API

# Session creation response: token plus session id.
POST_SESSIONS_RESULT = {
    "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3",
    "sessionId": 0,
}

# One fibre channel target port with LUN security enabled.
GET_PORTS_RESULT = {
    "data": [
        {
            "portId": CONFIG_MAP['port_id'],
            "portType": "FIBRE",
            "portAttributes": [
                "TAR",
                "MCU",
                "RCU",
                "ELUN"
            ],
            "fabricMode": True,
            "portConnection": "PtoP",
            "lunSecuritySetting": True,
            "wwn": CONFIG_MAP['target_wwn'],
        },
    ],
}

# The connector's WWN is already registered in host group 0.
GET_HOST_WWNS_RESULT = {
    "data": [
        {
            "hostGroupNumber": 0,
            "hostWwn": CONFIG_MAP['host_wwn'],
        },
    ],
}

# Asynchronous job that finished successfully; affectedResources carries
# the path of the created/affected resource (trailing '1' is the LDEV id
# the tests expect in provider_location).
COMPLETED_SUCCEEDED_RESULT = {
    "status": "Completed",
    "state": "Succeeded",
    "affectedResources": ('a/b/c/1',),
}

# Job that failed with the SSB codes reported when an LU path is already
# defined (used by the "already mapped" test).
COMPLETED_FAILED_RESULT_LU_DEFINED = {
    "status": "Completed",
    "state": "Failed",
    "error": {
        "errorCode": {
            "SSB1": "B958",
            "SSB2": "015A",
        },
    },
}

# An unmapped LDEV (DP volume).
GET_LDEV_RESULT = {
    "emulationType": "OPEN-V-CVS",
    "blockCapacity": 2097152,
    "attributes": ["CVS", "HDP"],
    "status": "NML",
}

# The same LDEV mapped to a host group as LUN 1.
GET_LDEV_RESULT_MAPPED = {
    "emulationType": "OPEN-V-CVS",
    "blockCapacity": 2097152,
    "attributes": ["CVS", "HDP"],
    "status": "NML",
    "ports": [
        {
            "portId": CONFIG_MAP['port_id'],
            "hostGroupNumber": 0,
            "hostGroupName": CONFIG_MAP['host_grp_name'],
            "lun": 1
        },
    ],
}

# An LDEV that is part of a copy pair (note the extra "HTI" attribute).
GET_LDEV_RESULT_PAIR = {
    "emulationType": "OPEN-V-CVS",
    "blockCapacity": 2097152,
    "attributes": ["CVS", "HDP", "HTI"],
    "status": "NML",
}

# Pool capacity numbers consumed by get_volume_stats.
GET_POOL_RESULT = {
    "availableVolumeCapacity": 480144,
    "totalPoolCapacity": 507780,
    "totalLocatedCapacity": 71453172,
}

# A single split ("PSUS") snapshot pair: P-VOL 0 -> S-VOL 1.
GET_SNAPSHOTS_RESULT = {
    "data": [
        {
            "primaryOrSecondary": "S-VOL",
            "status": "PSUS",
            "pvolLdevId": 0,
            "muNumber": 1,
            "svolLdevId": 1,
        },
    ],
}

# Pool listing used when the pool is configured by name instead of id.
GET_POOLS_RESULT = {
    "data": [
        {
            "poolId": 30,
            "poolName": CONFIG_MAP['pool_name'],
        },
    ],
}

# LUN listing: LDEV 0 exposed as LUN 1.
GET_LUNS_RESULT = {
    "data": [
        {
            "ldevId": 0,
            "lun": 1,
        },
    ],
}

# Single host group lookup result.
GET_HOST_GROUP_RESULT = {
    "hostGroupName": CONFIG_MAP['host_grp_name'],
}

# Host group listing result.
GET_HOST_GROUPS_RESULT = {
    "data": [
        {
            "hostGroupNumber": 0,
            "portId": CONFIG_MAP['port_id'],
            "hostGroupName": "HBSD-test",
        },
    ],
}

# LDEV listing; labels look like volume UUIDs with the hyphens removed
# (used by the manage-existing-by-name tests).
GET_LDEVS_RESULT = {
    "data": [
        {
            "ldevId": 0,
            "label": "15960cc738c94c5bb4f1365be5eeed44",
        },
        {
            "ldevId": 1,
            "label": "15960cc738c94c5bb4f1365be5eeed45",
        },
    ],
}

# Empty listing response.
NOTFOUND_RESULT = {
    "data": [],
}

# Generic REST API error body used by the retry/timeout tests.
ERROR_RESULT = {
    "errorSource": "<URL>",
    "message": "<message>",
    "solution": "<solution>",
    "messageId": "<messageId>",
    "errorCode": {
        "SSB1": "",
        "SSB2": "",
    }
}
def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Return the canned connector object, ignoring both arguments."""
    return DEFAULT_CONNECTOR
def reduce_retrying_time(func):
    """Decorator that temporarily shrinks the REST API wait/retry constants.

    The hbsd_rest_api module-level timeouts are replaced with very small
    values so that tests exercising retry exhaustion finish quickly.  The
    original values are restored in a ``finally`` block so that a failing
    test (e.g. an assertion error) cannot leave later tests running with
    the shortened timeouts — the original implementation skipped the
    restore step whenever the wrapped function raised.
    """
    # (attribute name on hbsd_rest_api, shortened value) pairs to patch.
    patches = (
        ('_LOCK_WAITTIME', 0.01),
        ('_EXEC_MAX_WAITTIME', 0.01),
        ('_JOB_API_RESPONSE_TIMEOUT', 0.01),
        ('_GET_API_RESPONSE_TIMEOUT', 0.01),
        ('_EXTEND_WAITTIME', 0.01),
        ('_EXEC_RETRY_INTERVAL', 0.004),
        ('_REST_SERVER_RESTART_TIMEOUT', 0.02),
    )

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Save the current values before patching.
        backups = {name: getattr(hbsd_rest_api, name)
                   for name, _ in patches}
        for name, value in patches:
            setattr(hbsd_rest_api, name, value)
        try:
            # Propagate the wrapped function's return value (the original
            # dropped it; harmless for test methods, but more correct).
            return func(*args, **kwargs)
        finally:
            for name, value in backups.items():
                setattr(hbsd_rest_api, name, value)
    return wrapper
class FakeLookupService:
    """Stand-in for the FC zone manager lookup service."""

    def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
        """Return the canned FC zoning device mapping for any WWN lists."""
        return DEVICE_MAP
class FakeResponse:
    """Minimal stand-in for ``requests.Response`` used by the REST mocks."""

    def __init__(self, status_code, data=None, headers=None):
        """Store the canned status code, payload and headers."""
        self.status_code = status_code
        self.data = data
        # requests exposes the body through several attributes; mirror the
        # same payload on all of them so any access path sees it.
        self.text = data
        self.content = data
        if headers is None:
            headers = {'Content-Type': 'json'}
        self.headers = headers

    def json(self):
        """Return the canned payload, mimicking ``Response.json()``."""
        return self.data
class HBSDRESTFCDriverTest(test.TestCase):
    """Unit test class for HBSD REST interface fibre channel module.

    Every test mocks ``requests.Session.request`` and feeds it a sequence
    of FakeResponse objects, then asserts on the driver's return value and
    on how many REST calls were made.
    """

    test_existing_ref = {'source-id': '1'}
    test_existing_ref_name = {
        'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'}

    def setUp(self):
        """Set up the test environment."""
        def _set_required(opts, required):
            for opt in opts:
                opt.required = required

        # Initialize Cinder and avoid checking driver options.
        rest_required_opts = [
            opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required]
        common_required_opts = [
            opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required]
        _set_required(rest_required_opts, False)
        _set_required(common_required_opts, False)
        super(HBSDRESTFCDriverTest, self).setUp()
        _set_required(rest_required_opts, True)
        _set_required(common_required_opts, True)
        self.configuration = mock.Mock(conf.Configuration)
        self.ctxt = cinder_context.get_admin_context()
        self._setup_config()
        self._setup_driver()

    def _setup_config(self):
        """Set configuration parameter values."""
        self.configuration.config_group = "REST"
        self.configuration.volume_backend_name = "RESTFC"
        self.configuration.volume_driver = (
            "cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver")
        self.configuration.reserved_percentage = "0"
        self.configuration.use_multipath_for_image_xfer = False
        self.configuration.enforce_multipath_for_image_xfer = False
        self.configuration.max_over_subscription_ratio = 500.0
        self.configuration.driver_ssl_cert_verify = False
        self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
        self.configuration.hitachi_pool = "30"
        self.configuration.hitachi_snap_pool = None
        self.configuration.hitachi_ldev_range = "0-1"
        self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
        self.configuration.hitachi_compute_target_ports = [
            CONFIG_MAP['port_id']]
        self.configuration.hitachi_group_create = True
        self.configuration.hitachi_group_delete = True
        self.configuration.san_login = CONFIG_MAP['user_id']
        self.configuration.san_password = CONFIG_MAP['user_pass']
        self.configuration.san_ip = CONFIG_MAP[
            'rest_server_ip_addr']
        self.configuration.san_api_port = CONFIG_MAP[
            'rest_server_ip_port']
        self.configuration.hitachi_rest_tcp_keepalive = True
        self.configuration.hitachi_discard_zero_page = True
        self.configuration.hitachi_zoning_request = False
        self.configuration.san_thin_provision = True
        self.configuration.san_private_key = ''
        self.configuration.san_clustername = ''
        self.configuration.san_ssh_port = '22'
        self.configuration.san_is_local = False
        self.configuration.ssh_conn_timeout = '30'
        self.configuration.ssh_min_pool_conn = '1'
        self.configuration.ssh_max_pool_conn = '5'
        self.configuration.use_chap_auth = True
        self.configuration.chap_username = CONFIG_MAP['auth_user']
        self.configuration.chap_password = CONFIG_MAP['auth_password']
        self.configuration.safe_get = self._fake_safe_get
        CONF = cfg.CONF
        CONF.my_ip = CONFIG_MAP['my_ip']

    def _fake_safe_get(self, value):
        """Retrieve a configuration value avoiding throwing an exception."""
        try:
            val = getattr(self.configuration, value)
        except AttributeError:
            val = None
        return val

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def _setup_driver(
            self, brick_get_connector_properties=None, request=None):
        """Set up the driver environment."""
        self.driver = hbsd_fc.HBSDFCDriver(
            configuration=self.configuration, db=db)
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT)]
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
        self.driver.local_path(None)
        self.driver.create_export(None, None, None)
        self.driver.ensure_export(None, None)
        self.driver.remove_export(None, None)
        self.driver.create_export_snapshot(None, None, None)
        self.driver.remove_export_snapshot(None, None)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()

    def tearDown(self):
        # NOTE(review): self.client is never set elsewhere; kept for parity
        # with the original code.
        self.client = None
        super(HBSDRESTFCDriverTest, self).tearDown()

    # API test cases
    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def test_do_setup(self, brick_get_connector_properties, request):
        drv = hbsd_fc.HBSDFCDriver(
            configuration=self.configuration, db=db)
        self._setup_config()
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
        self.assertEqual(3, request.call_count)
        # Stop the LoopingCall started by do_setup on the locally created
        # driver.  Bug fix: this previously stopped self.driver's loop
        # (already stopped in _setup_driver), leaking drv's keepalive loop.
        drv.common.client.keep_session_loop.stop()
        drv.common.client.keep_session_loop.wait()

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def test_do_setup_create_hg(self, brick_get_connector_properties, request):
        """Normal case: The host group not exists."""
        drv = hbsd_fc.HBSDFCDriver(
            configuration=self.configuration, db=db)
        self._setup_config()
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
        self.assertEqual(8, request.call_count)
        # Bug fix: stop the loop on drv, not self.driver (see test_do_setup).
        drv.common.client.keep_session_loop.stop()
        drv.common.client.keep_session_loop.wait()

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def test_do_setup_pool_name(self, brick_get_connector_properties, request):
        """Normal case: Specify a pool name instead of pool id"""
        drv = hbsd_fc.HBSDFCDriver(
            configuration=self.configuration, db=db)
        self._setup_config()
        tmp_pool = self.configuration.hitachi_pool
        self.configuration.hitachi_pool = CONFIG_MAP['pool_name']
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_POOLS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
        self.assertEqual(4, request.call_count)
        self.configuration.hitachi_pool = tmp_pool
        # Bug fix: stop the loop on drv, not self.driver (see test_do_setup).
        drv.common.client.keep_session_loop.stop()
        drv.common.client.keep_session_loop.wait()

    @mock.patch.object(requests.Session, "request")
    def test_create_volume(self, request):
        request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
        ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
        self.assertEqual('1', ret['provider_location'])
        self.assertEqual(2, request.call_count)

    @reduce_retrying_time
    @mock.patch.object(requests.Session, "request")
    def test_create_volume_timeout(self, request):
        request.return_value = FakeResponse(
            500, ERROR_RESULT,
            headers={'Content-Type': 'json'})
        self.assertRaises(hbsd_utils.HBSDError,
                          self.driver.create_volume,
                          fake_volume.fake_volume_obj(self.ctxt))
        self.assertGreater(request.call_count, 1)

    @mock.patch.object(requests.Session, "request")
    def test_delete_volume(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.delete_volume(TEST_VOLUME[0])
        self.assertEqual(4, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_extend_volume(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.extend_volume(TEST_VOLUME[0], 256)
        self.assertEqual(3, request.call_count)

    @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function")
    @mock.patch.object(driver.FibreChannelDriver, "get_filter_function")
    @mock.patch.object(requests.Session, "request")
    def test_get_volume_stats(
            self, request, get_filter_function, get_goodness_function):
        request.return_value = FakeResponse(200, GET_POOL_RESULT)
        get_filter_function.return_value = None
        get_goodness_function.return_value = None
        stats = self.driver.get_volume_stats(True)
        self.assertEqual('Hitachi', stats['vendor_name'])
        self.assertTrue(stats["pools"][0]['multiattach'])
        self.assertEqual(1, request.call_count)
        self.assertEqual(1, get_filter_function.call_count)
        self.assertEqual(1, get_goodness_function.call_count)

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
    def test_create_snapshot(self, volume_get, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT)]
        ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
        self.assertEqual('1', ret['provider_location'])
        self.assertEqual(4, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_delete_snapshot(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.delete_snapshot(TEST_SNAPSHOT[0])
        self.assertEqual(10, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_delete_snapshot_no_pair(self, request):
        """Normal case: Delete a snapshot without pair."""
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.delete_snapshot(TEST_SNAPSHOT[0])
        self.assertEqual(4, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_create_cloned_volume(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
        self.assertEqual('1', vol['provider_location'])
        self.assertEqual(5, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_create_volume_from_snapshot(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        vol = self.driver.create_volume_from_snapshot(
            TEST_VOLUME[0], TEST_SNAPSHOT[0])
        self.assertEqual('1', vol['provider_location'])
        self.assertEqual(5, request.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_initialize_connection(self, request, add_fc_zone):
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.initialize_connection(
            TEST_VOLUME[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(2, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_initialize_connection_already_mapped(self, request, add_fc_zone):
        """Normal case: ldev have already mapped."""
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [
            FakeResponse(200, GET_HOST_WWNS_RESULT),
            FakeResponse(202, COMPLETED_FAILED_RESULT_LU_DEFINED),
            FakeResponse(200, GET_LUNS_RESULT),
        ]
        ret = self.driver.initialize_connection(
            TEST_VOLUME[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(3, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_initialize_connection_shared_target(self, request, add_fc_zone):
        """Normal case: A target shared with other systems."""
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, GET_HOST_GROUPS_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.initialize_connection(
            TEST_VOLUME[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(5, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "remove_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_terminate_connection(self, request, remove_fc_zone):
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT_MAPPED),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR)
        self.assertEqual(5, request.call_count)
        self.assertEqual(1, remove_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "remove_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_terminate_connection_not_connector(self, request, remove_fc_zone):
        """Normal case: Connector is None."""
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED),
                               FakeResponse(200, GET_HOST_GROUP_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT_MAPPED),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.terminate_connection(TEST_VOLUME[2], None)
        self.assertEqual(8, request.call_count)
        self.assertEqual(1, remove_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "remove_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_terminate_connection_not_lun(self, request, remove_fc_zone):
        """Normal case: Lun already not exist."""
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT)]
        self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR)
        self.assertEqual(2, request.call_count)
        self.assertEqual(1, remove_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_initialize_connection_snapshot(self, request, add_fc_zone):
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.initialize_connection_snapshot(
            TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(2, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "remove_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_terminate_connection_snapshot(self, request, remove_fc_zone):
        self.configuration.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT_MAPPED),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.terminate_connection_snapshot(
            TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
        self.assertEqual(5, request.call_count)
        self.assertEqual(1, remove_fc_zone.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_manage_existing(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.manage_existing(
            TEST_VOLUME[0], self.test_existing_ref)
        self.assertEqual('1', ret['provider_location'])
        self.assertEqual(2, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_manage_existing_name(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.manage_existing(
            TEST_VOLUME[0], self.test_existing_ref_name)
        self.assertEqual('1', ret['provider_location'])
        self.assertEqual(3, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_manage_existing_get_size(self, request):
        request.return_value = FakeResponse(200, GET_LDEV_RESULT)
        self.driver.manage_existing_get_size(
            TEST_VOLUME[0], self.test_existing_ref)
        self.assertEqual(1, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_manage_existing_get_size_name(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT)]
        self.driver.manage_existing_get_size(
            TEST_VOLUME[0], self.test_existing_ref_name)
        self.assertEqual(2, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_unmanage(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT)]
        self.driver.unmanage(TEST_VOLUME[0])
        self.assertEqual(2, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_copy_image_to_volume(self, request):
        image_service = 'fake_image_service'
        image_id = 'fake_image_id'
        request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
        with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \
                as mock_copy_image:
            self.driver.copy_image_to_volume(
                self.ctxt, TEST_VOLUME[0], image_service, image_id)
        mock_copy_image.assert_called_with(
            self.ctxt, TEST_VOLUME[0], image_service, image_id)
        self.assertEqual(1, request.call_count)

    @mock.patch.object(requests.Session, "request")
    def test_update_migrated_volume(self, request):
        request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
        self.assertRaises(
            NotImplementedError,
            self.driver.update_migrated_volume,
            self.ctxt,
            TEST_VOLUME[0],
            TEST_VOLUME[1],
            "available")
        self.assertEqual(1, request.call_count)

    def test_unmanage_snapshot(self):
        """The driver does not support unmanage_snapshot."""
        self.assertRaises(
            NotImplementedError,
            self.driver.unmanage_snapshot,
            TEST_SNAPSHOT[0])

    def test_retype(self):
        new_specs = {'hbsd:test': 'test'}
        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
        diff = {}
        host = {}
        ret = self.driver.retype(
            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
        self.assertFalse(ret)

    def test_backup_use_temp_snapshot(self):
        self.assertTrue(self.driver.backup_use_temp_snapshot())

    @mock.patch.object(requests.Session, "request")
    def test_revert_to_snapshot(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT)]
        self.driver.revert_to_snapshot(
            self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0])
        self.assertEqual(5, request.call_count)

    def test_session___call__(self):
        session = self.driver.common.client.Session('id', 'token')
        req = models.Response()
        ret = session.__call__(req)
        self.assertEqual('Session token', ret.headers['Authorization'])

View File

@ -0,0 +1,702 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for Hitachi HBSD Driver."""
from unittest import mock
from oslo_config import cfg
import requests
from cinder import context as cinder_context
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder.objects import snapshot as obj_snap
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
from cinder.volume.drivers.hitachi import hbsd_rest
from cinder.volume import volume_types
# Configuration parameter values
CONFIG_MAP = {
'serial': '886000123456',
'my_ip': '127.0.0.1',
'rest_server_ip_addr': '172.16.18.108',
'rest_server_ip_port': '23451',
'port_id': 'CL1-A',
'host_grp_name': 'HBSD-127.0.0.1',
'host_mode': 'LINUX/IRIX',
'host_iscsi_name': 'iqn.hbsd-test-host',
'target_iscsi_name': 'iqn.hbsd-test-target',
'user_id': 'user',
'user_pass': 'password',
'ipv4Address': '111.22.333.44',
'tcpPort': '5555',
'auth_user': 'auth_user',
'auth_password': 'auth_password',
}
DEFAULT_CONNECTOR = {
'host': 'host',
'ip': CONFIG_MAP['my_ip'],
'initiator': CONFIG_MAP['host_iscsi_name'],
'multipath': False,
}
CTXT = cinder_context.get_admin_context()
TEST_VOLUME = []
for i in range(3):
volume = {}
volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
volume['name'] = 'test-volume{0:d}'.format(i)
volume['provider_location'] = '{0:d}'.format(i)
volume['size'] = 128
if i == 2:
volume['status'] = 'in-use'
else:
volume['status'] = 'available'
volume = fake_volume.fake_volume_obj(CTXT, **volume)
TEST_VOLUME.append(volume)
def _volume_get(context, volume_id):
    """Return the predefined volume whose fake ID encodes its index."""
    # Fake IDs look like '00000000-...-%012d'; stripping the hyphens
    # leaves the zero-padded index into TEST_VOLUME.
    index = int(volume_id.replace("-", ""))
    return TEST_VOLUME[index]
TEST_SNAPSHOT = []
snapshot = {}
snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0)
snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0)
snapshot['provider_location'] = '{0:d}'.format(1)
snapshot['status'] = 'available'
snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0)
snapshot['volume'] = _volume_get(None, snapshot['volume_id'])
snapshot['volume_name'] = 'test-volume{0:d}'.format(0)
snapshot['volume_size'] = 128
snapshot = obj_snap.Snapshot._from_db_object(
CTXT, obj_snap.Snapshot(),
fake_snapshot.fake_db_snapshot(**snapshot))
TEST_SNAPSHOT.append(snapshot)
# Dummy response for REST API
POST_SESSIONS_RESULT = {
"token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3",
"sessionId": 0,
}
GET_PORTS_RESULT = {
"data": [
{
"portId": CONFIG_MAP['port_id'],
"portType": "ISCSI",
"portAttributes": [
"TAR",
"MCU",
"RCU",
"ELUN"
],
"portSpeed": "AUT",
"loopId": "00",
"fabricMode": False,
"lunSecuritySetting": True,
},
],
}
GET_PORT_RESULT = {
"ipv4Address": CONFIG_MAP['ipv4Address'],
"tcpPort": CONFIG_MAP['tcpPort'],
}
GET_HOST_ISCSIS_RESULT = {
"data": [
{
"hostGroupNumber": 0,
"iscsiName": CONFIG_MAP['host_iscsi_name'],
},
],
}
GET_HOST_GROUP_RESULT = {
"hostGroupName": CONFIG_MAP['host_grp_name'],
"iscsiName": CONFIG_MAP['target_iscsi_name'],
}
GET_HOST_GROUPS_RESULT = {
"data": [
{
"hostGroupNumber": 0,
"portId": CONFIG_MAP['port_id'],
"hostGroupName": "HBSD-test",
"iscsiName": CONFIG_MAP['target_iscsi_name'],
},
],
}
COMPLETED_SUCCEEDED_RESULT = {
"status": "Completed",
"state": "Succeeded",
"affectedResources": ('a/b/c/1',),
}
GET_LDEV_RESULT = {
"emulationType": "OPEN-V-CVS",
"blockCapacity": 2097152,
"attributes": ["CVS", "HDP"],
"status": "NML",
}
GET_LDEV_RESULT_MAPPED = {
"emulationType": "OPEN-V-CVS",
"blockCapacity": 2097152,
"attributes": ["CVS", "HDP"],
"status": "NML",
"ports": [
{
"portId": CONFIG_MAP['port_id'],
"hostGroupNumber": 0,
"hostGroupName": CONFIG_MAP['host_grp_name'],
"lun": 1
},
],
}
GET_LDEV_RESULT_PAIR = {
"emulationType": "OPEN-V-CVS",
"blockCapacity": 2097152,
"attributes": ["CVS", "HDP", "HTI"],
"status": "NML",
}
GET_POOL_RESULT = {
"availableVolumeCapacity": 480144,
"totalPoolCapacity": 507780,
"totalLocatedCapacity": 71453172,
}
GET_SNAPSHOTS_RESULT = {
"data": [
{
"primaryOrSecondary": "S-VOL",
"status": "PSUS",
"pvolLdevId": 0,
"muNumber": 1,
"svolLdevId": 1,
},
],
}
GET_LDEVS_RESULT = {
"data": [
{
"ldevId": 0,
"label": "15960cc738c94c5bb4f1365be5eeed44",
},
{
"ldevId": 1,
"label": "15960cc738c94c5bb4f1365be5eeed45",
},
],
}
NOTFOUND_RESULT = {
"data": [],
}
def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Return a predefined connector object."""
    # The parameters exist only to mirror the signature of
    # utils.brick_get_connector_properties; they are ignored and the
    # canned connector is always returned.
    return DEFAULT_CONNECTOR
class FakeResponse():
    """Minimal stand-in for a requests.Response used by the REST mocks."""

    def __init__(self, status_code, data=None, headers=None):
        """Store the canned status code, payload and headers."""
        self.status_code = status_code
        # The same payload backs .data, .text and .content, mirroring the
        # different ways the driver may read a real response body.
        self.data = self.text = self.content = data
        if headers is None:
            headers = {'Content-Type': 'json'}
        self.headers = headers

    def json(self):
        """Return the canned payload as the decoded JSON body."""
        return self.data
class HBSDRESTISCSIDriverTest(test.TestCase):
"""Unit test class for HBSD REST interface iSCSI module."""
test_existing_ref = {'source-id': '1'}
test_existing_ref_name = {
'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'}
def setUp(self):
"""Set up the test environment."""
def _set_required(opts, required):
for opt in opts:
opt.required = required
# Initialize Cinder and avoid checking driver options.
rest_required_opts = [
opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required]
common_required_opts = [
opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required]
_set_required(rest_required_opts, False)
_set_required(common_required_opts, False)
super(HBSDRESTISCSIDriverTest, self).setUp()
_set_required(rest_required_opts, True)
_set_required(common_required_opts, True)
self.configuration = mock.Mock(conf.Configuration)
self.ctxt = cinder_context.get_admin_context()
self._setup_config()
self._setup_driver()
def _setup_config(self):
"""Set configuration parameter values."""
self.configuration.config_group = "REST"
self.configuration.volume_backend_name = "RESTISCSI"
self.configuration.volume_driver = (
"cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver")
self.configuration.reserved_percentage = "0"
self.configuration.use_multipath_for_image_xfer = False
self.configuration.enforce_multipath_for_image_xfer = False
self.configuration.max_over_subscription_ratio = 500.0
self.configuration.driver_ssl_cert_verify = False
self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
self.configuration.hitachi_pool = "30"
self.configuration.hitachi_snap_pool = None
self.configuration.hitachi_ldev_range = "0-1"
self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
self.configuration.hitachi_compute_target_ports = [
CONFIG_MAP['port_id']]
self.configuration.hitachi_group_create = True
self.configuration.hitachi_group_delete = True
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
self.configuration.san_ip = CONFIG_MAP[
'rest_server_ip_addr']
self.configuration.san_api_port = CONFIG_MAP[
'rest_server_ip_port']
self.configuration.hitachi_rest_tcp_keepalive = True
self.configuration.hitachi_discard_zero_page = True
self.configuration.use_chap_auth = True
self.configuration.chap_username = CONFIG_MAP['auth_user']
self.configuration.chap_password = CONFIG_MAP['auth_password']
self.configuration.san_thin_provision = True
self.configuration.san_private_key = ''
self.configuration.san_clustername = ''
self.configuration.san_ssh_port = '22'
self.configuration.san_is_local = False
self.configuration.ssh_conn_timeout = '30'
self.configuration.ssh_min_pool_conn = '1'
self.configuration.ssh_max_pool_conn = '5'
self.configuration.safe_get = self._fake_safe_get
CONF = cfg.CONF
CONF.my_ip = CONFIG_MAP['my_ip']
def _fake_safe_get(self, value):
"""Retrieve a configuration value avoiding throwing an exception."""
try:
val = getattr(self.configuration, value)
except AttributeError:
val = None
return val
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def _setup_driver(
self, brick_get_connector_properties=None, request=None):
"""Set up the driver environment."""
self.driver = hbsd_iscsi.HBSDISCSIDriver(
configuration=self.configuration, db=db)
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, GET_PORT_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUP_RESULT)]
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.driver.local_path(None)
self.driver.create_export(None, None, None)
self.driver.ensure_export(None, None)
self.driver.remove_export(None, None)
self.driver.create_export_snapshot(None, None, None)
self.driver.remove_export_snapshot(None, None)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
def tearDown(self):
self.client = None
super(HBSDRESTISCSIDriverTest, self).tearDown()
# API test cases
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup(self, brick_get_connector_properties, request):
drv = hbsd_iscsi.HBSDISCSIDriver(
configuration=self.configuration, db=db)
self._setup_config()
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, GET_PORT_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUP_RESULT)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']:
'%(ip)s:%(port)s' % {
'ip': CONFIG_MAP['ipv4Address'],
'port': CONFIG_MAP['tcpPort']}},
drv.common.storage_info['portals'])
self.assertEqual(1, brick_get_connector_properties.call_count)
self.assertEqual(5, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup_create_hg(self, brick_get_connector_properties, request):
"""Normal case: The host group not exists."""
drv = hbsd_iscsi.HBSDISCSIDriver(
configuration=self.configuration, db=db)
self._setup_config()
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, GET_PORT_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']:
'%(ip)s:%(port)s' % {
'ip': CONFIG_MAP['ipv4Address'],
'port': CONFIG_MAP['tcpPort']}},
drv.common.storage_info['portals'])
self.assertEqual(1, brick_get_connector_properties.call_count)
self.assertEqual(8, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@mock.patch.object(requests.Session, "request")
def test_extend_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.extend_volume(TEST_VOLUME[0], 256)
self.assertEqual(3, request.call_count)
@mock.patch.object(driver.ISCSIDriver, "get_goodness_function")
@mock.patch.object(driver.ISCSIDriver, "get_filter_function")
@mock.patch.object(requests.Session, "request")
def test_get_volume_stats(
self, request, get_filter_function, get_goodness_function):
request.return_value = FakeResponse(200, GET_POOL_RESULT)
get_filter_function.return_value = None
get_goodness_function.return_value = None
stats = self.driver.get_volume_stats(True)
self.assertEqual('Hitachi', stats['vendor_name'])
self.assertTrue(stats["pools"][0]['multiattach'])
self.assertEqual(1, request.call_count)
self.assertEqual(1, get_filter_function.call_count)
self.assertEqual(1, get_goodness_function.call_count)
@mock.patch.object(requests.Session, "request")
def test_create_volume(self, request):
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
self.assertEqual('1', ret['provider_location'])
self.assertEqual(2, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_delete_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.delete_volume(TEST_VOLUME[0])
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
def test_create_snapshot(self, volume_get, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
self.assertEqual('1', ret['provider_location'])
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_delete_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.delete_snapshot(TEST_SNAPSHOT[0])
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_create_cloned_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
self.assertEqual('1', vol['provider_location'])
self.assertEqual(5, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_create_volume_from_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
vol = self.driver.create_volume_from_snapshot(
TEST_VOLUME[0], TEST_SNAPSHOT[0])
self.assertEqual('1', vol['provider_location'])
self.assertEqual(5, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_initialize_connection(self, request):
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUP_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.initialize_connection(
TEST_VOLUME[0], DEFAULT_CONNECTOR)
self.assertEqual('iscsi', ret['driver_volume_type'])
self.assertEqual(
'%(ip)s:%(port)s' % {
'ip': CONFIG_MAP['ipv4Address'],
'port': CONFIG_MAP['tcpPort'],
},
ret['data']['target_portal'])
self.assertEqual(CONFIG_MAP['target_iscsi_name'],
ret['data']['target_iqn'])
self.assertEqual('CHAP', ret['data']['auth_method'])
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username'])
self.assertEqual(
CONFIG_MAP['auth_password'], ret['data']['auth_password'])
self.assertEqual(1, ret['data']['target_lun'])
self.assertEqual(3, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_initialize_connection_shared_target(self, request):
"""Normal case: A target shared with other systems."""
request.side_effect = [FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, GET_HOST_GROUPS_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.initialize_connection(
TEST_VOLUME[0], DEFAULT_CONNECTOR)
self.assertEqual('iscsi', ret['driver_volume_type'])
self.assertEqual(
'%(ip)s:%(port)s' % {
'ip': CONFIG_MAP['ipv4Address'],
'port': CONFIG_MAP['tcpPort'],
},
ret['data']['target_portal'])
self.assertEqual(CONFIG_MAP['target_iscsi_name'],
ret['data']['target_iqn'])
self.assertEqual('CHAP', ret['data']['auth_method'])
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username'])
self.assertEqual(
CONFIG_MAP['auth_password'], ret['data']['auth_password'])
self.assertEqual(1, ret['data']['target_lun'])
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_terminate_connection(self, request):
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUP_RESULT),
FakeResponse(200, GET_LDEV_RESULT_MAPPED),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR)
self.assertEqual(6, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_terminate_connection_not_connector(self, request):
"""Normal case: Connector is None."""
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED),
FakeResponse(200, GET_HOST_GROUP_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUPS_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_LDEV_RESULT_MAPPED),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.terminate_connection(TEST_VOLUME[2], None)
self.assertEqual(9, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_initialize_connection_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUP_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.initialize_connection_snapshot(
TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
self.assertEqual('iscsi', ret['driver_volume_type'])
self.assertEqual(
'%(ip)s:%(port)s' % {
'ip': CONFIG_MAP['ipv4Address'],
'port': CONFIG_MAP['tcpPort'],
},
ret['data']['target_portal'])
self.assertEqual(CONFIG_MAP['target_iscsi_name'],
ret['data']['target_iqn'])
self.assertEqual('CHAP', ret['data']['auth_method'])
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username'])
self.assertEqual(
CONFIG_MAP['auth_password'], ret['data']['auth_password'])
self.assertEqual(1, ret['data']['target_lun'])
self.assertEqual(3, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_terminate_connection_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
FakeResponse(200, GET_HOST_GROUP_RESULT),
FakeResponse(200, GET_LDEV_RESULT_MAPPED),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.terminate_connection_snapshot(
TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
self.assertEqual(6, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_manage_existing(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.manage_existing(
TEST_VOLUME[0], self.test_existing_ref)
self.assertEqual('1', ret['provider_location'])
self.assertEqual(2, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_manage_existing_name(self, request):
request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.manage_existing(
TEST_VOLUME[0], self.test_existing_ref_name)
self.assertEqual('1', ret['provider_location'])
self.assertEqual(3, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_manage_existing_get_size(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
self.driver.manage_existing_get_size(
TEST_VOLUME[0], self.test_existing_ref)
self.assertEqual(1, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_manage_existing_get_size_name(self, request):
request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT),
FakeResponse(200, GET_LDEV_RESULT)]
self.driver.manage_existing_get_size(
TEST_VOLUME[0], self.test_existing_ref_name)
self.assertEqual(2, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_unmanage(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT)]
self.driver.unmanage(TEST_VOLUME[0])
self.assertEqual(2, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_copy_image_to_volume(self, request):
image_service = 'fake_image_service'
image_id = 'fake_image_id'
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \
as mock_copy_image:
self.driver.copy_image_to_volume(
self.ctxt, TEST_VOLUME[0], image_service, image_id)
mock_copy_image.assert_called_with(
self.ctxt, TEST_VOLUME[0], image_service, image_id)
self.assertEqual(1, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_update_migrated_volume(self, request):
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
self.assertRaises(
NotImplementedError,
self.driver.update_migrated_volume,
self.ctxt,
TEST_VOLUME[0],
TEST_VOLUME[1],
"available")
self.assertEqual(1, request.call_count)
def test_unmanage_snapshot(self):
"""The driver don't support unmange_snapshot."""
self.assertRaises(
NotImplementedError,
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])
def test_retype(self):
new_specs = {'hbsd:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())
@mock.patch.object(requests.Session, "request")
def test_revert_to_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
self.driver.revert_to_snapshot(
self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0])
self.assertEqual(5, request.call_count)

View File

@ -0,0 +1,791 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common module for Hitachi HBSD Driver."""
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import coordination
from cinder import exception
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume import volume_utils
VERSION = '2.0.0'
# Object-type tags passed to create_volume_from_src().
_STR_VOLUME = 'volume'
_STR_SNAPSHOT = 'snapshot'
# Generic volume-driver options this driver re-reads from its config group.
_INHERITED_VOLUME_OPTS = [
    'volume_backend_name',
    'volume_driver',
    'reserved_percentage',
    'use_multipath_for_image_xfer',
    'enforce_multipath_for_image_xfer',
    'max_over_subscription_ratio',
    'use_chap_auth',
    'chap_username',
    'chap_password',
]
# Driver-specific options shared by the FC and iSCSI variants.
COMMON_VOLUME_OPTS = [
    cfg.StrOpt(
        'hitachi_storage_id',
        default=None,
        help='Product number of the storage system.'),
    cfg.StrOpt(
        'hitachi_pool',
        default=None,
        help='Pool number or pool name of the DP pool.'),
    cfg.StrOpt(
        'hitachi_snap_pool',
        default=None,
        help='Pool number or pool name of the snapshot pool.'),
    cfg.StrOpt(
        'hitachi_ldev_range',
        default=None,
        help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that '
             'can be used by the driver. Values can be in decimal format '
             '(e.g. 1000) or in colon-separated hexadecimal format '
             '(e.g. 00:03:E8).'),
    cfg.ListOpt(
        'hitachi_target_ports',
        default=[],
        help='IDs of the storage ports used to attach volumes to the '
             'controller node. To specify multiple ports, connect them by '
             'commas (e.g. CL1-A,CL2-A).'),
    cfg.ListOpt(
        'hitachi_compute_target_ports',
        default=[],
        help='IDs of the storage ports used to attach volumes to compute '
             'nodes. To specify multiple ports, connect them by commas '
             '(e.g. CL1-A,CL2-A).'),
    cfg.BoolOpt(
        'hitachi_group_create',
        default=False,
        help='If True, the driver will create host groups or iSCSI targets on '
             'storage ports as needed.'),
    cfg.BoolOpt(
        'hitachi_group_delete',
        default=False,
        help='If True, the driver will delete host groups or iSCSI targets on '
             'storage ports as needed.'),
]
# Options that must be set for the driver to start.
_REQUIRED_COMMON_OPTS = [
    'hitachi_storage_id',
    'hitachi_pool',
]
CONF = cfg.CONF
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
# Shorthand for the driver's message catalog.
MSG = utils.HBSDMsg
def _str2int(num):
    """Convert a decimal or 'xx:xx:xx' hexadecimal string into an integer.

    Returns None for empty input or for strings that match neither a
    plain decimal number nor the colon-separated hex LDEV-ID format.
    """
    if not num:
        return None
    if num.isdigit():
        return int(num)
    hex_form = re.match(
        r'[0-9a-fA-F]{2}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}$', num)
    if hex_form is None:
        return None
    try:
        return int(num.replace(':', ''), 16)
    except ValueError:
        return None
class HBSDCommon():
"""Common class for Hitachi HBSD Driver."""
    def __init__(self, conf, driverinfo, db):
        """Initialize instance variables.

        :param conf: backend Configuration object
        :param driverinfo: dict of protocol-specific constants; must
            contain at least the 'proto' key
        :param db: cinder DB API reference, kept for later lookups
        """
        self.conf = conf
        self.db = db
        self.ctxt = None
        # Named locks keyed by operation; used with cinder coordination.
        self.lock = {
            'do_setup': 'do_setup',
        }
        self.driver_info = driverinfo
        # Runtime view of the backend storage; populated during setup.
        self.storage_info = {
            'protocol': driverinfo['proto'],
            'pool_id': None,
            'snap_pool_id': None,
            'ldev_range': [],
            'controller_ports': [],
            'compute_ports': [],
            'wwns': {},
            'portals': {},
        }
        self._stats = {}
    def create_ldev(self, size):
        """Create an LDEV and return its LDEV number."""
        # Implemented by the REST subclass (hbsd_rest).
        raise NotImplementedError()
    def modify_ldev_name(self, ldev, name):
        """Modify LDEV name."""
        raise NotImplementedError()
def create_volume(self, volume):
"""Create a volume and return its properties."""
try:
ldev = self.create_ldev(volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
utils.output_log(MSG.CREATE_LDEV_FAILED)
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
return {
'provider_location': str(ldev),
}
    def get_ldev_info(self, keys, ldev, **kwargs):
        """Return a dictionary of LDEV-related items."""
        # Implemented by the REST subclass; *keys* selects the attributes.
        raise NotImplementedError()
    def create_pair_on_storage(self, pvol, svol, is_snapshot=False):
        """Create a copy pair on the storage."""
        raise NotImplementedError()
def _copy_on_storage(self, pvol, size, is_snapshot=False):
"""Create a copy of the specified LDEV on the storage."""
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
if ldev_info['status'] != 'NML':
msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
raise utils.HBSDError(msg)
svol = self.create_ldev(size)
try:
self.create_pair_on_storage(pvol, svol, is_snapshot)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.delete_ldev(svol)
except utils.HBSDError:
utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
return svol
    def create_volume_from_src(self, volume, src, src_type):
        """Create a volume from a volume or snapshot and return its properties.
        """
        ldev = utils.get_ldev(src)
        if ldev is None:
            # The source has no LDEV recorded, so it cannot be copied.
            msg = utils.output_log(
                MSG.INVALID_LDEV_FOR_VOLUME_COPY, type=src_type, id=src['id'])
            raise utils.HBSDError(msg)
        size = volume['size']
        new_ldev = self._copy_on_storage(ldev, size)
        # Label the copy with the new volume's ID (hyphens stripped).
        self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))
        return {
            'provider_location': str(new_ldev),
        }
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties."""
        return self.create_volume_from_src(volume, src_vref, _STR_VOLUME)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties."""
        return self.create_volume_from_src(volume, snapshot, _STR_SNAPSHOT)
    def delete_pair_based_on_svol(self, pvol, svol_info):
        """Disconnect all volume pairs to which the specified S-VOL belongs."""
        # Implemented by the REST subclass (hbsd_rest).
        raise NotImplementedError()
    def get_pair_info(self, ldev):
        """Return volume pair info(LDEV number, pair status and pair type)."""
        raise NotImplementedError()
def delete_pair(self, ldev):
"""Disconnect all volume pairs to which the specified LDEV belongs."""
pair_info = self.get_pair_info(ldev)
if not pair_info:
return
if pair_info['pvol'] == ldev:
utils.output_log(
MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'])
raise utils.HBSDBusy()
else:
self.delete_pair_based_on_svol(
pair_info['pvol'], pair_info['svol_info'][0])
    def find_all_mapped_targets_from_storage(self, targets, ldev):
        """Add all port-gids connected with the LDEV to the list."""
        # Implemented by the REST subclass (hbsd_rest).
        raise NotImplementedError()
    def unmap_ldev(self, targets, ldev):
        """Delete the LUN between the specified LDEV and port-gid."""
        raise NotImplementedError()
    def unmap_ldev_from_storage(self, ldev):
        """Delete the connection between the specified LDEV and servers."""
        # Collect every mapped target first, then unmap them in one pass.
        targets = {
            'list': [],
        }
        self.find_all_mapped_targets_from_storage(targets, ldev)
        self.unmap_ldev(targets, ldev)
def delete_ldev(self, ldev):
"""Delete the specified LDEV."""
self.delete_pair(ldev)
self.unmap_ldev_from_storage(ldev)
self.delete_ldev_from_storage(ldev)
    def delete_volume(self, volume):
        """Delete the specified volume."""
        ldev = utils.get_ldev(volume)
        if ldev is None:
            # No LDEV recorded for the volume; log and treat as deleted.
            utils.output_log(
                MSG.INVALID_LDEV_FOR_DELETION,
                method='delete_volume', id=volume['id'])
            return
        try:
            self.delete_ldev(ldev)
        except utils.HBSDBusy:
            # Translate a busy LDEV into Cinder's standard busy error.
            raise exception.VolumeIsBusy(volume_name=volume['name'])
    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        src_vref = snapshot.volume
        ldev = utils.get_ldev(src_vref)
        if ldev is None:
            msg = utils.output_log(
                MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                type='volume', id=src_vref['id'])
            raise utils.HBSDError(msg)
        size = snapshot['volume_size']
        # True marks the copy as a snapshot-type pair on the storage.
        new_ldev = self._copy_on_storage(ldev, size, True)
        return {
            'provider_location': str(new_ldev),
        }
    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        ldev = utils.get_ldev(snapshot)
        if ldev is None:
            # No LDEV recorded; log and treat as deleted.
            utils.output_log(
                MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
                id=snapshot['id'])
            return
        try:
            self.delete_ldev(ldev)
        except utils.HBSDBusy:
            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
    def get_pool_info(self):
        """Return the total and free capacity of the storage pool."""
        raise NotImplementedError()
def update_volume_stats(self):
"""Update properties, capabilities and current states of the driver."""
data = {}
backend_name = (self.conf.safe_get('volume_backend_name') or
self.driver_info['volume_backend_name'])
data = {
'volume_backend_name': backend_name,
'vendor_name': 'Hitachi',
'driver_version': VERSION,
'storage_protocol': self.storage_info['protocol'],
'pools': [],
}
single_pool = {}
single_pool.update(dict(
pool_name=data['volume_backend_name'],
reserved_percentage=self.conf.safe_get('reserved_percentage'),
QoS_support=False,
thick_provisioning_support=False,
multiattach=True
))
try:
(total_capacity, free_capacity,
provisioned_capacity) = self.get_pool_info()
except utils.HBSDError:
single_pool.update(dict(
provisioned_capacity_gb=0,
backend_state='down'))
data["pools"].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
self._stats = data
utils.output_log(
MSG.POOL_INFO_RETRIEVAL_FAILED,
pool=self.conf.hitachi_pool)
return
single_pool.update(dict(
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
volume_utils.get_max_over_subscription_ratio(
self.conf.safe_get('max_over_subscription_ratio'),
True)),
thin_provisioning_support=True
))
single_pool.update(dict(backend_state='up'))
data["pools"].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
self._stats = data
def get_volume_stats(self, refresh=False):
"""Return properties, capabilities and current states of the driver."""
if refresh:
self.update_volume_stats()
return self._stats
    def discard_zero_page(self, volume):
        """Return the volume's no-data pages to the storage pool.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def check_pair_svol(self, ldev):
        """Check if the specified LDEV is S-VOL in a copy pair.

        Abstract hook implemented by the storage-interface subclass;
        must return a truthy value when the LDEV is a pair S-VOL.
        """
        raise NotImplementedError()
    def extend_ldev(self, ldev, old_size, new_size):
        """Extend the specified LDEV to the specified new size.

        Abstract hook implemented by the storage-interface subclass.
        Sizes are in GB (see extend_volume()).
        """
        raise NotImplementedError()
def extend_volume(self, volume, new_size):
"""Extend the specified volume to the specified size."""
ldev = utils.get_ldev(volume)
if ldev is None:
msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
volume_id=volume['id'])
raise utils.HBSDError(msg)
if self.check_pair_svol(ldev):
msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
volume_id=volume['id'])
raise utils.HBSDError(msg)
self.delete_pair(ldev)
self.extend_ldev(ldev, volume['size'], new_size)
    def get_ldev_by_name(self, name):
        """Get the LDEV number from the given name.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def check_ldev_manageability(self, ldev, existing_ref):
        """Check if the LDEV meets the criteria for being managed.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
def manage_existing(self, volume, existing_ref):
"""Return volume properties which Cinder needs to manage the volume."""
if 'source-name' in existing_ref:
ldev = self.get_ldev_by_name(
existing_ref.get('source-name').replace('-', ''))
elif 'source-id' in existing_ref:
ldev = _str2int(existing_ref.get('source-id'))
self.check_ldev_manageability(ldev, existing_ref)
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
return {
'provider_location': str(ldev),
}
    def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
        """Return the size[GB] of the specified LDEV.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
def manage_existing_get_size(self, existing_ref):
"""Return the size[GB] of the specified volume."""
ldev = None
if 'source-name' in existing_ref:
ldev = self.get_ldev_by_name(
existing_ref.get('source-name').replace("-", ""))
elif 'source-id' in existing_ref:
ldev = _str2int(existing_ref.get('source-id'))
if ldev is None:
msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
return self.get_ldev_size_in_gigabyte(ldev, existing_ref)
def unmanage(self, volume):
"""Prepare the volume for removing it from Cinder management."""
ldev = utils.get_ldev(volume)
if ldev is None:
utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
id=volume['id'])
return
if self.check_pair_svol(ldev):
utils.output_log(
MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
volume_type=utils.NORMAL_LDEV_TYPE)
raise exception.VolumeIsBusy(volume_name=volume['name'])
try:
self.delete_pair(ldev)
except utils.HBSDBusy:
raise exception.VolumeIsBusy(volume_name=volume['name'])
def _range2list(self, param):
"""Analyze a 'xxx-xxx' string and return a list of two integers."""
values = [_str2int(value) for value in
self.conf.safe_get(param).split('-')]
if len(values) != 2 or None in values or values[0] > values[1]:
msg = utils.output_log(MSG.INVALID_PARAMETER, param=param)
raise utils.HBSDError(msg)
return values
def check_param_iscsi(self):
"""Check iSCSI-related parameter values and consistency among them."""
if self.conf.use_chap_auth:
if not self.conf.chap_username:
msg = utils.output_log(MSG.INVALID_PARAMETER,
param='chap_username')
raise utils.HBSDError(msg)
if not self.conf.chap_password:
msg = utils.output_log(MSG.INVALID_PARAMETER,
param='chap_password')
raise utils.HBSDError(msg)
    def check_param(self):
        """Check parameter values and consistency among them.

        Validates both inherited driver options and Hitachi-specific
        options, parses the configured LDEV range into
        ``storage_info['ldev_range']``, and delegates iSCSI-specific
        checks when the protocol is iSCSI.

        :raises utils.HBSDError: on any invalid or missing option
        """
        utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
        utils.check_opts(self.conf, COMMON_VOLUME_OPTS)
        utils.check_opts(self.conf, self.driver_info['volume_opts'])
        if self.conf.hitachi_ldev_range:
            self.storage_info['ldev_range'] = self._range2list(
                'hitachi_ldev_range')
        # At least one kind of target port list must be configured.
        if (not self.conf.hitachi_target_ports and
                not self.conf.hitachi_compute_target_ports):
            msg = utils.output_log(
                MSG.INVALID_PARAMETER,
                param='hitachi_target_ports or '
                'hitachi_compute_target_ports')
            raise utils.HBSDError(msg)
        # Deleting groups only makes sense if the driver also creates them.
        if (self.conf.hitachi_group_delete and
                not self.conf.hitachi_group_create):
            msg = utils.output_log(
                MSG.INVALID_PARAMETER,
                param='hitachi_group_delete or '
                'hitachi_group_create')
            raise utils.HBSDError(msg)
        for opt in _REQUIRED_COMMON_OPTS:
            if not self.conf.safe_get(opt):
                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
                raise utils.HBSDError(msg)
        if self.storage_info['protocol'] == 'iSCSI':
            self.check_param_iscsi()
    def need_client_setup(self):
        """Check if the making of the communication client is necessary.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def setup_client(self):
        """Initialize RestApiClient.

        No-op here; overridden by the REST storage-interface subclass.
        """
        pass
    def enter_keep_session(self):
        """Begin the keeping of the session.

        No-op here; overridden by the REST storage-interface subclass.
        """
        pass
    def check_pool_id(self):
        """Check the pool id of hitachi_pool and hitachi_snap_pool.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
def connect_storage(self):
"""Prepare for using the storage."""
self.check_pool_id()
utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
value=self.storage_info['pool_id'])
self.storage_info['controller_ports'] = []
self.storage_info['compute_ports'] = []
    def find_targets_from_storage(self, targets, connector, target_ports):
        """Find mapped ports, memorize them and return unmapped port count.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
def get_hba_ids_from_connector(self, connector):
"""Return the HBA ID stored in the connector."""
if self.driver_info['hba_id'] in connector:
return connector[self.driver_info['hba_id']]
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
resource=self.driver_info['hba_id_type'])
raise utils.HBSDError(msg)
    def create_target_to_storage(self, port, connector, hba_ids):
        """Create a host group or an iSCSI target on the specified port.

        Abstract hook; must return a (target_name, gid) tuple
        (see _create_target()).
        """
        raise NotImplementedError()
    def set_target_mode(self, port, gid):
        """Configure the target to meet the environment.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def set_hba_ids(self, port, gid, hba_ids):
        """Connect all specified HBAs with the specified port.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def delete_target_from_storage(self, port, gid):
        """Delete the host group or the iSCSI target from the port.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def _create_target(self, targets, port, connector, hba_ids):
        """Create a host group or an iSCSI target on the storage port.

        On success, marks the port as mapped in ``targets['info']`` and
        records the (port, gid) pair in ``targets['list']``. If target
        configuration fails after creation, the half-configured target is
        deleted before the original exception is re-raised.
        """
        target_name, gid = self.create_target_to_storage(
            port, connector, hba_ids)
        utils.output_log(MSG.OBJECT_CREATED, object='a target',
                         details='port: %(port)s, gid: %(gid)s, target_name: '
                         '%(target)s' %
                         {'port': port, 'gid': gid, 'target': target_name})
        try:
            self.set_target_mode(port, gid)
            self.set_hba_ids(port, gid, hba_ids)
        except Exception:
            # Roll back the created target so no orphan is left behind,
            # then re-raise the original error.
            with excutils.save_and_reraise_exception():
                self.delete_target_from_storage(port, gid)
        targets['info'][port] = True
        targets['list'].append((port, gid))
    def create_mapping_targets(self, targets, connector):
        """Create server-storage connection for all specified storage ports.

        Per-port creation failures are logged but not raised: another
        thread may have created the host group concurrently, in which case
        a final re-scan of the storage picks it up.
        """
        hba_ids = self.get_hba_ids_from_connector(connector)
        for port in targets['info'].keys():
            if targets['info'][port]:
                continue
            try:
                self._create_target(targets, port, connector, hba_ids)
            except utils.HBSDError:
                utils.output_log(
                    self.driver_info['msg_id']['target'], port=port)

        # When other threads created a host group at same time, need to
        # re-find targets.
        if not targets['list']:
            self.find_targets_from_storage(
                targets, connector, targets['info'].keys())
    def init_cinder_hosts(self, **kwargs):
        """Initialize server-storage connection.

        Maps the controller node's own HBAs to the storage's controller
        ports (creating targets if ``hitachi_group_create`` is set), so
        that image copy operations can attach volumes locally.

        :param kwargs: may contain a pre-built ``targets`` dict; a fresh
            one is used otherwise
        :raises utils.HBSDError: if no usable target exists afterwards
        """
        targets = kwargs.pop(
            'targets', {'info': {}, 'list': [], 'iqns': {}, 'target_map': {}})
        connector = cinder_utils.brick_get_connector_properties(
            multipath=self.conf.use_multipath_for_image_xfer,
            enforce_multipath=self.conf.enforce_multipath_for_image_xfer)
        target_ports = self.storage_info['controller_ports']

        if target_ports:
            # find_targets_from_storage() returns the number of ports that
            # still lack a mapping; only then do we create new targets.
            if (self.find_targets_from_storage(
                    targets, connector, target_ports) and
                    self.conf.hitachi_group_create):
                self.create_mapping_targets(targets, connector)

            utils.require_target_existed(targets)
    def do_setup(self, context):
        """Prepare for the startup of the driver.

        Validates configuration, creates the REST client session when
        needed, and connects to the storage. The storage-facing part runs
        under a distributed lock so concurrent backend setups do not
        conflict.

        :param context: request context, kept on the instance for later use
        """
        @coordination.synchronized('{self.lock[do_setup]}')
        def _with_synchronized(self):
            # Storage-side initialization must not run concurrently with
            # another process setting up the same backend.
            self.connect_storage()
            self.init_cinder_hosts()

        self.ctxt = context
        self.check_param()
        if self.need_client_setup():
            self.setup_client()
            self.enter_keep_session()
        _with_synchronized(self)
def check_ports_info(self):
"""Check if available storage ports exist."""
if (self.conf.hitachi_target_ports and
not self.storage_info['controller_ports']):
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
resource="Target ports")
raise utils.HBSDError(msg)
if (self.conf.hitachi_compute_target_ports and
not self.storage_info['compute_ports']):
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
resource="Compute target ports")
raise utils.HBSDError(msg)
utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
value=self.storage_info['controller_ports'])
utils.output_log(MSG.SET_CONFIG_VALUE,
object='compute target port list',
value=self.storage_info['compute_ports'])
    def attach_ldev(self, volume, ldev, connector, targets):
        """Initialize connection between the server and the volume.

        Abstract hook; must return the assigned target LUN
        (see initialize_connection()).
        """
        raise NotImplementedError()
def get_properties_fc(self, targets):
"""Return FC-specific server-LDEV connection info."""
data = {}
data['target_wwn'] = [
self.storage_info['wwns'][target[0]] for target in targets['list']
if targets['lun'][target[0]]]
return data
def get_properties_iscsi(self, targets, multipath):
"""Return iSCSI-specific server-LDEV connection info."""
data = {}
primary_target = targets['list'][0]
if not multipath:
data['target_portal'] = self.storage_info[
'portals'][primary_target[0]]
data['target_iqn'] = targets['iqns'][primary_target]
else:
# Set the list of numbers that LUN was added
data['target_portals'] = [
self.storage_info['portals'][target[0]] for target in
targets['list'] if targets['lun'][target[0]]]
data['target_iqns'] = [
targets['iqns'][target] for target in targets['list']
if targets['lun'][target[0]]]
if self.conf.use_chap_auth:
data['auth_method'] = 'CHAP'
data['auth_username'] = self.conf.chap_username
data['auth_password'] = self.conf.chap_password
return data
def get_properties(self, targets, target_lun, connector):
"""Return server-LDEV connection info."""
multipath = connector.get('multipath', False)
if self.storage_info['protocol'] == 'FC':
data = self.get_properties_fc(targets)
elif self.storage_info['protocol'] == 'iSCSI':
data = self.get_properties_iscsi(targets, multipath)
data['target_discovered'] = False
if not multipath or self.storage_info['protocol'] == 'FC':
data['target_lun'] = target_lun
else:
# Set the list of numbers that LUN was added
target_luns = []
for target in targets['list']:
if targets['lun'][target[0]]:
target_luns.append(target_lun)
data['target_luns'] = target_luns
return data
# A synchronization to prevent conflicts between host group creation
# and deletion.
@coordination.synchronized('hbsd-host-{self.conf.hitachi_storage_id}-'
'{connector[host]}')
def initialize_connection(self, volume, connector):
"""Initialize connection between the server and the volume."""
targets = {
'info': {},
'list': [],
'lun': {},
'iqns': {},
'target_map': {},
}
ldev = utils.get_ldev(volume)
if ldev is None:
msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
volume_id=volume['id'])
raise utils.HBSDError(msg)
target_lun = self.attach_ldev(volume, ldev, connector, targets)
return {
'driver_volume_type': self.driver_info['volume_type'],
'data': self.get_properties(targets, target_lun, connector),
}
def get_target_ports(self, connector):
"""Return a list of ports corresponding to the specified connector."""
if 'ip' in connector and connector['ip'] == CONF.my_ip:
return self.storage_info['controller_ports']
return (self.storage_info['compute_ports'] or
self.storage_info['controller_ports'])
    def get_port_hostgroup_map(self, ldev_id):
        """Get the mapping of a port and host group.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def set_terminate_target(self, fake_connector, port_hostgroup_map):
        """Set necessary information in connector in terminate.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def detach_ldev(self, volume, ldev, connector):
        """Terminate connection between the server and the volume.

        Abstract hook; must return the targets that were unmapped
        (see terminate_connection()).
        """
        raise NotImplementedError()
    def terminate_connection(self, volume, connector):
        """Terminate connection between the server and the volume.

        :returns: for FC, a dict with the WWNs of the unmapped targets
            (used by the FC zone manager); None otherwise
        """
        ldev = utils.get_ldev(volume)
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
                             volume_id=volume['id'])
            return
        # When the host is down, nova passes a fake connector without a
        # 'host' key. In that case derive the target information from the
        # LDEV's current port/host-group mappings so the detach can still
        # proceed, and build the lock name from the (None) host value so
        # all such fake connectors to this storage are serialized.
        if 'host' not in connector:
            port_hostgroup_map = self.get_port_hostgroup_map(ldev)
            if not port_hostgroup_map:
                utils.output_log(MSG.NO_LUN, ldev=ldev)
                return
            self.set_terminate_target(connector, port_hostgroup_map)

        # A synchronization to prevent conflicts between host group creation
        # and deletion.
        @coordination.synchronized(
            'hbsd-host-%(storage_id)s-%(host)s' % {
                'storage_id': self.conf.hitachi_storage_id,
                'host': connector.get('host'),
            }
        )
        def inner(self, volume, connector):
            deleted_targets = self.detach_ldev(volume, ldev, connector)
            if self.storage_info['protocol'] == 'FC':
                target_wwn = [
                    self.storage_info['wwns'][target]
                    for target in deleted_targets]
                return {'driver_volume_type': self.driver_info['volume_type'],
                        'data': {'target_wwn': target_wwn}}
        return inner(self, volume, connector)
    def unmanage_snapshot(self, snapshot):
        """Output error message and raise NotImplementedError.

        Unmanaging snapshots is not supported by this driver; the log
        message tells the operator why the request failed.
        """
        utils.output_log(
            MSG.SNAPSHOT_UNMANAGE_FAILED, snapshot_id=snapshot['id'])
        raise NotImplementedError()
    def retype(self):
        """Retype is not supported; always tell Cinder to fall back.

        Returning False makes Cinder perform a migration-based retype
        instead of a driver-assisted one.
        """
        return False
    def has_snap_pair(self, pvol, svol):
        """Check if the volume have the pair of the snapshot.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
    def restore_ldev(self, pvol, svol):
        """Restore a pair of the specified LDEV.

        Abstract hook implemented by the storage-interface subclass.
        """
        raise NotImplementedError()
def revert_to_snapshot(self, volume, snapshot):
"""Rollback the specified snapshot."""
pvol = utils.get_ldev(volume)
svol = utils.get_ldev(snapshot)
if (pvol is not None and
svol is not None and
self.has_snap_pair(pvol, svol)):
self.restore_ldev(pvol, svol)
else:
raise NotImplementedError()

View File

@ -0,0 +1,230 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fibre channel module for Hitachi HBSD Driver."""
from oslo_config import cfg
from cinder import interface
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.volume.drivers.hitachi import hbsd_utils as utils
# FC-specific configuration options, registered in the shared backend group.
FC_VOLUME_OPTS = [
    cfg.BoolOpt(
        'hitachi_zoning_request',
        default=False,
        help='If True, the driver will configure FC zoning between the server '
             'and the storage system provided that FC zoning manager is '
             'enabled.'),
]

MSG = utils.HBSDMsg

# Protocol-specific facts handed to the common layer so it can behave as
# the FC variant (connector key, log messages, backend name, ...).
_DRIVER_INFO = {
    'proto': 'FC',
    'hba_id': 'wwpns',
    'hba_id_type': 'World Wide Name',
    'msg_id': {
        'target': MSG.CREATE_HOST_GROUP_FAILED,
    },
    'volume_backend_name': '%(prefix)sFC' % {
        'prefix': utils.DRIVER_PREFIX,
    },
    'volume_opts': FC_VOLUME_OPTS,
    'volume_type': 'fibre_channel',
}

CONF = cfg.CONF
CONF.register_opts(FC_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class HBSDFCDriver(driver.FibreChannelDriver):
    """Fibre channel class for Hitachi HBSD Driver.

    Thin protocol wrapper: every data-path operation is delegated to
    ``self.common`` (the REST-based common layer built from _DRIVER_INFO).

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver.
        1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods
        2.0.0 - Major redesign of the driver. This version requires the REST
                API for communication with the storage backend.

    """

    VERSION = common.VERSION

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Hitachi_VSP_CI"

    def __init__(self, *args, **kwargs):
        """Initialize instance variables."""
        utils.output_log(MSG.DRIVER_INITIALIZATION_START,
                         driver=self.__class__.__name__,
                         version=self.get_version())
        super(HBSDFCDriver, self).__init__(*args, **kwargs)

        self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
        self.configuration.append_config_values(FC_VOLUME_OPTS)
        # Common layer holding all storage-side logic for the FC variant.
        self.common = utils.import_object(
            self.configuration, _DRIVER_INFO, kwargs.get('db'))

    def check_for_setup_error(self):
        # Configuration is validated in do_setup() by the common layer.
        pass

    @cinder_utils.trace
    def create_volume(self, volume):
        """Create a volume and return its properties."""
        return self.common.create_volume(volume)

    @cinder_utils.trace
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties."""
        return self.common.create_volume_from_snapshot(volume, snapshot)

    @cinder_utils.trace
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties."""
        return self.common.create_cloned_volume(volume, src_vref)

    @cinder_utils.trace
    def delete_volume(self, volume):
        """Delete the specified volume."""
        self.common.delete_volume(volume)

    @cinder_utils.trace
    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        return self.common.create_snapshot(snapshot)

    @cinder_utils.trace
    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        self.common.delete_snapshot(snapshot)

    def local_path(self, volume):
        pass

    def get_volume_stats(self, refresh=False):
        """Return properties, capabilities and current states of the driver."""
        data = self.common.get_volume_stats(refresh)
        if 'pools' in data:
            # Scheduler filter/goodness functions are per-driver settings,
            # so they are attached here rather than in the common layer.
            data["pools"][0]["filter_function"] = self.get_filter_function()
            data["pools"][0]["goodness_function"] = (
                self.get_goodness_function())
        return data

    @cinder_utils.trace
    def update_migrated_volume(
            self, ctxt, volume, new_volume, original_volume_status):
        """Do any remaining jobs after migration."""
        self.common.discard_zero_page(new_volume)
        super(HBSDFCDriver, self).update_migrated_volume(
            ctxt, volume, new_volume, original_volume_status)

    @cinder_utils.trace
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        super(HBSDFCDriver, self).copy_image_to_volume(
            context, volume, image_service, image_id)
        # Reclaim zero pages written during the image copy.
        self.common.discard_zero_page(volume)

    @cinder_utils.trace
    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size."""
        self.common.extend_volume(volume, new_size)

    @cinder_utils.trace
    def manage_existing(self, volume, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        return self.common.manage_existing(volume, existing_ref)

    @cinder_utils.trace
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size[GB] of the specified volume."""
        return self.common.manage_existing_get_size(existing_ref)

    @cinder_utils.trace
    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        self.common.unmanage(volume)

    @cinder_utils.trace
    def do_setup(self, context):
        """Prepare for the startup of the driver."""
        self.common.do_setup(context)

    def ensure_export(self, context, volume):
        """Synchronously recreate an export for a volume."""
        pass

    def create_export(self, context, volume, connector):
        """Export the volume."""
        pass

    def remove_export(self, context, volume):
        """Remove an export for a volume."""
        pass

    def create_export_snapshot(self, context, snapshot, connector):
        pass

    def remove_export_snapshot(self, context, snapshot):
        pass

    @cinder_utils.trace
    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        return self.common.initialize_connection(volume, connector)

    @cinder_utils.trace
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection between the server and the volume."""
        if connector is None:
            connector = {}
        # Keep the mapping while another attachment on the same host
        # still uses it.
        if utils.is_shared_connection(volume, connector):
            return
        self.common.terminate_connection(volume, connector)

    @cinder_utils.trace
    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Initialize connection between the server and the snapshot."""
        return self.common.initialize_connection(snapshot, connector)

    @cinder_utils.trace
    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Terminate connection between the server and the snapshot."""
        self.common.terminate_connection(snapshot, connector)

    @cinder_utils.trace
    def unmanage_snapshot(self, snapshot):
        """Prepare the snapshot for removing it from Cinder management."""
        return self.common.unmanage_snapshot(snapshot)

    @cinder_utils.trace
    def retype(self, ctxt, volume, new_type, diff, host):
        """Retype the specified volume."""
        return self.common.retype()

    def backup_use_temp_snapshot(self):
        return True

    @cinder_utils.trace
    def revert_to_snapshot(self, context, volume, snapshot):
        """Rollback the specified snapshot"""
        return self.common.revert_to_snapshot(volume, snapshot)

View File

@ -0,0 +1,214 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""iSCSI module for Hitachi HBSD Driver."""
from cinder import interface
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.volume.drivers.hitachi import hbsd_utils as utils
MSG = utils.HBSDMsg

# Protocol-specific facts handed to the common layer so it can behave as
# the iSCSI variant (connector key, log messages, backend name, ...).
_DRIVER_INFO = {
    'proto': 'iSCSI',
    'hba_id': 'initiator',
    'hba_id_type': 'iSCSI initiator IQN',
    'msg_id': {
        'target': MSG.CREATE_ISCSI_TARGET_FAILED,
    },
    'volume_backend_name': '%(prefix)siSCSI' % {
        'prefix': utils.DRIVER_PREFIX,
    },
    # No iSCSI-specific configuration options.
    'volume_opts': [],
    'volume_type': 'iscsi',
}
@interface.volumedriver
class HBSDISCSIDriver(driver.ISCSIDriver):
    """iSCSI class for Hitachi HBSD Driver.

    Thin protocol wrapper: every data-path operation is delegated to
    ``self.common`` (the REST-based common layer built from _DRIVER_INFO).

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver.
        1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods
        2.0.0 - Major redesign of the driver. This version requires the REST
                API for communication with the storage backend.

    """

    VERSION = common.VERSION

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Hitachi_VSP_CI"

    def __init__(self, *args, **kwargs):
        """Initialize instance variables."""
        utils.output_log(MSG.DRIVER_INITIALIZATION_START,
                         driver=self.__class__.__name__,
                         version=self.get_version())
        super(HBSDISCSIDriver, self).__init__(*args, **kwargs)

        self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
        # Common layer holding all storage-side logic for the iSCSI variant.
        self.common = utils.import_object(
            self.configuration, _DRIVER_INFO, kwargs.get('db'))

    def check_for_setup_error(self):
        # Configuration is validated in do_setup() by the common layer.
        pass

    @cinder_utils.trace
    def create_volume(self, volume):
        """Create a volume and return its properties."""
        return self.common.create_volume(volume)

    @cinder_utils.trace
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties."""
        return self.common.create_volume_from_snapshot(volume, snapshot)

    @cinder_utils.trace
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties."""
        return self.common.create_cloned_volume(volume, src_vref)

    @cinder_utils.trace
    def delete_volume(self, volume):
        """Delete the specified volume."""
        self.common.delete_volume(volume)

    @cinder_utils.trace
    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        return self.common.create_snapshot(snapshot)

    @cinder_utils.trace
    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        self.common.delete_snapshot(snapshot)

    def local_path(self, volume):
        pass

    def get_volume_stats(self, refresh=False):
        """Return properties, capabilities and current states of the driver."""
        data = self.common.get_volume_stats(refresh)
        if 'pools' in data:
            # Scheduler filter/goodness functions are per-driver settings,
            # so they are attached here rather than in the common layer.
            data["pools"][0]["filter_function"] = self.get_filter_function()
            data["pools"][0]["goodness_function"] = (
                self.get_goodness_function())
        return data

    @cinder_utils.trace
    def update_migrated_volume(
            self, ctxt, volume, new_volume, original_volume_status):
        """Do any remaining jobs after migration."""
        self.common.discard_zero_page(new_volume)
        super(HBSDISCSIDriver, self).update_migrated_volume(
            ctxt, volume, new_volume, original_volume_status)

    @cinder_utils.trace
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        super(HBSDISCSIDriver, self).copy_image_to_volume(
            context, volume, image_service, image_id)
        # Reclaim zero pages written during the image copy.
        self.common.discard_zero_page(volume)

    @cinder_utils.trace
    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size."""
        self.common.extend_volume(volume, new_size)

    @cinder_utils.trace
    def manage_existing(self, volume, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        return self.common.manage_existing(volume, existing_ref)

    @cinder_utils.trace
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size[GB] of the specified volume."""
        return self.common.manage_existing_get_size(existing_ref)

    @cinder_utils.trace
    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        self.common.unmanage(volume)

    @cinder_utils.trace
    def do_setup(self, context):
        """Prepare for the startup of the driver."""
        self.common.do_setup(context)

    def ensure_export(self, context, volume):
        """Synchronously recreate an export for a volume."""
        pass

    def create_export(self, context, volume, connector):
        """Export the volume."""
        pass

    def remove_export(self, context, volume):
        """Remove an export for a volume."""
        pass

    def create_export_snapshot(self, context, snapshot, connector):
        pass

    def remove_export_snapshot(self, context, snapshot):
        pass

    @cinder_utils.trace
    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        return self.common.initialize_connection(volume, connector)

    @cinder_utils.trace
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection between the server and the volume."""
        if connector is None:
            connector = {}
        # Keep the mapping while another attachment on the same host
        # still uses it.
        if utils.is_shared_connection(volume, connector):
            return
        self.common.terminate_connection(volume, connector)

    @cinder_utils.trace
    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Initialize connection between the server and the snapshot."""
        return self.common.initialize_connection(snapshot, connector)

    @cinder_utils.trace
    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Terminate connection between the server and the snapshot."""
        self.common.terminate_connection(snapshot, connector)

    @cinder_utils.trace
    def unmanage_snapshot(self, snapshot):
        """Prepare the snapshot for removing it from Cinder management."""
        return self.common.unmanage_snapshot(snapshot)

    @cinder_utils.trace
    def retype(self, ctxt, volume, new_type, diff, host):
        """Retype the specified volume."""
        return self.common.retype()

    def backup_use_temp_snapshot(self):
        return True

    @cinder_utils.trace
    def revert_to_snapshot(self, context, volume, snapshot):
        """Rollback the specified snapshot"""
        return self.common.revert_to_snapshot(volume, snapshot)

View File

@ -0,0 +1,788 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""REST interface module for Hitachi HBSD Driver."""
from collections import defaultdict
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume.drivers.san import san
# REST error codes meaning the LU path is already defined on the port.
_LU_PATH_DEFINED = ('B958', '015A')
# LDEV status string reported by the storage for a healthy LDEV.
NORMAL_STS = 'NML'
_LUN_MAX_WAITTIME = 50
_LUN_RETRY_INTERVAL = 1
# LDEV attribute marking participation in a Thin Image (HTI) pair.
PAIR_ATTR = 'HTI'
_SNAP_MODE = 'A'
_CLONE_MODE = 'C'
_NORMAL_MODE = '-'

# LDEV attribute types that are allowed for manage_existing().
_PERMITTED_TYPES = set(['CVS', 'HDP', 'HDT'])

_CHECK_LDEV_MANAGEABILITY_KEYS = (
    'emulationType', 'numOfPorts', 'attributes', 'status')
_CHECK_LDEV_SIZE_KEYS = ('blockCapacity',)

# Internal pair-status codes mapped from the storage's status strings.
SMPL = 1
PVOL = 2
SVOL = 3

COPY = 2
PAIR = 3
PSUS = 4
PSUE = 5
SMPP = 6
UNKN = 0xff

_STATUS_TABLE = {
    'SMPL': SMPL,
    'COPY': COPY,
    'RCPY': COPY,
    'PAIR': PAIR,
    'PFUL': PAIR,
    'PSUS': PSUS,
    'PFUS': PSUS,
    'SSUS': PSUS,
    'PSUE': PSUE,
    'PSUP': PSUS,
    'SSUP': PSUS,
    'SMPP': SMPP,
}

SNAP_NAME = 'HBSD-snap'
CLONE_NAME = 'HBSD-clone'

_SNAP_HASH_SIZE = 8

# REST API error symbols.
EX_ENOOBJ = 'EX_ENOOBJ'

_REST_DEFAULT_PORT = 443

_GET_LDEV_COUNT = 16384
_MAX_LDEV_ID = 65535
EX_ENLDEV = 'EX_ENLDEV'
EX_INVARG = 'EX_INVARG'
_INVALID_RANGE = [EX_ENLDEV, EX_INVARG]

# REST-specific configuration options, registered in the shared backend group.
REST_VOLUME_OPTS = [
    cfg.BoolOpt(
        'hitachi_rest_tcp_keepalive',
        default=True,
        help='Enables or disables use of REST API tcp keepalive'),
    cfg.BoolOpt(
        'hitachi_discard_zero_page',
        default=True,
        help='Enable or disable zero page reclamation in a DP-VOL.'),
]

# Options from cinder.volume.drivers.san that must be set for REST access.
_REQUIRED_REST_OPTS = [
    'san_login',
    'san_password',
    'san_ip',
]

CONF = cfg.CONF
CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)

LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
def _is_valid_target(target, target_name, target_ports):
    """Check if the specified target is valid.

    A target is valid when its port prefix is one of the configured
    target ports and its name carries the driver's target prefix.
    """
    port = target[:utils.PORT_ID_LENGTH]
    return (port in target_ports and
            target_name.startswith(utils.TARGET_PREFIX))
def _check_ldev_manageability(ldev_info, ldev, existing_ref):
    """Check if the LDEV meets the criteria for being managed.

    The LDEV must be healthy, an OPEN-V DP volume whose attributes are a
    subset of the permitted types, and must not be mapped to any port.

    :raises exception.ManageExistingInvalidReference: if any criterion fails
    """
    if ldev_info['status'] != NORMAL_STS:
        msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=msg)
    attributes = set(ldev_info['attributes'])
    # len(attributes) < 2 rejects plain CVS-only volumes: a manageable
    # DP-VOL carries CVS plus HDP or HDT.
    if (not ldev_info['emulationType'].startswith('OPEN-V') or
            len(attributes) < 2 or not attributes.issubset(_PERMITTED_TYPES)):
        msg = utils.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev,
                               ldevtype=utils.NVOL_LDEV_TYPE)
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=msg)
    # A mapped LDEV cannot be taken over while a host may be using it.
    if ldev_info['numOfPorts']:
        msg = utils.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev)
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=msg)
def _check_ldev_size(ldev_info, ldev, existing_ref):
    """Hitachi storage calculates volume sizes in a block unit, 512 bytes.

    Reject LDEVs whose capacity is not a whole number of gigabytes,
    since Cinder sizes are integral GB.
    """
    block_capacity = ldev_info['blockCapacity']
    if block_capacity % utils.GIGABYTE_PER_BLOCK_SIZE != 0:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref,
            reason=utils.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE,
                                    ldev=ldev))
class HBSDREST(common.HBSDCommon):
    """REST interface class for Hitachi HBSD Driver.

    Implements the backend-specific operations of common.HBSDCommon
    (LDEV lifecycle, LUN mapping, Thin Image snapshot/clone pairs) on top
    of the Hitachi Configuration Manager REST API via
    rest_api.RestApiClient.
    """
    def __init__(self, conf, storage_protocol, db):
        """Initialize instance variables."""
        super(HBSDREST, self).__init__(conf, storage_protocol, db)
        self.conf.append_config_values(REST_VOLUME_OPTS)
        self.conf.append_config_values(san.san_opts)
        # REST API client; created by setup_client().
        self.client = None
    def setup_client(self):
        """Initialize RestApiClient and log in to the REST API server."""
        verify = self.conf.driver_ssl_cert_verify
        if verify:
            # If a CA bundle path is configured, hand the path (not the
            # boolean) to requests for certificate verification.
            verify_path = self.conf.safe_get('driver_ssl_cert_path')
            if verify_path:
                verify = verify_path
        self.verify = verify
        self.client = rest_api.RestApiClient(
            self.conf.san_ip,
            self.conf.san_api_port,
            self.conf.hitachi_storage_id,
            self.conf.san_login,
            self.conf.san_password,
            tcp_keepalive=self.conf.hitachi_rest_tcp_keepalive,
            verify=verify)
        self.client.login()
    def need_client_setup(self):
        """Check if the making of the communication client is necessary."""
        # A client with no live session must be (re)created.
        return not self.client or not self.client.get_my_session()
    def enter_keep_session(self):
        """Begin the keeping of the session."""
        if self.client is not None:
            self.client.enter_keep_session()
    def _create_ldev_on_storage(self, size):
        """Create an LDEV on the storage system.

        :param size: capacity in GiB
        :returns: the new LDEV number assigned by the storage
        """
        body = {
            'byteFormatCapacity': '%sG' % size,
            'poolId': self.storage_info['pool_id'],
            'isParallelExecutionEnabled': True,
        }
        if self.storage_info['ldev_range']:
            # Restrict automatic LDEV number assignment to the configured
            # range.
            min_ldev, max_ldev = self.storage_info['ldev_range'][:2]
            body['startLdevId'] = min_ldev
            body['endLdevId'] = max_ldev
        return self.client.add_ldev(body, no_log=True)
    def create_ldev(self, size):
        """Create an LDEV of the specified size and the specified type."""
        ldev = self._create_ldev_on_storage(size)
        LOG.debug('Created logical device. (LDEV: %s)', ldev)
        return ldev
    def modify_ldev_name(self, ldev, name):
        """Modify LDEV name."""
        body = {'label': name}
        self.client.modify_ldev(ldev, body)
    def delete_ldev_from_storage(self, ldev):
        """Delete the specified LDEV from the storage."""
        result = self.client.get_ldev(ldev)
        if result['emulationType'] == 'NOT DEFINED':
            # Already gone; log and treat the delete as a no-op.
            utils.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev)
            return
        self.client.delete_ldev(
            ldev,
            timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, {'ldev': ldev}))
    def _get_copy_pair_status(self, ldev):
        """Return the status of the volume in a copy pair."""
        # Look the LDEV up first as an S-VOL, then as a P-VOL; SMPL means
        # the LDEV is not in any pair.
        params_s = {"svolLdevId": ldev}
        result_s = self.client.get_snapshots(params_s)
        if not result_s:
            params_p = {"pvolLdevId": ldev}
            result_p = self.client.get_snapshots(params_p)
            if not result_p:
                return SMPL
            return _STATUS_TABLE.get(result_p[0]['status'], UNKN)
        return _STATUS_TABLE.get(result_s[0]['status'], UNKN)
    def _wait_copy_pair_status(self, ldev, status, interval=3,
                               timeout=utils.DEFAULT_PROCESS_WAITTIME):
        """Wait until the S-VOL status changes to the specified status."""
        def _wait_for_copy_pair_status(
                start_time, ldev, status, timeout):
            """Stop looping once the S-VOL reaches one of the statuses."""
            if not isinstance(status, set):
                status = set([status])
            if self._get_copy_pair_status(ldev) in status:
                raise loopingcall.LoopingCallDone()
            if utils.timed_out(start_time, timeout):
                # LoopingCallDone(False) signals a timeout to the caller.
                raise loopingcall.LoopingCallDone(False)
        loop = loopingcall.FixedIntervalLoopingCall(
            _wait_for_copy_pair_status, timeutils.utcnow(),
            ldev, status, timeout)
        if not loop.start(interval=interval).wait():
            msg = utils.output_log(
                MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
            raise utils.HBSDError(msg)
    def _create_snap_pair(self, pvol, svol):
        """Create a snapshot copy pair on the storage."""
        # NOTE(review): 'svol % _SNAP_HASH_SIZE' folds the S-VOL number
        # into the hash-size range for the group name -- confirm this is
        # the intended snapshot-group naming scheme.
        snapshot_name = '%(prefix)s%(svol)s' % {
            'prefix': SNAP_NAME,
            'svol': svol % _SNAP_HASH_SIZE,
        }
        try:
            body = {"snapshotGroupName": snapshot_name,
                    "snapshotPoolId": self.storage_info['snap_pool_id'],
                    "pvolLdevId": pvol,
                    "svolLdevId": svol,
                    "autoSplit": True,
                    "canCascade": True,
                    "isDataReductionForceCopy": True}
            self.client.add_snapshot(body)
        except utils.HBSDError as ex:
            # An invalid snapshot pool while hitachi_snap_pool is unset
            # points at a configuration problem, not a transient error.
            if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
                    rest_api.INVALID_SNAPSHOT_POOL and
                    not self.conf.hitachi_snap_pool):
                msg = utils.output_log(
                    MSG.INVALID_PARAMETER, param='hitachi_snap_pool')
                raise utils.HBSDError(msg)
            else:
                raise
        try:
            self._wait_copy_pair_status(svol, PSUS)
        except Exception:
            # Roll back the half-created pair before re-raising.
            with excutils.save_and_reraise_exception():
                try:
                    self._delete_pair_from_storage(pvol, svol)
                except utils.HBSDError:
                    utils.output_log(
                        MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)
    def _create_clone_pair(self, pvol, svol):
        """Create a clone copy pair on the storage."""
        snapshot_name = '%(prefix)s%(svol)s' % {
            'prefix': CLONE_NAME,
            'svol': svol % _SNAP_HASH_SIZE,
        }
        try:
            body = {"snapshotGroupName": snapshot_name,
                    "snapshotPoolId": self.storage_info['snap_pool_id'],
                    "pvolLdevId": pvol,
                    "svolLdevId": svol,
                    "isClone": True,
                    "clonesAutomation": True,
                    "copySpeed": 'medium',
                    "isDataReductionForceCopy": True}
            self.client.add_snapshot(body)
        except utils.HBSDError as ex:
            if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
                    rest_api.INVALID_SNAPSHOT_POOL and
                    not self.conf.hitachi_snap_pool):
                msg = utils.output_log(
                    MSG.INVALID_PARAMETER, param='hitachi_snap_pool')
                raise utils.HBSDError(msg)
            else:
                raise
        try:
            # A clone pair may transition through SMPP/SMPL while the
            # clone completes, so accept all three states.
            self._wait_copy_pair_status(svol, set([PSUS, SMPP, SMPL]))
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self._delete_pair_from_storage(pvol, svol)
                except utils.HBSDError:
                    utils.output_log(
                        MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)
    def create_pair_on_storage(self, pvol, svol, is_snapshot=False):
        """Create a copy pair on the storage."""
        if is_snapshot:
            self._create_snap_pair(pvol, svol)
        else:
            self._create_clone_pair(pvol, svol)
    def get_ldev_info(self, keys, ldev, **kwargs):
        """Return a dictionary of LDEV-related items.

        Missing keys are returned as None rather than raising.
        """
        d = {}
        result = self.client.get_ldev(ldev, **kwargs)
        for key in keys:
            d[key] = result.get(key)
        return d
    def _wait_copy_pair_deleting(self, ldev):
        """Wait until the LDEV is no longer in a copy pair."""
        def _wait_for_copy_pair_smpl(start_time, ldev):
            """Stop looping once the LDEV has left the copy pair."""
            ldev_info = self.get_ldev_info(['status', 'attributes'], ldev)
            if (ldev_info['status'] != NORMAL_STS or
                    PAIR_ATTR not in ldev_info['attributes']):
                raise loopingcall.LoopingCallDone()
            if utils.timed_out(
                    start_time, utils.DEFAULT_PROCESS_WAITTIME):
                raise loopingcall.LoopingCallDone(False)
        loop = loopingcall.FixedIntervalLoopingCall(
            _wait_for_copy_pair_smpl, timeutils.utcnow(), ldev)
        if not loop.start(interval=10).wait():
            msg = utils.output_log(
                MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
            raise utils.HBSDError(msg)
    def _delete_pair_from_storage(self, pvol, svol):
        """Disconnect the volume pair that consists of the specified LDEVs."""
        params_s = {"svolLdevId": svol}
        result = self.client.get_snapshots(params_s)
        if not result:
            return
        mun = result[0]['muNumber']
        # If the snapshot is in deleting status,
        # not need to call a delete operation.
        if _STATUS_TABLE.get(result[0]['status']) != SMPP:
            self.client.unassign_snapshot_volume(pvol, mun,
                                                 ignore_all_errors=True)
            # EX_ENOOBJ: the snapshot vanished between the two calls.
            ignore_return_code = [EX_ENOOBJ]
            self.client.delete_snapshot(
                pvol, mun, ignore_return_code=ignore_return_code)
        self._wait_copy_pair_deleting(svol)
    def delete_pair_based_on_svol(self, pvol, svol_info):
        """Disconnect all volume pairs to which the specified S-VOL belongs."""
        # If the pair status does not satisfy the execution condition,
        if not (svol_info['is_psus'] or
                _STATUS_TABLE.get(svol_info['status']) == SMPP):
            msg = utils.output_log(
                MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, svol=svol_info['ldev'])
            raise utils.HBSDBusy(msg)
        self._delete_pair_from_storage(pvol, svol_info['ldev'])
    def check_param(self):
        """Check parameter values and consistency among them."""
        super(HBSDREST, self).check_param()
        utils.check_opts(self.conf, REST_VOLUME_OPTS)
        utils.check_opts(self.conf, san.san_opts)
        LOG.debug(
            'Setting ldev_range: %s', self.storage_info['ldev_range'])
        for opt in _REQUIRED_REST_OPTS:
            if not self.conf.safe_get(opt):
                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
                raise utils.HBSDError(msg)
        if not self.conf.safe_get('san_api_port'):
            # Fall back to the REST API server's default port.
            self.conf.san_api_port = _REST_DEFAULT_PORT
    def _find_lun(self, ldev, port, gid):
        """Return the LUN of the LDEV mapped to port/gid, or None."""
        luns_info = self.client.get_luns(port, gid)
        for lun_info in luns_info:
            if lun_info['ldevId'] == ldev:
                return lun_info['lun']
        return None
    def _run_add_lun(self, ldev, port, gid, lun=None):
        """Create a LUN between the specified LDEV and port-gid.

        Returns the assigned LUN, or None when *lun* was requested but is
        already occupied by another LDEV.
        """
        ignore_error = [_LU_PATH_DEFINED]
        if lun is not None:
            ignore_error = [rest_api.ANOTHER_LDEV_MAPPED]
        assigned_lun, errobj = self.client.add_lun(
            port, gid, ldev, lun=lun,
            ignore_error=ignore_error,
            interval=_LUN_RETRY_INTERVAL,
            timeout=_LUN_MAX_WAITTIME)
        err_code = utils.safe_get_err_code(errobj)
        if lun is None:
            if err_code == _LU_PATH_DEFINED:
                # The mapping already exists; reuse its LUN.
                lun = self._find_lun(ldev, port, gid)
                LOG.debug(
                    'An logical unit path has already defined in the '
                    'specified logical device. (LDEV: %(ldev)s, '
                    'port: %(port)s, gid: %(gid)s, lun: %(lun)s)',
                    {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun})
            else:
                lun = assigned_lun
        elif err_code == rest_api.ANOTHER_LDEV_MAPPED:
            utils.output_log(MSG.MAP_LDEV_FAILED,
                             ldev=ldev, port=port, id=gid, lun=lun)
            return None
        LOG.debug(
            'Created logical unit path to the specified logical device. '
            '(LDEV: %(ldev)s, port: %(port)s, '
            'gid: %(gid)s, lun: %(lun)s)',
            {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun})
        return lun
    def map_ldev(self, targets, ldev):
        """Create the path between the server and the LDEV and return LUN."""
        port, gid = targets['list'][0]
        lun = self._run_add_lun(ldev, port, gid)
        targets['lun'][port] = True
        for port, gid in targets['list'][1:]:
            # When multipath is configured, Nova compute expects that
            # target_lun define the same value in all storage target.
            # Therefore, it should use same value of lun in other target.
            try:
                lun2 = self._run_add_lun(ldev, port, gid, lun=lun)
                if lun2 is not None:
                    targets['lun'][port] = True
            except utils.HBSDError:
                # Best effort: a failure on a secondary path is logged,
                # not fatal.
                utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev,
                                 port=port, id=gid, lun=lun)
        return lun
    def attach_ldev(self, volume, ldev, connector, targets):
        """Initialize connection between the server and the volume."""
        target_ports = self.get_target_ports(connector)
        # NOTE(review): find_targets_from_storage() presumably returns
        # truthy when some targets are missing for this connector --
        # confirm against common.HBSDCommon.
        if (self.find_targets_from_storage(
                targets, connector, target_ports) and
                self.conf.hitachi_group_create):
            self.create_mapping_targets(targets, connector)
        utils.require_target_existed(targets)
        targets['list'].sort()
        for port in target_ports:
            targets['lun'][port] = False
        return int(self.map_ldev(targets, ldev))
    def _find_mapped_targets_from_storage(self, targets, ldev, target_ports):
        """Update port-gid list for the specified LDEV."""
        ldev_info = self.get_ldev_info(['ports'], ldev)
        if not ldev_info['ports']:
            return
        for port_info in ldev_info['ports']:
            if _is_valid_target(port_info['portId'],
                                port_info['hostGroupName'],
                                target_ports):
                targets['list'].append(port_info)
    def _get_unmap_targets_list(self, target_list, mapped_list):
        """Return a list of IDs of ports that need to be disconnected."""
        unmap_list = []
        for mapping_info in mapped_list:
            if ((mapping_info['portId'][:utils.PORT_ID_LENGTH],
                 mapping_info['hostGroupNumber'])
                    in target_list):
                unmap_list.append(mapping_info)
        return unmap_list
    def unmap_ldev(self, targets, ldev):
        """Delete the LUN between the specified LDEV and port-gid."""
        interval = _LUN_RETRY_INTERVAL
        # Treat "already deleted" answers as success.
        ignore_return_code = [EX_ENOOBJ]
        ignore_message_id = [rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST]
        timeout = utils.DEFAULT_PROCESS_WAITTIME
        for target in targets['list']:
            port = target['portId']
            gid = target['hostGroupNumber']
            lun = target['lun']
            self.client.delete_lun(port, gid, lun,
                                   interval=interval,
                                   ignore_return_code=ignore_return_code,
                                   ignore_message_id=ignore_message_id,
                                   timeout=timeout)
            LOG.debug(
                'Deleted logical unit path of the specified logical '
                'device. (LDEV: %(ldev)s, host group: %(target)s)',
                {'ldev': ldev, 'target': target})
    def _get_target_luns(self, target):
        """Get the LUN mapping information of the host group."""
        port = target['portId']
        gid = target['hostGroupNumber']
        mapping_list = []
        luns_info = self.client.get_luns(port, gid)
        if luns_info:
            for lun_info in luns_info:
                mapping_list.append((port, gid, lun_info['lun'],
                                     lun_info['ldevId']))
        return mapping_list
    def delete_target_from_storage(self, port, gid):
        """Delete the host group or the iSCSI target from the port.

        Returns 0 on success and 1 on failure (shell-style result code).
        """
        result = 1
        try:
            self.client.delete_host_grp(port, gid)
            result = 0
        except utils.HBSDError:
            utils.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid)
        else:
            LOG.debug(
                'Deleted target. (port: %(port)s, gid: %(gid)s)',
                {'port': port, 'gid': gid})
        return result
    def _clean_mapping_targets(self, targets):
        """Delete the empty host group without LU."""
        deleted_targets = []
        for target in targets['list']:
            if not len(self._get_target_luns(target)):
                port = target['portId']
                gid = target['hostGroupNumber']
                ret = self.delete_target_from_storage(port, gid)
                if not ret:
                    deleted_targets.append(port)
        return deleted_targets
    def detach_ldev(self, volume, ldev, connector):
        """Terminate connection between the server and the volume."""
        targets = {
            'info': {},
            'list': [],
            'iqns': {},
        }
        mapped_targets = {
            'list': [],
        }
        unmap_targets = {}
        deleted_targets = []
        target_ports = self.get_target_ports(connector)
        self.find_targets_from_storage(targets, connector, target_ports)
        self._find_mapped_targets_from_storage(
            mapped_targets, ldev, target_ports)
        unmap_targets['list'] = self._get_unmap_targets_list(
            targets['list'], mapped_targets['list'])
        # Unmap in descending (port, gid) order.
        unmap_targets['list'].sort(
            reverse=True,
            key=lambda port: (port.get('portId'), port.get('hostGroupNumber')))
        self.unmap_ldev(unmap_targets, ldev)
        if self.conf.hitachi_group_delete:
            deleted_targets = self._clean_mapping_targets(unmap_targets)
        return deleted_targets
    def find_all_mapped_targets_from_storage(self, targets, ldev):
        """Add all port-gids connected with the LDEV to the list."""
        ldev_info = self.get_ldev_info(['ports'], ldev)
        if ldev_info['ports']:
            for port in ldev_info['ports']:
                targets['list'].append(port)
    def extend_ldev(self, ldev, old_size, new_size):
        """Extend the specified LDEV to the specified new size.

        Sizes are in GiB; the REST API takes the capacity delta.
        """
        body = {"parameters": {"additionalByteFormatCapacity":
                               '%sG' % (new_size - old_size)}}
        self.client.extend_ldev(ldev, body)
    def get_pool_info(self):
        """Return the total and free capacity of the storage pool."""
        result = self.client.get_pool(
            self.storage_info['pool_id'],
            ignore_message_id=[rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST])
        if 'errorSource' in result:
            msg = utils.output_log(MSG.POOL_NOT_FOUND,
                                   pool=self.storage_info['pool_id'])
            raise utils.HBSDError(msg)
        # The REST API reports capacities in MiB; convert to GiB.
        tp_cap = result['totalPoolCapacity'] / units.Ki
        ta_cap = result['availableVolumeCapacity'] / units.Ki
        tl_cap = result['totalLocatedCapacity'] / units.Ki
        return tp_cap, ta_cap, tl_cap
    def discard_zero_page(self, volume):
        """Return the volume's no-data pages to the storage pool."""
        if self.conf.hitachi_discard_zero_page:
            ldev = utils.get_ldev(volume)
            try:
                self.client.discard_zero_page(ldev)
            except utils.HBSDError:
                # Reclamation is best effort; failure is only logged.
                utils.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev)
    def _get_copy_pair_info(self, ldev):
        """Return info of the copy pair.

        Returns (pvol, [svol_info]) or (None, None) when the LDEV is not
        part of any pair.
        """
        params_p = {"pvolLdevId": ldev}
        result_p = self.client.get_snapshots(params_p)
        if result_p:
            is_psus = _STATUS_TABLE.get(result_p[0]['status']) == PSUS
            pvol, svol = ldev, int(result_p[0]['svolLdevId'])
            status = result_p[0]['status']
        else:
            params_s = {"svolLdevId": ldev}
            result_s = self.client.get_snapshots(params_s)
            if result_s:
                is_psus = _STATUS_TABLE.get(result_s[0]['status']) == PSUS
                pvol, svol = int(result_s[0]['pvolLdevId']), ldev
                status = result_s[0]['status']
            else:
                return None, None
        LOG.debug(
            'Copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, '
            'status: %(status)s)',
            {'pvol': pvol, 'svol': svol, 'status': status})
        return pvol, [{'ldev': svol, 'is_psus': is_psus, 'status': status}]
    def get_pair_info(self, ldev):
        """Return info of the volume pair."""
        pair_info = {}
        ldev_info = self.get_ldev_info(['status', 'attributes'], ldev)
        if (ldev_info['status'] != NORMAL_STS or
                PAIR_ATTR not in ldev_info['attributes']):
            return None
        pvol, svol_info = self._get_copy_pair_info(ldev)
        if pvol is not None:
            pair_info['pvol'] = pvol
            pair_info.setdefault('svol_info', [])
            pair_info['svol_info'].extend(svol_info)
        return pair_info
    def get_ldev_by_name(self, name):
        """Get the LDEV number from the given name.

        Scans DP-VOLs in the configured LDEV range in chunks of
        _GET_LDEV_COUNT and matches on the LDEV label.
        """
        ignore_message_id = ['KART40044-E']
        ignore_return_code = _INVALID_RANGE
        if self.storage_info['ldev_range']:
            start, end = self.storage_info['ldev_range'][:2]
            if end - start + 1 > _GET_LDEV_COUNT:
                cnt = _GET_LDEV_COUNT
            else:
                cnt = end - start + 1
        else:
            start = 0
            end = _MAX_LDEV_ID
            cnt = _GET_LDEV_COUNT
        for current in range(start, end, cnt):
            params = {'headLdevId': current, 'ldevOption': 'dpVolume',
                      'count': cnt}
            ldev_list = self.client.get_ldevs(
                params, ignore_message_id=ignore_message_id,
                ignore_return_code=ignore_return_code)
            for ldev_data in ldev_list:
                if 'label' in ldev_data and name == ldev_data['label']:
                    return ldev_data['ldevId']
        return None
    def check_ldev_manageability(self, ldev, existing_ref):
        """Check if the LDEV meets the criteria for being managed."""
        ldev_info = self.get_ldev_info(
            _CHECK_LDEV_MANAGEABILITY_KEYS, ldev)
        _check_ldev_manageability(ldev_info, ldev, existing_ref)
    def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
        """Return the size[GB] of the specified LDEV."""
        ldev_info = self.get_ldev_info(
            _CHECK_LDEV_SIZE_KEYS, ldev)
        _check_ldev_size(ldev_info, ldev, existing_ref)
        return ldev_info['blockCapacity'] / utils.GIGABYTE_PER_BLOCK_SIZE
    def _get_pool_id(self, name):
        """Get the pool id from specified name."""
        pool_list = self.client.get_pools()
        for pool_data in pool_list:
            if pool_data['poolName'] == name:
                return pool_data['poolId']
        return None
    def check_pool_id(self):
        """Check the pool id of hitachi_pool and hitachi_snap_pool.

        Both options accept either a numeric pool ID or a pool name;
        names are resolved to IDs here. When hitachi_snap_pool is unset,
        the main pool is reused for snapshots.
        """
        pool = self.conf.hitachi_pool
        if pool is not None:
            if pool.isdigit():
                self.storage_info['pool_id'] = int(pool)
            else:
                self.storage_info['pool_id'] = self._get_pool_id(pool)
        if self.storage_info['pool_id'] is None:
            msg = utils.output_log(
                MSG.POOL_NOT_FOUND, pool=self.conf.hitachi_pool)
            raise utils.HBSDError(msg)
        snap_pool = self.conf.hitachi_snap_pool
        if snap_pool is not None:
            if snap_pool.isdigit():
                self.storage_info['snap_pool_id'] = int(snap_pool)
            else:
                self.storage_info['snap_pool_id'] = (
                    self._get_pool_id(snap_pool))
                if self.storage_info['snap_pool_id'] is None:
                    msg = utils.output_log(MSG.POOL_NOT_FOUND,
                                           pool=self.conf.hitachi_snap_pool)
                    raise utils.HBSDError(msg)
        else:
            self.storage_info['snap_pool_id'] = self.storage_info['pool_id']
    def _to_hostgroup(self, port, gid):
        """Get a host group name from host group ID."""
        return self.client.get_host_grp(port, gid)['hostGroupName']
    def get_port_hostgroup_map(self, ldev_id):
        """Get the mapping of a port and host group."""
        hostgroups = defaultdict(list)
        ldev_info = self.get_ldev_info(['ports'], ldev_id)
        if not ldev_info['ports']:
            return hostgroups
        for port in ldev_info['ports']:
            portId = port["portId"]
            hostgroup = self._to_hostgroup(
                portId, port["hostGroupNumber"])
            hostgroups[portId].append(hostgroup)
        return hostgroups
    def check_pair_svol(self, ldev):
        """Check if the specified LDEV is S-VOL in a copy pair."""
        ldev_info = self.get_ldev_info(['status',
                                        'snapshotPoolId'], ldev)
        if ldev_info['status'] != NORMAL_STS:
            return False
        if ldev_info['snapshotPoolId'] is not None:
            _, svol_info = self._get_copy_pair_info(ldev)
            # NOTE(review): 'PSUP' is compared against the raw REST status
            # string (not via _STATUS_TABLE) -- confirm the deletion-in-
            # progress status value.
            if svol_info and svol_info[0]['status'] == 'PSUP':
                self._wait_copy_pair_deleting(ldev)
                return False
            else:
                return True
        return False
    def restore_ldev(self, pvol, svol):
        """Restore a pair of the specified LDEV."""
        timeout = utils.MAX_PROCESS_WAITTIME
        params_s = {"svolLdevId": svol}
        result = self.client.get_snapshots(params_s)
        mun = result[0]['muNumber']
        body = {"parameters": {"autoSplit": True}}
        self.client.restore_snapshot(pvol, mun, body)
        # Wait for the restore to finish and the pair to re-split.
        self._wait_copy_pair_status(
            svol, PSUS, timeout=timeout, interval=10)
    def has_snap_pair(self, pvol, svol):
        """Check if the volume have the pair of the snapshot."""
        ldev_info = self.get_ldev_info(['status', 'attributes'], svol)
        if (ldev_info['status'] != NORMAL_STS or
                PAIR_ATTR not in ldev_info['attributes']):
            return False
        params_s = {"svolLdevId": svol}
        result = self.client.get_snapshots(params_s)
        if not result:
            return False
        return (result[0]['primaryOrSecondary'] == "S-VOL" and
                int(result[0]['pvolLdevId']) == pvol)

View File

@ -0,0 +1,758 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
REST API client class for Hitachi HBSD Driver.
"""
from http import client as httpclient
import threading
from eventlet import greenthread
from keystoneauth1.session import TCPKeepAliveAdapter
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import timeutils
import requests
from cinder.volume.drivers.hitachi import hbsd_utils as utils
# Timeouts and intervals are in seconds.
_LOCK_WAITTIME = 2 * 60 * 60
_EXEC_MAX_WAITTIME = 30
_EXTEND_WAITTIME = 10 * 60
_EXEC_RETRY_INTERVAL = 5
_DEFAULT_CONNECT_TIMEOUT = 30
_RESPONSE_TIMEOUT_TOLERANCE = 30
_JOB_API_RESPONSE_TIMEOUT = 30 * 60
_GET_API_RESPONSE_TIMEOUT = 30 * 60
_REST_SERVER_BUSY_TIMEOUT = 2 * 60 * 60
_REST_SERVER_RESTART_TIMEOUT = 10 * 60
_REST_SERVER_ERROR_TIMEOUT = 10 * 60
_KEEP_SESSION_LOOP_INTERVAL = 3 * 60
_HTTPS = 'https://'
# (error code, sub code) pairs reported while a resource group is locked.
_REST_LOCKED_ERRORS = [
    ('2E11', '2205'),
    ('2E11', '2207'),
]
# Well-known storage error codes, exported for the driver layer.
LDEV_ALREADY_DEFINED = ('2E22', '0001')
NO_AVAILABLE_LDEV_ID = ('2E11', '2209')
INVALID_SNAPSHOT_POOL = ('2E30', '600E')
_MSGID_REST_SERVER_BUSY = ('KART00003-E',)
_MSGID_LOCK_FAILURE = ('KART40050-E', 'KART40051-E', 'KART40052-E')
EXCEED_WWN_MAX = ('B957', '4184')
ANOTHER_LDEV_MAPPED = ('B958', '0947')
# Errors that will not be fixed by retrying the request.
REST_NO_RETRY_ERRORS = [
    ('2E10', '9705'),
    ('2E10', '9706'),
    ('2E10', '9707'),
    ('2E11', '8303'),
    ('2E30', '0007'),
    ('B956', '3173'),
    ('B956', '31D7'),
    ('B956', '31D9'),
    ('B957', '4188'),
    ('B958', '015A'),
    ('B958', '015E'),
    LDEV_ALREADY_DEFINED,
    NO_AVAILABLE_LDEV_ID,
    EXCEED_WWN_MAX,
    INVALID_SNAPSHOT_POOL,
]
MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST = 'KART30013-E'
# Message IDs that must never trigger a retry.
_REST_NO_RETRY_MESSAGEIDS = [
    MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST
]
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
def _build_base_url(ip_addr, ip_port):
return '%(https)s%(ip)s:%(port)s/ConfigurationManager' % {
'https': _HTTPS,
'ip': ip_addr,
'port': ip_port,
}
class ResponseData(dict):
    """Parsed view of a REST API response.

    The raw response and its derived fields ('status_code', 'rsp_body',
    'errobj') are stored in the dict itself; the methods are predicates
    and accessors used by RestApiClient's retry logic.
    """

    def __init__(self, rsp):
        """Parse *rsp* and populate the derived dictionary entries."""
        super(ResponseData, self).__init__()
        self['rsp'] = rsp
        self['status_code'] = rsp.status_code
        self._init_content()
        self._init_error()

    def is_json(self):
        """Return whether the response carries a JSON payload."""
        rsp = self['rsp']
        return rsp.content and 'json' in rsp.headers['Content-Type']

    def _init_content(self):
        """Decode the payload into self['rsp_body']."""
        rsp = self['rsp']
        if self.is_json():
            body = rsp.json()
        elif rsp.content:
            body = rsp.text
        else:
            body = None
        self['rsp_body'] = body

    def _init_error(self):
        """Extract the error object, if any, into self['errobj']."""
        body = self['rsp_body']
        errobj = {}
        if body:
            if 'errorSource' in body:
                errobj = body
            elif 'error' in body:
                errobj = body['error']
        self['errobj'] = errobj

    def job_succeeded(self):
        """Return whether an asynchronous job finished successfully."""
        if not self.is_json():
            return False
        body = self['rsp_body']
        return (body.get('status') == 'Completed' and
                body.get('state') == 'Succeeded')

    def get_err_code(self):
        """Return the (error code, sub code) pair of the error object."""
        return utils.safe_get_err_code(self['errobj'])

    def get_return_code(self):
        """Return the storage return code of the error object."""
        return utils.safe_get_return_code(self['errobj'])

    def is_success(self, ignore_error, ignore_message_id,
                   ignore_return_code, ignore_all_errors=False):
        """Check the success or failure of the response."""
        if ignore_all_errors:
            return True
        code = self['status_code']
        if code == httpclient.OK:
            return True
        if code == httpclient.ACCEPTED and self.job_succeeded():
            return True
        return (self.get_err_code() in ignore_error or
                self['errobj'].get('messageId') in ignore_message_id or
                self.get_return_code() in ignore_return_code)

    def is_locked(self):
        """Check if a response is the error of the lock factor."""
        errobj = self['errobj']
        if not errobj:
            return False
        if errobj.get('messageId') in _MSGID_LOCK_FAILURE:
            return True
        return (self.get_err_code() in _REST_LOCKED_ERRORS or
                errobj.get('errorCode', {}).get('errorCode') == 'EX_EACCES')

    def is_auth_fail(self):
        """Check if a response is an authorization error."""
        return self['status_code'] == httpclient.UNAUTHORIZED

    def get_message_id(self):
        """Return the message ID of the error object."""
        return utils.safe_get_message_id(self['errobj'])

    def is_no_retry_error(self, no_retry_error_code):
        """Check if a response is a no retry error."""
        if self.is_auth_fail():
            return False
        retriable_codes = list(range(200, 300)) + list(range(500, 600))
        return (self['status_code'] not in retriable_codes or
                self.get_err_code() in no_retry_error_code or
                self.get_message_id() in _REST_NO_RETRY_MESSAGEIDS)

    def is_rest_server_busy(self):
        """Check if a response is a server busy error."""
        errobj = self['errobj']
        if not errobj:
            return False
        return errobj.get('messageId') in _MSGID_REST_SERVER_BUSY

    def get_errobj(self):
        """Return the error object with every field defaulted."""
        keys = ('errorSource', 'messageId', 'message', 'cause', 'solution')
        info = {key: self['errobj'].get(key, '') for key in keys}
        info['errorCode'] = self['errobj'].get('errorCode', {})
        return info

    def get_job_result(self):
        """Return the asynchronous job fields of the payload."""
        body = self['rsp_body']
        return {'job_id': body.get('jobId', ''),
                'status': body.get('status', ''),
                'state': body.get('state', '')}
class RestApiClient():
    def __init__(self, ip_addr, ip_port, storage_device_id,
                 user_id, user_pass, tcp_keepalive=False,
                 verify=False, connect_timeout=_DEFAULT_CONNECT_TIMEOUT):
        """Initialize instance variables.

        :param ip_addr: REST API server address
        :param ip_port: REST API server port
        :param storage_device_id: storage device ID used in REST URLs
        :param user_id: REST API user name
        :param user_pass: REST API password
        :param tcp_keepalive: mount a TCP-keepalive adapter on sessions
        :param verify: certificate verification flag or CA bundle path
        :param connect_timeout: TCP connect timeout in seconds
        """
        self.ip_addr = ip_addr
        self.ip_port = ip_port
        self.storage_id = storage_device_id
        self.storage_info = {}
        self.user_id = user_id
        self.user_pass = user_pass
        self.tcp_keepalive = tcp_keepalive
        self.verify = verify
        self.connect_timeout = connect_timeout
        # Serializes re-login attempts across greenthreads.
        self.login_lock = threading.Lock()
        # Periodic task started by enter_keep_session().
        self.keep_session_loop = loopingcall.FixedIntervalLoopingCall(
            self._keep_session)
        self.base_url = _build_base_url(ip_addr, self.ip_port)
        # .../v1/objects/storages/<id>: object (CRUD) endpoints.
        self.object_url = '%(base_url)s/v1/objects/storages/%(storage_id)s' % {
            'base_url': self.base_url,
            'storage_id': self.storage_id,
        }
        # .../v1/<id>/services: service (action) endpoints.
        self.service_url = '%(base_url)s/v1/%(storage_id)s/services' % {
            'base_url': self.base_url,
            'storage_id': self.storage_id,
        }
        self.headers = {"content-type": "application/json",
                        "accept": "application/json"}
    class Session(requests.auth.AuthBase):
        """requests auth hook that signs requests with a session token."""
        def __init__(self, id, token):
            """Store the REST API session id and its token."""
            self.id = id
            self.token = token
        def __call__(self, req):
            # Called by requests for every prepared request: attach the
            # 'Authorization: Session <token>' header.
            req.headers['Authorization'] = 'Session %(token)s' % {
                'token': self.token,
            }
            return req
    def _request(self, method, url, params=None, body=None,
                 async_=False, **kwargs):
        """Transmit the request to REST API server.

        Retries transparently on retriable failures (server busy, lock
        conflicts, expired sessions) until the applicable timeout, then
        returns ``(rsp_body, errobj)``. ``async_=True`` marks a job-style
        (POST/PUT/DELETE) request that waits for job completion.
        Raises utils.HBSDError when the server is unreachable or, via
        _check_rest_api_response, on fatal API errors.
        """
        # Fill in the keyword protocol shared with
        # _check_rest_api_response.
        kwargs.setdefault('ignore_error', [])
        kwargs['no_retry_error'] = (kwargs['ignore_error'] +
                                    REST_NO_RETRY_ERRORS)
        kwargs.setdefault('no_retry', False)
        kwargs.setdefault('do_raise', True)
        kwargs.setdefault('ignore_message_id', [])
        kwargs.setdefault('no_relogin', False)
        kwargs.setdefault('ignore_return_code', [])
        kwargs.setdefault('ignore_all_errors', False)
        kwargs.setdefault('timeout_message', None)
        kwargs.setdefault('no_log', False)
        kwargs.setdefault('timeout', _EXEC_MAX_WAITTIME)
        headers = dict(self.headers)
        if async_:
            # Ask the server to wait for job completion before answering.
            read_timeout = (_JOB_API_RESPONSE_TIMEOUT +
                            _RESPONSE_TIMEOUT_TOLERANCE)
            headers.update({
                "Response-Max-Wait": str(_JOB_API_RESPONSE_TIMEOUT),
                "Response-Job-Status": "Completed;"})
        else:
            read_timeout = _GET_API_RESPONSE_TIMEOUT
        auth_data = kwargs.get('auth', self.get_my_session())
        timeout = (self.connect_timeout, read_timeout)
        interval = kwargs.get('interval', _EXEC_RETRY_INTERVAL)
        retry = True
        start_time = timeutils.utcnow()
        watch = timeutils.StopWatch()
        while retry:
            watch.restart()
            try:
                with requests.Session() as session:
                    if self.tcp_keepalive:
                        session.mount(_HTTPS, TCPKeepAliveAdapter())
                    rsp = session.request(method, url,
                                          params=params,
                                          json=body,
                                          headers=headers,
                                          auth=auth_data,
                                          timeout=timeout,
                                          verify=self.verify)
            except Exception as e:
                msg = utils.output_log(
                    MSG.REST_SERVER_CONNECT_FAILED,
                    exception=type(e), message=e,
                    method=method, url=url, params=params, body=body)
                raise utils.HBSDError(msg)
            response = ResponseData(rsp)
            # Widen the retry window while the REST server restarts (500)
            # or is temporarily unavailable (503).
            if (response['status_code'] == httpclient.INTERNAL_SERVER_ERROR and
                    kwargs['timeout'] < _REST_SERVER_RESTART_TIMEOUT):
                kwargs['timeout'] = _REST_SERVER_RESTART_TIMEOUT
            if (response['status_code'] == httpclient.SERVICE_UNAVAILABLE and
                    kwargs['timeout'] < _REST_SERVER_ERROR_TIMEOUT):
                kwargs['timeout'] = _REST_SERVER_ERROR_TIMEOUT
            retry, rsp_data, errobj = self._check_rest_api_response(
                response, start_time,
                method=method, url=url, params=params, body=body, **kwargs)
            if retry:
                # Keep a fixed pacing between attempts.
                watch.stop()
                idle = max(interval - watch.elapsed(), 0)
                greenthread.sleep(idle)
                if not kwargs['no_relogin'] and response.is_auth_fail():
                    # A re-login happened; pick up the fresh session.
                    auth_data = self.get_my_session()
        return rsp_data, errobj
def _check_rest_api_response(
self, response, start_time, method=None,
url=None, params=None, body=None, **kwargs):
"""Check the response from REST API server."""
rsp_body = response['rsp_body']
errobj = response['errobj']
if response.is_locked():
if (kwargs['no_retry'] or
utils.timed_out(start_time, _LOCK_WAITTIME)):
msg = utils.output_log(MSG.REST_API_FAILED,
no_log=kwargs['no_log'],
method=method, url=url,
params=params, body=body,
**response.get_errobj())
if kwargs['do_raise']:
raise utils.HBSDError(msg, errobj=errobj)
return False, rsp_body, errobj
else:
LOG.debug("The resource group to which the operation object ",
"belongs is being locked by other software.")
return True, rsp_body, errobj
if response.is_success(kwargs['ignore_error'],
kwargs['ignore_message_id'],
kwargs['ignore_return_code'],
kwargs['ignore_all_errors']):
return False, rsp_body, errobj
if (kwargs['no_retry'] and
response['status_code'] != httpclient.INTERNAL_SERVER_ERROR or
response.is_no_retry_error(kwargs['no_retry_error'])):
retry = False
elif response.is_auth_fail():
retry = self.relogin(kwargs['no_relogin'])
else:
retry = True
if retry and response.is_rest_server_busy():
if utils.timed_out(start_time, _REST_SERVER_BUSY_TIMEOUT):
retry = False
elif retry and utils.timed_out(start_time, kwargs['timeout']):
if kwargs['timeout_message']:
utils.output_log(kwargs['timeout_message'][0],
**kwargs['timeout_message'][1])
if response.is_json():
msg = utils.output_log(MSG.REST_API_TIMEOUT,
no_log=kwargs['no_log'],
method=method, url=url,
params=params, body=body,
**response.get_job_result())
if errobj:
msg = utils.output_log(MSG.REST_API_FAILED,
no_log=kwargs['no_log'],
method=method, url=url,
params=params, body=body,
**response.get_errobj())
else:
msg = utils.output_log(MSG.REST_API_HTTP_ERROR,
no_log=kwargs['no_log'],
status_code=response['status_code'],
response_body=rsp_body,
method=method, url=url,
params=params, body=body)
if kwargs['do_raise']:
raise utils.HBSDError(msg, errobj=errobj)
return False, rsp_body, errobj
if errobj:
LOG.debug('ERROR %s', errobj)
else:
LOG.debug('ERROR %s', ' '.join(str(rsp_body).splitlines()))
if not retry:
if response.is_json():
msg = utils.output_log(MSG.REST_API_FAILED,
no_log=kwargs['no_log'],
method=method, url=url,
params=params, body=body,
**response.get_errobj())
else:
msg = utils.output_log(MSG.REST_API_HTTP_ERROR,
no_log=kwargs['no_log'],
status_code=response['status_code'],
response_body=rsp_body,
method=method, url=url,
params=params, body=body)
if kwargs['do_raise']:
raise utils.HBSDError(msg, errobj=errobj)
return retry, rsp_body, errobj
    def set_my_session(self, session):
        """Remember *session* as the credential for subsequent requests."""
        self.session = session
    def get_my_session(self):
        """Return the stored Session object, or None before first login."""
        return getattr(self, 'session', None)
def _login(self, do_raise=True):
"""Establishes a session and manages the session."""
url = '%(url)s/sessions' % {
'url': self.object_url,
}
auth = (self.user_id, self.user_pass)
rsp, err = self._request("POST", url, auth=auth, no_relogin=True,
do_raise=do_raise, timeout=_LOCK_WAITTIME)
if not err:
self.set_my_session(self.Session(rsp["sessionId"], rsp["token"]))
return True
else:
return False
def login(self):
    """Log in to the REST server and keep the resulting session."""
    LOG.debug("Trying to login.")
    return self._login()
def get_session(self, session_id, **kwargs):
    """Return information about the session *session_id*."""
    fmt = {'url': self.object_url, 'id': session_id}
    url = '%(url)s/sessions/%(id)s' % fmt
    return self._get_object(url, **kwargs)
def _has_session(self):
"""Check if there is a session managing."""
has_session = False
try:
session = self.get_my_session()
if session is not None:
self.get_session(session.id, no_retry=True, no_log=True)
has_session = True
except utils.HBSDError as ex:
LOG.debug('Failed to get session info: %s', ex)
return has_session
def relogin(self, no_relogin, no_log=False):
    """Re-establish a session unless re-login is suppressed.

    Returns True when a valid session exists afterwards.
    """
    retry = False
    if no_relogin:
        return retry
    with self.login_lock:
        # Another thread may already have re-established the session.
        retry = self._has_session()
        if not retry:
            LOG.debug("Trying to re-login.")
            retry = self._login(do_raise=False)
        if not retry:
            utils.output_log(
                MSG.REST_LOGIN_FAILED,
                no_log=no_log, user=self.user_id)
    return retry
def _keep_session(self):
    """Periodic-loop body: refresh the session so it does not expire."""
    LOG.debug('_keep_session thread is started')
    try:
        self.relogin(False, no_log=True)
    except Exception as ex:
        # Never let the keep-alive loop die; only record the failure.
        LOG.debug(
            'relogin() in _keep_session() failed. %s', ex)
def enter_keep_session(self):
    """Begin the keeping of a session."""
    # Starts the periodic loop that calls _keep_session() so the REST
    # session is refreshed before the server expires it.
    self.keep_session_loop.start(_KEEP_SESSION_LOOP_INTERVAL)
    LOG.debug('enter_keep_session')
def _get_object(self, url, params=None, **kwargs):
"""Transmit a GET request that appointed object ID."""
rsp = self._request("GET", url, params=params, **kwargs)[0]
return rsp if rsp else None
def _get_objects(self, url, params=None, **kwargs):
"""Transmit a GET request."""
rsp = self._request("GET", url, params=params, **kwargs)[0]
return rsp.get("data") if rsp else None
def _add_object(self, url, body, **kwargs):
"""Transmit a POST request."""
rsp, errobj = self._request(
"POST", url, body=body, async_=True, **kwargs)
if not rsp:
return None, errobj
resources = rsp.get('affectedResources')
if resources:
return resources[0].split('/')[-1], errobj
return None, errobj
def _delete_object(self, url, params=None, body=None, **kwargs):
"""Transmit a DELETE request."""
self._request("DELETE", url, params=params, body=body, async_=True,
**kwargs)
def _invoke(self, url, body=None, **kwargs):
"""Transmit a PUT request."""
self._request("PUT", url, body=body, async_=True, **kwargs)
def get_pools(self, params=None):
    """Return the list of pools on the storage."""
    url = '%(url)s/pools' % {'url': self.object_url}
    return self._get_objects(url, params=params)
def get_pool(self, pool_id, **kwargs):
    """Return information about a single pool."""
    fmt = {'url': self.object_url, 'id': pool_id}
    return self._get_object('%(url)s/pools/%(id)s' % fmt, **kwargs)
def get_ldev(self, ldev_id, **kwargs):
    """Return information about a single LDEV."""
    fmt = {'url': self.object_url, 'id': ldev_id}
    return self._get_object('%(url)s/ldevs/%(id)s' % fmt, **kwargs)
def get_ldevs(self, params=None, **kwargs):
    """Return a list of LDEV information."""
    url = '%(url)s/ldevs' % {'url': self.object_url}
    return self._get_objects(url, params=params, **kwargs)
def add_ldev(self, body, **kwargs):
    """Create an LDEV; return its ID as an int, or None on failure."""
    url = '%(url)s/ldevs' % {'url': self.object_url}
    ldev_id = self._add_object(url, body=body, **kwargs)[0]
    if not ldev_id:
        return None
    return int(ldev_id)
def delete_ldev(self, ldev_id, body=None, **kwargs):
    """Delete the specified LDEV."""
    fmt = {'url': self.object_url, 'id': ldev_id}
    self._delete_object('%(url)s/ldevs/%(id)s' % fmt, body=body, **kwargs)
def modify_ldev(self, ldev_id, body):
    """Change attributes of the specified LDEV."""
    fmt = {'url': self.object_url, 'id': ldev_id}
    self._invoke('%(url)s/ldevs/%(id)s' % fmt, body=body)
def extend_ldev(self, ldev_id, body):
    """Grow the capacity of an LDEV via the "expand" action."""
    fmt = {'url': self.object_url, 'id': ldev_id, 'action': 'expand'}
    url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % fmt
    # Extension can take longer than a normal request, so use the
    # dedicated extend timeout.
    self._invoke(url, body=body, timeout=_EXTEND_WAITTIME)
def get_ports(self, params=None):
    """Return a list of storage port information."""
    url = '%(url)s/ports' % {'url': self.object_url}
    return self._get_objects(url, params=params)
def get_port(self, port_id):
    """Return information about one storage port."""
    fmt = {'url': self.object_url, 'id': port_id}
    return self._get_object('%(url)s/ports/%(id)s' % fmt)
def get_host_grps(self, params=None):
    """Return a list of host group information."""
    url = '%(url)s/host-groups' % {'url': self.object_url}
    return self._get_objects(url, params=params)
def get_host_grp(self, port_id, host_group_number):
    """Return one host group, identified by port and group number."""
    fmt = {'url': self.object_url, 'port': port_id,
           'number': host_group_number}
    return self._get_object('%(url)s/host-groups/%(port)s,%(number)d' % fmt)
def add_host_grp(self, body, **kwargs):
    """Create a host group; return its group number, or None on failure."""
    url = '%(url)s/host-groups' % {'url': self.object_url}
    host_group_id = self._add_object(url, body=body, **kwargs)[0]
    if not host_group_id:
        return None
    # The resource ID comes back as "<port>,<group number>".
    return int(host_group_id.split(',')[-1])
def delete_host_grp(self, port_id, host_group_number):
    """Delete the specified host group."""
    fmt = {'url': self.object_url, 'port': port_id,
           'number': host_group_number}
    self._delete_object('%(url)s/host-groups/%(port)s,%(number)d' % fmt)
def modify_host_grp(self, port_id, host_group_number, body, **kwargs):
    """Modify host group settings (host mode, mode options, ...)."""
    fmt = {'url': self.object_url, 'port': port_id,
           'number': host_group_number}
    url = '%(url)s/host-groups/%(port)s,%(number)d' % fmt
    self._invoke(url, body=body, **kwargs)
def get_hba_wwns(self, port_id, host_group_number):
    """Return the WWNs registered in the given host group."""
    url = '%(url)s/host-wwns' % {'url': self.object_url}
    query = {"portId": port_id, "hostGroupNumber": host_group_number}
    return self._get_objects(url, params=query)
def get_hba_wwns_by_name(self, port_id, host_group_name):
    """Return the WWNs registered in the host group of the given name."""
    url = '%(url)s/host-wwns' % {'url': self.object_url}
    query = {"portId": port_id, "hostGroupName": host_group_name}
    return self._get_objects(url, params=query)
def add_hba_wwn(self, port_id, host_group_number, host_wwn, **kwargs):
    """Register a WWN in a host group; return the created resource ID."""
    url = '%(url)s/host-wwns' % {'url': self.object_url}
    payload = {"hostWwn": host_wwn, "portId": port_id,
               "hostGroupNumber": host_group_number}
    return self._add_object(url, body=payload, **kwargs)[0]
def get_hba_iscsis(self, port_id, host_group_number):
    """Return the iSCSI initiator names registered in the target."""
    url = '%(url)s/host-iscsis' % {'url': self.object_url}
    query = {"portId": port_id, "hostGroupNumber": host_group_number}
    return self._get_objects(url, params=query)
def get_hba_iscsis_by_name(self, port_id, host_group_name):
    """Return the iSCSI initiator names in the target of the given name."""
    url = '%(url)s/host-iscsis' % {'url': self.object_url}
    query = {"portId": port_id, "hostGroupName": host_group_name}
    return self._get_objects(url, params=query)
def add_hba_iscsi(self, port_id, host_group_number, iscsi_name):
    """Register an iSCSI initiator name; return the created resource ID."""
    url = '%(url)s/host-iscsis' % {'url': self.object_url}
    payload = {"iscsiName": iscsi_name, "portId": port_id,
               "hostGroupNumber": host_group_number}
    return self._add_object(url, body=payload)[0]
def get_luns(self, port_id, host_group_number,
             is_basic_lun_information=False):
    """Return LUN mappings of the given host group."""
    url = '%(url)s/luns' % {'url': self.object_url}
    query = {"portId": port_id, "hostGroupNumber": host_group_number,
             "isBasicLunInformation": is_basic_lun_information}
    return self._get_objects(url, params=query)
def add_lun(self, port_id, host_group_number, ldev_id, lun=None, **kwargs):
    """Map an LDEV to a host group; return (LUN or None, error object)."""
    url = '%(url)s/luns' % {'url': self.object_url}
    payload = {"portId": port_id, "hostGroupNumber": host_group_number,
               "ldevId": ldev_id}
    if lun is not None:
        payload['lun'] = lun
    lun_id, errobj = self._add_object(url, body=payload, **kwargs)
    if not lun_id:
        return None, errobj
    # The resource ID comes back as "<port>,<gid>,<lun>"; keep the LUN.
    return int(lun_id.split(',')[-1]), errobj
def delete_lun(self, port_id, host_group_number, lun, **kwargs):
    """Delete one LUN mapping from a host group."""
    fmt = {'url': self.object_url, 'port': port_id,
           'number': host_group_number, 'lun': lun}
    url = '%(url)s/luns/%(port)s,%(number)s,%(lun)d' % fmt
    self._delete_object(url, **kwargs)
def get_snapshots(self, params=None):
    """Return a list of snapshot information."""
    url = '%(url)s/snapshots' % {'url': self.object_url}
    return self._get_objects(url, params=params)
def add_snapshot(self, body, **kwargs):
    """Create a snapshot; return the new resource ID."""
    url = '%(url)s/snapshots' % {'url': self.object_url}
    return self._add_object(url, body=body, **kwargs)[0]
def delete_snapshot(self, pvol_ldev_id, mu_number, **kwargs):
    """Delete a snapshot, identified by P-VOL LDEV and MU number."""
    fmt = {'url': self.object_url, 'pvol': pvol_ldev_id, 'mu': mu_number}
    self._delete_object('%(url)s/snapshots/%(pvol)d,%(mu)d' % fmt, **kwargs)
def unassign_snapshot_volume(self, pvol_ldev_id, mu_number, **kwargs):
    """Invoke the "unassign-volume" action on a snapshot."""
    fmt = {'url': self.object_url, 'pvol': pvol_ldev_id,
           'mu': mu_number, 'action': 'unassign-volume'}
    url = '%(url)s/snapshots/%(pvol)d,%(mu)d/actions/%(action)s/invoke' % fmt
    self._invoke(url, **kwargs)
def restore_snapshot(self, pvol_ldev_id, mu_number, body=None):
    """Invoke the "restore" action on a snapshot."""
    fmt = {'url': self.object_url, 'pvol': pvol_ldev_id,
           'mu': mu_number, 'action': 'restore'}
    url = '%(url)s/snapshots/%(pvol)d,%(mu)d/actions/%(action)s/invoke' % fmt
    self._invoke(url, body=body)
def discard_zero_page(self, ldev_id):
    """Return the LDEV's no-data pages to the storage pool."""
    fmt = {'url': self.object_url, 'id': ldev_id,
           'action': 'discard-zero-page'}
    url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % fmt
    self._invoke(url)

View File

@ -0,0 +1,256 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""REST interface fibre channel module for Hitachi HBSD Driver."""
from oslo_log import log as logging
from cinder.volume.drivers.hitachi import hbsd_rest as rest
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.zonemanager import utils as fczm_utils
# Host mode option number applied to host groups in set_target_mode().
_FC_HMO_DISABLE_IO = 91

LOG = logging.getLogger(__name__)
# Shorthand for the driver's message catalogue.
MSG = utils.HBSDMsg
class HBSDRESTFC(rest.HBSDREST):
    """REST interface fibre channel class for Hitachi HBSD Driver."""

    def __init__(self, conf, storage_protocol, db):
        """Initialize instance variables."""
        super(HBSDRESTFC, self).__init__(conf, storage_protocol, db)
        # FC SAN lookup service used to build initiator-target maps;
        # may be None when no zone manager is configured.
        self._lookup_service = fczm_utils.create_lookup_service()

    def connect_storage(self):
        """Prepare for using the storage.

        Collects the WWN of every usable configured target/compute port
        and logs a warning for each configured port that cannot be used.
        """
        target_ports = self.conf.hitachi_target_ports
        compute_target_ports = self.conf.hitachi_compute_target_ports
        available_ports = []
        available_compute_ports = []

        super(HBSDRESTFC, self).connect_storage()
        # The port attributes must contain TAR.
        params = {'portAttributes': 'TAR'}
        port_list = self.client.get_ports(params=params)
        # Any configured port missing from the filtered list lacks the
        # TAR attribute; warn about it.
        for port in set(target_ports + compute_target_ports):
            if port not in [port_data['portId'] for port_data in port_list]:
                utils.output_log(MSG.INVALID_PORT, port=port,
                                 additional_info='portAttributes: not TAR')
        for port_data in port_list:
            port = port_data['portId']
            if port not in set(target_ports + compute_target_ports):
                continue
            secure_fc_port = True
            # Only FC/FCoE ports with LUN security enabled are usable.
            if (port_data['portType'] not in ['FIBRE', 'FCoE'] or
                    not port_data['lunSecuritySetting']):
                secure_fc_port = False
            if not secure_fc_port:
                utils.output_log(
                    MSG.INVALID_PORT, port=port,
                    additional_info='portType: %s, lunSecuritySetting: %s, '
                    'fabricMode: %s, portConnection: %s' %
                    (port_data['portType'],
                     port_data.get('lunSecuritySetting'),
                     port_data.get('fabricMode'),
                     port_data.get('portConnection')))
            if not secure_fc_port:
                continue
            wwn = port_data.get('wwn')
            if target_ports and port in target_ports:
                available_ports.append(port)
                self.storage_info['wwns'][port] = wwn
            if compute_target_ports and port in compute_target_ports:
                available_compute_ports.append(port)
                self.storage_info['wwns'][port] = wwn
        # Keep the ports in the order they were configured.
        if target_ports:
            for port in target_ports:
                if port in available_ports:
                    self.storage_info['controller_ports'].append(port)
        if compute_target_ports:
            for port in compute_target_ports:
                if port in available_compute_ports:
                    self.storage_info['compute_ports'].append(port)

        self.check_ports_info()
        utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
                         value=self.storage_info['wwns'])

    def create_target_to_storage(self, port, connector, hba_ids):
        """Create a host group on the specified port.

        Returns a (host group name, host group number) tuple.
        """
        # NOTE(review): hba_ids is ignored; the WWPNs are re-derived from
        # the connector here -- confirm this is intentional.
        wwpns = self.get_hba_ids_from_connector(connector)
        # The smallest WWPN makes the host group name deterministic.
        target_name = '%(prefix)s-%(wwpns)s' % {
            'prefix': utils.DRIVER_PREFIX,
            'wwpns': min(wwpns),
        }
        try:
            body = {'portId': port,
                    'hostGroupName': target_name}
            gid = self.client.add_host_grp(body, no_log=True)
        except Exception:
            # Creation can fail because the host group already exists;
            # fall back to looking it up by name before re-raising.
            params = {'portId': port}
            host_grp_list = self.client.get_host_grps(params)
            for host_grp_data in host_grp_list:
                if host_grp_data['hostGroupName'] == target_name:
                    return target_name, host_grp_data['hostGroupNumber']
            raise
        return target_name, gid

    def set_hba_ids(self, port, gid, hba_ids):
        """Connect all specified HBAs with the specified port.

        Raises HBSDError only when none of the WWNs could be registered.
        """
        registered_wwns = []
        for wwn in hba_ids:
            try:
                self.client.add_hba_wwn(port, gid, wwn, no_log=True)
                registered_wwns.append(wwn)
            except utils.HBSDError:
                # A single failing WWN is tolerated; it is only logged.
                utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
                                 wwn=wwn)
        if not registered_wwns:
            msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
                                   gid=gid)
            raise utils.HBSDError(msg)

    def set_target_mode(self, port, gid):
        """Configure the host group to meet the environment."""
        body = {'hostMode': 'LINUX/IRIX',
                'hostModeOptions': [_FC_HMO_DISABLE_IO]}
        # Errors are ignored: a partially configured mode is not fatal.
        self.client.modify_host_grp(port, gid, body, ignore_all_errors=True)

    def _get_hwwns_in_hostgroup(self, port, gid, wwpns):
        """Return WWNs from *wwpns* that are registered with the host group."""
        hwwns_in_hostgroup = []
        for hba_wwn in self.client.get_hba_wwns(port, gid):
            hwwn = hba_wwn['hostWwn']
            if hwwn in wwpns:
                hwwns_in_hostgroup.append(hwwn)
        return hwwns_in_hostgroup

    def _set_target_info(self, targets, host_grps, wwpns):
        """Set the information of the host group having the specified WWN.

        Stops at the first host group containing any of *wwpns*; returns
        True if one was found.
        """
        for host_grp in host_grps:
            port = host_grp['portId']
            gid = host_grp['hostGroupNumber']
            hwwns_in_hostgroup = self._get_hwwns_in_hostgroup(port, gid, wwpns)
            if hwwns_in_hostgroup:
                targets['info'][port] = True
                targets['list'].append((port, gid))
                LOG.debug(
                    'Found wwpns in host group. (port: %(port)s, '
                    'gid: %(gid)s, wwpns: %(wwpns)s)',
                    {'port': port, 'gid': gid, 'wwpns': hwwns_in_hostgroup})
                return True
        return False

    def _get_hwwns_in_hostgroup_by_name(self, port, host_group_name, wwpns):
        """Return WWN registered with the host group of the specified name."""
        hba_wwns = self.client.get_hba_wwns_by_name(port, host_group_name)
        return [hba_wwn for hba_wwn in hba_wwns if hba_wwn['hostWwn'] in wwpns]

    def _set_target_info_by_names(self, targets, port, target_names, wwpns):
        """Set the information of the host group having the specified name and

        the specified WWN.
        """
        for target_name in target_names:
            hwwns_in_hostgroup = self._get_hwwns_in_hostgroup_by_name(
                port, target_name, wwpns)
            if hwwns_in_hostgroup:
                gid = hwwns_in_hostgroup[0]['hostGroupNumber']
                targets['info'][port] = True
                targets['list'].append((port, gid))
                LOG.debug(
                    'Found wwpns in host group. (port: %(port)s, '
                    'gid: %(gid)s, wwpns: %(wwpns)s)',
                    {'port': port, 'gid': gid, 'wwpns':
                     [hwwn['hostWwn'] for hwwn in hwwns_in_hostgroup]})
                return True
        return False

    def find_targets_from_storage(
            self, targets, connector, target_ports):
        """Find mapped ports, memorize them and return unmapped port count."""
        wwpns = self.get_hba_ids_from_connector(connector)
        # Candidate driver-generated host group names: WWPN-based, and
        # (when the connector has an IP) the iSCSI-style IP-based name.
        target_names = [
            '%(prefix)s-%(wwpns)s' % {
                'prefix': utils.DRIVER_PREFIX,
                'wwpns': min(wwpns),
            }
        ]
        if 'ip' in connector:
            target_names.append(
                '%(prefix)s-%(ip)s' % {
                    'prefix': utils.DRIVER_PREFIX,
                    'ip': connector['ip'],
                }
            )
        not_found_count = 0
        for port in target_ports:
            targets['info'][port] = False
            # Fast path: look up by the expected host group names first.
            if self._set_target_info_by_names(
                    targets, port, target_names, wwpns):
                continue
            # Slow path: scan all other host groups on the port.
            host_grps = self.client.get_host_grps({'portId': port})
            if self._set_target_info(
                    targets, [hg for hg in host_grps if hg['hostGroupName']
                              not in target_names], wwpns):
                pass
            else:
                not_found_count += 1
        return not_found_count

    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        conn_info = super(HBSDRESTFC, self).initialize_connection(
            volume, connector)
        # Build the zoning map only when FC zoning is requested by config.
        if self.conf.hitachi_zoning_request:
            init_targ_map = utils.build_initiator_target_map(
                connector, conn_info['data']['target_wwn'],
                self._lookup_service)
            if init_targ_map:
                conn_info['data']['initiator_target_map'] = init_targ_map
            fczm_utils.add_fc_zone(conn_info)
        return conn_info

    def terminate_connection(self, volume, connector):
        """Terminate connection between the server and the volume."""
        conn_info = super(HBSDRESTFC, self).terminate_connection(
            volume, connector)
        if self.conf.hitachi_zoning_request:
            if conn_info and conn_info['data']['target_wwn']:
                init_targ_map = utils.build_initiator_target_map(
                    connector, conn_info['data']['target_wwn'],
                    self._lookup_service)
                if init_targ_map:
                    conn_info['data']['initiator_target_map'] = init_targ_map
            # Ask the zone manager to remove the zone even when no map
            # could be built.
            fczm_utils.remove_fc_zone(conn_info)
        return conn_info

    def _get_wwpns(self, port, hostgroup):
        """Get WWPN from a port and the host group."""
        wwpns = []
        hba_wwns = self.client.get_hba_wwns_by_name(port, hostgroup)
        for hba_wwn in hba_wwns:
            wwpns.append(hba_wwn['hostWwn'])
        return wwpns

    def set_terminate_target(self, fake_connector, port_hostgroup_map):
        """Set necessary information in connector in terminate.

        Populates fake_connector['wwpns'] with the union of all WWPNs
        registered in the given host groups.
        """
        wwpns = set()
        for port, hostgroups in port_hostgroup_map.items():
            for hostgroup in hostgroups:
                wwpns.update(self._get_wwpns(port, hostgroup))
        fake_connector['wwpns'] = list(wwpns)

View File

@ -0,0 +1,234 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""REST interface iSCSI module for Hitachi HBSD Driver."""
from oslo_log import log as logging
from cinder.volume.drivers.hitachi import hbsd_rest as rest
from cinder.volume.drivers.hitachi import hbsd_utils as utils
# Host mode option numbers applied to iSCSI targets in set_target_mode().
_ISCSI_HMO_REPORT_FULL_PORTAL = 83
_ISCSI_HMO_DISABLE_IO = 91

LOG = logging.getLogger(__name__)
# Shorthand for the driver's message catalogue.
MSG = utils.HBSDMsg
class HBSDRESTISCSI(rest.HBSDREST):
    """REST interface iscsi class for Hitachi HBSD Driver."""

    def _set_target_portal(self, port):
        """Get port info and store it in an instance variable.

        Returns (success, ipv4 address, tcp port); on success the portal
        string "ip:port" is stored in storage_info['portals'].
        """
        result = self.client.get_port(port)
        ipv4_addr = result.get('ipv4Address')
        tcp_port = result.get('tcpPort')
        if not ipv4_addr or not tcp_port:
            # Without both values the port cannot serve as an iSCSI portal.
            return False, ipv4_addr, tcp_port
        self.storage_info['portals'][port] = '%(ip)s:%(port)s' % {
            'ip': ipv4_addr,
            'port': tcp_port,
        }
        return True, ipv4_addr, tcp_port

    def connect_storage(self):
        """Prepare for using the storage.

        Records the portal address of every usable configured port and
        logs a warning for each configured port that cannot be used.
        """
        target_ports = self.conf.hitachi_target_ports
        compute_target_ports = self.conf.hitachi_compute_target_ports

        super(HBSDRESTISCSI, self).connect_storage()
        # The port type must be ISCSI and the port attributes must contain TAR.
        params = {'portType': 'ISCSI',
                  'portAttributes': 'TAR'}
        port_list = self.client.get_ports(params=params)
        # Any configured port missing from the filtered list is unusable.
        for port in set(target_ports + compute_target_ports):
            if port not in [port_data['portId'] for port_data in port_list]:
                utils.output_log(
                    MSG.INVALID_PORT, port=port, additional_info='(portType, '
                    'portAttributes): not (ISCSI, TAR)')
        for port_data in port_list:
            port = port_data['portId']
            if port not in set(target_ports + compute_target_ports):
                continue
            has_addr = True
            if not port_data['lunSecuritySetting']:
                addr_info = ""
            elif port in set(target_ports + compute_target_ports):
                # Record the portal address; failure details feed the
                # INVALID_PORT message below.
                has_addr, ipv4_addr, tcp_port = self._set_target_portal(port)
                if not has_addr:
                    addr_info = (', ipv4Address: %s, tcpPort: %s' %
                                 (ipv4_addr, tcp_port))
            if not port_data['lunSecuritySetting'] or not has_addr:
                utils.output_log(
                    MSG.INVALID_PORT, port=port,
                    additional_info='portType: %s, lunSecuritySetting: %s%s' %
                    (port_data['portType'], port_data['lunSecuritySetting'],
                     addr_info))
            if not port_data['lunSecuritySetting']:
                continue
            if target_ports and port in target_ports and has_addr:
                self.storage_info['controller_ports'].append(port)
            if (compute_target_ports and port in compute_target_ports and
                    has_addr):
                self.storage_info['compute_ports'].append(port)

        self.check_ports_info()
        utils.output_log(MSG.SET_CONFIG_VALUE,
                         object='port-<IP address:port> list',
                         value=self.storage_info['portals'])

    def create_target_to_storage(self, port, connector, hba_ids):
        """Create an iSCSI target on the specified port.

        Returns a (target name, host group number) tuple.
        """
        target_name = '%(prefix)s-%(ip)s' % {
            'prefix': utils.DRIVER_PREFIX,
            'ip': connector['ip'],
        }
        body = {'portId': port, 'hostGroupName': target_name}
        if hba_ids:
            body['iscsiName'] = '%(id)s%(suffix)s' % {
                'id': hba_ids,
                'suffix': utils.TARGET_IQN_SUFFIX,
            }
        try:
            gid = self.client.add_host_grp(body, no_log=True)
        except Exception:
            # Creation can fail because the target already exists; fall
            # back to looking it up by name.
            params = {'portId': port}
            host_grp_list = self.client.get_host_grps(params)
            for host_grp_data in host_grp_list:
                if host_grp_data['hostGroupName'] == target_name:
                    return target_name, host_grp_data['hostGroupNumber']
            else:
                # for-else: reached only when no existing target matched.
                raise
        return target_name, gid

    def set_hba_ids(self, port, gid, hba_ids):
        """Connect the specified HBA with the specified port."""
        self.client.add_hba_iscsi(port, gid, hba_ids)

    def set_target_mode(self, port, gid):
        """Configure the iSCSI target to meet the environment."""
        body = {'hostMode': 'LINUX/IRIX',
                'hostModeOptions': [_ISCSI_HMO_REPORT_FULL_PORTAL,
                                    _ISCSI_HMO_DISABLE_IO]}
        self.client.modify_host_grp(port, gid, body)

    def _is_host_iqn_registered_in_target(self, port, gid, host_iqn):
        """Check if the specified IQN is registered with iSCSI target."""
        for hba_iscsi in self.client.get_hba_iscsis(port, gid):
            if host_iqn == hba_iscsi['iscsiName']:
                return True
        return False

    def _set_target_info(self, targets, host_grps, iqn):
        """Set the information of the iSCSI target having the specified IQN.

        Stops at the first matching target; returns True if one was found.
        """
        for host_grp in host_grps:
            port = host_grp['portId']
            gid = host_grp['hostGroupNumber']
            storage_iqn = host_grp['iscsiName']
            if self._is_host_iqn_registered_in_target(port, gid, iqn):
                targets['info'][port] = True
                targets['list'].append((port, gid))
                targets['iqns'][(port, gid)] = storage_iqn
                return True
        return False

    def _get_host_iqn_registered_in_target_by_name(
            self, port, target_name, host_iqn):
        """Get the information of the iSCSI target having the specified name

        and the specified IQN.
        """
        for hba_iscsi in self.client.get_hba_iscsis_by_name(port, target_name):
            if host_iqn == hba_iscsi['iscsiName']:
                return hba_iscsi
        return None

    def _set_target_info_by_name(self, targets, port, target_name, iqn):
        """Set the information of the iSCSI target having the specified name

        and the specified IQN.
        """
        host_iqn_registered_in_target = (
            self._get_host_iqn_registered_in_target_by_name(
                port, target_name, iqn))
        if host_iqn_registered_in_target:
            gid = host_iqn_registered_in_target['hostGroupNumber']
            storage_iqn = self.client.get_host_grp(port, gid)['iscsiName']
            targets['info'][port] = True
            targets['list'].append((port, gid))
            targets['iqns'][(port, gid)] = storage_iqn
            return True
        return False

    def find_targets_from_storage(self, targets, connector, target_ports):
        """Find mapped ports, memorize them and return unmapped port count."""
        iqn = self.get_hba_ids_from_connector(connector)
        not_found_count = 0
        for port in target_ports:
            targets['info'][port] = False
            # Fast path: look up the driver-generated IP-based target name.
            if 'ip' in connector:
                target_name = '%(prefix)s-%(ip)s' % {
                    'prefix': utils.DRIVER_PREFIX,
                    'ip': connector['ip'],
                }
                if self._set_target_info_by_name(
                        targets, port, target_name, iqn):
                    continue
            # Slow path: scan the remaining targets on the port.
            host_grps = self.client.get_host_grps({'portId': port})
            if 'ip' in connector:
                host_grps = [hg for hg in host_grps
                             if hg['hostGroupName'] != target_name]
            if self._set_target_info(targets, host_grps, iqn):
                pass
            else:
                not_found_count += 1
        return not_found_count

    def get_properties_iscsi(self, targets, multipath):
        """Return iSCSI-specific server-LDEV connection info."""
        if not multipath:
            # Single path: only the first target is used.
            target_list = targets['list'][:1]
        else:
            target_list = targets['list'][:]
        for target in target_list:
            if target not in targets['iqns']:
                port, gid = target
                target_info = self.client.get_host_grp(port, gid)
                iqn = target_info.get('iscsiName') if target_info else None
                if not iqn:
                    msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
                                           resource='Target IQN')
                    raise utils.HBSDError(msg)
                targets['iqns'][target] = iqn
                LOG.debug(
                    'Found target iqn of host group. (port: %(port)s, '
                    'gid: %(gid)s, target iqn: %(iqn)s)',
                    {'port': port, 'gid': gid, 'iqn': iqn})
        return super(HBSDRESTISCSI, self).get_properties_iscsi(
            targets, multipath)

    def _get_iqn(self, port, hostgroup):
        """Get IQN from a port and the ISCSI target."""
        # NOTE(review): assumes at least one initiator is registered in
        # the target; an empty list would raise IndexError -- confirm.
        hba_iscsis = self.client.get_hba_iscsis_by_name(port, hostgroup)
        return hba_iscsis[0]['iscsiName']

    def set_terminate_target(self, fake_connector, port_hostgroup_map):
        """Set necessary information in connector in terminate.

        Uses the first IQN found in the mapped targets as the fake
        connector's initiator name.
        """
        for port, hostgroups in port_hostgroup_map.items():
            for hostgroup in hostgroups:
                iqn = self._get_iqn(port, hostgroup)
                if iqn:
                    fake_connector['initiator'] = iqn
                    return

View File

@ -0,0 +1,528 @@
# Copyright (C) 2020, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Utility module for Hitachi HBSD Driver."""
import enum
import logging as base_logging
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
# Package that contains the protocol-specific driver implementations.
_DRIVER_DIR = 'cinder.volume.drivers.hitachi'

# Interface type -> protocol -> class path (relative to _DRIVER_DIR),
# used to select the concrete implementation at runtime.
_DRIVERS = {
    'REST': {
        'FC': 'hbsd_rest_fc.HBSDRESTFC',
        'iSCSI': 'hbsd_rest_iscsi.HBSDRESTISCSI',
    },
}

# Prefixes/suffixes used to build driver-owned object names on the array.
DRIVER_PREFIX = 'HBSD'
TARGET_PREFIX = 'HBSD-'
TARGET_IQN_SUFFIX = '.hbsd-target'

# Number of 512-byte blocks per GiB.
GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512

# Wait limits (seconds) for storage operations.
MAX_PROCESS_WAITTIME = 24 * 60 * 60
DEFAULT_PROCESS_WAITTIME = 15 * 60

# LDEV type labels as reported/used by the driver.
NORMAL_LDEV_TYPE = 'Normal'
NVOL_LDEV_TYPE = 'DP-VOL'

# Message-ID suffixes denoting severity in HBSDMsg entries.
INFO_SUFFIX = 'I'
WARNING_SUFFIX = 'W'
ERROR_SUFFIX = 'E'

PORT_ID_LENGTH = 5
class HBSDError(exception.VolumeDriverException):
    """Base exception raised by the Hitachi HBSD driver."""

    message = _("HBSD error occurred. %(message)s")
class HBSDBusy(HBSDError):
    """HBSD error indicating that a device or resource is busy."""

    message = _("Device or resource is busy.")
@enum.unique
class HBSDMsg(enum.Enum):
"""messages for Hitachi HBSD Driver."""
DRIVER_INITIALIZATION_START = {
'msg_id': 4,
'loglevel': base_logging.INFO,
'msg': 'Initialization of %(driver)s %(version)s started.',
'suffix': INFO_SUFFIX,
}
SET_CONFIG_VALUE = {
'msg_id': 5,
'loglevel': base_logging.INFO,
'msg': 'Set %(object)s to %(value)s.',
'suffix': INFO_SUFFIX,
}
OBJECT_CREATED = {
'msg_id': 6,
'loglevel': base_logging.INFO,
'msg': 'Created %(object)s. (%(details)s)',
'suffix': INFO_SUFFIX,
}
NO_LUN = {
'msg_id': 301,
'loglevel': base_logging.WARNING,
'msg': 'A LUN (HLUN) was not found. (LDEV: %(ldev)s)',
'suffix': WARNING_SUFFIX,
}
INVALID_LDEV_FOR_UNMAPPING = {
'msg_id': 302,
'loglevel': base_logging.WARNING,
'msg': 'Failed to specify a logical device for the volume '
'%(volume_id)s to be unmapped.',
'suffix': WARNING_SUFFIX,
}
INVALID_LDEV_FOR_DELETION = {
'msg_id': 304,
'loglevel': base_logging.WARNING,
'msg': 'Failed to specify a logical device to be deleted. '
'(method: %(method)s, id: %(id)s)',
'suffix': WARNING_SUFFIX,
}
DELETE_TARGET_FAILED = {
'msg_id': 306,
'loglevel': base_logging.WARNING,
'msg': 'A host group or an iSCSI target could not be deleted. '
'(port: %(port)s, gid: %(id)s)',
'suffix': WARNING_SUFFIX,
}
CREATE_HOST_GROUP_FAILED = {
'msg_id': 308,
'loglevel': base_logging.WARNING,
'msg': 'A host group could not be added. (port: %(port)s)',
'suffix': WARNING_SUFFIX,
}
CREATE_ISCSI_TARGET_FAILED = {
'msg_id': 309,
'loglevel': base_logging.WARNING,
'msg': 'An iSCSI target could not be added. (port: %(port)s)',
'suffix': WARNING_SUFFIX,
}
UNMAP_LDEV_FAILED = {
'msg_id': 310,
'loglevel': base_logging.WARNING,
'msg': 'Failed to unmap a logical device. (LDEV: %(ldev)s)',
'suffix': WARNING_SUFFIX,
}
DELETE_LDEV_FAILED = {
'msg_id': 313,
'loglevel': base_logging.WARNING,
'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)',
'suffix': WARNING_SUFFIX,
}
MAP_LDEV_FAILED = {
'msg_id': 314,
'loglevel': base_logging.WARNING,
'msg': 'Failed to map a logical device. (LDEV: %(ldev)s, port: '
'%(port)s, id: %(id)s, lun: %(lun)s)',
'suffix': WARNING_SUFFIX,
}
DISCARD_ZERO_PAGE_FAILED = {
'msg_id': 315,
'loglevel': base_logging.WARNING,
'msg': 'Failed to perform a zero-page reclamation. (LDEV: '
'%(ldev)s)',
'suffix': WARNING_SUFFIX,
}
ADD_HBA_WWN_FAILED = {
'msg_id': 317,
'loglevel': base_logging.WARNING,
'msg': 'Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, '
'wwn: %(wwn)s)',
'suffix': WARNING_SUFFIX,
}
LDEV_NOT_EXIST = {
'msg_id': 319,
'loglevel': base_logging.WARNING,
'msg': 'The logical device does not exist in the storage system. '
'(LDEV: %(ldev)s)',
'suffix': WARNING_SUFFIX,
}
REST_LOGIN_FAILED = {
'msg_id': 321,
'loglevel': base_logging.WARNING,
'msg': 'Failed to perform user authentication of the REST API server. '
'(user: %(user)s)',
'suffix': WARNING_SUFFIX,
}
DELETE_PAIR_FAILED = {
'msg_id': 325,
'loglevel': base_logging.WARNING,
'msg': 'Failed to delete copy pair. (P-VOL: %(pvol)s, S-VOL: '
'%(svol)s)',
'suffix': WARNING_SUFFIX,
}
DISCONNECT_VOLUME_FAILED = {
'msg_id': 329,
'loglevel': base_logging.WARNING,
'msg': 'Failed to detach the logical device. (LDEV: %(ldev)s, '
'reason: %(reason)s)',
'suffix': WARNING_SUFFIX,
}
INVALID_PORT = {
'msg_id': 339,
'loglevel': base_logging.WARNING,
'msg': 'Port %(port)s will not be used because its settings are '
'invalid. (%(additional_info)s)',
'suffix': WARNING_SUFFIX,
}
# --- Error-level messages (msg_id 6xx: storage operations) ---
# Logged at ERROR level with the ERROR_SUFFIX tag; callers typically raise
# HBSDError with the returned message text.
STORAGE_COMMAND_FAILED = {
    'msg_id': 600,
    'loglevel': base_logging.ERROR,
    'msg': 'The command %(cmd)s failed. (ret: %(ret)s, stdout: '
           '%(out)s, stderr: %(err)s)',
    'suffix': ERROR_SUFFIX,
}
INVALID_PARAMETER = {
    'msg_id': 601,
    'loglevel': base_logging.ERROR,
    'msg': 'A parameter is invalid. (%(param)s)',
    'suffix': ERROR_SUFFIX,
}
PAIR_STATUS_WAIT_TIMEOUT = {
    'msg_id': 611,
    'loglevel': base_logging.ERROR,
    'msg': 'The status change of copy pair could not be '
           'completed. (S-VOL: %(svol)s)',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_STATUS_FOR_COPY = {
    'msg_id': 612,
    'loglevel': base_logging.ERROR,
    'msg': 'The source logical device to be replicated does not exist '
           'in the storage system. (LDEV: %(ldev)s)',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_FOR_EXTENSION = {
    'msg_id': 613,
    'loglevel': base_logging.ERROR,
    'msg': 'The volume %(volume_id)s to be extended was not found.',
    'suffix': ERROR_SUFFIX,
}
NO_HBA_WWN_ADDED_TO_HOST_GRP = {
    'msg_id': 614,
    'loglevel': base_logging.ERROR,
    'msg': 'No WWN is assigned. (port: %(port)s, gid: %(gid)s)',
    'suffix': ERROR_SUFFIX,
}
UNABLE_TO_DELETE_PAIR = {
    'msg_id': 616,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to delete a pair. (P-VOL: %(pvol)s)',
    'suffix': ERROR_SUFFIX,
}
INVALID_VOLUME_TYPE_FOR_EXTEND = {
    'msg_id': 618,
    'loglevel': base_logging.ERROR,
    'msg': 'The volume %(volume_id)s could not be extended. The '
           'volume type must be Normal.',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_FOR_CONNECTION = {
    'msg_id': 619,
    'loglevel': base_logging.ERROR,
    'msg': 'The volume %(volume_id)s to be mapped was not found.',
    'suffix': ERROR_SUFFIX,
}
POOL_INFO_RETRIEVAL_FAILED = {
    'msg_id': 620,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to provide information about a pool. (pool: '
           '%(pool)s)',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_FOR_VOLUME_COPY = {
    'msg_id': 624,
    'loglevel': base_logging.ERROR,
    'msg': 'The %(type)s %(id)s source to be replicated was not '
           'found.',
    'suffix': ERROR_SUFFIX,
}
CONNECT_VOLUME_FAILED = {
    'msg_id': 634,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to attach the logical device. (LDEV: %(ldev)s, '
           'reason: %(reason)s)',
    'suffix': ERROR_SUFFIX,
}
CREATE_LDEV_FAILED = {
    'msg_id': 636,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to add the logical device.',
    'suffix': ERROR_SUFFIX,
}
POOL_NOT_FOUND = {
    'msg_id': 640,
    'loglevel': base_logging.ERROR,
    'msg': 'A pool could not be found. (pool: %(pool)s)',
    'suffix': ERROR_SUFFIX,
}
NO_AVAILABLE_RESOURCE = {
    'msg_id': 648,
    'loglevel': base_logging.ERROR,
    'msg': 'There are no resources available for use. (resource: '
           '%(resource)s)',
    'suffix': ERROR_SUFFIX,
}
NO_CONNECTED_TARGET = {
    'msg_id': 649,
    'loglevel': base_logging.ERROR,
    'msg': 'The host group or iSCSI target was not found.',
    'suffix': ERROR_SUFFIX,
}
RESOURCE_NOT_FOUND = {
    'msg_id': 650,
    'loglevel': base_logging.ERROR,
    'msg': 'The resource %(resource)s was not found.',
    'suffix': ERROR_SUFFIX,
}
LDEV_DELETION_WAIT_TIMEOUT = {
    'msg_id': 652,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)',
    'suffix': ERROR_SUFFIX,
}
# --- Error-level messages (msg_id 7xx: manage/unmanage operations) ---
INVALID_LDEV_ATTR_FOR_MANAGE = {
    'msg_id': 702,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV '
           'must be an unpaired %(ldevtype)s.',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_SIZE_FOR_MANAGE = {
    'msg_id': 703,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV '
           'size must be expressed in gigabytes.',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_PORT_FOR_MANAGE = {
    'msg_id': 704,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV '
           'must not be mapped.',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_TYPE_FOR_UNMANAGE = {
    'msg_id': 706,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to unmanage the volume %(volume_id)s. The volume '
           'type must be %(volume_type)s.',
    'suffix': ERROR_SUFFIX,
}
INVALID_LDEV_FOR_MANAGE = {
    'msg_id': 707,
    'loglevel': base_logging.ERROR,
    'msg': 'No valid value is specified for "source-id" or "source-name". '
           'A valid LDEV number must be specified in "source-id" or '
           'a valid LDEV name must be specified in "source-name" '
           'to manage the volume.',
    'suffix': ERROR_SUFFIX,
}
SNAPSHOT_UNMANAGE_FAILED = {
    'msg_id': 722,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to unmanage the snapshot %(snapshot_id)s. '
           'This driver does not support unmanaging snapshots.',
    'suffix': ERROR_SUFFIX,
}
# Copying a volume (clone / create-from-snapshot) failed.
VOLUME_COPY_FAILED = {
    'msg_id': 725,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to copy a volume. (copy method: %(copy_method)s, '
           'P-VOL: %(pvol)s, S-VOL: %(svol)s)',
    # Trailing comma added for consistency with every other member dict.
    'suffix': ERROR_SUFFIX,
}
# --- Error-level messages (msg_id 73x: REST API transport failures) ---
# All four include the request context (method, url, params, body) so the
# failing REST call can be reconstructed from the log.
REST_SERVER_CONNECT_FAILED = {
    'msg_id': 731,
    'loglevel': base_logging.ERROR,
    'msg': 'Failed to communicate with the REST API server. '
           '(exception: %(exception)s, message: %(message)s, '
           'method: %(method)s, url: %(url)s, params: %(params)s, '
           'body: %(body)s)',
    'suffix': ERROR_SUFFIX,
}
REST_API_FAILED = {
    'msg_id': 732,
    'loglevel': base_logging.ERROR,
    'msg': 'The REST API failed. (source: %(errorSource)s, '
           'ID: %(messageId)s, message: %(message)s, cause: %(cause)s, '
           'solution: %(solution)s, code: %(errorCode)s, '
           'method: %(method)s, url: %(url)s, params: %(params)s, '
           'body: %(body)s)',
    'suffix': ERROR_SUFFIX,
}
REST_API_TIMEOUT = {
    'msg_id': 733,
    'loglevel': base_logging.ERROR,
    'msg': 'The REST API timed out. (job ID: %(job_id)s, '
           'job status: %(status)s, job state: %(state)s, '
           'method: %(method)s, url: %(url)s, params: %(params)s, '
           'body: %(body)s)',
    'suffix': ERROR_SUFFIX,
}
REST_API_HTTP_ERROR = {
    'msg_id': 734,
    'loglevel': base_logging.ERROR,
    'msg': 'The REST API failed. (HTTP status code: %(status_code)s, '
           'response body: %(response_body)s, '
           'method: %(method)s, url: %(url)s, params: %(params)s, '
           'body: %(body)s)',
    'suffix': ERROR_SUFFIX,
}
def __init__(self, error_info):
    """Initialize Enum attributes from the member's definition dict.

    Note that the 'loglevel' key is exposed as the ``level`` attribute.
    """
    self.msg_id, self.level, self.msg, self.suffix = (
        error_info['msg_id'],
        error_info['loglevel'],
        error_info['msg'],
        error_info['suffix'],
    )
def output_log(self, **kwargs):
    """Write this message to the log file and return the formatted text."""
    rendered = self.msg % kwargs
    # The log line is prefixed with a zero-padded MSGID and the
    # severity suffix, e.g. "MSGID0725-E: ...".
    LOG.log(self.level,
            "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
            {'msg_id': self.msg_id,
             'msg_suffix': self.suffix,
             'msg': rendered})
    return rendered
def output_log(msg_enum, **kwargs):
    """Log the given message enum member and return the rendered message.

    Thin module-level convenience wrapper around ``msg_enum.output_log``.
    """
    message = msg_enum.output_log(**kwargs)
    return message
# Module-level logger used by HBSDMsg.output_log.
LOG = logging.getLogger(__name__)
# Short alias for the message enum.
MSG = HBSDMsg
def get_ldev(obj):
    """Return the LDEV number stored in *obj* as an int, or None.

    The LDEV number is read from the object's 'provider_location' field;
    None is returned when the object is falsy or the field is not a
    plain decimal string.
    """
    if obj:
        location = obj.get('provider_location')
        if location and location.isdigit():
            return int(location)
    return None
def timed_out(start_time, timeout):
    """Return True when *timeout* seconds have elapsed since *start_time*."""
    has_expired = timeutils.is_older_than(start_time, timeout)
    return has_expired
def import_object(conf, driver_info, db):
    """Import the protocol-specific driver class and return an instance.

    The class name is looked up from the REST driver table by the
    protocol given in ``driver_info['proto']``.
    """
    # Force the C locale so external tool output is not localized.
    os.environ['LANG'] = 'C'
    proto_map = _DRIVERS.get('REST')
    class_path = '%(dir)s.%(proto)s' % {
        'dir': _DRIVER_DIR,
        'proto': proto_map[driver_info['proto']],
    }
    return importutils.import_object(class_path, conf, driver_info, db)
def check_opt_value(conf, names):
    """Verify that every named option can be read from *conf*.

    An unreadable option is logged as an invalid parameter and the
    original oslo.config exception is re-raised.
    """
    for option_name in names:
        try:
            getattr(conf, option_name)
        except (cfg.NoSuchOptError, cfg.ConfigFileValueError):
            with excutils.save_and_reraise_exception():
                output_log(MSG.INVALID_PARAMETER, param=option_name)
def check_opts(conf, opts):
    """Validate the given option definitions against *conf*.

    Raises HBSDError for a required option with no configured value,
    then delegates per-name value checking to check_opt_value().
    """
    for opt in opts:
        if opt.required and not conf.safe_get(opt.name):
            raise HBSDError(
                output_log(MSG.INVALID_PARAMETER, param=opt.name))
    check_opt_value(conf, [opt.name for opt in opts])
def require_target_existed(targets):
    """Raise HBSDError unless *targets* contains at least one member."""
    if targets['list']:
        return
    raise HBSDError(output_log(MSG.NO_CONNECTED_TARGET))
def build_initiator_target_map(connector, target_wwns, lookup_service):
    """Return a dict mapping each initiator WWN to reachable target WWNs.

    With a fabric lookup service, only targets visible on the same
    fabric as an initiator are mapped; without one, every initiator is
    mapped to the full target list.
    """
    initiator_wwns = connector['wwpns']
    if not lookup_service:
        return {initiator: target_wwns for initiator in initiator_wwns}
    init_targ_map = {}
    dev_map = lookup_service.get_device_mapping_from_network(
        initiator_wwns, target_wwns)
    for fabric in dev_map.values():
        targets = fabric['target_port_wwn_list']
        for initiator in fabric['initiator_port_wwn_list']:
            init_targ_map[initiator] = targets
    return init_targ_map
def safe_get_err_code(errobj):
    """Return the uppercased (SSB1, SSB2) error codes from *errobj*.

    Missing fields default to the empty string; a falsy *errobj* yields
    ('', '').
    """
    if not errobj:
        return '', ''
    codes = errobj.get('errorCode', {})
    return (codes.get('SSB1', '').upper(),
            codes.get('SSB2', '').upper())
def safe_get_return_code(errobj):
    """Return the nested errorCode value from *errobj*, or ''.

    The REST error object nests the return code under
    errobj['errorCode']['errorCode'].
    """
    if not errobj:
        return ''
    return errobj.get('errorCode', {}).get('errorCode', '')
def safe_get_message_id(errobj):
    """Return the 'messageId' field of *errobj*, or '' when absent."""
    return errobj.get('messageId', '') if errobj else ''
def is_shared_connection(volume, connector):
    """Return True if *volume* is attached more than once to one host.

    Only meaningful for multiattach volumes: counts the attachments
    whose attached_host matches the connector's host.
    """
    host = connector.get('host') if connector else None
    if not (host and volume.get('multiattach')):
        return False
    attachment_list = volume.volume_attachment
    try:
        # An ObjectListBase wraps the real list in its .object attribute;
        # a plain list is used as-is.
        attachments = attachment_list.object
    except AttributeError:
        attachments = attachment_list
    matches = sum(1 for att in attachments if att.attached_host == host)
    return matches > 1

View File

@ -0,0 +1,3 @@
---
features:
- New Cinder Hitachi driver based on REST API for Hitachi VSP storages.