Introduce Hitachi VSP driver

This patch introduces the Hitachi VSP FC driver.

Signed-off-by: Kazumasa Nomura <kazumasa.nomura.rx@hitachi.com>
DocImpact
Implements: blueprint hitachi-vsp-fc-driver
Change-Id: Ife99bf138c86a2f9be91298064513073271f1239

parent 850a197226, commit 5c815388e2
@@ -1097,6 +1097,19 @@ class HBSDVolumeIsBusy(VolumeIsBusy):
    message = _("Volume %(volume_name)s is busy.")


# Hitachi VSP Driver
class VSPError(VolumeDriverException):
    message = _("VSP error occurred. %(message)s")


class VSPBusy(VSPError):
    message = _("Device or resource is busy.")


class VSPNotSupported(VSPError):
    message = _("The function on the storage is not supported.")


# Datera driver
class DateraAPIException(VolumeBackendAPIException):
    message = _("Bad response from Datera API")
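The three new exception classes are raised throughout the driver modules added below. A minimal sketch of the intended pattern, using simplified stand-ins for the Cinder base classes (the stand-in definitions and the wrapper function are illustrative, not part of the patch): storage-level failures surface as VSPError, and a busy resource is reported as VSPBusy and translated into VolumeIsBusy at the driver boundary, as VSPCommon.delete_volume() does later in this change.

# Minimal sketch, not the actual cinder.exception module: the classes below
# are simplified stand-ins used only to show the translation pattern.
class VolumeDriverException(Exception):
    message = "Volume driver error"


class VSPError(VolumeDriverException):
    message = "VSP error occurred. %(message)s"


class VSPBusy(VSPError):
    message = "Device or resource is busy."


class VolumeIsBusy(Exception):
    pass


def delete_volume(volume, delete_ldev):
    """Mirror of the pattern used by VSPCommon.delete_volume() below."""
    try:
        delete_ldev(volume)
    except VSPBusy:
        # A busy pair/LDEV is reported to the volume manager as VolumeIsBusy
        # so the volume is not pushed into an error state.
        raise VolumeIsBusy(volume['name'])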
@@ -110,6 +110,12 @@ from cinder.volume.drivers.hitachi import hnas_nfs as \
    cinder_volume_drivers_hitachi_hnasnfs
from cinder.volume.drivers.hitachi import hnas_utils as \
    cinder_volume_drivers_hitachi_hnasutils
from cinder.volume.drivers.hitachi import vsp_common as \
    cinder_volume_drivers_hitachi_vspcommon
from cinder.volume.drivers.hitachi import vsp_fc as \
    cinder_volume_drivers_hitachi_vspfc
from cinder.volume.drivers.hitachi import vsp_horcm as \
    cinder_volume_drivers_hitachi_vsphorcm
from cinder.volume.drivers.hpe import hpe_3par_common as \
    cinder_volume_drivers_hpe_hpe3parcommon
from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \
@@ -285,6 +291,9 @@ def list_opts():
                cinder_volume_drivers_hitachi_hnasiscsi.iSCSI_OPTS,
                cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS,
                cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts,
                cinder_volume_drivers_hitachi_vspcommon.common_opts,
                cinder_volume_drivers_hitachi_vspfc.fc_opts,
                cinder_volume_drivers_hitachi_vsphorcm.horcm_opts,
                cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
                cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts,
                cinder_volume_drivers_hpe_hpexpopts.FC_VOLUME_OPTS,
cinder/volume/drivers/hitachi/vsp_common.py (new file, 884 lines)
@@ -0,0 +1,884 @@
# Copyright (C) 2016, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common module for Hitachi VSP Driver."""

import abc
import re

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six

from cinder import coordination
from cinder import exception
from cinder import utils as cinder_utils
from cinder.volume.drivers.hitachi import vsp_utils as utils
from cinder.volume import utils as volume_utils


VERSION = '1.0.0'

_COPY_METHOD = set(['FULL', 'THIN'])

_INHERITED_VOLUME_OPTS = [
    'volume_backend_name',
    'volume_driver',
    'reserved_percentage',
    'use_multipath_for_image_xfer',
    'enforce_multipath_for_image_xfer',
    'num_volume_device_scan_tries',
]

common_opts = [
    cfg.StrOpt(
        'vsp_storage_id',
        help='Product number of the storage system.'),
    cfg.StrOpt(
        'vsp_pool',
        help='Pool number or pool name of the DP pool.'),
    cfg.StrOpt(
        'vsp_thin_pool',
        help='Pool number or pool name of the Thin Image pool.'),
    cfg.StrOpt(
        'vsp_ldev_range',
        help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that '
             'can be used by the driver. Values can be in decimal format '
             '(e.g. 1000) or in colon-separated hexadecimal format '
             '(e.g. 00:03:E8).'),
    cfg.StrOpt(
        'vsp_default_copy_method',
        default='FULL',
        choices=['FULL', 'THIN'],
        help='Method of volume copy. FULL indicates full data copy by '
             'ShadowImage and THIN indicates differential data copy by Thin '
             'Image.'),
    cfg.IntOpt(
        'vsp_copy_speed',
        min=1,
        max=15,
        default=3,
        help='Speed at which data is copied by ShadowImage. 1 or 2 indicates '
             'low speed, 3 indicates middle speed, and a value between 4 and '
             '15 indicates high speed.'),
    cfg.IntOpt(
        'vsp_copy_check_interval',
        min=1,
        max=600,
        default=3,
        help='Interval in seconds at which volume pair synchronization status '
             'is checked when volume pairs are created.'),
    cfg.IntOpt(
        'vsp_async_copy_check_interval',
        min=1,
        max=600,
        default=10,
        help='Interval in seconds at which volume pair synchronization status '
             'is checked when volume pairs are deleted.'),
    cfg.ListOpt(
        'vsp_target_ports',
        help='IDs of the storage ports. To specify multiple ports, connect '
             'them by commas (e.g. CL1-A,CL2-A).'),
    cfg.BoolOpt(
        'vsp_group_request',
        default=False,
        help='If True, the driver will create host groups on storage ports '
             'as needed.'),
]

_REQUIRED_COMMON_OPTS = [
    'vsp_storage_id',
    'vsp_pool',
    'vsp_target_ports',
]

CONF = cfg.CONF
CONF.register_opts(common_opts)

LOG = logging.getLogger(__name__)
MSG = utils.VSPMsg


def _str2int(num):
    """Convert a string into an integer."""
    if not num:
        return None
    if num.isdigit():
        return int(num)
    if not re.match(r'\w\w:\w\w:\w\w', num):
        return None
    try:
        return int(num.replace(':', ''), 16)
    except ValueError:
        return None


@six.add_metaclass(abc.ABCMeta)
class VSPCommon(object):
    """Common class for Hitachi VSP Driver.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver.

    """

    def __init__(self, conf, driverinfo, db):
        """Initialize instance variables."""
        self.conf = conf
        self.db = db
        self.ctxt = None
        self.lock = {}
        self.driver_info = driverinfo
        self.storage_info = {
            'protocol': driverinfo['proto'],
            'pool_id': None,
            'ldev_range': [],
            'ports': [],
            'wwns': {},
            'output_first': True,
        }

        self._stats = {}

    def run_and_verify_storage_cli(self, *cmd, **kwargs):
        """Run storage CLI and return the result or raise an exception."""
        do_raise = kwargs.pop('do_raise', True)
        ignore_error = kwargs.get('ignore_error')
        success_code = kwargs.get('success_code', set([0]))
        (ret, stdout, stderr) = self.run_storage_cli(*cmd, **kwargs)
        if (ret not in success_code and
                not utils.check_ignore_error(ignore_error, stderr)):
            msg = utils.output_log(
                MSG.STORAGE_COMMAND_FAILED, cmd=utils.mask_password(cmd),
                ret=ret, out=' '.join(stdout.splitlines()),
                err=' '.join(stderr.splitlines()))
            if do_raise:
                raise exception.VSPError(msg)
        return ret, stdout, stderr

    @abc.abstractmethod
    def run_storage_cli(self, *cmd, **kwargs):
        """Run storage CLI."""
        raise NotImplementedError()

    def get_copy_method(self, metadata):
        """Return the copy method (FULL or THIN)."""
        method = metadata.get(
            'copy_method', self.conf.vsp_default_copy_method)
        if method not in _COPY_METHOD:
            msg = utils.output_log(MSG.INVALID_PARAMETER_VALUE,
                                   meta='copy_method')
            raise exception.VSPError(msg)
        if method == 'THIN' and not self.conf.vsp_thin_pool:
            msg = utils.output_log(MSG.INVALID_PARAMETER,
                                   param='vsp_thin_pool')
            raise exception.VSPError(msg)
        return method

    def create_volume(self, volume):
        """Create a volume and return its properties."""
        try:
            ldev = self.create_ldev(volume['size'])
        except exception.VSPError:
            with excutils.save_and_reraise_exception():
                utils.output_log(MSG.CREATE_LDEV_FAILED)
        return {
            'provider_location': six.text_type(ldev),
        }

    def create_ldev(self, size, is_vvol=False):
        """Create an LDEV and return its LDEV number."""
        ldev = self.get_unused_ldev()
        self.create_ldev_on_storage(ldev, size, is_vvol)
        LOG.debug('Created logical device. (LDEV: %s)', ldev)
        return ldev

    @abc.abstractmethod
    def create_ldev_on_storage(self, ldev, size, is_vvol):
        """Create an LDEV on the storage system."""
        raise NotImplementedError()

    @abc.abstractmethod
    def get_unused_ldev(self):
        """Find an unused LDEV and return its LDEV number."""
        raise NotImplementedError()

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties."""
        ldev = utils.get_ldev(snapshot)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            msg = utils.output_log(
                MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='snapshot',
                id=snapshot['id'])
            raise exception.VSPError(msg)
        size = volume['size']
        metadata = utils.get_volume_metadata(volume)
        if size < snapshot['volume_size']:
            msg = utils.output_log(
                MSG.INVALID_VOLUME_SIZE_FOR_COPY, type='snapshot',
                volume_id=volume['id'])
            raise exception.VSPError(msg)
        elif (size > snapshot['volume_size'] and not self.check_vvol(ldev) and
              self.get_copy_method(metadata) == "THIN"):
            msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI,
                                   copy_method=utils.THIN,
                                   type='snapshot', volume_id=volume['id'])
            raise exception.VSPError(msg)
        sync = size > snapshot['volume_size']
        new_ldev = self._copy_ldev(
            ldev, snapshot['volume_size'], metadata, sync)
        if sync:
            self.delete_pair(new_ldev)
            self.extend_ldev(new_ldev, snapshot['volume_size'], size)
        return {
            'provider_location': six.text_type(new_ldev),
        }

    def _copy_ldev(self, ldev, size, metadata, sync=False):
        """Create a copy of the specified volume and return its properties."""
        try:
            return self.copy_on_storage(ldev, size, metadata, sync)
        except exception.VSPNotSupported:
            return self._copy_on_host(ldev, size)

    def _copy_on_host(self, src_ldev, size):
        """Create a copy of the specified LDEV via host."""
        dest_ldev = self.create_ldev(size)
        try:
            self._copy_with_dd(src_ldev, dest_ldev, size)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self._delete_ldev(dest_ldev)
                except exception.VSPError:
                    utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=dest_ldev)
        return dest_ldev

    def _copy_with_dd(self, src_ldev, dest_ldev, size):
        """Copy the content of a volume by dd command."""
        src_info = None
        dest_info = None
        properties = cinder_utils.brick_get_connector_properties(
            multipath=self.conf.use_multipath_for_image_xfer,
            enforce_multipath=self.conf.enforce_multipath_for_image_xfer)
        try:
            dest_info = self._attach_ldev(dest_ldev, properties)
            src_info = self._attach_ldev(src_ldev, properties)
            volume_utils.copy_volume(
                src_info['device']['path'], dest_info['device']['path'],
                size * units.Ki, self.conf.volume_dd_blocksize)
        finally:
            if src_info:
                self._detach_ldev(src_info, src_ldev, properties)
            if dest_info:
                self._detach_ldev(dest_info, dest_ldev, properties)
        self.discard_zero_page({'provider_location': six.text_type(dest_ldev)})

    def _attach_ldev(self, ldev, properties):
        """Attach the specified LDEV to the server."""
        volume = {
            'provider_location': six.text_type(ldev),
        }
        conn = self.initialize_connection(volume, properties)
        try:
            connector = cinder_utils.brick_get_connector(
                conn['driver_volume_type'],
                use_multipath=self.conf.use_multipath_for_image_xfer,
                device_scan_attempts=self.conf.num_volume_device_scan_tries,
                conn=conn)
            device = connector.connect_volume(conn['data'])
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                utils.output_log(MSG.CONNECT_VOLUME_FAILED, ldev=ldev,
                                 reason=six.text_type(ex))
                self._terminate_connection(volume, properties)
        return {
            'conn': conn,
            'device': device,
            'connector': connector,
        }

    def _detach_ldev(self, attach_info, ldev, properties):
        """Detach the specified LDEV from the server."""
        volume = {
            'provider_location': six.text_type(ldev),
        }
        connector = attach_info['connector']
        try:
            connector.disconnect_volume(
                attach_info['conn']['data'], attach_info['device'])
        except Exception as ex:
            utils.output_log(MSG.DISCONNECT_VOLUME_FAILED, ldev=ldev,
                             reason=six.text_type(ex))
        self._terminate_connection(volume, properties)

    def _terminate_connection(self, volume, connector):
        """Disconnect the specified volume from the server."""
        try:
            self.terminate_connection(volume, connector)
        except exception.VSPError:
            utils.output_log(MSG.UNMAP_LDEV_FAILED,
                             ldev=utils.get_ldev(volume))

    def copy_on_storage(self, pvol, size, metadata, sync):
        """Create a copy of the specified LDEV on the storage."""
        is_thin = self.get_copy_method(metadata) == "THIN"
        svol = self.create_ldev(size, is_vvol=is_thin)
        try:
            self.create_pair_on_storage(pvol, svol, is_thin)
            if sync:
                self.wait_full_copy_completion(pvol, svol)
        except exception.VSPError:
            with excutils.save_and_reraise_exception():
                try:
                    self._delete_ldev(svol)
                except exception.VSPError:
                    utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
        return svol

    @abc.abstractmethod
    def create_pair_on_storage(self, pvol, svol, is_thin):
        """Create a copy pair on the storage."""
        raise NotImplementedError()

    def _delete_ldev(self, ldev):
        """Delete the specified LDEV."""
        self.delete_pair(ldev)
        self.unmap_ldev_from_storage(ldev)
        self.delete_ldev_from_storage(ldev)

    def unmap_ldev_from_storage(self, ldev):
        """Delete the connection between the specified LDEV and servers."""
        targets = {
            'list': [],
        }
        self.find_all_mapped_targets_from_storage(targets, ldev)
        self.unmap_ldev(targets, ldev)

    @abc.abstractmethod
    def find_all_mapped_targets_from_storage(self, targets, ldev):
        """Add all port-gids connected with the LDEV to the list."""
        raise NotImplementedError()

    def delete_pair(self, ldev, all_split=True):
        """Disconnect all volume pairs to which the specified LDEV belongs."""
        pair_info = self.get_pair_info(ldev)
        if not pair_info:
            return
        if pair_info['pvol'] == ldev:
            self.delete_pair_based_on_pvol(pair_info, all_split)
        else:
            self.delete_pair_based_on_svol(
                pair_info['pvol'], pair_info['svol_info'][0])

    @abc.abstractmethod
    def get_pair_info(self, ldev):
        """Return volume pair info (LDEV number, pair status and pair type)."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_pair_based_on_pvol(self, pair_info, all_split):
        """Disconnect all volume pairs to which the specified P-VOL belongs."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_pair_based_on_svol(self, pvol, svol_info):
        """Disconnect all volume pairs to which the specified S-VOL belongs."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_pair_from_storage(self, pvol, svol, is_thin):
        """Disconnect the volume pair that consists of the specified LDEVs."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_ldev_from_storage(self, ldev):
        """Delete the specified LDEV from the storage."""
        raise NotImplementedError()

    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties."""
        ldev = utils.get_ldev(src_vref)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                                   type='volume', id=src_vref['id'])
            raise exception.VSPError(msg)
        size = volume['size']
        metadata = utils.get_volume_metadata(volume)
        if size < src_vref['size']:
            msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_COPY,
                                   type='volume', volume_id=volume['id'])
            raise exception.VSPError(msg)
        elif (size > src_vref['size'] and not self.check_vvol(ldev) and
              self.get_copy_method(metadata) == "THIN"):
            msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI,
                                   copy_method=utils.THIN, type='volume',
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)
        sync = size > src_vref['size']
        new_ldev = self._copy_ldev(ldev, src_vref['size'], metadata, sync)
        if sync:
            self.delete_pair(new_ldev)
            self.extend_ldev(new_ldev, src_vref['size'], size)
        return {
            'provider_location': six.text_type(new_ldev),
        }

    def delete_volume(self, volume):
        """Delete the specified volume."""
        ldev = utils.get_ldev(volume)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_DELETION,
                             method='delete_volume', id=volume['id'])
            return
        try:
            self._delete_ldev(ldev)
        except exception.VSPBusy:
            raise exception.VolumeIsBusy(volume_name=volume['name'])

    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        src_vref = snapshot.volume
        ldev = utils.get_ldev(src_vref)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                                   type='volume', id=src_vref['id'])
            raise exception.VSPError(msg)
        size = snapshot['volume_size']
        metadata = utils.get_volume_metadata(src_vref)
        new_ldev = self._copy_ldev(ldev, size, metadata)
        return {
            'provider_location': six.text_type(new_ldev),
        }

    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        ldev = utils.get_ldev(snapshot)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            utils.output_log(
                MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
                id=snapshot['id'])
            return
        try:
            self._delete_ldev(ldev)
        except exception.VSPBusy:
            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])

    def get_volume_stats(self, refresh=False):
        """Return properties, capabilities and current states of the driver."""
        if refresh:
            if self.storage_info['output_first']:
                self.storage_info['output_first'] = False
                utils.output_log(MSG.DRIVER_READY_FOR_USE,
                                 config_group=self.conf.config_group)
            self._update_volume_stats()
        return self._stats

    def _update_volume_stats(self):
        """Update properties, capabilities and current states of the driver."""
        data = {}
        backend_name = self.conf.safe_get('volume_backend_name')
        data['volume_backend_name'] = (
            backend_name or self.driver_info['volume_backend_name'])
        data['vendor_name'] = 'Hitachi'
        data['driver_version'] = VERSION
        data['storage_protocol'] = self.storage_info['protocol']
        try:
            total_gb, free_gb = self.get_pool_info()
        except exception.VSPError:
            utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED,
                             pool=self.conf.vsp_pool)
            return
        data['total_capacity_gb'] = total_gb
        data['free_capacity_gb'] = free_gb
        data['reserved_percentage'] = self.conf.safe_get('reserved_percentage')
        data['QoS_support'] = False
        LOG.debug("Updating volume status. (%s)", data)
        self._stats = data

    @abc.abstractmethod
    def get_pool_info(self):
        """Return the total and free capacity of the storage pool."""
        raise NotImplementedError()

    @abc.abstractmethod
    def discard_zero_page(self, volume):
        """Return the volume's no-data pages to the storage pool."""
        raise NotImplementedError()

    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size."""
        ldev = utils.get_ldev(volume)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)
        if self.check_vvol(ldev):
            msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)
        self.delete_pair(ldev)
        self.extend_ldev(ldev, volume['size'], new_size)

    @abc.abstractmethod
    def check_vvol(self, ldev):
        """Return True if the specified LDEV is V-VOL, False otherwise."""
        raise NotImplementedError()

    @abc.abstractmethod
    def extend_ldev(self, ldev, old_size, new_size):
        """Extend the specified LDEV to the specified new size."""
        raise NotImplementedError()

    def manage_existing(self, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        ldev = _str2int(existing_ref.get('source-id'))
        return {
            'provider_location': six.text_type(ldev),
        }

    def manage_existing_get_size(self, existing_ref):
        """Return the size [GB] of the specified volume."""
        ldev = _str2int(existing_ref.get('source-id'))
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=msg)
        return self.get_ldev_size_in_gigabyte(ldev, existing_ref)

    @abc.abstractmethod
    def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
        """Return the size [GB] of the specified LDEV."""
        raise NotImplementedError()

    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        ldev = utils.get_ldev(volume)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
                             id=volume['id'])
            return
        if self.check_vvol(ldev):
            utils.output_log(
                MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
                volume_type=utils.NORMAL_LDEV_TYPE)
            raise exception.VolumeIsBusy(volume_name=volume['name'])
        try:
            self.delete_pair(ldev)
        except exception.VSPBusy:
            raise exception.VolumeIsBusy(volume_name=volume['name'])

    def do_setup(self, context):
        """Prepare for the startup of the driver."""
        self.ctxt = context

        self.check_param()
        self.config_lock()
        self.connect_storage()
        self.init_cinder_hosts()
        self.output_param_to_log()

    def check_param(self):
        """Check parameter values and consistency among them."""
        utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
        utils.check_opts(self.conf, common_opts)
        utils.check_opts(self.conf, self.driver_info['volume_opts'])
        if (self.conf.vsp_default_copy_method == 'THIN' and
                not self.conf.vsp_thin_pool):
            msg = utils.output_log(MSG.INVALID_PARAMETER,
                                   param='vsp_thin_pool')
            raise exception.VSPError(msg)
        if self.conf.vsp_ldev_range:
            self.storage_info['ldev_range'] = self._range2list(
                'vsp_ldev_range')
        for opt in _REQUIRED_COMMON_OPTS:
            if not self.conf.safe_get(opt):
                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
                raise exception.VSPError(msg)

    def _range2list(self, param):
        """Analyze a 'xxx-xxx' string and return a list of two integers."""
        values = [_str2int(value) for value in
                  self.conf.safe_get(param).split('-')]
        if (len(values) != 2 or
                values[0] is None or values[1] is None or
                values[0] > values[1]):
            msg = utils.output_log(MSG.INVALID_PARAMETER, param=param)
            raise exception.VSPError(msg)
        return values

    @abc.abstractmethod
    def config_lock(self):
        """Initialize lock resource names."""
        raise NotImplementedError()

    def connect_storage(self):
        """Prepare for using the storage."""
        self.storage_info['pool_id'] = self.get_pool_id()
        # A pool ID of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if self.storage_info['pool_id'] is None:
            msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=self.conf.vsp_pool)
            raise exception.VSPError(msg)
        utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
                         value=self.storage_info['pool_id'])

    def check_ports_info(self):
        """Check if available storage ports exist."""
        if (self.conf.vsp_target_ports and
                not self.storage_info['ports']):
            msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
                                   resource="Target ports")
            raise exception.VSPError(msg)
        utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
                         value=self.storage_info['ports'])

    def get_pool_id(self):
        """Return the storage pool ID as integer."""
        pool = self.conf.vsp_pool
        if pool.isdigit():
            return int(pool)
        return None

    def init_cinder_hosts(self, **kwargs):
        """Initialize server-storage connection."""
        targets = kwargs.pop('targets', {'info': {}, 'list': []})
        connector = cinder_utils.brick_get_connector_properties(
            multipath=self.conf.use_multipath_for_image_xfer,
            enforce_multipath=self.conf.enforce_multipath_for_image_xfer)
        target_ports = self.storage_info['ports']

        if target_ports:
            if (self.find_targets_from_storage(
                    targets, connector, target_ports) and
                    self.conf.vsp_group_request):
                self.create_mapping_targets(targets, connector)

            utils.require_target_existed(targets)

    @abc.abstractmethod
    def find_targets_from_storage(self, targets, connector, target_ports):
        """Find mapped ports, memorize them and return unmapped port count."""
        raise NotImplementedError()

    def create_mapping_targets(self, targets, connector):
        """Create server-storage connection for all specified storage ports."""
        hba_ids = self.get_hba_ids_from_connector(connector)
        for port in targets['info'].keys():
            if targets['info'][port]:
                continue

            try:
                self._create_target(targets, port, connector, hba_ids)
            except exception.VSPError:
                utils.output_log(
                    self.driver_info['msg_id']['target'], port=port)

        if not targets['list']:
            self.find_targets_from_storage(
                targets, connector, targets['info'].keys())

    def get_hba_ids_from_connector(self, connector):
        """Return the HBA ID stored in the connector."""
        if self.driver_info['hba_id'] in connector:
            return connector[self.driver_info['hba_id']]
        msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
                               resource=self.driver_info['hba_id_type'])
        raise exception.VSPError(msg)

    def _create_target(self, targets, port, connector, hba_ids):
        """Create a host group for the specified storage port."""
        target_name, gid = self.create_target_to_storage(port, connector)
        utils.output_log(MSG.OBJECT_CREATED, object='a target',
                         details='port: %(port)s, gid: %(gid)s, target_name: '
                         '%(target)s' %
                         {'port': port, 'gid': gid, 'target': target_name})
        try:
            self.set_target_mode(port, gid)
            self.set_hba_ids(port, gid, hba_ids)
        except exception.VSPError:
            with excutils.save_and_reraise_exception():
                self.delete_target_from_storage(port, gid)
        targets['info'][port] = True
        targets['list'].append((port, gid))

    @abc.abstractmethod
    def create_target_to_storage(self, port, connector):
        """Create a host group on the specified port."""
        raise NotImplementedError()

    @abc.abstractmethod
    def set_target_mode(self, port, gid):
        """Configure the host group to meet the environment."""
        raise NotImplementedError()

    @abc.abstractmethod
    def set_hba_ids(self, port, gid, hba_ids):
        """Connect all specified HBAs with the specified port."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_target_from_storage(self, port, gid):
        """Delete the host group from the port."""
        raise NotImplementedError()

    def output_param_to_log(self):
        """Output configuration parameter values to the log file."""
        utils.output_log(MSG.OUTPUT_PARAMETER_VALUES,
                         config_group=self.conf.config_group)
        name, version = self.get_storage_cli_info()
        utils.output_storage_cli_info(name, version)
        utils.output_opt_info(self.conf, _INHERITED_VOLUME_OPTS)
        utils.output_opts(self.conf, common_opts)
        utils.output_opts(self.conf, self.driver_info['volume_opts'])

    @abc.abstractmethod
    def get_storage_cli_info(self):
        """Return a tuple of the storage CLI name and its version."""
        raise NotImplementedError()

    @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-'
                               '{connector[host]}')
    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        targets = {
            'info': {},
            'list': [],
            'lun': {},
        }
        ldev = utils.get_ldev(volume)
        # An LDEV number of 0 is valid, so the explicit 'is None' check
        # cannot be removed.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)

        if (self.find_targets_from_storage(
                targets, connector, self.storage_info['ports']) and
                self.conf.vsp_group_request):
            self.create_mapping_targets(targets, connector)

        utils.require_target_existed(targets)

        targets['list'].sort()
        for port in self.storage_info['ports']:
            targets['lun'][port] = False
        target_lun = int(self.map_ldev(targets, ldev))

        return {
            'driver_volume_type': self.driver_info['volume_type'],
            'data': self.get_properties(targets, connector, target_lun),
        }

    @abc.abstractmethod
    def map_ldev(self, targets, ldev):
        """Create the path between the server and the LDEV and return LUN."""
        raise NotImplementedError()

    def get_properties(self, targets, connector, target_lun=None):
        """Return server-LDEV connection info."""
        multipath = connector.get('multipath', False)
        if self.storage_info['protocol'] == 'FC':
            data = self.get_properties_fc(targets)
        if target_lun is not None:
            data['target_discovered'] = False
            if not multipath or self.storage_info['protocol'] == 'FC':
                data['target_lun'] = target_lun
        return data

    def get_properties_fc(self, targets):
        """Return FC-specific server-LDEV connection info."""
        data = {}
        data['target_wwn'] = [
            self.storage_info['wwns'][target[0]] for target in targets['list']
            if targets['lun'][target[0]]]
        return data

    @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-'
                               '{connector[host]}')
    def terminate_connection(self, volume, connector):
        """Terminate connection between the server and the volume."""
        targets = {
            'info': {},
            'list': [],
        }
        mapped_targets = {
            'list': [],
        }
        unmap_targets = {}

        ldev = utils.get_ldev(volume)
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
                             volume_id=volume['id'])
            return
        self.find_targets_from_storage(targets, connector,
                                       self.storage_info['ports'])
        if not targets['list']:
            utils.output_log(MSG.NO_CONNECTED_TARGET)
        self.find_mapped_targets_from_storage(
            mapped_targets, ldev, self.storage_info['ports'])

        unmap_targets['list'] = self.get_unmap_targets_list(
            targets['list'], mapped_targets['list'])
        unmap_targets['list'].sort(reverse=True)
        self.unmap_ldev(unmap_targets, ldev)

        target_wwn = [
            self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]]
            for port_gid in unmap_targets['list']]
        return {'driver_volume_type': self.driver_info['volume_type'],
                'data': {'target_wwn': target_wwn}}

    @abc.abstractmethod
    def find_mapped_targets_from_storage(self, targets, ldev, target_ports):
        """Find and store IDs of ports used for server-LDEV connection."""
        raise NotImplementedError()

    @abc.abstractmethod
    def get_unmap_targets_list(self, target_list, mapped_list):
        """Return a list of IDs of ports that need to be disconnected."""
        raise NotImplementedError()

    @abc.abstractmethod
    def unmap_ldev(self, targets, ldev):
        """Delete the LUN between the specified LDEV and port-gid."""
        raise NotImplementedError()

    @abc.abstractmethod
    def wait_full_copy_completion(self, pvol, svol):
        """Wait until FULL copy is completed."""
        raise NotImplementedError()
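Before the FC front end, one configuration detail above is easy to get wrong: the spelling of vsp_ldev_range. The standalone sketch below mirrors the _str2int()/_range2list() logic so the accepted decimal and colon-separated hexadecimal forms can be tried directly; the helper names and sample values are illustrative only, not part of the patch.

# Standalone sketch of the LDEV-number parsing used by vsp_ldev_range above.
import re


def str2int(num):
    """Accept '1000' (decimal) or '00:03:E8' (colon-separated hex)."""
    if not num:
        return None
    if num.isdigit():
        return int(num)
    if not re.match(r'\w\w:\w\w:\w\w', num):
        return None
    try:
        return int(num.replace(':', ''), 16)
    except ValueError:
        return None


def range2list(value):
    """Parse a 'xxxx-yyyy' range into two integers, as check_param() does."""
    values = [str2int(part) for part in value.split('-')]
    if (len(values) != 2 or values[0] is None or values[1] is None or
            values[0] > values[1]):
        raise ValueError('invalid range: %s' % value)
    return values


print(str2int('1000'))                   # 1000
print(str2int('00:03:E8'))               # 1000 (same LDEV, hexadecimal form)
print(range2list('00:05:00-00:07:FF'))   # [1280, 2047]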
cinder/volume/drivers/hitachi/vsp_fc.py (new file, 178 lines)
@@ -0,0 +1,178 @@
# Copyright (C) 2016, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fibre channel module for Hitachi VSP Driver."""

from oslo_config import cfg

from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.hitachi import vsp_common as common
from cinder.volume.drivers.hitachi import vsp_utils as utils

fc_opts = [
    cfg.BoolOpt(
        'vsp_zoning_request',
        default=False,
        help='If True, the driver will configure FC zoning between the server '
             'and the storage system provided that FC zoning manager is '
             'enabled.'),
]

MSG = utils.VSPMsg

_DRIVER_INFO = {
    'proto': 'FC',
    'hba_id': 'wwpns',
    'hba_id_type': 'World Wide Name',
    'msg_id': {
        'target': MSG.CREATE_HOST_GROUP_FAILED,
    },
    'volume_backend_name': utils.DRIVER_PREFIX + 'FC',
    'volume_opts': fc_opts,
    'volume_type': 'fibre_channel',
}

CONF = cfg.CONF
CONF.register_opts(fc_opts)


@interface.volumedriver
class VSPFCDriver(driver.FibreChannelDriver):
    """Fibre channel class for Hitachi VSP Driver.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver.

    """

    VERSION = common.VERSION

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Hitachi_VSP_CI"

    def __init__(self, *args, **kwargs):
        """Initialize instance variables."""
        utils.output_log(MSG.DRIVER_INITIALIZATION_START,
                         driver=self.__class__.__name__,
                         version=self.get_version())
        super(VSPFCDriver, self).__init__(*args, **kwargs)

        self.configuration.append_config_values(common.common_opts)
        self.configuration.append_config_values(fc_opts)
        self.common = utils.import_object(
            self.configuration, _DRIVER_INFO, kwargs.get('db'))

    def check_for_setup_error(self):
        """Errors are checked in do_setup() instead of this method."""
        pass

    @utils.output_start_end_log
    def create_volume(self, volume):
        """Create a volume and return its properties."""
        return self.common.create_volume(volume)

    @utils.output_start_end_log
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties."""
        return self.common.create_volume_from_snapshot(volume, snapshot)

    @utils.output_start_end_log
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties."""
        return self.common.create_cloned_volume(volume, src_vref)

    @utils.output_start_end_log
    def delete_volume(self, volume):
        """Delete the specified volume."""
        self.common.delete_volume(volume)

    @utils.output_start_end_log
    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        return self.common.create_snapshot(snapshot)

    @utils.output_start_end_log
    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        self.common.delete_snapshot(snapshot)

    def get_volume_stats(self, refresh=False):
        """Return properties, capabilities and current states of the driver."""
        return self.common.get_volume_stats(refresh)

    @utils.output_start_end_log
    def update_migrated_volume(
            self, ctxt, volume, new_volume, original_volume_status):
        """Do any remaining jobs after migration."""
        self.common.discard_zero_page(new_volume)
        super(VSPFCDriver, self).update_migrated_volume(
            ctxt, volume, new_volume, original_volume_status)

    @utils.output_start_end_log
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        super(VSPFCDriver, self).copy_image_to_volume(
            context, volume, image_service, image_id)
        self.common.discard_zero_page(volume)

    @utils.output_start_end_log
    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size."""
        self.common.extend_volume(volume, new_size)

    @utils.output_start_end_log
    def manage_existing(self, volume, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        return self.common.manage_existing(existing_ref)

    @utils.output_start_end_log
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size [GB] of the specified volume."""
        return self.common.manage_existing_get_size(existing_ref)

    @utils.output_start_end_log
    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        self.common.unmanage(volume)

    @utils.output_start_end_log
    def do_setup(self, context):
        """Prepare for the startup of the driver."""
        self.common.do_setup(context)

    def ensure_export(self, context, volume):
        """Synchronously recreate an export for a volume."""
        pass

    def create_export(self, context, volume, connector):
        """Export the volume."""
        pass

    def remove_export(self, context, volume):
        """Remove an export for a volume."""
        pass

    @utils.output_start_end_log
    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        return self.common.initialize_connection(volume, connector)

    @utils.output_start_end_log
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection between the server and the volume."""
        self.common.terminate_connection(volume, connector)
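For reference, the connection info this driver hands back to the attaching host has the standard fibre-channel shape. The dict below is an illustration assembled from get_properties() and get_properties_fc() in vsp_common.py and from _DRIVER_INFO['volume_type'] above; the WWN and LUN values are invented for the example, not captured output.

# Expected shape of the dict returned by VSPFCDriver.initialize_connection().
conn_info = {
    'driver_volume_type': 'fibre_channel',   # _DRIVER_INFO['volume_type']
    'data': {
        'target_wwn': ['50060e8007274300', '50060e8007274310'],  # example WWNs
        'target_discovered': False,
        'target_lun': 1,                                         # example LUN
    },
}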
cinder/volume/drivers/hitachi/vsp_horcm.py (new file, 1422 lines; diff not shown because the file is too large)
cinder/volume/drivers/hitachi/vsp_horcm_fc.py (new file, 183 lines)
@@ -0,0 +1,183 @@
# Copyright (C) 2016, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""HORCM interface fibre channel module for Hitachi VSP Driver."""

import re

from oslo_log import log as logging

from cinder import exception
from cinder.volume.drivers.hitachi import vsp_horcm as horcm
from cinder.volume.drivers.hitachi import vsp_utils as utils
from cinder.zonemanager import utils as fczm_utils

_FC_LINUX_MODE_OPTS = ['-host_mode', 'LINUX']
_HOST_GROUPS_PATTERN = re.compile(
    r"^CL\w-\w+ +(?P<gid>\d+) +%s(?!pair00 )\S* +\d+ " % utils.TARGET_PREFIX,
    re.M)
_FC_PORT_PATTERN = re.compile(
    (r"^(CL\w-\w)\w* +(?:FIBRE|FCoE) +TAR +\w+ +\w+ +\w +\w+ +Y +"
     r"\d+ +\d+ +(\w{16})"), re.M)

LOG = logging.getLogger(__name__)
MSG = utils.VSPMsg


class VSPHORCMFC(horcm.VSPHORCM):
    """HORCM interface fibre channel class for Hitachi VSP Driver.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver.

    """

    def __init__(self, conf, storage_protocol, db):
        """Initialize instance variables."""
        super(VSPHORCMFC, self).__init__(conf, storage_protocol, db)
        self._lookup_service = fczm_utils.create_lookup_service()

    def connect_storage(self):
        """Prepare for using the storage."""
        target_ports = self.conf.vsp_target_ports

        super(VSPHORCMFC, self).connect_storage()
        result = self.run_raidcom('get', 'port')
        for port, wwn in _FC_PORT_PATTERN.findall(result[1]):
            if target_ports and port in target_ports:
                self.storage_info['ports'].append(port)
                self.storage_info['wwns'][port] = wwn

        self.check_ports_info()
        utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
                         value=self.storage_info['wwns'])

    def create_target_to_storage(self, port, connector):
        """Create a host group on the specified port."""
        wwpns = self.get_hba_ids_from_connector(connector)
        target_name = utils.TARGET_PREFIX + min(wwpns)
        try:
            result = self.run_raidcom(
                'add', 'host_grp', '-port', port, '-host_grp_name',
                target_name)
        except exception.VSPError:
            result = self.run_raidcom('get', 'host_grp', '-port', port)
            hostgroup_pt = re.compile(
                r"^CL\w-\w+ +(?P<gid>\d+) +%s +\d+ " %
                target_name, re.M)
            gid = hostgroup_pt.findall(result[1])
            if gid:
                return target_name, gid[0]
            else:
                raise
        return target_name, horcm.find_value(result[1], 'gid')

    def set_hba_ids(self, port, gid, hba_ids):
        """Connect all specified HBAs with the specified port."""
        registered_wwns = []
        for wwn in hba_ids:
            try:
                self.run_raidcom(
                    'add', 'hba_wwn', '-port',
                    '-'.join([port, gid]), '-hba_wwn', wwn)
                registered_wwns.append(wwn)
            except exception.VSPError:
                utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
                                 wwn=wwn)
        if not registered_wwns:
            msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
                                   gid=gid)
            raise exception.VSPError(msg)

    def set_target_mode(self, port, gid):
        """Configure the host group to meet the environment."""
        self.run_raidcom(
            'modify', 'host_grp', '-port',
            '-'.join([port, gid]), *_FC_LINUX_MODE_OPTS,
            success_code=horcm.ALL_EXIT_CODE)

    def find_targets_from_storage(self, targets, connector, target_ports):
        """Find mapped ports, memorize them and return unmapped port count."""
        nr_not_found = 0
        old_target_name = None
        if 'ip' in connector:
            old_target_name = utils.TARGET_PREFIX + connector['ip']
        success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ])
        wwpns = self.get_hba_ids_from_connector(connector)
        wwpns_pattern = re.compile(
            r'^CL\w-\w+ +\d+ +\S+ +(%s) ' % '|'.join(wwpns), re.M | re.I)
        target_name = utils.TARGET_PREFIX + min(wwpns)

        for port in target_ports:
            targets['info'][port] = False

            result = self.run_raidcom(
                'get', 'hba_wwn', '-port', port, target_name,
                success_code=success_code)
            wwpns = wwpns_pattern.findall(result[1])
            if not wwpns and old_target_name:
                result = self.run_raidcom(
                    'get', 'hba_wwn', '-port', port, old_target_name,
                    success_code=success_code)
                wwpns = wwpns_pattern.findall(result[1])
            if wwpns:
                gid = result[1].splitlines()[1].split()[1]
                targets['info'][port] = True
                targets['list'].append((port, gid))
                LOG.debug(
                    'Found wwpns in host group immediately. '
                    '(port: %(port)s, gid: %(gid)s, wwpns: %(wwpns)s)',
                    {'port': port, 'gid': gid, 'wwpns': wwpns})
                continue

            result = self.run_raidcom(
                'get', 'host_grp', '-port', port)
            for gid in _HOST_GROUPS_PATTERN.findall(result[1]):
                result = self.run_raidcom(
                    'get', 'hba_wwn', '-port', '-'.join([port, gid]))
                wwpns = wwpns_pattern.findall(result[1])
                if wwpns:
                    targets['info'][port] = True
                    targets['list'].append((port, gid))
                    LOG.debug(
                        'Found wwpns in host group. (port: %(port)s, '
                        'gid: %(gid)s, wwpns: %(wwpns)s)',
                        {'port': port, 'gid': gid, 'wwpns': wwpns})
                    break
            else:
                nr_not_found += 1

        return nr_not_found

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        conn_info = super(VSPHORCMFC, self).initialize_connection(
            volume, connector)
        if self.conf.vsp_zoning_request:
            utils.update_conn_info(conn_info, connector, self._lookup_service)
        return conn_info

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector):
        """Terminate connection between the server and the volume."""
        conn_info = super(VSPHORCMFC, self).terminate_connection(
            volume, connector)
        if self.conf.vsp_zoning_request and (
                conn_info and conn_info['data']['target_wwn']):
            utils.update_conn_info(conn_info, connector, self._lookup_service)
        return conn_info
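The two compiled regexes at the top of this module do the work of scraping raidcom output. The snippet below exercises _FC_PORT_PATTERN against a hypothetical 'raidcom get port' listing shaped to match the expected columns (the sample rows are not captured output), showing how port IDs and target WWNs are paired up for storage_info['wwns'].

# Self-contained demonstration of the port/WWN scraping regex.
import re

fc_port_pattern = re.compile(
    (r"^(CL\w-\w)\w* +(?:FIBRE|FCoE) +TAR +\w+ +\w+ +\w +\w+ +Y +"
     r"\d+ +\d+ +(\w{16})"), re.M)

# Hypothetical raidcom output, formatted to fit the columns the regex expects.
sample = (
    "PORT  TYPE  ATTR SPD LPID FAB CONN SSW SL Serial# WWN\n"
    "CL1-A FIBRE TAR  AUT 01   Y   PtoP Y   0  451234  50060e8007274300\n"
    "CL2-A FIBRE TAR  AUT 02   Y   PtoP Y   0  451234  50060e8007274310\n"
)

print(fc_port_pattern.findall(sample))
# [('CL1-A', '50060e8007274300'), ('CL2-A', '50060e8007274310')]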
cinder/volume/drivers/hitachi/vsp_utils.py (new file, 662 lines)
@@ -0,0 +1,662 @@
# Copyright (C) 2016, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Utility module for Hitachi VSP Driver."""

import functools
import inspect
import logging as base_logging
import os
import re

import enum
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _LE
from cinder.i18n import _LI
from cinder.i18n import _LW
from cinder import utils as cinder_utils


_DRIVER_DIR = 'cinder.volume.drivers.hitachi'

_DRIVERS = {
    'HORCM': {
        'FC': 'vsp_horcm_fc.VSPHORCMFC',
    },
}

DRIVER_PREFIX = 'VSP'
TARGET_PREFIX = 'HBSD-'
GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512

MAX_PROCESS_WAITTIME = 24 * 60 * 60
DEFAULT_PROCESS_WAITTIME = 15 * 60

NORMAL_LDEV_TYPE = 'Normal'
NVOL_LDEV_TYPE = 'DP-VOL'

FULL = 'Full copy'
THIN = 'Thin copy'

INFO_SUFFIX = 'I'
WARNING_SUFFIX = 'W'
ERROR_SUFFIX = 'E'

PORT_ID_LENGTH = 5

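# Hedged sketch of how the _DRIVERS table above is presumably consumed by
# utils.import_object() (called from VSPFCDriver.__init__ but not shown in
# this excerpt): the protocol from driver_info selects the HORCM class path,
# which oslo importutils then instantiates. Treat this as an assumption, not
# the committed implementation.
#
#   from oslo_utils import importutils
#
#   def import_object_sketch(conf, driver_info, db, interface='HORCM'):
#       """Resolve and build the backend class for the given protocol."""
#       cls_path = '.'.join(
#           [_DRIVER_DIR, _DRIVERS[interface][driver_info['proto']]])
#       # e.g. 'cinder.volume.drivers.hitachi.vsp_horcm_fc.VSPHORCMFC'
#       return importutils.import_object(cls_path, conf, driver_info, db)
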
@enum.unique
class VSPMsg(enum.Enum):
    """Messages for Hitachi VSP Driver."""

    METHOD_START = {
        'msg_id': 0,
        'loglevel': base_logging.INFO,
        'msg': _LI('%(method)s starts. (config_group: %(config_group)s)'),
        'suffix': INFO_SUFFIX
    }
    OUTPUT_PARAMETER_VALUES = {
        'msg_id': 1,
        'loglevel': base_logging.INFO,
        'msg': _LI('The parameter of the storage backend. (config_group: '
                   '%(config_group)s)'),
        'suffix': INFO_SUFFIX
    }
    METHOD_END = {
        'msg_id': 2,
        'loglevel': base_logging.INFO,
        'msg': _LI('%(method)s ended. (config_group: %(config_group)s)'),
        'suffix': INFO_SUFFIX
    }
    DRIVER_READY_FOR_USE = {
        'msg_id': 3,
        'loglevel': base_logging.INFO,
        'msg': _LI('The storage backend can be used. (config_group: '
                   '%(config_group)s)'),
        'suffix': INFO_SUFFIX
    }
    DRIVER_INITIALIZATION_START = {
        'msg_id': 4,
        'loglevel': base_logging.INFO,
        'msg': _LI('Initialization of %(driver)s %(version)s started.'),
        'suffix': INFO_SUFFIX
    }
    SET_CONFIG_VALUE = {
        'msg_id': 5,
        'loglevel': base_logging.INFO,
        'msg': _LI('Set %(object)s to %(value)s.'),
        'suffix': INFO_SUFFIX
    }
    OBJECT_CREATED = {
        'msg_id': 6,
        'loglevel': base_logging.INFO,
        'msg': _LI('Created %(object)s. (%(details)s)'),
        'suffix': INFO_SUFFIX
    }
    INVALID_LDEV_FOR_UNMAPPING = {
        'msg_id': 302,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to specify a logical device for the volume '
                   '%(volume_id)s to be unmapped.'),
        'suffix': WARNING_SUFFIX
    }
    INVALID_LDEV_FOR_DELETION = {
        'msg_id': 304,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to specify a logical device to be deleted. '
                   '(method: %(method)s, id: %(id)s)'),
        'suffix': WARNING_SUFFIX
    }
    DELETE_TARGET_FAILED = {
        'msg_id': 306,
        'loglevel': base_logging.WARNING,
        'msg': _LW('A host group could not be deleted. (port: %(port)s, '
                   'gid: %(id)s)'),
        'suffix': WARNING_SUFFIX
    }
    CREATE_HOST_GROUP_FAILED = {
        'msg_id': 308,
        'loglevel': base_logging.WARNING,
        'msg': _LW('A host group could not be added. (port: %(port)s)'),
        'suffix': WARNING_SUFFIX
    }
    UNMAP_LDEV_FAILED = {
        'msg_id': 310,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to unmap a logical device. (LDEV: %(ldev)s)'),
        'suffix': WARNING_SUFFIX
    }
    DELETE_LDEV_FAILED = {
        'msg_id': 313,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to delete a logical device. (LDEV: %(ldev)s)'),
        'suffix': WARNING_SUFFIX
    }
    MAP_LDEV_FAILED = {
        'msg_id': 314,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to map a logical device. (LDEV: %(ldev)s, port: '
                   '%(port)s, id: %(id)s, lun: %(lun)s)'),
        'suffix': WARNING_SUFFIX
    }
    DISCARD_ZERO_PAGE_FAILED = {
        'msg_id': 315,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to perform a zero-page reclamation. (LDEV: '
                   '%(ldev)s)'),
        'suffix': WARNING_SUFFIX
    }
    ADD_HBA_WWN_FAILED = {
        'msg_id': 317,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, '
                   'wwn: %(wwn)s)'),
        'suffix': WARNING_SUFFIX
    }
    LDEV_NOT_EXIST = {
        'msg_id': 319,
        'loglevel': base_logging.WARNING,
        'msg': _LW('The logical device does not exist in the storage system. '
                   '(LDEV: %(ldev)s)'),
        'suffix': WARNING_SUFFIX
    }
    HORCM_START_FAILED = {
        'msg_id': 320,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to start HORCM. (inst: %(inst)s)'),
        'suffix': WARNING_SUFFIX
    }
    HORCM_RESTART_FOR_SI_FAILED = {
        'msg_id': 322,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to reload the configuration of full copy pair. '
                   '(inst: %(inst)s)'),
        'suffix': WARNING_SUFFIX
    }
    HORCM_LOGIN_FAILED = {
        'msg_id': 323,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to perform user authentication of HORCM. '
                   '(user: %(user)s)'),
        'suffix': WARNING_SUFFIX
    }
    DELETE_SI_PAIR_FAILED = {
        'msg_id': 324,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to delete full copy pair. (P-VOL: %(pvol)s, S-VOL: '
                   '%(svol)s)'),
        'suffix': WARNING_SUFFIX
    }
    DELETE_TI_PAIR_FAILED = {
        'msg_id': 325,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to delete thin copy pair. (P-VOL: %(pvol)s, S-VOL: '
                   '%(svol)s)'),
        'suffix': WARNING_SUFFIX
    }
    WAIT_SI_PAIR_STATUS_FAILED = {
        'msg_id': 326,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to change the status of full copy pair. (P-VOL: '
                   '%(pvol)s, S-VOL: %(svol)s)'),
        'suffix': WARNING_SUFFIX
    }
    DELETE_DEVICE_GRP_FAILED = {
        'msg_id': 327,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to delete the configuration of full copy pair. '
                   '(P-VOL: %(pvol)s, S-VOL: %(svol)s)'),
        'suffix': WARNING_SUFFIX
    }
    DISCONNECT_VOLUME_FAILED = {
        'msg_id': 329,
        'loglevel': base_logging.WARNING,
        'msg': _LW('Failed to detach the logical device. (LDEV: %(ldev)s, '
                   'reason: %(reason)s)'),
        'suffix': WARNING_SUFFIX
    }
    STORAGE_COMMAND_FAILED = {
        'msg_id': 600,
        'loglevel': base_logging.ERROR,
        'msg': _LE('The command %(cmd)s failed. (ret: %(ret)s, stdout: '
                   '%(out)s, stderr: %(err)s)'),
        'suffix': ERROR_SUFFIX
    }
    INVALID_PARAMETER = {
        'msg_id': 601,
        'loglevel': base_logging.ERROR,
        'msg': _LE('A parameter is invalid. (%(param)s)'),
        'suffix': ERROR_SUFFIX
    }
    INVALID_PARAMETER_VALUE = {
        'msg_id': 602,
        'loglevel': base_logging.ERROR,
        'msg': _LE('A parameter value is invalid. (%(meta)s)'),
        'suffix': ERROR_SUFFIX
    }
    HORCM_SHUTDOWN_FAILED = {
        'msg_id': 608,
        'loglevel': base_logging.ERROR,
        'msg': _LE('Failed to shutdown HORCM. (inst: %(inst)s)'),
        'suffix': ERROR_SUFFIX
    }
    HORCM_RESTART_FAILED = {
        'msg_id': 609,
        'loglevel': base_logging.ERROR,
        'msg': _LE('Failed to restart HORCM. (inst: %(inst)s)'),
        'suffix': ERROR_SUFFIX
    }
    SI_PAIR_STATUS_WAIT_TIMEOUT = {
        'msg_id': 610,
        'loglevel': base_logging.ERROR,
        'msg': _LE('The status change of full copy pair could not be '
                   'completed. (S-VOL: %(svol)s)'),
        'suffix': ERROR_SUFFIX
    }
    TI_PAIR_STATUS_WAIT_TIMEOUT = {
        'msg_id': 611,
        'loglevel': base_logging.ERROR,
        'msg': _LE('The status change of thin copy pair could not be '
                   'completed. (S-VOL: %(svol)s)'),
        'suffix': ERROR_SUFFIX
    }
    INVALID_LDEV_STATUS_FOR_COPY = {
        'msg_id': 612,
        'loglevel': base_logging.ERROR,
        'msg': _LE('The source logical device to be replicated does not exist '
                   'in the storage system. (LDEV: %(ldev)s)'),
        'suffix': ERROR_SUFFIX
    }
    INVALID_LDEV_FOR_EXTENSION = {
        'msg_id': 613,
        'loglevel': base_logging.ERROR,
        'msg': _LE('The volume %(volume_id)s to be extended was not found.'),
        'suffix': ERROR_SUFFIX
    }
    NO_HBA_WWN_ADDED_TO_HOST_GRP = {
        'msg_id': 614,
        'loglevel': base_logging.ERROR,
        'msg': _LE('No WWN is assigned. (port: %(port)s, gid: %(gid)s)'),
        'suffix': ERROR_SUFFIX
    }
    NO_AVAILABLE_MIRROR_UNIT = {
        'msg_id': 615,
        'loglevel': base_logging.ERROR,
        'msg': _LE('A pair could not be created. The maximum number of pair '
                   'is exceeded. (copy method: %(copy_method)s, P-VOL: '
|
||||
'%(pvol)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
UNABLE_TO_DELETE_PAIR = {
|
||||
'msg_id': 616,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: '
|
||||
'%(svol)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_VOLUME_SIZE_FOR_COPY = {
|
||||
'msg_id': 617,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to create a volume from a %(type)s. The size of '
|
||||
'the new volume must be equal to or greater than the size '
|
||||
'of the original %(type)s. (new volume: %(volume_id)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_VOLUME_TYPE_FOR_EXTEND = {
|
||||
'msg_id': 618,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('The volume %(volume_id)s could not be extended. The '
|
||||
'volume type must be Normal.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_FOR_CONNECTION = {
|
||||
'msg_id': 619,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('The volume %(volume_id)s to be mapped was not found.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
POOL_INFO_RETRIEVAL_FAILED = {
|
||||
'msg_id': 620,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to provide information about a pool. (pool: '
|
||||
'%(pool)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_VOLUME_SIZE_FOR_TI = {
|
||||
'msg_id': 621,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to create a volume from a %(type)s. The size of '
|
||||
'the new volume must be equal to the size of the original '
|
||||
'%(type)s when the new volume is created by '
|
||||
'%(copy_method)s. (new volume: %(volume_id)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_FOR_VOLUME_COPY = {
|
||||
'msg_id': 624,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('The %(type)s %(id)s source to be replicated was not '
|
||||
'found.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
CREATE_HORCM_CONF_FILE_FAILED = {
|
||||
'msg_id': 632,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to open a file. (file: %(file)s, ret: %(ret)s, '
|
||||
'stderr: %(err)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
CONNECT_VOLUME_FAILED = {
|
||||
'msg_id': 634,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to attach the logical device. (LDEV: %(ldev)s, '
|
||||
'reason: %(reason)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
CREATE_LDEV_FAILED = {
|
||||
'msg_id': 636,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to add the logical device.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
ADD_PAIR_TARGET_FAILED = {
|
||||
'msg_id': 638,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to add the pair target.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
NO_MAPPING_FOR_LDEV = {
|
||||
'msg_id': 639,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to map a logical device to any pair targets. '
|
||||
'(LDEV: %(ldev)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
POOL_NOT_FOUND = {
|
||||
'msg_id': 640,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('A pool could not be found. (pool: %(pool)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
NO_AVAILABLE_RESOURCE = {
|
||||
'msg_id': 648,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('There are no resources available for use. (resource: '
|
||||
'%(resource)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
NO_CONNECTED_TARGET = {
|
||||
'msg_id': 649,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('The host group was not found.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
RESOURCE_NOT_FOUND = {
|
||||
'msg_id': 650,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('The resource %(resource)s was not found.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
LDEV_DELETION_WAIT_TIMEOUT = {
|
||||
'msg_id': 652,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to delete a logical device. (LDEV: %(ldev)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
LDEV_CREATION_WAIT_TIMEOUT = {
|
||||
'msg_id': 653,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('The creation of a logical device could not be completed. '
|
||||
'(LDEV: %(ldev)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_ATTR_FOR_MANAGE = {
|
||||
'msg_id': 702,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV '
|
||||
'must be an unpaired %(ldevtype)s.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_SIZE_FOR_MANAGE = {
|
||||
'msg_id': 703,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV '
|
||||
'size must be expressed in gigabytes.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_PORT_FOR_MANAGE = {
|
||||
'msg_id': 704,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV '
|
||||
'must not be mapped.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_TYPE_FOR_UNMANAGE = {
|
||||
'msg_id': 706,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to unmanage the volume %(volume_id)s. The volume '
|
||||
'type must be %(volume_type)s.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
INVALID_LDEV_FOR_MANAGE = {
|
||||
'msg_id': 707,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('No valid value is specified for "source-id". A valid LDEV '
|
||||
'number must be specified in "source-id" to manage the '
|
||||
'volume.'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}
|
||||
VOLUME_COPY_FAILED = {
|
||||
'msg_id': 722,
|
||||
'loglevel': base_logging.ERROR,
|
||||
'msg': _LE('Failed to copy a volume. (copy method: %(copy_method)s, '
|
||||
'P-VOL: %(pvol)s, S-VOL: %(svol)s)'),
|
||||
'suffix': ERROR_SUFFIX
|
||||
}

    def __init__(self, error_info):
        """Initialize Enum attributes."""
        self.msg_id = error_info['msg_id']
        self.level = error_info['loglevel']
        self.msg = error_info['msg']
        self.suffix = error_info['suffix']

    def output_log(self, **kwargs):
        """Output the message to the log file and return the message."""
        msg = self.msg % kwargs
        LOG.log(self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
                {'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg})
        return msg


def output_log(msg_enum, **kwargs):
    """Output the specified message to the log file and return the message."""
    return msg_enum.output_log(**kwargs)


LOG = logging.getLogger(__name__)
MSG = VSPMsg
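
# Illustrative usage sketch (comments only, not part of the patch): a caller
# formats and logs a catalogued message via the module-level helper, then
# raises the returned text. The parameter value 'vsp_pool' is an assumed
# option name used only for illustration.
#
#     msg = output_log(MSG.INVALID_PARAMETER, param='vsp_pool')
#     raise exception.VSPError(msg)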


def output_start_end_log(func):
    """Output the log of the start and the end of the method."""
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        """Wrap the method to add logging function."""
        def _output_start_end_log(*_args, **_kwargs):
            """Output the log of the start and the end of the method."""
            output_log(MSG.METHOD_START,
                       method=func.__name__,
                       config_group=self.configuration.config_group)
            ret = func(*_args, **_kwargs)
            output_log(MSG.METHOD_END,
                       method=func.__name__,
                       config_group=self.configuration.config_group)
            return ret
        return _output_start_end_log(self, *args, **kwargs)
    return wrap
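
# Illustrative usage sketch (comments only, not part of the patch): the
# decorator brackets a driver method with METHOD_START/METHOD_END messages,
# so the decorated object must expose self.configuration.config_group. The
# class and method names below are assumptions for illustration.
#
#     class VSPCommon(object):
#         @output_start_end_log
#         def create_volume(self, volume):
#             ...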


def get_ldev(obj):
    """Get the LDEV number from the given object and return it as integer."""
    if not obj:
        return None
    ldev = obj.get('provider_location')
    if not ldev or not ldev.isdigit():
        return None
    return int(ldev)
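
# Illustrative sketch (comments only, not part of the patch): the LDEV number
# is read from the object's provider_location; missing or non-numeric values
# yield None.
#
#     get_ldev({'provider_location': '1234'})   # -> 1234
#     get_ldev({'provider_location': 'none'})   # -> None
#     get_ldev(None)                            # -> None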


def check_timeout(start_time, timeout):
    """Return True if the specified time has passed, False otherwise."""
    return timeutils.is_older_than(start_time, timeout)


def mask_password(cmd):
    """Return a string in which the password is masked."""
    if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login':
        tmp = list(cmd)
        tmp[3] = strutils.mask_dict_password({'password': ''}).get('password')
    else:
        tmp = cmd
    return ' '.join([six.text_type(c) for c in tmp])
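
# Illustrative sketch (comments only, not part of the patch): for a
# 'raidcom -login <user> <password>' command the fourth element is replaced
# with the oslo.utils password placeholder before the command line is
# rendered; other commands are joined unchanged. The placeholder value is
# assumed to be the library default.
#
#     mask_password(['raidcom', '-login', 'user', 'secret'])
#     # -> 'raidcom -login user ***'  (placeholder value assumed)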


def execute(*cmd, **kwargs):
    """Run the specified command and return its results."""
    process_input = kwargs.pop('process_input', None)
    run_as_root = kwargs.pop('run_as_root', True)
    ret = 0
    try:
        if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login':
            stdout, stderr = cinder_utils.execute(
                *cmd, process_input=process_input, run_as_root=run_as_root,
                loglevel=base_logging.NOTSET)[:2]
        else:
            stdout, stderr = cinder_utils.execute(
                *cmd, process_input=process_input, run_as_root=run_as_root)[:2]
    except putils.ProcessExecutionError as ex:
        ret = ex.exit_code
        stdout = ex.stdout
        stderr = ex.stderr
    LOG.debug('cmd: %s', mask_password(cmd))
    LOG.debug('from: %s', inspect.stack()[2])
    LOG.debug('ret: %s', ret)
    LOG.debug('stdout: %s', ' '.join(stdout.splitlines()))
    LOG.debug('stderr: %s', ' '.join(stderr.splitlines()))
    return ret, stdout, stderr
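
# Illustrative sketch (comments only, not part of the patch): unlike
# cinder.utils.execute, this wrapper does not raise on a non-zero exit
# status; callers inspect the returned (ret, stdout, stderr) tuple instead.
# The raidcom command below is an assumption used only for illustration.
#
#     ret, stdout, stderr = execute('raidcom', 'get', 'ldev', '-ldev_id', '1')
#     if ret:
#         LOG.debug('raidcom failed: %s', stderr)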


def import_object(conf, driver_info, db):
    """Import a class and return an instance of it."""
    os.environ['LANG'] = 'C'
    cli = _DRIVERS.get('HORCM')
    return importutils.import_object(
        '.'.join([_DRIVER_DIR, cli[driver_info['proto']]]),
        conf, driver_info, db)


def check_ignore_error(ignore_error, stderr):
    """Return True if ignore_error is in stderr, False otherwise."""
    if not ignore_error or not stderr:
        return False
    if not isinstance(ignore_error, six.string_types):
        ignore_error = '|'.join(ignore_error)

    if re.search(ignore_error, stderr):
        return True
    return False
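
# Illustrative sketch (comments only, not part of the patch): a list of
# error strings is joined into a single regex alternation before the search,
# so any one of them appearing in stderr is enough to ignore the failure.
# The codes below are placeholders, not real raidcom error identifiers.
#
#     check_ignore_error(['SSB=0x2E20', 'SSB=0x2E22'], stderr)
#     check_ignore_error('already defined', stderr)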


def check_opts(conf, opts):
    """Check if the specified configuration is valid."""
    names = []
    for opt in opts:
        names.append(opt.name)
    check_opt_value(conf, names)


def check_opt_value(conf, names):
    """Check if the parameter names and values are valid."""
    for name in names:
        try:
            getattr(conf, name)
        except (cfg.NoSuchOptError, cfg.ConfigFileValueError):
            with excutils.save_and_reraise_exception():
                output_log(MSG.INVALID_PARAMETER, param=name)


def output_storage_cli_info(name, version):
    """Output storage CLI info to the log file."""
    LOG.info(_LI('\t%(name)-35s%(version)s'),
             {'name': name + ' version: ', 'version': version})


def output_opt_info(conf, names):
    """Output parameter names and values to the log file."""
    for name in names:
        LOG.info(_LI('\t%(name)-35s%(attr)s'),
                 {'name': name + ': ', 'attr': getattr(conf, name)})


def output_opts(conf, opts):
    """Output parameter names and values to the log file."""
    names = [opt.name for opt in opts if not opt.secret]
    output_opt_info(conf, names)


def require_target_existed(targets):
    """Check if the target list includes one or more members."""
    if not targets['list']:
        msg = output_log(MSG.NO_CONNECTED_TARGET)
        raise exception.VSPError(msg)


def get_volume_metadata(volume):
    """Return a dictionary of the metadata of the specified volume."""
    volume_metadata = volume.get('volume_metadata', {})
    return {item['key']: item['value'] for item in volume_metadata}
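
# Illustrative sketch (comments only, not part of the patch): the list of
# key/value metadata items attached to a Cinder volume is flattened into a
# plain dict; the metadata key and value shown are assumptions.
#
#     get_volume_metadata(
#         {'volume_metadata': [{'key': 'type', 'value': 'Normal'}]})
#     # -> {'type': 'Normal'}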


def update_conn_info(conn_info, connector, lookup_service):
    """Set wwn mapping list to the connection info."""
    init_targ_map = build_initiator_target_map(
        connector, conn_info['data']['target_wwn'], lookup_service)
    if init_targ_map:
        conn_info['data']['initiator_target_map'] = init_targ_map


def build_initiator_target_map(connector, target_wwns, lookup_service):
    """Return a dictionary mapping server-wwns and lists of storage-wwns."""
    init_targ_map = {}
    initiator_wwns = connector['wwpns']
    if lookup_service:
        dev_map = lookup_service.get_device_mapping_from_network(
            initiator_wwns, target_wwns)
        for fabric_name in dev_map:
            fabric = dev_map[fabric_name]
            for initiator in fabric['initiator_port_wwn_list']:
                init_targ_map[initiator] = fabric['target_port_wwn_list']
    else:
        for initiator in initiator_wwns:
            init_targ_map[initiator] = target_wwns
    return init_targ_map
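
# Illustrative sketch (comments only, not part of the patch): without a
# fabric lookup service every initiator WWPN is simply mapped to the full
# list of target WWNs; the WWPN values below are made up for illustration.
#
#     build_initiator_target_map(
#         {'wwpns': ['100000051e55a100']}, ['50060e8007274a00'], None)
#     # -> {'100000051e55a100': ['50060e8007274a00']}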
@ -0,0 +1,4 @@
---
features:
  - Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP
    Family and HUSVM.