diff --git a/cinder/exception.py b/cinder/exception.py index 05b61994967..39d26ddd951 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -1097,6 +1097,19 @@ class HBSDVolumeIsBusy(VolumeIsBusy): message = _("Volume %(volume_name)s is busy.") +# Hitachi VSP Driver +class VSPError(VolumeDriverException): + message = _("VSP error occurred. %(message)s") + + +class VSPBusy(VSPError): + message = _("Device or resource is busy.") + + +class VSPNotSupported(VSPError): + message = _("The function on the storage is not supported.") + + # Datera driver class DateraAPIException(VolumeBackendAPIException): message = _("Bad response from Datera API") diff --git a/cinder/opts.py b/cinder/opts.py index 6fa4f172327..083ae453896 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -110,6 +110,12 @@ from cinder.volume.drivers.hitachi import hnas_nfs as \ cinder_volume_drivers_hitachi_hnasnfs from cinder.volume.drivers.hitachi import hnas_utils as \ cinder_volume_drivers_hitachi_hnasutils +from cinder.volume.drivers.hitachi import vsp_common as \ + cinder_volume_drivers_hitachi_vspcommon +from cinder.volume.drivers.hitachi import vsp_fc as \ + cinder_volume_drivers_hitachi_vspfc +from cinder.volume.drivers.hitachi import vsp_horcm as \ + cinder_volume_drivers_hitachi_vsphorcm from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ @@ -285,6 +291,9 @@ def list_opts(): cinder_volume_drivers_hitachi_hnasiscsi.iSCSI_OPTS, cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS, cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts, + cinder_volume_drivers_hitachi_vspcommon.common_opts, + cinder_volume_drivers_hitachi_vspfc.fc_opts, + cinder_volume_drivers_hitachi_vsphorcm.horcm_opts, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts, cinder_volume_drivers_hpe_hpexpopts.FC_VOLUME_OPTS, diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py new file mode 100644 index 00000000000..7b76fc5e2f5 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py @@ -0,0 +1,1664 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
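For orientation, here is a minimal sketch of how driver code is expected to raise the exception classes added above; the _check_pair_status helper and its status strings are illustrative assumptions, not part of this patch. Cinder's exception base class substitutes a plain string argument for the class's message template, so both forms below are valid:

    from cinder import exception

    def _check_pair_status(status):
        # Hypothetical helper: a busy pair maps to VSPBusy so callers can
        # retry, while any other unexpected status becomes a generic
        # VSPError carrying a human-readable description.
        if status == 'PAIR':
            raise exception.VSPBusy()
        if status not in ('SMPL', 'PSUS'):
            raise exception.VSPError('unexpected pair status: %s' % status)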
+# +"""Unit tests for Hitachi VSP Driver.""" + +import copy +import os + +import mock +from os_brick.initiator import connector as brick_connector +from oslo_concurrency import processutils +from oslo_config import cfg +from six.moves import range + +from cinder import context as cinder_context +from cinder import db +from cinder.db.sqlalchemy import api as sqlalchemy_api +from cinder import exception +from cinder.objects import snapshot as obj_snap +from cinder import test +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume import driver +from cinder.volume.drivers.hitachi import vsp_fc +from cinder.volume.drivers.hitachi import vsp_horcm +from cinder.volume.drivers.hitachi import vsp_utils +from cinder.volume import utils as volume_utils + +# Dummy return values +SUCCEED = 0 +STDOUT = "" +STDERR = "" +CMD_SUCCEED = (SUCCEED, STDOUT, STDERR) + +# Configuration parameter values +CONFIG_MAP = { + 'serial': '492015', + 'my_ip': '127.0.0.1', +} + +# CCI instance numbers +INST_NUMS = (200, 201) + +# ShadowImage copy group names +CG_MAP = {'cg%s' % x: vsp_horcm._COPY_GROUP % ( + CONFIG_MAP['my_ip'], CONFIG_MAP['serial'], INST_NUMS[1], x) + for x in range(3) +} + +# Map containing all maps for dummy response creation +DUMMY_RESPONSE_MAP = CONFIG_MAP.copy() +DUMMY_RESPONSE_MAP.update(CG_MAP) + +# Dummy response for FC zoning device mapping +DEVICE_MAP = { + 'fabric_name': { + 'initiator_port_wwn_list': ['123456789abcdee', '123456789abcdef'], + 'target_port_wwn_list': ['111111112345678']}} + +# cmd: raidcom get copy_grp +GET_COPY_GRP_RESULT = ( + "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" + "%(cg0)s %(cg0)sP 0 - %(serial)s\n" + "%(cg1)s %(cg1)sP 0 - %(serial)s\n" + "%(cg1)s %(cg1)sS - - %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get copy_grp +GET_COPY_GRP_RESULT2 = "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" + +# cmd: raidcom get copy_grp +GET_COPY_GRP_RESULT3 = ( + "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" + "%(cg0)s %(cg0)sP 0 - %(serial)s\n" + "%(cg0)s %(cg0)sS 0 - %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91P +GET_DEVICE_GRP_MU1P_RESULT = ( + "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" + "%(cg1)sP VSP-LDEV-0-2 0 %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91S +GET_DEVICE_GRP_MU1S_RESULT = ( + "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" + "%(cg1)sS VSP-LDEV-0-2 2 %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get hba_wwn -port CL1-A HBSD-0123456789abcdef +GET_HBA_WWN_CL1A_HOSTGRP_RESULT = ( + "PORT GID GROUP_NAME HWWN Serial# NICK_NAME\n" + "CL1-A 0 HBSD-0123456789abcdef 0123456789abcdef %(serial)s -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get dp_pool +GET_DP_POOL_RESULT = ( + "PID POLS U(%) AV_CAP(MB) TP_CAP(MB) W(%) H(%) Num LDEV# LCNT " + "TL_CAP(MB) BM TR_CAP(MB) RCNT\n" + "030 POLN 0 6006 6006 75 80 1 14860 32 167477 NB 0 0\n" +) + +# cmd: raidcom get dp_pool +GET_DP_POOL_ERROR_RESULT = ( + "PID POLS U(%) POOL_NAME Seq# Num LDEV# H(%) VCAP(%) TYPE PM PT\n" +) + +# cmd: raidcom get pool -key opt +GET_POOL_KEYOPT_RESULT = ( + "PID POLS U(%%) POOL_NAME Seq# Num LDEV# H(%%) VCAP(%%) TYPE PM PT\n" + "030 POLM 30 VSPPOOL %(serial)s 1 10000 80 - OPEN N HDP\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get hba_wwn -port CL1-B-0 +GET_HBA_WWN_CL1B0_RESULT = ( + "PORT GID GROUP_NAME HWWN Serial# NICK_NAME\n" + "CL1-B 0 
HBSD-0123456789abcdef 0123456789abcdef %(serial)s -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get host_grp -port CL1-A +GET_HOST_GRP_CL1A_RESULT = ( + "PORT GID GROUP_NAME Serial# HMD HMO_BITs\n" + "CL1-A 0 HBSD-0123456789abcdef %(serial)s LINUX/IRIX 91\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get host_grp -port CL1-B +GET_HOST_GRP_CL1B_RESULT = ( + "PORT GID GROUP_NAME Serial# HMD HMO_BITs\n" + "CL1-B 0 HBSD-0123456789abcdef %(serial)s LINUX/IRIX 91\n" +) % DUMMY_RESPONSE_MAP + +# raidcom add host_grp -port CLx-y -host_grp_name HBSD-0123456789abcdef +ADD_HOSTGRP_RESULT = "raidcom: Host group ID 0(0x0) will be used for adding.\n" + +# raidcom add host_grp -port CLx-y -host_grp_name HBSD-pair00 +ADD_HOSTGRP_PAIR_RESULT = ( + "raidcom: Host group ID 2(0x2) will be used for adding.\n" +) + +# raidcom add lun -port CL1-A-0 -ldev_id x +ADD_LUN_LUN0_RESULT = "raidcom: LUN 0(0x0) will be used for adding.\n" + +# cmd: raidcom get ldev -ldev_list undefined -cnt 1 +GET_LDEV_LDEV_LIST_UNDEFINED = ( + "LDEV : 1 VIR_LDEV : 65534\n" + "VOL_TYPE : NOT DEFINED\n" +) + +# cmd: raidcom get ldev -ldev_id 0 -cnt 2 -key front_end (LDEV) +GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2 = ( + " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" + " Ports PORT_No:LU#:GRPNAME\n" + " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 1 - - NOT DEFINED - - - -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get ldev -ldev_id 0 -cnt 10 -key front_end (LDEV) +GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT = ( + " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" + " Ports PORT_No:LU#:GRPNAME\n" + " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 1 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 2 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 3 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 4 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 5 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 6 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 7 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 8 - - NOT DEFINED - - - -\n" + " %(serial)s 9 - - NOT DEFINED - - - -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get ldev -ldev_id x -check_status NOT DEFINED +GET_LDEV_CHECKSTATUS_ERR = ( + "raidcom: testing condition has failed with exit(1).\n" +) + +# cmd: raidcom get ldev -ldev_id 0 +GET_LDEV_LDEV0_RESULT = """ +LDEV : 0 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 1 +GET_LDEV_LDEV1_RESULT = """ +LDEV : 1 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 268435456 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 3 +GET_LDEV_LDEV3_RESULT = """ +LDEV : 3 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : +""" + +# cmd: raidcom get ldev -ldev_id 4 +GET_LDEV_LDEV4_RESULT = """ +LDEV : 4 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : QS : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 5 +GET_LDEV_LDEV5_RESULT = """ +LDEV : 5 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP : VVOL +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 6 +GET_LDEV_LDEV6_RESULT = """ +LDEV : 6 +VOL_TYPE : OPEN-V-CVS +PORTs : CL1-A-0 0 HBSD-0123456789abcdef +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 7 +GET_LDEV_LDEV7_RESULT = """ +LDEV : 7 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : QS : HDP : HDT 
+VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 10 +GET_LDEV_LDEV10_RESULT = """ +LDEV : 10 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : MRCF : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 11 +GET_LDEV_LDEV11_RESULT = """ +LDEV : 11 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : QS : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 12 +GET_LDEV_LDEV12_RESULT = """ +LDEV : 12 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : MRCF : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 13 +GET_LDEV_LDEV13_RESULT = """ +LDEV : 13 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : MRCF : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : BLK +""" + +# cmd: raidcom get ldev -ldev_id 14 +GET_LDEV_LDEV14_RESULT = """ +LDEV : 14 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP : HDT +VOL_Capacity(BLK) : 9999999 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get lun -port CL1-A-0 +GET_LUN_CL1A0_RESULT = ( + "PORT GID HMD LUN NUM LDEV CM Serial# HMO_BITs\n" + "CL1-A 0 LINUX/IRIX 4 1 4 - %(serial)s\n" + "CL1-A 0 LINUX/IRIX 254 1 5 - %(serial)s\n" + "CL1-A 0 LINUX/IRIX 255 1 6 - %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get port +GET_PORT_RESULT = ( + "PORT TYPE ATTR SPD LPID FAB CONN SSW SL Serial# WWN PHY_PORT\n" + "CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" + "CL1-B FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" + "CL3-A FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" + "CL3-B FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 4 +GET_SNAPSHOT_LDEV4_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 4 3 8 31 100 ---- 57db5cb0\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 4 4 9 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 7 +GET_SNAPSHOT_LDEV7_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 7 3 8 31 100 ---- 57db5cb0\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 7 4 9 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 8 +GET_SNAPSHOT_LDEV8_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 S-VOL SSUS %(serial)s 8 3 7 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 11 +GET_SNAPSHOT_LDEV11_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 S-VOL SSUS %(serial)s 11 3 7 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: pairdisplay -CLI -d 492015 1 0 -IM201 +PAIRDISPLAY_LDEV0_1_RESULT = ( + "Group PairVol L/R Port# TID LU-M Seq# LDEV# " + "P/S Status Seq# P-LDEV# M\n" + "%(cg0)s VSP-LDEV-0-1 L CL1-A-0 0 0 0 %(serial)s 0 " + "P-VOL PSUS %(serial)s 1 W\n" + "%(cg0)s VSP-LDEV-0-1 R CL1-A-0 0 1 0 %(serial)s 1 " + "S-VOL SSUS - 0 -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: pairdisplay -CLI -d 492015 10 0 -IM201 +PAIRDISPLAY_LDEV7_10_RESULT = ( + "Group PairVol L/R Port# TID LU-M Seq# LDEV# " + "P/S Status Seq# P-LDEV# M\n" + "%(cg0)s VSP-LDEV-7-10 L CL1-A-1 0 0 0 %(serial)s 7 " + "P-VOL PSUS %(serial)s 10 W\n" + "%(cg0)s VSP-LDEV-7-10 R CL1-A-1 0 1 0 %(serial)s 10 " + "S-VOL SSUS - 7 -\n" +) % DUMMY_RESPONSE_MAP + +# 
cmd: pairdisplay -CLI -d 492015 12 0 -IM201 +PAIRDISPLAY_LDEV7_12_RESULT = ( + "Group PairVol L/R Port# TID LU-M Seq# LDEV# " + "P/S Status Seq# P-LDEV# M\n" + "%(cg0)s VSP-LDEV-7-12 L CL1-A-1 0 0 0 %(serial)s 7 " + "P-VOL PSUS %(serial)s 12 W\n" + "%(cg0)s VSP-LDEV-7-12 R CL1-A-1 0 1 0 %(serial)s 12 " + "S-VOL SSUS - 7 -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidqry -h +RAIDQRY_RESULT = ( + "Model : RAID-Manager/Linux/x64\n" + "Ver&Rev: 01-39-03/03\n" + "Usage : raidqry [options] for HORC[200]\n" + " -h Help/Usage\n" + " -I[#] Set to HORCMINST#\n" + " -IH[#] or -ITC[#] Set to HORC mode [and HORCMINST#]\n" + " -IM[#] or -ISI[#] Set to MRCF mode [and HORCMINST#]\n" + " -z Set to the interactive mode\n" + " -zx Set to the interactive mode and HORCM monitoring\n" + " -q Quit(Return to main())\n" + " -g Specify for getting all group name on local\n" + " -l Specify the local query\n" + " -lm Specify the local query with full micro version\n" + " -r Specify the remote query\n" + " -f Specify display for floatable host\n" +) + +EXECUTE_TABLE = { + ('add', 'hba_wwn', '-port', 'CL3-A-0', '-hba_wwn', '0123456789abcdef'): ( + vsp_horcm.EX_INVARG, STDOUT, STDERR), + ('add', 'host_grp', '-port', 'CL1-A', '-host_grp_name', + 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL1-B', '-host_grp_name', + 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL3-A', '-host_grp_name', + 'HBSD-0123456789abcdef'): (SUCCEED, ADD_HOSTGRP_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', + 'HBSD-0123456789abcdef'): (SUCCEED, ADD_HOSTGRP_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', + 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 0): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 1): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 5): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 6): ( + vsp_horcm.EX_CMDRJE, STDOUT, vsp_horcm._LU_PATH_DEFINED), + ('add', 'lun', '-port', 'CL1-B-0', '-ldev_id', 0, '-lun_id', 0): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('extend', 'ldev', '-ldev_id', 3, '-capacity', '128G'): ( + vsp_horcm.EX_CMDIOE, STDOUT, + "raidcom: [EX_CMDIOE] Control command I/O error"), + ('get', 'hba_wwn', '-port', 'CL1-A', 'HBSD-0123456789abcdef'): ( + SUCCEED, GET_HBA_WWN_CL1A_HOSTGRP_RESULT, STDERR), + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT, STDERR), + ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'P'): ( + SUCCEED, GET_DEVICE_GRP_MU1P_RESULT, STDERR), + ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'S'): ( + SUCCEED, GET_DEVICE_GRP_MU1S_RESULT, STDERR), + ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_RESULT, STDERR), + ('get', 'pool', '-key', 'opt'): (SUCCEED, GET_POOL_KEYOPT_RESULT, STDERR), + ('get', 'hba_wwn', '-port', 'CL1-B-0'): ( + SUCCEED, GET_HBA_WWN_CL1B0_RESULT, STDERR), + ('get', 'host_grp', '-port', 'CL1-A'): ( + SUCCEED, GET_HOST_GRP_CL1A_RESULT, STDERR), + ('get', 'host_grp', '-port', 'CL1-B'): ( + SUCCEED, GET_HOST_GRP_CL1B_RESULT, STDERR), + ('get', 'ldev', '-ldev_list', 'undefined', '-cnt', '1'): ( + SUCCEED, GET_LDEV_LDEV_LIST_UNDEFINED, STDERR), + ('get', 'ldev', '-ldev_id', 0, '-cnt', 2, '-key', 'front_end'): ( + SUCCEED, GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2, STDERR), + ('get', 'ldev', '-ldev_id', 0, '-cnt', 10, '-key', 
'front_end'): ( + SUCCEED, GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 0, '-check_status', 'NOT', 'DEFINED'): ( + 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), + ('get', 'ldev', '-ldev_id', 0): (SUCCEED, GET_LDEV_LDEV0_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 1): (SUCCEED, GET_LDEV_LDEV1_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 3): (SUCCEED, GET_LDEV_LDEV3_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 4): (SUCCEED, GET_LDEV_LDEV4_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 5): (SUCCEED, GET_LDEV_LDEV5_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 6): (SUCCEED, GET_LDEV_LDEV6_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 7): (SUCCEED, GET_LDEV_LDEV7_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 10): (SUCCEED, GET_LDEV_LDEV10_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 11): (SUCCEED, GET_LDEV_LDEV11_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 12): (SUCCEED, GET_LDEV_LDEV12_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 13): (SUCCEED, GET_LDEV_LDEV13_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 14): (SUCCEED, GET_LDEV_LDEV14_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 15): (vsp_horcm.EX_COMERR, "", STDERR), + ('get', 'lun', '-port', 'CL1-A-0'): ( + SUCCEED, GET_LUN_CL1A0_RESULT, STDERR), + ('get', 'port'): (SUCCEED, GET_PORT_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 4): ( + SUCCEED, GET_SNAPSHOT_LDEV4_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 7): ( + SUCCEED, GET_SNAPSHOT_LDEV7_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 8): ( + SUCCEED, GET_SNAPSHOT_LDEV8_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 11): ( + SUCCEED, GET_SNAPSHOT_LDEV11_RESULT, STDERR), + ('modify', 'ldev', '-ldev_id', 3, '-status', 'discard_zero_page'): ( + vsp_horcm.EX_CMDIOE, STDOUT, STDERR), + ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 10, 0, + '-IM%s' % INST_NUMS[1]): ( + SUCCEED, PAIRDISPLAY_LDEV7_10_RESULT, STDERR), + ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 12, 0, + '-IM%s' % INST_NUMS[1]): ( + SUCCEED, PAIRDISPLAY_LDEV7_12_RESULT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 8, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 10, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 12, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), + ('raidqry', '-h'): (SUCCEED, RAIDQRY_RESULT, STDERR), + ('tee', '/etc/horcm501.conf'): (1, STDOUT, STDERR), + ('-login', 'user', 'pasword'): (SUCCEED, STDOUT, STDERR), + ('-login', 'userX', 'paswordX'): (vsp_horcm.EX_ENAUTH, STDOUT, STDERR), + ('-login', 'userY', 'paswordY'): (vsp_horcm.EX_COMERR, STDOUT, STDERR), +} + +EXECUTE_TABLE2 = EXECUTE_TABLE.copy() +EXECUTE_TABLE2.update({ + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT2, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUS, STDOUT, STDERR), +}) + +EXECUTE_TABLE3 = EXECUTE_TABLE2.copy() + +EXECUTE_TABLE4 = EXECUTE_TABLE.copy() +EXECUTE_TABLE4.update({ + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), +}) + +EXECUTE_TABLE5 = EXECUTE_TABLE.copy() +EXECUTE_TABLE5.update({ + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, 
STDERR), + ('get', 'ldev', '-ldev_id', 1, '-check_status', 'NOT', 'DEFINED'): ( + 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), + ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 1, 0, + '-IM%s' % INST_NUMS[1]): ( + SUCCEED, PAIRDISPLAY_LDEV0_1_RESULT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), +}) + +ERROR_EXECUTE_TABLE = { + ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_ERROR_RESULT, STDERR), +} + +DEFAULT_CONNECTOR = { + 'host': 'host', + 'ip': CONFIG_MAP['my_ip'], + 'wwpns': ['0123456789abcdef'], + 'multipath': False, +} + +CTXT = cinder_context.get_admin_context() + +TEST_VOLUME = [] +for i in range(14): + volume = {} + volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) + volume['name'] = 'test-volume{0:d}'.format(i) + volume['provider_location'] = None if i == 2 else '{0:d}'.format(i) + volume['size'] = 256 if i == 1 else 128 + if i == 2: + volume['status'] = 'creating' + elif i == 5: + volume['status'] = 'in-use' + else: + volume['status'] = 'available' + volume = fake_volume.fake_volume_obj(CTXT, **volume) + TEST_VOLUME.append(volume) + + +def _volume_get(context, volume_id): + """Return predefined volume info.""" + return TEST_VOLUME[int(volume_id.replace("-", ""))] + +TEST_SNAPSHOT = [] +for i in range(8): + snapshot = {} + snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) + snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) + snapshot['provider_location'] = None if i == 2 else '{0:d}'.format( + i if i < 5 else i + 5) + snapshot['size'] = 256 if i == 1 else 128 + snapshot['status'] = 'creating' if i == 2 else 'available' + snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format( + i if i < 5 else 7) + snapshot['volume'] = _volume_get(None, snapshot['volume_id']) + snapshot['volume_name'] = 'test-volume{0:d}'.format(i if i < 5 else 7) + snapshot['volume_size'] = 256 if i == 1 else 128 + snapshot = obj_snap.Snapshot._from_db_object( + CTXT, obj_snap.Snapshot(), + fake_snapshot.fake_db_snapshot(**snapshot)) + TEST_SNAPSHOT.append(snapshot) + +# Flags that determine _fake_run_horcmstart() return values +run_horcmstart_returns_error = False +run_horcmstart_returns_error2 = False +run_horcmstart3_cnt = 0 + + +def _access(*args, **kargs): + """Assume access to the path is allowed.""" + return True + + +def _execute(*args, **kargs): + """Return predefined results for command execution.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) + return result + + +def _execute2(*args, **kargs): + """Return predefined results based on EXECUTE_TABLE2.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE2.get(cmd, CMD_SUCCEED) + return result + + +def _execute3(*args, **kargs): + """Change pairevtwait's dummy return value after it is called.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE3.get(cmd, CMD_SUCCEED) + if cmd == ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): + EXECUTE_TABLE3.update({ + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), + }) + return result + + +def _execute4(*args, **kargs): + """Return predefined results based on EXECUTE_TABLE4.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE4.get(cmd, CMD_SUCCEED) + return result + + +def _execute5(*args, **kargs): + """Return predefined results based on EXECUTE_TABLE5.""" 
+ cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) + return result + + +def _cinder_execute(*args, **kargs): + """Return predefined results or raise an exception.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + ret, stdout, stderr = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) + if ret == SUCCEED: + return stdout, stderr + else: + pee = processutils.ProcessExecutionError(exit_code=ret, + stdout=stdout, + stderr=stderr) + raise pee + + +def _error_execute(*args, **kargs): + """Return predefined error results.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = _execute(*args, **kargs) + ret = ERROR_EXECUTE_TABLE.get(cmd) + return ret if ret else result + + +def _brick_get_connector_properties(multipath=False, enforce_multipath=False): + """Return a predefined connector object.""" + return DEFAULT_CONNECTOR + + +def _brick_get_connector_properties_error(multipath=False, + enforce_multipath=False): + """Return an incomplete connector object.""" + connector = dict(DEFAULT_CONNECTOR) + del connector['wwpns'] + return connector + + +def _connect_volume(*args, **kwargs): + """Return predefined volume info.""" + return {'path': u'/dev/disk/by-path/xxxx', 'type': 'block'} + + +def _disconnect_volume(*args, **kwargs): + """Return without doing anything.""" + pass + + +def _copy_volume(*args, **kwargs): + """Return without doing anything.""" + pass + + +def _volume_admin_metadata_get(context, volume_id): + """Return dummy admin metadata.""" + return {'fake_key': 'fake_value'} + + +def _snapshot_metadata_update(context, snapshot_id, metadata, delete): + """Return without doing anything.""" + pass + + +def _fake_is_smpl(*args): + """Assume the ShadowImage pair status is SMPL.""" + return True + + +def _fake_run_horcmgr(*args): + """Assume CCI is running.""" + return vsp_horcm._HORCM_RUNNING + + +def _fake_run_horcmstart(*args): + """Return a value based on a flag value.""" + return 0 if not run_horcmstart_returns_error else 3 + + +def _fake_run_horcmstart2(*args): + """Return a value based on a flag value.""" + return 0 if not run_horcmstart_returns_error2 else 3 + + +def _fake_run_horcmstart3(*args): + """Update a counter and return a value based on it.""" + global run_horcmstart3_cnt + run_horcmstart3_cnt = run_horcmstart3_cnt + 1 + return 0 if run_horcmstart3_cnt <= 1 else 3 + + +def _fake_check_ldev_status(*args, **kwargs): + """Assume LDEV status has changed as desired.""" + return None + + +def _fake_exists(path): + """Assume the path does not exist.""" + return False + + +class FakeLookupService(object): + """Dummy FC zoning mapping lookup service class.""" + + def get_device_mapping_from_network(self, initiator_wwns, target_wwns): + """Return predefined FC zoning mapping.""" + return DEVICE_MAP + + +class VSPHORCMFCDriverTest(test.TestCase): + """Unit test class for VSP HORCM interface fibre channel module.""" + + test_existing_ref = {'source-id': '0'} + test_existing_none_ldev_ref = {'source-id': '2'} + test_existing_invalid_ldev_ref = {'source-id': 'AAA'} + test_existing_value_error_ref = {'source-id': 'XX:XX:XX'} + test_existing_no_ldev_ref = {} + test_existing_invalid_sts_ldev = {'source-id': '13'} + test_existing_invalid_vol_attr = {'source-id': '12'} + test_existing_invalid_size = {'source-id': '14'} + test_existing_invalid_port_cnt = {'source-id': '6'} + test_existing_failed_to_start_horcmgr = {'source-id': '15'} + + def setUp(self): + """Set up the test environment.""" + super(VSPHORCMFCDriverTest, self).setUp() + + 
self.configuration = mock.Mock(conf.Configuration) + self.ctxt = cinder_context.get_admin_context() + self._setup_config() + self._setup_driver() + + def _setup_config(self): + """Set configuration parameter values.""" + self.configuration.config_group = "HORCM" + + self.configuration.volume_backend_name = "HORCMFC" + self.configuration.volume_driver = ( + "cinder.volume.drivers.hitachi.vsp_fc.VSPFCDriver") + self.configuration.reserved_percentage = "0" + self.configuration.use_multipath_for_image_xfer = False + self.configuration.enforce_multipath_for_image_xfer = False + self.configuration.num_volume_device_scan_tries = 3 + self.configuration.volume_dd_blocksize = "1000" + + self.configuration.vsp_storage_id = CONFIG_MAP['serial'] + self.configuration.vsp_pool = "30" + self.configuration.vsp_thin_pool = None + self.configuration.vsp_ldev_range = "0-1" + self.configuration.vsp_default_copy_method = 'FULL' + self.configuration.vsp_copy_speed = 3 + self.configuration.vsp_copy_check_interval = 1 + self.configuration.vsp_async_copy_check_interval = 1 + self.configuration.vsp_target_ports = "CL1-A" + self.configuration.vsp_group_request = True + + self.configuration.vsp_zoning_request = False + + self.configuration.vsp_horcm_numbers = INST_NUMS + self.configuration.vsp_horcm_user = "user" + self.configuration.vsp_horcm_password = "pasword" + self.configuration.vsp_horcm_add_conf = False + + self.configuration.safe_get = self._fake_safe_get + + CONF = cfg.CONF + CONF.my_ip = CONFIG_MAP['my_ip'] + + def _fake_safe_get(self, value): + """Retrieve a configuration value avoiding throwing an exception.""" + try: + val = getattr(self.configuration, value) + except AttributeError: + val = None + return val + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def _setup_driver(self, *args): + """Set up the driver environment.""" + self.driver = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self.driver.do_setup(None) + self.driver.check_for_setup_error() + self.driver.create_export(None, None, None) + self.driver.ensure_export(None, None) + self.driver.remove_export(None, None) + + # API test cases + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) + def test_do_setup(self, *args): + """Normal case: The host group exists beforehand.""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + + drv.do_setup(None) + self.assertEqual( + {'CL1-A': '0123456789abcdef'}, + drv.common.storage_info['wwns']) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_raidqry_h_invalid(self, *args): + """Error case: 'raidqry -h' returns nothing. 
This error is ignored."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+
+        raidqry_h_original = EXECUTE_TABLE[('raidqry', '-h')]
+        EXECUTE_TABLE[('raidqry', '-h')] = (SUCCEED, "", STDERR)
+        drv.do_setup(None)
+        self.assertEqual(
+            {'CL1-A': '0123456789abcdef'},
+            drv.common.storage_info['wwns'])
+        EXECUTE_TABLE[('raidqry', '-h')] = raidqry_h_original
+
+    @mock.patch.object(
+        utils, 'brick_get_connector_properties',
+        side_effect=_brick_get_connector_properties)
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_specify_pool_name(self, *args):
+        """Normal case: Specify pool name rather than pool number."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_pool = "VSPPOOL"
+
+        drv.do_setup(None)
+
+    @mock.patch.object(
+        utils, 'brick_get_connector_properties',
+        side_effect=_brick_get_connector_properties)
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_create_hostgrp(self, *args):
+        """Normal case: The host group does not exist beforehand."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_target_ports = "CL3-B"
+
+        drv.do_setup(None)
+
+    @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5)
+    @mock.patch.object(
+        utils, 'brick_get_connector_properties',
+        side_effect=_brick_get_connector_properties)
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_create_hostgrp_error(self, *args):
+        """Error case: 'add hba_wwn' fails(MSGID0614-E)."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_target_ports = "CL3-A"
+
+        self.assertRaises(exception.VSPError, drv.do_setup, None)
+
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_thin_pool_not_specified(self, *args):
+        """Error case: Parameter error(vsp_thin_pool).(MSGID0601-E)."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_default_copy_method = 'THIN'
+
+        self.assertRaises(exception.VSPError, drv.do_setup, None)
+
+    @mock.patch.object(
+        utils, 'brick_get_connector_properties',
+        side_effect=_brick_get_connector_properties)
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_ldev_range_not_specified(self, *args):
+        """Normal case: The LDEV range is not specified."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_ldev_range = None
+
+        drv.do_setup(None)
+
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_storage_id_not_specified(self, *args):
+        """Error case: Parameter error(vsp_storage_id).(MSGID0601-E)."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_storage_id = None
+
+        self.assertRaises(exception.VSPError, drv.do_setup, None)
+
+    @mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
+    def test_do_setup_horcm_numbers_invalid(self, *args):
+        """Error case: Parameter error(vsp_horcm_numbers).(MSGID0601-E)."""
+        drv = vsp_fc.VSPFCDriver(
+            configuration=self.configuration, db=db)
+        self._setup_config()
+        self.configuration.vsp_horcm_numbers = (200, 200)
+
+        self.assertRaises(exception.VSPError, drv.do_setup, None)
+
+    @mock.patch.object(vsp_utils, 'execute',
side_effect=_execute) + def test_do_setup_horcm_user_not_specified(self, *args): + """Error case: Parameter error(vsp_horcm_user).(MSGID0601-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_user = None + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(processutils, 'execute', side_effect=_execute) + @mock.patch.object(os.path, 'exists', side_effect=_fake_exists) + @mock.patch.object(os, 'access', side_effect=_access) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_failed_to_create_conf(self, *args): + """Error case: Writing into horcmxxx.conf fails.(MSGID0632-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_numbers = (500, 501) + self.configuration.vsp_horcm_add_conf = True + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_failed_to_login(self, *args): + """Error case: 'raidcom -login' fails with EX_ENAUTH(MSGID0600-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_user = "userX" + self.configuration.vsp_horcm_password = "paswordX" + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 2) + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_failed_to_command(self, *args): + """Error case: 'raidcom -login' fails with EX_COMERR(MSGID0600-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_user = "userY" + self.configuration.vsp_horcm_password = "paswordY" + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 2) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm, '_run_horcmgr', side_effect=_fake_run_horcmgr) + def test_do_setup_failed_to_horcmshutdown(self, *args): + """Error case: CCI's status is always RUNNING(MSGID0608-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart) + def test_do_setup_failed_to_horcmstart(self, *args): + """Error case: _run_horcmstart() returns an error(MSGID0609-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + + global 
run_horcmstart_returns_error + run_horcmstart_returns_error = True + self.assertRaises(exception.VSPError, drv.do_setup, None) + run_horcmstart_returns_error = False + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties_error) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_wwn_not_found(self, *args): + """Error case: The connector does not have 'wwpns'(MSGID0650-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties_error) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_port_not_found(self, *args): + """Error case: The target port does not exist(MSGID0650-E).""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_target_ports = ["CL4-A"] + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume(self, *args): + """Normal case: Extend volume succeeds.""" + self.driver.extend_volume(TEST_VOLUME[0], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume_volume_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0613-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[2], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume_volume_ldev_is_vvol(self, *args): + """Error case: The volume is a V-VOL(MSGID0618-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[5], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume_volume_is_busy(self, *args): + """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[4], 256) + + @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) + @mock.patch.object(vsp_horcm, '_EXTEND_WAITTIME', 1) + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + def test_extend_volume_raidcom_error(self, *args): + """Error case: 'extend ldev' returns an error(MSGID0600-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[3], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_get_volume_stats(self, *args): + """Normal case: Refreshing data required.""" + stats = self.driver.get_volume_stats(True) + self.assertEqual('Hitachi', stats['vendor_name']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_get_volume_stats_no_refresh(self, *args): + """Normal case: Refreshing data not required.""" + stats = self.driver.get_volume_stats() + self.assertEqual({}, stats) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_error_execute) + def test_get_volume_stats_failed_to_get_dp_pool(self, *args): + """Error case: The pool does not exist(MSGID0640-E, MSGID0620-E).""" + self.driver.common.storage_info['pool_id'] = 29 + + stats = self.driver.get_volume_stats(True) + self.assertEqual({}, stats) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume(self, *args): + """Normal case: Available LDEV 
range is 0-1.""" + ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) + self.assertEqual('1', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_free_ldev_not_found_on_storage(self, *args): + """Error case: No unused LDEV exists(MSGID0648-E).""" + self.driver.common.storage_info['ldev_range'] = [0, 0] + + self.assertRaises( + exception.VSPError, self.driver.create_volume, TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_no_setting_ldev_range(self, *args): + """Normal case: Available LDEV range is unlimited.""" + self.driver.common.storage_info['ldev_range'] = None + + ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) + self.assertEqual('1', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm.VSPHORCM, + '_check_ldev_status', side_effect=_fake_check_ldev_status) + def test_delete_volume(self, *args): + """Normal case: Delete a volume.""" + self.driver.delete_volume(TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_volume_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0304-W).""" + self.driver.delete_volume(TEST_VOLUME[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_volume_ldev_not_found_on_storage(self, *args): + """Unusual case: The volume's LDEV does not exist.(MSGID0319-W).""" + self.driver.delete_volume(TEST_VOLUME[3]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_volume_volume_is_busy(self, *args): + """Error case: The volume is a P-VOL of a THIN pair(MSGID0616-E).""" + self.assertRaises( + exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[4]) + + @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_full(self, *args): + """Normal case: copy_method=FULL.""" + self.driver.common.storage_info['ldev_range'] = [0, 9] + + ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) + self.assertEqual('8', ret['provider_location']) + + @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_thin(self, *args): + """Normal case: copy_method=THIN.""" + self.driver.common.storage_info['ldev_range'] = [0, 9] + self.configuration.vsp_thin_pool = 31 + self.configuration.vsp_default_copy_method = "THIN" + + ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) + self.assertEqual('8', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_provider_location_is_none(self, *args): + """Error case: Source vol's provider_location is None(MSGID0624-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + 
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_ldev_not_found_on_storage(self, *args): + """Error case: The src-vol's LDEV does not exist.(MSGID0612-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[3]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_full(self, *args): + """Normal case: Delete a snapshot.""" + self.driver.delete_snapshot(TEST_SNAPSHOT[5]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm.VSPHORCM, '_is_smpl', side_effect=_fake_is_smpl) + def test_delete_snapshot_full_smpl(self, *args): + """Normal case: The LDEV in an SI volume pair becomes SMPL.""" + self.driver.delete_snapshot(TEST_SNAPSHOT[7]) + + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_vvol_timeout(self, *args): + """Error case: V-VOL is not deleted from a snapshot(MSGID0611-E).""" + self.assertRaises( + exception.VSPError, self.driver.delete_snapshot, TEST_SNAPSHOT[6]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_provider_location_is_none(self, *args): + """Error case: Snapshot's provider_location is None(MSGID0304-W).""" + self.driver.delete_snapshot(TEST_SNAPSHOT[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_ldev_not_found_on_storage(self, *args): + """Unusual case: The snapshot's LDEV does not exist.(MSGID0319-W).""" + self.driver.delete_snapshot(TEST_SNAPSHOT[3]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_snapshot_is_busy(self, *args): + """Error case: The snapshot is a P-VOL of a THIN pair(MSGID0616-E).""" + self.assertRaises( + exception.SnapshotIsBusy, self.driver.delete_snapshot, + TEST_SNAPSHOT[4]) + + @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object( + utils, 'brick_get_connector', + side_effect=mock.MagicMock()) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + brick_connector.FibreChannelConnector, + 'connect_volume', _connect_volume) + @mock.patch.object( + brick_connector.FibreChannelConnector, + 'disconnect_volume', _disconnect_volume) + def test_create_cloned_volume_with_dd_same_size(self, *args): + """Normal case: The source volume is a V-VOL and copied by dd.""" + vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[5]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object( + utils, 'brick_get_connector', + side_effect=mock.MagicMock()) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + brick_connector.FibreChannelConnector, + 'connect_volume', _connect_volume) + @mock.patch.object( + brick_connector.FibreChannelConnector, + 'disconnect_volume', _disconnect_volume) + def test_create_cloned_volume_with_dd_extend_size(self, *args): + """Normal case: Copy with dd and extend the size afterward.""" + vol = self.driver.create_cloned_volume(TEST_VOLUME[1], TEST_VOLUME[5]) + self.assertEqual('1', 
vol['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_cloned_volume_provider_location_is_none(self, *args): + """Error case: Source vol's provider_location is None(MSGID0624-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_cloned_volume, + TEST_VOLUME[0], TEST_VOLUME[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_cloned_volume_invalid_size(self, *args): + """Error case: src-size > clone-size(MSGID0617-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_cloned_volume, + TEST_VOLUME[0], TEST_VOLUME[1]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_cloned_volume_extend_size_thin(self, *args): + """Error case: clone > src and copy_method=THIN(MSGID0621-E).""" + self.configuration.vsp_thin_pool = 31 + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'THIN'}) + self.assertRaises( + exception.VSPError, self.driver.create_cloned_volume, + test_vol_obj, TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_same_size(self, *args): + """Normal case: Copy with ShadowImage.""" + vol = self.driver.create_volume_from_snapshot( + TEST_VOLUME[0], TEST_SNAPSHOT[0]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute2) + def test_create_volume_from_snapshot_full_extend_normal(self, *args): + """Normal case: Copy with ShadowImage and extend the size afterward.""" + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + vol = self.driver.create_volume_from_snapshot( + test_vol_obj, TEST_SNAPSHOT[0]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute3) + def test_create_volume_from_snapshot_full_extend_PSUE(self, *args): + """Error case: SI copy -> pair status: PSUS -> PSUE(MSGID0722-E).""" + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute4) + def test_create_volume_from_snapshot_full_PSUE(self, *args): + """Error case: SI copy -> pair status becomes PSUE(MSGID0610-E).""" + test_vol_obj = copy.copy(TEST_VOLUME[0]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object( + vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart3) + @mock.patch.object(vsp_horcm, '_LDEV_STATUS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute5) + def test_create_volume_from_snapshot_full_SMPL(self, *args): + """Error case: SI copy -> pair status becomes SMPL(MSGID0610-E).""" + test_vol_obj = copy.copy(TEST_VOLUME[0]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_invalid_size(self, *args): + """Error case: volume-size < 
snapshot-size(MSGID0617-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + TEST_VOLUME[0], TEST_SNAPSHOT[1]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_thin_extend(self, *args): + """Error case: volume > snapshot and copy_method=THIN(MSGID0621-E).""" + self.configuration.vsp_thin_pool = 31 + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'THIN'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_provider_location_is_none( + self, *args): + """Error case: Snapshot's provider_location is None(MSGID0624-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + TEST_VOLUME[0], TEST_SNAPSHOT[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'volume_admin_metadata_get', + side_effect=_volume_admin_metadata_get) + def test_initialize_connection(self, *args): + """Normal case: Initialize connection.""" + self.configuration.vsp_zoning_request = True + self.driver.common._lookup_service = FakeLookupService() + + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) + self.assertEqual(0, ret['data']['target_lun']) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'volume_admin_metadata_get', + side_effect=_volume_admin_metadata_get) + def test_initialize_connection_multipath(self, *args): + """Normal case: Initialize connection in multipath environment.""" + drv = vsp_fc.VSPFCDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_target_ports = ["CL1-A", "CL1-B"] + drv.do_setup(None) + multipath_connector = copy.copy(DEFAULT_CONNECTOR) + multipath_connector['multipath'] = True + ret = drv.initialize_connection(TEST_VOLUME[0], multipath_connector) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual(['0123456789abcdef', '0123456789abcdef'], + ret['data']['target_wwn']) + self.assertEqual(0, ret['data']['target_lun']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_initialize_connection_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0619-E).""" + self.assertRaises( + exception.VSPError, self.driver.initialize_connection, + TEST_VOLUME[2], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'volume_admin_metadata_get', + side_effect=_volume_admin_metadata_get) + def test_initialize_connection_already_attached(self, *args): + """Unusual case: 'add lun' returns 'already defined' error.""" + ret = self.driver.initialize_connection( + TEST_VOLUME[6], DEFAULT_CONNECTOR) + self.assertEqual('fibre_channel', ret['driver_volume_type']) + self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) + self.assertEqual(255, ret['data']['target_lun']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection(self, *args): + """Normal case: Terminate 
connection.""" + self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection_provider_location_is_none(self, *args): + """Unusual case: Volume's provider_location is None(MSGID0302-W).""" + self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection_no_port_mapped_to_ldev(self, *args): + """Unusual case: No port is mapped to the LDEV.""" + self.driver.terminate_connection(TEST_VOLUME[3], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection_initiator_iqn_not_found(self, *args): + """Error case: The connector does not have 'wwpns'(MSGID0650-E).""" + connector = dict(DEFAULT_CONNECTOR) + del connector['wwpns'] + + self.assertRaises( + exception.VSPError, self.driver.terminate_connection, + TEST_VOLUME[0], connector) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_copy_volume_to_image(self, *args): + """Normal case: Copy a volume to an image.""" + image_service = 'fake_image_service' + image_meta = 'fake_image_meta' + + with mock.patch.object(driver.VolumeDriver, 'copy_volume_to_image') \ + as mock_copy_volume_to_image: + self.driver.copy_volume_to_image( + self.ctxt, TEST_VOLUME[0], image_service, image_meta) + + mock_copy_volume_to_image.assert_called_with( + self.ctxt, TEST_VOLUME[0], image_service, image_meta) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing(self, *args): + """Normal case: Bring an existing volume under Cinder's control.""" + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual('0', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_normal(self, *args): + """Normal case: Return an existing LDEV's size.""" + self.driver.manage_existing_get_size( + TEST_VOLUME[0], self.test_existing_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_none_ldev_ref(self, *args): + """Error case: Source LDEV's properties do not exist(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_none_ldev_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_ldev_ref(self, *args): + """Error case: Source LDEV's ID is an invalid decimal(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_ldev_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_value_error_ref(self, *args): + """Error case: Source LDEV's ID is an invalid hex(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_value_error_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_no_ldev_ref(self, *args): + """Error case: Source LDEV's ID is not specified(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_no_ldev_ref) + + 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_sts_ldev(self, *args): + """Error case: Source LDEV's STS is invalid(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_sts_ldev) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_vol_attr(self, *args): + """Error case: Source LDEV's VOL_ATTR is invalid(MSGID0702-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_vol_attr) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_size_ref(self, *args): + """Error case: Source LDEV's VOL_Capacity is invalid(MSGID0703-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_size) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_port_cnt(self, *args): + """Error case: Source LDEV's NUM_PORT is invalid(MSGID0704-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_port_cnt) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart2) + def test_manage_existing_get_size_failed_to_start_horcmgr(self, *args): + """Error case: _start_horcmgr() returns an error(MSGID0320-W).""" + global run_horcmstart_returns_error2 + run_horcmstart_returns_error2 = True + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_failed_to_start_horcmgr) + run_horcmstart_returns_error2 = False + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage(self, *args): + """Normal case: Take out a volume from Cinder's control.""" + self.driver.unmanage(TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0304-W).""" + self.driver.unmanage(TEST_VOLUME[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage_volume_invalid_sts_ldev(self, *args): + """Unusual case: The volume's STS is BLK.""" + self.driver.unmanage(TEST_VOLUME[13]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage_volume_is_busy(self, *args): + """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" + self.assertRaises( + exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[4]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_copy_image_to_volume(self, *args): + """Normal case: Copy an image to a volume.""" + image_service = 'fake_image_service' + image_id = 'fake_image_id' + self.configuration.vsp_horcm_numbers = (400, 401) + + with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ + as mock_copy_image: + self.driver.copy_image_to_volume( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + + mock_copy_image.assert_called_with( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + + @mock.patch.object(vsp_utils, 'execute', 
side_effect=_execute) + def test_restore_backup(self, *args): + """Normal case: Restore a backup volume.""" + backup = 'fake_backup' + backup_service = 'fake_backup_service' + + with mock.patch.object(driver.VolumeDriver, 'restore_backup') \ + as mock_restore_backup: + self.driver.restore_backup( + self.ctxt, backup, TEST_VOLUME[0], backup_service) + + mock_restore_backup.assert_called_with( + self.ctxt, backup, TEST_VOLUME[0], backup_service) + + @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) + def test_update_migrated_volume_success(self, *args): + """Normal case: 'modify ldev -status discard_zero_page' succeeds.""" + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[2], + "available") + + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_update_migrated_volume_error(self, *args): + """Error case: 'modify ldev' fails(MSGID0315-W).""" + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[3], + "available") + + def test_get_ldev_volume_is_none(self, *args): + """Error case: The volume is None.""" + self.assertIsNone(vsp_utils.get_ldev(None)) + + def test_check_ignore_error_string(self, *args): + """Normal case: ignore_error is a string.""" + ignore_error = 'SSB=0xB980,0xB902' + stderr = ('raidcom: [EX_CMDRJE] An order to the control/command device' + ' was rejected\nIt was rejected due to SKEY=0x05, ASC=0x26, ' + 'ASCQ=0x00, SSB=0xB980,0xB902 on Serial#(400003)\nCAUSE : ' + 'The specified port can not be operated.') + self.assertTrue(vsp_utils.check_ignore_error(ignore_error, stderr)) + + def test_check_opts_parameter_specified(self, *args): + """Normal case: A valid parameter is specified.""" + cfg.CONF.paramAAA = 'aaa' + vsp_utils.check_opts(conf.Configuration(None), + [cfg.StrOpt('paramAAA')]) + + def test_check_opt_value_parameter_not_set(self, *args): + """Error case: A parameter is not set(MSGID0601-E).""" + self.assertRaises(cfg.NoSuchOptError, + vsp_utils.check_opt_value, + conf.Configuration(None), + ['paramCCC']) + + def test_build_initiator_target_map_no_lookup_service(self, *args): + """Normal case: None is specified for lookup_service.""" + connector = {'wwpns': ['0000000000000000', '1111111111111111']} + target_wwns = ['2222222222222222', '3333333333333333'] + init_target_map = vsp_utils.build_initiator_target_map(connector, + target_wwns, + None) + self.assertEqual( + {'0000000000000000': ['2222222222222222', '3333333333333333'], + '1111111111111111': ['2222222222222222', '3333333333333333']}, + init_target_map) + + def test_update_conn_info_not_update_conn_info(self, *args): + """Normal case: Not update connection info.""" + vsp_utils.update_conn_info(dict({'data': dict({'target_wwn': []})}), + dict({'wwpns': []}), + None) diff --git a/cinder/volume/drivers/hitachi/vsp_common.py b/cinder/volume/drivers/hitachi/vsp_common.py new file mode 100644 index 00000000000..dbe08a1cb0b --- /dev/null +++ b/cinder/volume/drivers/hitachi/vsp_common.py @@ -0,0 +1,884 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""Common module for Hitachi VSP Driver.""" + +import abc +import re + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units +import six + +from cinder import coordination +from cinder import exception +from cinder import utils as cinder_utils +from cinder.volume.drivers.hitachi import vsp_utils as utils +from cinder.volume import utils as volume_utils + + +VERSION = '1.0.0' + +_COPY_METHOD = set(['FULL', 'THIN']) + +_INHERITED_VOLUME_OPTS = [ + 'volume_backend_name', + 'volume_driver', + 'reserved_percentage', + 'use_multipath_for_image_xfer', + 'enforce_multipath_for_image_xfer', + 'num_volume_device_scan_tries', +] + +common_opts = [ + cfg.StrOpt( + 'vsp_storage_id', + help='Product number of the storage system.'), + cfg.StrOpt( + 'vsp_pool', + help='Pool number or pool name of the DP pool.'), + cfg.StrOpt( + 'vsp_thin_pool', + help='Pool number or pool name of the Thin Image pool.'), + cfg.StrOpt( + 'vsp_ldev_range', + help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' + 'can be used by the driver. Values can be in decimal format ' + '(e.g. 1000) or in colon-separated hexadecimal format ' + '(e.g. 00:03:E8).'), + cfg.StrOpt( + 'vsp_default_copy_method', + default='FULL', + choices=['FULL', 'THIN'], + help='Method of volume copy. FULL indicates full data copy by ' + 'ShadowImage and THIN indicates differential data copy by Thin ' + 'Image.'), + cfg.IntOpt( + 'vsp_copy_speed', + min=1, + max=15, + default=3, + help='Speed at which data is copied by ShadowImage. 1 or 2 indicates ' + 'low speed, 3 indicates middle speed, and a value between 4 and ' + '15 indicates high speed.'), + cfg.IntOpt( + 'vsp_copy_check_interval', + min=1, + max=600, + default=3, + help='Interval in seconds at which volume pair synchronization status ' + 'is checked when volume pairs are created.'), + cfg.IntOpt( + 'vsp_async_copy_check_interval', + min=1, + max=600, + default=10, + help='Interval in seconds at which volume pair synchronization status ' + 'is checked when volume pairs are deleted.'), + cfg.ListOpt( + 'vsp_target_ports', + help='IDs of the storage ports. To specify multiple ports, connect ' + 'them by commas (e.g. CL1-A,CL2-A).'), + cfg.BoolOpt( + 'vsp_group_request', + default=False, + help='If True, the driver will create host groups on storage ports ' + 'as needed.'), +] + +_REQUIRED_COMMON_OPTS = [ + 'vsp_storage_id', + 'vsp_pool', + 'vsp_target_ports', +] + +CONF = cfg.CONF +CONF.register_opts(common_opts) + +LOG = logging.getLogger(__name__) +MSG = utils.VSPMsg + + +def _str2int(num): + """Convert a string into an integer.""" + if not num: + return None + if num.isdigit(): + return int(num) + if not re.match(r'\w\w:\w\w:\w\w', num): + return None + try: + return int(num.replace(':', ''), 16) + except ValueError: + return None + + +@six.add_metaclass(abc.ABCMeta) +class VSPCommon(object): + """Common class for Hitachi VSP Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. 
+ + """ + + def __init__(self, conf, driverinfo, db): + """Initialize instance variables.""" + self.conf = conf + self.db = db + self.ctxt = None + self.lock = {} + self.driver_info = driverinfo + self.storage_info = { + 'protocol': driverinfo['proto'], + 'pool_id': None, + 'ldev_range': [], + 'ports': [], + 'wwns': {}, + 'output_first': True, + } + + self._stats = {} + + def run_and_verify_storage_cli(self, *cmd, **kwargs): + """Run storage CLI and return the result or raise an exception.""" + do_raise = kwargs.pop('do_raise', True) + ignore_error = kwargs.get('ignore_error') + success_code = kwargs.get('success_code', set([0])) + (ret, stdout, stderr) = self.run_storage_cli(*cmd, **kwargs) + if (ret not in success_code and + not utils.check_ignore_error(ignore_error, stderr)): + msg = utils.output_log( + MSG.STORAGE_COMMAND_FAILED, cmd=utils.mask_password(cmd), + ret=ret, out=' '.join(stdout.splitlines()), + err=' '.join(stderr.splitlines())) + if do_raise: + raise exception.VSPError(msg) + return ret, stdout, stderr + + @abc.abstractmethod + def run_storage_cli(self, *cmd, **kwargs): + """Run storage CLI.""" + raise NotImplementedError() + + def get_copy_method(self, metadata): + """Return copy method(FULL or THIN).""" + method = metadata.get( + 'copy_method', self.conf.vsp_default_copy_method) + if method not in _COPY_METHOD: + msg = utils.output_log(MSG.INVALID_PARAMETER_VALUE, + meta='copy_method') + raise exception.VSPError(msg) + if method == 'THIN' and not self.conf.vsp_thin_pool: + msg = utils.output_log(MSG.INVALID_PARAMETER, + param='vsp_thin_pool') + raise exception.VSPError(msg) + return method + + def create_volume(self, volume): + """Create a volume and return its properties.""" + try: + ldev = self.create_ldev(volume['size']) + except exception.VSPError: + with excutils.save_and_reraise_exception(): + utils.output_log(MSG.CREATE_LDEV_FAILED) + return { + 'provider_location': six.text_type(ldev), + } + + def create_ldev(self, size, is_vvol=False): + """Create an LDEV and return its LDEV number.""" + ldev = self.get_unused_ldev() + self.create_ldev_on_storage(ldev, size, is_vvol) + LOG.debug('Created logical device. (LDEV: %s)', ldev) + return ldev + + @abc.abstractmethod + def create_ldev_on_storage(self, ldev, size, is_vvol): + """Create an LDEV on the storage system.""" + raise NotImplementedError() + + @abc.abstractmethod + def get_unused_ldev(self): + """Find an unused LDEV and return its LDEV number.""" + raise NotImplementedError() + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot and return its properties.""" + ldev = utils.get_ldev(snapshot) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. 
+ if ldev is None: + msg = utils.output_log( + MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='snapshot', + id=snapshot['id']) + raise exception.VSPError(msg) + size = volume['size'] + metadata = utils.get_volume_metadata(volume) + if size < snapshot['volume_size']: + msg = utils.output_log( + MSG.INVALID_VOLUME_SIZE_FOR_COPY, type='snapshot', + volume_id=volume['id']) + raise exception.VSPError(msg) + elif (size > snapshot['volume_size'] and not self.check_vvol(ldev) and + self.get_copy_method(metadata) == "THIN"): + msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI, + copy_method=utils.THIN, + type='snapshot', volume_id=volume['id']) + raise exception.VSPError(msg) + sync = size > snapshot['volume_size'] + new_ldev = self._copy_ldev( + ldev, snapshot['volume_size'], metadata, sync) + if sync: + self.delete_pair(new_ldev) + self.extend_ldev(new_ldev, snapshot['volume_size'], size) + return { + 'provider_location': six.text_type(new_ldev), + } + + def _copy_ldev(self, ldev, size, metadata, sync=False): + """Create a copy of the specified volume and return its properties.""" + try: + return self.copy_on_storage(ldev, size, metadata, sync) + except exception.VSPNotSupported: + return self._copy_on_host(ldev, size) + + def _copy_on_host(self, src_ldev, size): + """Create a copy of the specified LDEV via host.""" + dest_ldev = self.create_ldev(size) + try: + self._copy_with_dd(src_ldev, dest_ldev, size) + except Exception: + with excutils.save_and_reraise_exception(): + try: + self._delete_ldev(dest_ldev) + except exception.VSPError: + utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=dest_ldev) + return dest_ldev + + def _copy_with_dd(self, src_ldev, dest_ldev, size): + """Copy the content of a volume by dd command.""" + src_info = None + dest_info = None + properties = cinder_utils.brick_get_connector_properties( + multipath=self.conf.use_multipath_for_image_xfer, + enforce_multipath=self.conf.enforce_multipath_for_image_xfer) + try: + dest_info = self._attach_ldev(dest_ldev, properties) + src_info = self._attach_ldev(src_ldev, properties) + volume_utils.copy_volume( + src_info['device']['path'], dest_info['device']['path'], + size * units.Ki, self.conf.volume_dd_blocksize) + finally: + if src_info: + self._detach_ldev(src_info, src_ldev, properties) + if dest_info: + self._detach_ldev(dest_info, dest_ldev, properties) + self.discard_zero_page({'provider_location': six.text_type(dest_ldev)}) + + def _attach_ldev(self, ldev, properties): + """Attach the specified LDEV to the server.""" + volume = { + 'provider_location': six.text_type(ldev), + } + conn = self.initialize_connection(volume, properties) + try: + connector = cinder_utils.brick_get_connector( + conn['driver_volume_type'], + use_multipath=self.conf.use_multipath_for_image_xfer, + device_scan_attempts=self.conf.num_volume_device_scan_tries, + conn=conn) + device = connector.connect_volume(conn['data']) + except Exception as ex: + with excutils.save_and_reraise_exception(): + utils.output_log(MSG.CONNECT_VOLUME_FAILED, ldev=ldev, + reason=six.text_type(ex)) + self._terminate_connection(volume, properties) + return { + 'conn': conn, + 'device': device, + 'connector': connector, + } + + def _detach_ldev(self, attach_info, ldev, properties): + """Detach the specified LDEV from the server.""" + volume = { + 'provider_location': six.text_type(ldev), + } + connector = attach_info['connector'] + try: + connector.disconnect_volume( + attach_info['conn']['data'], attach_info['device']) + except Exception as ex: + 
utils.output_log(MSG.DISCONNECT_VOLUME_FAILED, ldev=ldev, + reason=six.text_type(ex)) + self._terminate_connection(volume, properties) + + def _terminate_connection(self, volume, connector): + """Disconnect the specified volume from the server.""" + try: + self.terminate_connection(volume, connector) + except exception.VSPError: + utils.output_log(MSG.UNMAP_LDEV_FAILED, + ldev=utils.get_ldev(volume)) + + def copy_on_storage(self, pvol, size, metadata, sync): + """Create a copy of the specified LDEV on the storage.""" + is_thin = self.get_copy_method(metadata) == "THIN" + svol = self.create_ldev(size, is_vvol=is_thin) + try: + self.create_pair_on_storage(pvol, svol, is_thin) + if sync: + self.wait_full_copy_completion(pvol, svol) + except exception.VSPError: + with excutils.save_and_reraise_exception(): + try: + self._delete_ldev(svol) + except exception.VSPError: + utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol) + return svol + + @abc.abstractmethod + def create_pair_on_storage(self, pvol, svol, is_thin): + """Create a copy pair on the storage.""" + raise NotImplementedError() + + def _delete_ldev(self, ldev): + """Delete the specified LDEV.""" + self.delete_pair(ldev) + self.unmap_ldev_from_storage(ldev) + self.delete_ldev_from_storage(ldev) + + def unmap_ldev_from_storage(self, ldev): + """Delete the connection between the specified LDEV and servers.""" + targets = { + 'list': [], + } + self.find_all_mapped_targets_from_storage(targets, ldev) + self.unmap_ldev(targets, ldev) + + @abc.abstractmethod + def find_all_mapped_targets_from_storage(self, targets, ldev): + """Add all port-gids connected with the LDEV to the list.""" + raise NotImplementedError() + + def delete_pair(self, ldev, all_split=True): + """Disconnect all volume pairs to which the specified LDEV belongs.""" + pair_info = self.get_pair_info(ldev) + if not pair_info: + return + if pair_info['pvol'] == ldev: + self.delete_pair_based_on_pvol(pair_info, all_split) + else: + self.delete_pair_based_on_svol( + pair_info['pvol'], pair_info['svol_info'][0]) + + @abc.abstractmethod + def get_pair_info(self, ldev): + """Return volume pair info(LDEV number, pair status and pair type).""" + raise NotImplementedError() + + @abc.abstractmethod + def delete_pair_based_on_pvol(self, pair_info, all_split): + """Disconnect all volume pairs to which the specified P-VOL belongs.""" + raise NotImplementedError() + + @abc.abstractmethod + def delete_pair_based_on_svol(self, pvol, svol_info): + """Disconnect all volume pairs to which the specified S-VOL belongs.""" + raise NotImplementedError() + + @abc.abstractmethod + def delete_pair_from_storage(self, pvol, svol, is_thin): + """Disconnect the volume pair that consists of the specified LDEVs.""" + raise NotImplementedError() + + @abc.abstractmethod + def delete_ldev_from_storage(self, ldev): + """Delete the specified LDEV from the storage.""" + raise NotImplementedError() + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume and return its properties.""" + ldev = utils.get_ldev(src_vref) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is not None'. 
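+        # (Same guard as in create_volume_from_snapshot() above: LDEV 0 is
+        # a valid device number, so the explicit 'is None' comparison must
+        # stay.)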
+ if ldev is None: + msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY, + type='volume', id=src_vref['id']) + raise exception.VSPError(msg) + size = volume['size'] + metadata = utils.get_volume_metadata(volume) + if size < src_vref['size']: + msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_COPY, + type='volume', volume_id=volume['id']) + raise exception.VSPError(msg) + elif (size > src_vref['size'] and not self.check_vvol(ldev) and + self.get_copy_method(metadata) == "THIN"): + msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI, + copy_method=utils.THIN, type='volume', + volume_id=volume['id']) + raise exception.VSPError(msg) + sync = size > src_vref['size'] + new_ldev = self._copy_ldev(ldev, src_vref['size'], metadata, sync) + if sync: + self.delete_pair(new_ldev) + self.extend_ldev(new_ldev, src_vref['size'], size) + return { + 'provider_location': six.text_type(new_ldev), + } + + def delete_volume(self, volume): + """Delete the specified volume.""" + ldev = utils.get_ldev(volume) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is not None'. + if ldev is None: + utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, + method='delete_volume', id=volume['id']) + return + try: + self._delete_ldev(ldev) + except exception.VSPBusy: + raise exception.VolumeIsBusy(volume_name=volume['name']) + + def create_snapshot(self, snapshot): + """Create a snapshot from a volume and return its properties.""" + src_vref = snapshot.volume + ldev = utils.get_ldev(src_vref) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. + if ldev is None: + msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY, + type='volume', id=src_vref['id']) + raise exception.VSPError(msg) + size = snapshot['volume_size'] + metadata = utils.get_volume_metadata(src_vref) + new_ldev = self._copy_ldev(ldev, size, metadata) + return { + 'provider_location': six.text_type(new_ldev), + } + + def delete_snapshot(self, snapshot): + """Delete the specified snapshot.""" + ldev = utils.get_ldev(snapshot) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. + if ldev is None: + utils.output_log( + MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', + id=snapshot['id']) + return + try: + self._delete_ldev(ldev) + except exception.VSPBusy: + raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) + + def get_volume_stats(self, refresh=False): + """Return properties, capabilities and current states of the driver.""" + if refresh: + if self.storage_info['output_first']: + self.storage_info['output_first'] = False + utils.output_log(MSG.DRIVER_READY_FOR_USE, + config_group=self.conf.config_group) + self._update_volume_stats() + return self._stats + + def _update_volume_stats(self): + """Update properties, capabilities and current states of the driver.""" + data = {} + backend_name = self.conf.safe_get('volume_backend_name') + data['volume_backend_name'] = ( + backend_name or self.driver_info['volume_backend_name']) + data['vendor_name'] = 'Hitachi' + data['driver_version'] = VERSION + data['storage_protocol'] = self.storage_info['protocol'] + try: + total_gb, free_gb = self.get_pool_info() + except exception.VSPError: + utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, + pool=self.conf.vsp_pool) + return + data['total_capacity_gb'] = total_gb + data['free_capacity_gb'] = free_gb + data['reserved_percentage'] = self.conf.safe_get('reserved_percentage') + data['QoS_support'] = False + LOG.debug("Updating volume status. 
(%s)", data) + self._stats = data + + @abc.abstractmethod + def get_pool_info(self): + """Return the total and free capacity of the storage pool.""" + raise NotImplementedError() + + @abc.abstractmethod + def discard_zero_page(self, volume): + """Return the volume's no-data pages to the storage pool.""" + raise NotImplementedError() + + def extend_volume(self, volume, new_size): + """Extend the specified volume to the specified size.""" + ldev = utils.get_ldev(volume) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. + if ldev is None: + msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION, + volume_id=volume['id']) + raise exception.VSPError(msg) + if self.check_vvol(ldev): + msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND, + volume_id=volume['id']) + raise exception.VSPError(msg) + self.delete_pair(ldev) + self.extend_ldev(ldev, volume['size'], new_size) + + @abc.abstractmethod + def check_vvol(self, ldev): + """Return True if the specified LDEV is V-VOL, False otherwise.""" + raise NotImplementedError() + + @abc.abstractmethod + def extend_ldev(self, ldev, old_size, new_size): + """Extend the specified LDEV to the specified new size.""" + raise NotImplementedError() + + def manage_existing(self, existing_ref): + """Return volume properties which Cinder needs to manage the volume.""" + ldev = _str2int(existing_ref.get('source-id')) + return { + 'provider_location': six.text_type(ldev), + } + + def manage_existing_get_size(self, existing_ref): + """Return the size[GB] of the specified volume.""" + ldev = _str2int(existing_ref.get('source-id')) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. + if ldev is None: + msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + return self.get_ldev_size_in_gigabyte(ldev, existing_ref) + + @abc.abstractmethod + def get_ldev_size_in_gigabyte(self, ldev, existing_ref): + """Return the size[GB] of the specified LDEV.""" + raise NotImplementedError() + + def unmanage(self, volume): + """Prepare the volume for removing it from Cinder management.""" + ldev = utils.get_ldev(volume) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. 
+ if ldev is None: + utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage', + id=volume['id']) + return + if self.check_vvol(ldev): + utils.output_log( + MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'], + volume_type=utils.NORMAL_LDEV_TYPE) + raise exception.VolumeIsBusy(volume_name=volume['name']) + try: + self.delete_pair(ldev) + except exception.VSPBusy: + raise exception.VolumeIsBusy(volume_name=volume['name']) + + def do_setup(self, context): + """Prepare for the startup of the driver.""" + self.ctxt = context + + self.check_param() + self.config_lock() + self.connect_storage() + self.init_cinder_hosts() + self.output_param_to_log() + + def check_param(self): + """Check parameter values and consistency among them.""" + utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS) + utils.check_opts(self.conf, common_opts) + utils.check_opts(self.conf, self.driver_info['volume_opts']) + if (self.conf.vsp_default_copy_method == 'THIN' and + not self.conf.vsp_thin_pool): + msg = utils.output_log(MSG.INVALID_PARAMETER, + param='vsp_thin_pool') + raise exception.VSPError(msg) + if self.conf.vsp_ldev_range: + self.storage_info['ldev_range'] = self._range2list( + 'vsp_ldev_range') + for opt in _REQUIRED_COMMON_OPTS: + if not self.conf.safe_get(opt): + msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt) + raise exception.VSPError(msg) + + def _range2list(self, param): + """Analyze a 'xxx-xxx' string and return a list of two integers.""" + values = [_str2int(value) for value in + self.conf.safe_get(param).split('-')] + if (len(values) != 2 or + values[0] is None or values[1] is None or + values[0] > values[1]): + msg = utils.output_log(MSG.INVALID_PARAMETER, param=param) + raise exception.VSPError(msg) + return values + + @abc.abstractmethod + def config_lock(self): + """Initialize lock resource names.""" + raise NotImplementedError() + + def connect_storage(self): + """Prepare for using the storage.""" + self.storage_info['pool_id'] = self.get_pool_id() + # When 'pool_id' is 0, it should be true. + # Therefore, it cannot remove 'is None'. 
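+        # (Pool ID 0 is valid, hence the explicit 'is None' check below.
+        # Note that the base get_pool_id() only parses numeric vsp_pool
+        # values; resolving a pool *name* to its ID is presumably left to
+        # the storage-specific subclass.)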
+ if self.storage_info['pool_id'] is None: + msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=self.conf.vsp_pool) + raise exception.VSPError(msg) + utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID', + value=self.storage_info['pool_id']) + + def check_ports_info(self): + """Check if available storage ports exist.""" + if (self.conf.vsp_target_ports and + not self.storage_info['ports']): + msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, + resource="Target ports") + raise exception.VSPError(msg) + utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list', + value=self.storage_info['ports']) + + def get_pool_id(self): + """Return the storage pool ID as integer.""" + pool = self.conf.vsp_pool + if pool.isdigit(): + return int(pool) + return None + + def init_cinder_hosts(self, **kwargs): + """Initialize server-storage connection.""" + targets = kwargs.pop('targets', {'info': {}, 'list': []}) + connector = cinder_utils.brick_get_connector_properties( + multipath=self.conf.use_multipath_for_image_xfer, + enforce_multipath=self.conf.enforce_multipath_for_image_xfer) + target_ports = self.storage_info['ports'] + + if target_ports: + if (self.find_targets_from_storage( + targets, connector, target_ports) and + self.conf.vsp_group_request): + self.create_mapping_targets(targets, connector) + + utils.require_target_existed(targets) + + @abc.abstractmethod + def find_targets_from_storage(self, targets, connector, target_ports): + """Find mapped ports, memorize them and return unmapped port count.""" + raise NotImplementedError() + + def create_mapping_targets(self, targets, connector): + """Create server-storage connection for all specified storage ports.""" + hba_ids = self.get_hba_ids_from_connector(connector) + for port in targets['info'].keys(): + if targets['info'][port]: + continue + + try: + self._create_target(targets, port, connector, hba_ids) + except exception.VSPError: + utils.output_log( + self.driver_info['msg_id']['target'], port=port) + + if not targets['list']: + self.find_targets_from_storage( + targets, connector, targets['info'].keys()) + + def get_hba_ids_from_connector(self, connector): + """Return the HBA ID stored in the connector.""" + if self.driver_info['hba_id'] in connector: + return connector[self.driver_info['hba_id']] + msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, + resource=self.driver_info['hba_id_type']) + raise exception.VSPError(msg) + + def _create_target(self, targets, port, connector, hba_ids): + """Create a host group for the specified storage port.""" + target_name, gid = self.create_target_to_storage(port, connector) + utils.output_log(MSG.OBJECT_CREATED, object='a target', + details='port: %(port)s, gid: %(gid)s, target_name: ' + '%(target)s' % + {'port': port, 'gid': gid, 'target': target_name}) + try: + self.set_target_mode(port, gid) + self.set_hba_ids(port, gid, hba_ids) + except exception.VSPError: + with excutils.save_and_reraise_exception(): + self.delete_target_from_storage(port, gid) + targets['info'][port] = True + targets['list'].append((port, gid)) + + @abc.abstractmethod + def create_target_to_storage(self, port, connector): + """Create a host group on the specified port.""" + raise NotImplementedError() + + @abc.abstractmethod + def set_target_mode(self, port, gid): + """Configure the host group to meet the environment.""" + raise NotImplementedError() + + @abc.abstractmethod + def set_hba_ids(self, port, gid, hba_ids): + """Connect all specified HBAs with the specified port.""" + raise NotImplementedError() + + 
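+    # NOTE: the 'targets' dictionary threaded through the mapping methods
+    # in this class has a small fixed shape (values here are purely
+    # illustrative):
+    #
+    #     targets = {
+    #         'info': {'CL1-A': True},   # port -> host group found/created
+    #         'list': [('CL1-A', 0)],    # (port, gid) pairs for LUN mapping
+    #         'lun': {'CL1-A': False},   # port -> LUN mapped on this port
+    #     }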
@abc.abstractmethod + def delete_target_from_storage(self, port, gid): + """Delete the host group from the port.""" + raise NotImplementedError() + + def output_param_to_log(self): + """Output configuration parameter values to the log file.""" + utils.output_log(MSG.OUTPUT_PARAMETER_VALUES, + config_group=self.conf.config_group) + name, version = self.get_storage_cli_info() + utils.output_storage_cli_info(name, version) + utils.output_opt_info(self.conf, _INHERITED_VOLUME_OPTS) + utils.output_opts(self.conf, common_opts) + utils.output_opts(self.conf, self.driver_info['volume_opts']) + + @abc.abstractmethod + def get_storage_cli_info(self): + """Return a tuple of the storage CLI name and its version.""" + raise NotImplementedError() + + @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' + '{connector[host]}') + def initialize_connection(self, volume, connector): + """Initialize connection between the server and the volume.""" + targets = { + 'info': {}, + 'list': [], + 'lun': {}, + } + ldev = utils.get_ldev(volume) + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. + if ldev is None: + msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION, + volume_id=volume['id']) + raise exception.VSPError(msg) + + if (self.find_targets_from_storage( + targets, connector, self.storage_info['ports']) and + self.conf.vsp_group_request): + self.create_mapping_targets(targets, connector) + + utils.require_target_existed(targets) + + targets['list'].sort() + for port in self.storage_info['ports']: + targets['lun'][port] = False + target_lun = int(self.map_ldev(targets, ldev)) + + return { + 'driver_volume_type': self.driver_info['volume_type'], + 'data': self.get_properties(targets, connector, target_lun), + } + + @abc.abstractmethod + def map_ldev(self, targets, ldev): + """Create the path between the server and the LDEV and return LUN.""" + raise NotImplementedError() + + def get_properties(self, targets, connector, target_lun=None): + """Return server-LDEV connection info.""" + multipath = connector.get('multipath', False) + if self.storage_info['protocol'] == 'FC': + data = self.get_properties_fc(targets) + if target_lun is not None: + data['target_discovered'] = False + if not multipath or self.storage_info['protocol'] == 'FC': + data['target_lun'] = target_lun + return data + + def get_properties_fc(self, targets): + """Return FC-specific server-LDEV connection info.""" + data = {} + data['target_wwn'] = [ + self.storage_info['wwns'][target[0]] for target in targets['list'] + if targets['lun'][target[0]]] + return data + + @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' + '{connector[host]}') + def terminate_connection(self, volume, connector): + """Terminate connection between the server and the volume.""" + targets = { + 'info': {}, + 'list': [], + } + mapped_targets = { + 'list': [], + } + unmap_targets = {} + + ldev = utils.get_ldev(volume) + if ldev is None: + utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING, + volume_id=volume['id']) + return + self.find_targets_from_storage(targets, connector, + self.storage_info['ports']) + if not targets['list']: + utils.output_log(MSG.NO_CONNECTED_TARGET) + self.find_mapped_targets_from_storage( + mapped_targets, ldev, self.storage_info['ports']) + + unmap_targets['list'] = self.get_unmap_targets_list( + targets['list'], mapped_targets['list']) + unmap_targets['list'].sort(reverse=True) + self.unmap_ldev(unmap_targets, ldev) + + target_wwn = [ + 
self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]] + for port_gid in unmap_targets['list']] + return {'driver_volume_type': self.driver_info['volume_type'], + 'data': {'target_wwn': target_wwn}} + + @abc.abstractmethod + def find_mapped_targets_from_storage(self, targets, ldev, target_ports): + """Find and store IDs of ports used for server-LDEV connection.""" + raise NotImplementedError() + + @abc.abstractmethod + def get_unmap_targets_list(self, target_list, mapped_list): + """Return a list of IDs of ports that need to be disconnected.""" + raise NotImplementedError() + + @abc.abstractmethod + def unmap_ldev(self, targets, ldev): + """Delete the LUN between the specified LDEV and port-gid.""" + raise NotImplementedError() + + @abc.abstractmethod + def wait_full_copy_completion(self, pvol, svol): + """Wait until FULL copy is completed.""" + raise NotImplementedError() diff --git a/cinder/volume/drivers/hitachi/vsp_fc.py b/cinder/volume/drivers/hitachi/vsp_fc.py new file mode 100644 index 00000000000..240e844ffe4 --- /dev/null +++ b/cinder/volume/drivers/hitachi/vsp_fc.py @@ -0,0 +1,178 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""Fibre channel module for Hitachi VSP Driver.""" + +from oslo_config import cfg + +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.hitachi import vsp_common as common +from cinder.volume.drivers.hitachi import vsp_utils as utils + +fc_opts = [ + cfg.BoolOpt( + 'vsp_zoning_request', + default=False, + help='If True, the driver will configure FC zoning between the server ' + 'and the storage system provided that FC zoning manager is ' + 'enabled.'), +] + +MSG = utils.VSPMsg + +_DRIVER_INFO = { + 'proto': 'FC', + 'hba_id': 'wwpns', + 'hba_id_type': 'World Wide Name', + 'msg_id': { + 'target': MSG.CREATE_HOST_GROUP_FAILED, + }, + 'volume_backend_name': utils.DRIVER_PREFIX + 'FC', + 'volume_opts': fc_opts, + 'volume_type': 'fibre_channel', +} + +CONF = cfg.CONF +CONF.register_opts(fc_opts) + + +@interface.volumedriver +class VSPFCDriver(driver.FibreChannelDriver): + """Fibre channel class for Hitachi VSP Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. 
+ + """ + + VERSION = common.VERSION + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Hitachi_VSP_CI" + + def __init__(self, *args, **kwargs): + """Initialize instance variables.""" + utils.output_log(MSG.DRIVER_INITIALIZATION_START, + driver=self.__class__.__name__, + version=self.get_version()) + super(VSPFCDriver, self).__init__(*args, **kwargs) + + self.configuration.append_config_values(common.common_opts) + self.configuration.append_config_values(fc_opts) + self.common = utils.import_object( + self.configuration, _DRIVER_INFO, kwargs.get('db')) + + def check_for_setup_error(self): + """Error are checked in do_setup() instead of this method.""" + pass + + @utils.output_start_end_log + def create_volume(self, volume): + """Create a volume and return its properties.""" + return self.common.create_volume(volume) + + @utils.output_start_end_log + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot and return its properties.""" + return self.common.create_volume_from_snapshot(volume, snapshot) + + @utils.output_start_end_log + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume and return its properties.""" + return self.common.create_cloned_volume(volume, src_vref) + + @utils.output_start_end_log + def delete_volume(self, volume): + """Delete the specified volume.""" + self.common.delete_volume(volume) + + @utils.output_start_end_log + def create_snapshot(self, snapshot): + """Create a snapshot from a volume and return its properties.""" + return self.common.create_snapshot(snapshot) + + @utils.output_start_end_log + def delete_snapshot(self, snapshot): + """Delete the specified snapshot.""" + self.common.delete_snapshot(snapshot) + + def get_volume_stats(self, refresh=False): + """Return properties, capabilities and current states of the driver.""" + return self.common.get_volume_stats(refresh) + + @utils.output_start_end_log + def update_migrated_volume( + self, ctxt, volume, new_volume, original_volume_status): + """Do any remaining jobs after migration.""" + self.common.discard_zero_page(new_volume) + super(VSPFCDriver, self).update_migrated_volume( + ctxt, volume, new_volume, original_volume_status) + + @utils.output_start_end_log + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + super(VSPFCDriver, self).copy_image_to_volume( + context, volume, image_service, image_id) + self.common.discard_zero_page(volume) + + @utils.output_start_end_log + def extend_volume(self, volume, new_size): + """Extend the specified volume to the specified size.""" + self.common.extend_volume(volume, new_size) + + @utils.output_start_end_log + def manage_existing(self, volume, existing_ref): + """Return volume properties which Cinder needs to manage the volume.""" + return self.common.manage_existing(existing_ref) + + @utils.output_start_end_log + def manage_existing_get_size(self, volume, existing_ref): + """Return the size[GB] of the specified volume.""" + return self.common.manage_existing_get_size(existing_ref) + + @utils.output_start_end_log + def unmanage(self, volume): + """Prepare the volume for removing it from Cinder management.""" + self.common.unmanage(volume) + + @utils.output_start_end_log + def do_setup(self, context): + """Prepare for the startup of the driver.""" + self.common.do_setup(context) + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def 
create_export(self, context, volume, connector): + """Export the volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + @utils.output_start_end_log + def initialize_connection(self, volume, connector): + """Initialize connection between the server and the volume.""" + return self.common.initialize_connection(volume, connector) + + @utils.output_start_end_log + def terminate_connection(self, volume, connector, **kwargs): + """Terminate connection between the server and the volume.""" + self.common.terminate_connection(volume, connector) diff --git a/cinder/volume/drivers/hitachi/vsp_horcm.py b/cinder/volume/drivers/hitachi/vsp_horcm.py new file mode 100644 index 00000000000..1dd5d0d4380 --- /dev/null +++ b/cinder/volume/drivers/hitachi/vsp_horcm.py @@ -0,0 +1,1422 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""HORCM interface module for Hitachi VSP Driver.""" + +import functools +import math +import os +import re + +from oslo_config import cfg +from oslo_config import types +from oslo_log import log as logging +from oslo_service import loopingcall +from oslo_utils import excutils +from oslo_utils import timeutils +from oslo_utils import units +import six +from six.moves import range + +from cinder import coordination +from cinder import exception +from cinder import utils as cinder_utils +from cinder.volume.drivers.hitachi import vsp_common as common +from cinder.volume.drivers.hitachi import vsp_utils as utils + +_GETSTORAGEARRAY_ONCE = 1000 +_LU_PATH_DEFINED = 'SSB=0xB958,0x015A' +_ANOTHER_LDEV_MAPPED = 'SSB=0xB958,0x0947' +_NOT_LOCKED = 'SSB=0x2E11,0x2205' +_LOCK_WAITTIME = 2 * 60 * 60 +NORMAL_STS = 'NML' +_LDEV_STATUS_WAITTIME = 120 +_LDEV_CHECK_INTERVAL = 1 +_LDEV_CREATED = ['-check_status', NORMAL_STS] +_LDEV_DELETED = ['-check_status', 'NOT', 'DEFINED'] +_LUN_MAX_WAITTIME = 50 +_LUN_RETRY_INTERVAL = 1 +FULL_ATTR = 'MRCF' +THIN_ATTR = 'QS' +VVOL_ATTR = 'VVOL' +_PERMITTED_TYPES = set(['CVS', 'HDP', 'HDT']) +_PAIR_ATTRS = set([FULL_ATTR, THIN_ATTR]) +_CHECK_KEYS = ('vol_type', 'vol_size', 'num_port', 'vol_attr', 'sts') +_HORCM_WAITTIME = 1 +_EXEC_MAX_WAITTIME = 30 +_EXTEND_WAITTIME = 10 * 60 +_EXEC_RETRY_INTERVAL = 5 +_HORCM_NO_RETRY_ERRORS = [ + 'SSB=0x2E10,0x9705', + 'SSB=0x2E10,0x9706', + 'SSB=0x2E10,0x9707', + 'SSB=0x2E11,0x8303', + 'SSB=0x2E30,0x0007', + 'SSB=0xB956,0x3173', + 'SSB=0xB956,0x31D7', + 'SSB=0xB956,0x31D9', + 'SSB=0xB957,0x4188', + _LU_PATH_DEFINED, + 'SSB=0xB958,0x015E', +] + +SMPL = 1 +PVOL = 2 +SVOL = 3 + +COPY = 2 +PAIR = 3 +PSUS = 4 +PSUE = 5 +UNKN = 0xff + +_STATUS_TABLE = { + 'SMPL': SMPL, + 'COPY': COPY, + 'RCPY': COPY, + 'PAIR': PAIR, + 'PFUL': PAIR, + 'PSUS': PSUS, + 'PFUS': PSUS, + 'SSUS': PSUS, + 'PSUE': PSUE, +} + +_NOT_SET = '-' + +_SMPL_STAUS = set([_NOT_SET, 'SMPL']) + +_HORCM_RUNNING = 1 +_COPY_GROUP = utils.DRIVER_PREFIX + '-%s%s%03X%d' +_SNAP_NAME = utils.DRIVER_PREFIX + '-SNAP' +_LDEV_NAME = utils.DRIVER_PREFIX + '-LDEV-%d-%d' 
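+# NOTE: example expansions of the naming templates above, assuming
+# utils.DRIVER_PREFIX is 'VSP' (input values are illustrative only):
+#
+#     _COPY_GROUP % ('192.168.0.5', '400003', 200, 1)
+#         -> 'VSP-192.168.0.54000030C81'   # 200 renders as '0C8' via %03X
+#     _LDEV_NAME % (10, 11)
+#         -> 'VSP-LDEV-10-11'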
+_PAIR_TARGET_NAME_BODY = 'pair00'
+_PAIR_TARGET_NAME = utils.TARGET_PREFIX + _PAIR_TARGET_NAME_BODY
+_MAX_MUNS = 3
+
+_SNAP_HASH_SIZE = 8
+
+ALL_EXIT_CODE = set(range(256))
+HORCM_EXIT_CODE = set(range(128))
+EX_ENAUTH = 202
+EX_ENOOBJ = 205
+EX_CMDRJE = 221
+EX_ENLDEV = 227
+EX_CMDIOE = 237
+EX_ENOGRP = 239
+EX_INVCMD = 240
+EX_INVMOD = 241
+EX_ENORMT = 242
+EX_ENODEV = 246
+EX_ENOENT = 247
+EX_OPTINV = 248
+EX_ATTDBG = 250
+EX_ATTHOR = 251
+EX_INVARG = 253
+EX_COMERR = 255
+_NO_SUCH_DEVICE = [EX_ENOGRP, EX_ENODEV, EX_ENOENT]
+_INVALID_RANGE = [EX_ENLDEV, EX_INVARG]
+_HORCM_ERROR = set([EX_ENORMT, EX_ATTDBG, EX_ATTHOR, EX_COMERR])
+_COMMAND_IO_TO_RAID = set(
+    [EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV])
+
+_DEFAULT_PORT_BASE = 31000
+
+_HORCMGR = 0
+_PAIR_HORCMGR = 1
+_INFINITE = "-"
+
+_HORCM_PATTERNS = {
+    'gid': {
+        'pattern': re.compile(r"ID +(?P<gid>\d+)\(0x\w+\)"),
+        'type': six.text_type,
+    },
+    'ldev': {
+        'pattern': re.compile(r"^LDEV +: +(?P<ldev>\d+)", re.M),
+        'type': int,
+    },
+    'lun': {
+        'pattern': re.compile(r"LUN +(?P<lun>\d+)\(0x\w+\)"),
+        'type': six.text_type,
+    },
+    'num_port': {
+        'pattern': re.compile(r"^NUM_PORT +: +(?P<num_port>\d+)", re.M),
+        'type': int,
+    },
+    'pair_gid': {
+        'pattern': re.compile(
+            r"^CL\w-\w+ +(?P<pair_gid>\d+) +%s " % _PAIR_TARGET_NAME, re.M),
+        'type': six.text_type,
+    },
+    'ports': {
+        'pattern': re.compile(r"^PORTs +: +(?P<ports>.+)$", re.M),
+        'type': list,
+    },
+    'vol_attr': {
+        'pattern': re.compile(r"^VOL_ATTR +: +(?P<vol_attr>.+)$", re.M),
+        'type': list,
+    },
+    'vol_size': {
+        'pattern': re.compile(
+            r"^VOL_Capacity\(BLK\) +: +(?P<vol_size>\d+)", re.M),
+        'type': int,
+    },
+    'vol_type': {
+        'pattern': re.compile(r"^VOL_TYPE +: +(?P<vol_type>.+)$", re.M),
+        'type': six.text_type,
+    },
+    'sts': {
+        'pattern': re.compile(r"^STS +: +(?P<sts>.+)", re.M),
+        'type': six.text_type,
+    },
+    'undefined_ldev': {
+        'pattern': re.compile(
+            r"^ +\d+ +(?P<undefined_ldev>\d+) +- +- +NOT +DEFINED", re.M),
+        'type': int,
+    },
+}
+
+LDEV_SEP_PATTERN = re.compile(r'\ +:\ +')
+CMD_PATTERN = re.compile(r"((?:^|\n)HORCM_CMD\n)")
+
+horcm_opts = [
+    cfg.ListOpt(
+        'vsp_horcm_numbers',
+        item_type=types.Integer(min=0, max=2047),
+        default=[200, 201],
+        help='Command Control Interface instance numbers in the format of '
+             '\'xxx,yyy\'. 
The second one is for ShadowImage operation and ' + 'the first one is for other purposes.'), + cfg.StrOpt( + 'vsp_horcm_user', + help='Name of the user on the storage system.'), + cfg.StrOpt( + 'vsp_horcm_password', + secret=True, + help='Password corresponding to vsp_horcm_user.'), + cfg.BoolOpt( + 'vsp_horcm_add_conf', + default=True, + help='If True, the driver will create or update the Command Control ' + 'Interface configuration file as needed.'), +] + +_REQUIRED_HORCM_OPTS = [ + 'vsp_horcm_user', + 'vsp_horcm_password', +] + +CONF = cfg.CONF +CONF.register_opts(horcm_opts) + +LOG = logging.getLogger(__name__) +MSG = utils.VSPMsg + + +def horcmgr_synchronized(func): + """Synchronize CCI operations per CCI instance.""" + @functools.wraps(func) + def wrap(self, *args, **kwargs): + """Synchronize CCI operations per CCI instance.""" + @coordination.synchronized(self.lock[args[0]]) + def func_locked(*_args, **_kwargs): + """Execute the wrapped function in a synchronized section.""" + return func(*_args, **_kwargs) + return func_locked(self, *args, **kwargs) + return wrap + + +def _is_valid_target(target, target_name, target_ports, is_pair): + """Return True if the specified host group is valid, False otherwise.""" + if is_pair: + return (target[:utils.PORT_ID_LENGTH] in target_ports and + target_name == _PAIR_TARGET_NAME) + if (target[:utils.PORT_ID_LENGTH] not in target_ports or + not target_name.startswith(utils.TARGET_PREFIX) or + target_name == _PAIR_TARGET_NAME): + return False + return True + + +def find_value(stdout, key): + """Return the first match from the given raidcom command output.""" + match = _HORCM_PATTERNS[key]['pattern'].search(stdout) + if match: + if _HORCM_PATTERNS[key]['type'] is list: + return [ + value.strip() for value in + LDEV_SEP_PATTERN.split(match.group(key))] + return _HORCM_PATTERNS[key]['type'](match.group(key)) + return None + + +def _run_horcmgr(inst): + """Return 1 if the CCI instance is running.""" + result = utils.execute( + 'env', 'HORCMINST=%s' % inst, 'horcmgr', '-check') + return result[0] + + +def _run_horcmshutdown(inst): + """Stop the CCI instance and return 0 if successful.""" + result = utils.execute('horcmshutdown.sh', inst) + return result[0] + + +def _run_horcmstart(inst): + """Start the CCI instance and return 0 if successful.""" + result = utils.execute('horcmstart.sh', inst) + return result[0] + + +def _check_ldev(ldev_info, ldev, existing_ref): + """Check if the LDEV meets the criteria for being managed by the driver.""" + if ldev_info['sts'] != NORMAL_STS: + msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + vol_attr = set(ldev_info['vol_attr']) + if (not ldev_info['vol_type'].startswith('OPEN-V') or + len(vol_attr) < 2 or not vol_attr.issubset(_PERMITTED_TYPES)): + msg = utils.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev, + ldevtype=utils.NVOL_LDEV_TYPE) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + # Hitachi storage calculates volume sizes in a block unit, 512 bytes. 
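+    # (1 GiB / 512 bytes = 2,097,152 blocks, so the modulo check below
+    # requires the LDEV's block count to be an exact multiple of one
+    # gigabyte, assuming utils.GIGABYTE_PER_BLOCK_SIZE is 2097152.)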
+ if ldev_info['vol_size'] % utils.GIGABYTE_PER_BLOCK_SIZE: + msg = utils.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + if ldev_info['num_port']: + msg = utils.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + + +class VSPHORCM(common.VSPCommon): + """HORCM interface class for Hitachi VSP Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. + + """ + + def __init__(self, conf, storage_protocol, db): + """Initialize instance variables.""" + super(VSPHORCM, self).__init__(conf, storage_protocol, db) + self.conf.append_config_values(horcm_opts) + + self._copy_groups = [None] * _MAX_MUNS + self._pair_targets = [] + self._pattern = { + 'pool': None, + 'p_pool': None, + } + + def run_raidcom(self, *args, **kwargs): + """Run a raidcom command and return its output.""" + if 'success_code' not in kwargs: + kwargs['success_code'] = HORCM_EXIT_CODE + cmd = ['raidcom'] + list(args) + [ + '-s', self.conf.vsp_storage_id, + '-I%s' % self.conf.vsp_horcm_numbers[_HORCMGR]] + return self.run_and_verify_storage_cli(*cmd, **kwargs) + + def _run_pair_cmd(self, command, *args, **kwargs): + """Run a pair-related CCI command and return its output.""" + kwargs['horcmgr'] = _PAIR_HORCMGR + if 'success_code' not in kwargs: + kwargs['success_code'] = HORCM_EXIT_CODE + cmd = [command] + list(args) + [ + '-IM%s' % self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]] + return self.run_and_verify_storage_cli(*cmd, **kwargs) + + def run_storage_cli(self, *cmd, **kwargs): + """Run a CCI command and return its output.""" + interval = kwargs.pop('interval', _EXEC_RETRY_INTERVAL) + flag = {'ignore_enauth': True} + + def _wait_for_horcm_execution(start_time, flag, *cmd, **kwargs): + """Run a CCI command and raise its output.""" + ignore_error = kwargs.pop('ignore_error', []) + no_retry_error = ignore_error + _HORCM_NO_RETRY_ERRORS + success_code = kwargs.pop('success_code', HORCM_EXIT_CODE) + timeout = kwargs.pop('timeout', _EXEC_MAX_WAITTIME) + horcmgr = kwargs.pop('horcmgr', _HORCMGR) + do_login = kwargs.pop('do_login', False) + + result = utils.execute(*cmd, **kwargs) + if _NOT_LOCKED in result[2] and not utils.check_timeout( + start_time, _LOCK_WAITTIME): + LOG.debug( + "The resource group to which the operation object " + "belongs is being locked by other software.") + return + if (result[0] in success_code or + utils.check_timeout(start_time, timeout) or + utils.check_ignore_error(no_retry_error, result[2])): + raise loopingcall.LoopingCallDone(result) + if result[0] == EX_ENAUTH: + if not self._retry_login(flag['ignore_enauth'], do_login): + raise loopingcall.LoopingCallDone(result) + flag['ignore_enauth'] = False + elif result[0] in _HORCM_ERROR: + if not self._start_horcmgr(horcmgr): + raise loopingcall.LoopingCallDone(result) + elif result[0] not in _COMMAND_IO_TO_RAID: + raise loopingcall.LoopingCallDone(result) + + loop = loopingcall.FixedIntervalLoopingCall( + _wait_for_horcm_execution, timeutils.utcnow(), + flag, *cmd, **kwargs) + return loop.start(interval=interval).wait() + + def _retry_login(self, ignore_enauth, do_login): + """Return True if login to CCI succeeds, False otherwise.""" + if not ignore_enauth: + if not do_login: + result = self._run_raidcom_login(do_raise=False) + + if do_login or result[0]: + utils.output_log(MSG.HORCM_LOGIN_FAILED, + user=self.conf.vsp_horcm_user) + return 
False + + return True + + def _run_raidcom_login(self, do_raise=True): + """Log in to CCI and return its output.""" + return self.run_raidcom( + '-login', self.conf.vsp_horcm_user, + self.conf.vsp_horcm_password, + do_raise=do_raise, do_login=True) + + @horcmgr_synchronized + def _restart_horcmgr(self, horcmgr): + """Restart the CCI instance.""" + inst = self.conf.vsp_horcm_numbers[horcmgr] + + def _wait_for_horcm_shutdown(start_time, inst): + """Stop the CCI instance and raise True if it stops.""" + if _run_horcmgr(inst) != _HORCM_RUNNING: + raise loopingcall.LoopingCallDone() + if (_run_horcmshutdown(inst) and + _run_horcmgr(inst) == _HORCM_RUNNING or + utils.check_timeout( + start_time, utils.DEFAULT_PROCESS_WAITTIME)): + raise loopingcall.LoopingCallDone(False) + + loop = loopingcall.FixedIntervalLoopingCall( + _wait_for_horcm_shutdown, timeutils.utcnow(), inst) + if not loop.start(interval=_HORCM_WAITTIME).wait(): + msg = utils.output_log( + MSG.HORCM_SHUTDOWN_FAILED, + inst=self.conf.vsp_horcm_numbers[horcmgr]) + raise exception.VSPError(msg) + + ret = _run_horcmstart(inst) + if ret and ret != _HORCM_RUNNING: + msg = utils.output_log( + MSG.HORCM_RESTART_FAILED, + inst=self.conf.vsp_horcm_numbers[horcmgr]) + raise exception.VSPError(msg) + + @coordination.synchronized('{self.lock[create_ldev]}') + def create_ldev(self, size, is_vvol=False): + """Create an LDEV of the specified size and the specified type.""" + ldev = super(VSPHORCM, self).create_ldev(size, is_vvol=is_vvol) + self._check_ldev_status(ldev) + return ldev + + def _check_ldev_status(self, ldev, delete=False): + """Wait until the LDEV status changes to the specified status.""" + if not delete: + args = _LDEV_CREATED + msg_id = MSG.LDEV_CREATION_WAIT_TIMEOUT + else: + args = _LDEV_DELETED + msg_id = MSG.LDEV_DELETION_WAIT_TIMEOUT + + def _wait_for_ldev_status(start_time, ldev, *args): + """Raise True if the LDEV is in the specified status.""" + result = self.run_raidcom( + 'get', 'ldev', '-ldev_id', ldev, *args, do_raise=False) + if not result[0]: + raise loopingcall.LoopingCallDone() + if utils.check_timeout(start_time, _LDEV_STATUS_WAITTIME): + raise loopingcall.LoopingCallDone(False) + + loop = loopingcall.FixedIntervalLoopingCall( + _wait_for_ldev_status, timeutils.utcnow(), ldev, *args) + if not loop.start(interval=_LDEV_CHECK_INTERVAL).wait(): + msg = utils.output_log(msg_id, ldev=ldev) + raise exception.VSPError(msg) + + def create_ldev_on_storage(self, ldev, size, is_vvol): + """Create an LDEV on the storage system.""" + args = ['add', 'ldev', '-ldev_id', ldev, '-capacity', '%sG' % size, + '-emulation', 'OPEN-V', '-pool'] + if is_vvol: + args.append('snap') + else: + args.append(self.conf.vsp_pool) + self.run_raidcom(*args) + + def get_unused_ldev(self): + """Find an unused LDEV and return its LDEV number.""" + if not self.storage_info['ldev_range']: + ldev_info = self.get_ldev_info( + ['ldev'], '-ldev_list', 'undefined', '-cnt', '1') + ldev = ldev_info.get('ldev') + else: + ldev = self._find_unused_ldev_by_range() + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is None'. 
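+        # ('-ldev_list undefined -cnt 1' asks raidcom for the first
+        # undefined LDEV on the array; the range-restricted path scans
+        # vsp_ldev_range in chunks instead. Either path may legitimately
+        # return LDEV 0.)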
+ if ldev is None: + msg = utils.output_log(MSG.NO_AVAILABLE_RESOURCE, resource='LDEV') + raise exception.VSPError(msg) + return ldev + + def _find_unused_ldev_by_range(self): + """Return the LDEV number of an unused LDEV in the LDEV range.""" + success_code = HORCM_EXIT_CODE.union(_INVALID_RANGE) + start, end = self.storage_info['ldev_range'][:2] + + while start <= end: + if end - start + 1 > _GETSTORAGEARRAY_ONCE: + cnt = _GETSTORAGEARRAY_ONCE + else: + cnt = end - start + 1 + + ldev_info = self.get_ldev_info( + ['undefined_ldev'], '-ldev_id', start, '-cnt', cnt, + '-key', 'front_end', success_code=success_code) + ldev = ldev_info.get('undefined_ldev') + # When 'ldev' is 0, it should be true. + # Therefore, it cannot remove 'is not None'. + if ldev is not None: + return ldev + + start += _GETSTORAGEARRAY_ONCE + + return None + + def get_ldev_info(self, keys, *args, **kwargs): + """Return a dictionary of LDEV-related items.""" + data = {} + result = self.run_raidcom('get', 'ldev', *args, **kwargs) + for key in keys: + data[key] = find_value(result[1], key) + return data + + def copy_on_storage(self, pvol, size, metadata, sync): + """Check if the LDEV can be copied on the storage.""" + ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', pvol) + if ldev_info['sts'] != NORMAL_STS: + msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol) + raise exception.VSPError(msg) + + if VVOL_ATTR in ldev_info['vol_attr']: + raise exception.VSPNotSupported() + return super(VSPHORCM, self).copy_on_storage(pvol, size, metadata, + sync) + + @coordination.synchronized('{self.lock[create_pair]}') + def create_pair_on_storage(self, pvol, svol, is_thin): + """Create a copy pair on the storage.""" + path_list = [] + vol_type, pair_info = self._get_vol_type_and_pair_info(pvol) + if vol_type == SVOL: + self._delete_pair_based_on_svol( + pair_info['pvol'], pair_info['svol_info'], + no_restart=True) + if vol_type != PVOL: + self._initialize_pair_connection(pvol) + path_list.append(pvol) + try: + self._initialize_pair_connection(svol) + path_list.append(svol) + self._create_pair_on_storage_core(pvol, svol, is_thin, vol_type) + except exception.VSPError: + with excutils.save_and_reraise_exception(): + for ldev in path_list: + try: + self._terminate_pair_connection(ldev) + except exception.VSPError: + utils.output_log(MSG.UNMAP_LDEV_FAILED, ldev=ldev) + + def _create_pair_on_storage_core(self, pvol, svol, is_thin, vol_type): + """Create a copy pair on the storage depending on the copy method.""" + if is_thin: + self._create_thin_copy_pair(pvol, svol) + + else: + self._create_full_copy_pair(pvol, svol, vol_type) + + def _create_thin_copy_pair(self, pvol, svol): + """Create a THIN copy pair on the storage.""" + snapshot_name = _SNAP_NAME + six.text_type(svol % _SNAP_HASH_SIZE) + self.run_raidcom( + 'add', 'snapshot', '-ldev_id', pvol, svol, '-pool', + self.conf.vsp_thin_pool, '-snapshot_name', + snapshot_name, '-copy_size', self.conf.vsp_copy_speed) + try: + self.wait_thin_copy(svol, PAIR) + self.run_raidcom( + 'modify', 'snapshot', '-ldev_id', svol, + '-snapshot_data', 'create') + self.wait_thin_copy(svol, PSUS) + except exception.VSPError: + with excutils.save_and_reraise_exception(): + interval = self.conf.vsp_async_copy_check_interval + try: + self._delete_thin_copy_pair(pvol, svol, interval) + except exception.VSPError: + utils.output_log(MSG.DELETE_TI_PAIR_FAILED, pvol=pvol, + svol=svol) + + def _create_full_copy_pair(self, pvol, svol, vol_type): + """Create a FULL copy pair on the 
storage.""" + mun = 0 + + if vol_type == PVOL: + mun = self._get_unused_mun(pvol) + + copy_group = self._copy_groups[mun] + ldev_name = _LDEV_NAME % (pvol, svol) + restart = False + create = False + + try: + self._add_pair_config(pvol, svol, copy_group, ldev_name, mun) + self._restart_horcmgr(_PAIR_HORCMGR) + restart = True + self._run_pair_cmd( + 'paircreate', '-g', copy_group, '-d', ldev_name, + '-c', self.conf.vsp_copy_speed, + '-vl', '-split', '-fq', 'quick') + create = True + + self._wait_full_copy(svol, set([PSUS, COPY])) + except exception.VSPError: + with excutils.save_and_reraise_exception(): + if create: + try: + self._wait_full_copy(svol, set([PAIR, PSUS, PSUE])) + except exception.VSPError: + utils.output_log(MSG.WAIT_SI_PAIR_STATUS_FAILED, + pvol=pvol, svol=svol) + + interval = self.conf.vsp_async_copy_check_interval + + try: + self._delete_full_copy_pair(pvol, svol, interval) + except exception.VSPError: + utils.output_log(MSG.DELETE_SI_PAIR_FAILED, pvol=pvol, + svol=svol) + + try: + if self._is_smpl(svol): + self._delete_pair_config( + pvol, svol, copy_group, ldev_name) + except exception.VSPError: + utils.output_log(MSG.DELETE_DEVICE_GRP_FAILED, pvol=pvol, + svol=svol) + + if restart: + try: + self._restart_horcmgr(_PAIR_HORCMGR) + except exception.VSPError: + utils.output_log( + MSG.HORCM_RESTART_FOR_SI_FAILED, + inst=self.conf.vsp_horcm_numbers[1]) + + def _get_unused_mun(self, ldev): + """Return the number of an unused mirror unit.""" + pair_list = [] + + for mun in range(_MAX_MUNS): + pair_info = self._get_full_copy_pair_info(ldev, mun) + if not pair_info: + return mun + + pair_list.append((pair_info['svol_info'], mun)) + + for svol_info, mun in pair_list: + if svol_info['is_psus']: + self._delete_pair_based_on_svol( + ldev, svol_info, no_restart=True) + return mun + + utils.output_log(MSG.NO_AVAILABLE_MIRROR_UNIT, + copy_method=utils.FULL, pvol=ldev) + raise exception.VSPBusy() + + def _get_vol_type_and_pair_info(self, ldev): + """Return a tuple of the LDEV's ShadowImage pair status and info.""" + ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) + if ldev_info['sts'] != NORMAL_STS: + return (SMPL, None) + + if THIN_ATTR in ldev_info['vol_attr']: + return (PVOL, None) + + if FULL_ATTR in ldev_info['vol_attr']: + pair_info = self._get_full_copy_pair_info(ldev, 0) + if not pair_info: + return (PVOL, None) + + if pair_info['pvol'] != ldev: + return (SVOL, pair_info) + + return (PVOL, None) + + return (SMPL, None) + + def _get_full_copy_info(self, ldev): + """Return a tuple of P-VOL and S-VOL's info of a ShadowImage pair.""" + vol_type, pair_info = self._get_vol_type_and_pair_info(ldev) + svol_info = [] + + if vol_type == SMPL: + return (None, None) + + elif vol_type == SVOL: + return (pair_info['pvol'], [pair_info['svol_info']]) + + for mun in range(_MAX_MUNS): + pair_info = self._get_full_copy_pair_info(ldev, mun) + if pair_info: + svol_info.append(pair_info['svol_info']) + + return (ldev, svol_info) + + @coordination.synchronized('{self.lock[create_pair]}') + def delete_pair(self, ldev, all_split=True): + """Delete the specified LDEV in a synchronized section.""" + super(VSPHORCM, self).delete_pair(ldev, all_split=all_split) + + def delete_pair_based_on_pvol(self, pair_info, all_split): + """Disconnect all volume pairs to which the specified P-VOL belongs.""" + svols = [] + restart = False + + try: + for svol_info in pair_info['svol_info']: + if svol_info['is_thin'] or not svol_info['is_psus']: + svols.append(six.text_type(svol_info['ldev'])) + 
continue + + self.delete_pair_from_storage( + pair_info['pvol'], svol_info['ldev'], False) + + restart = True + + self._terminate_pair_connection(svol_info['ldev']) + + if not svols: + self._terminate_pair_connection(pair_info['pvol']) + + finally: + if restart: + self._restart_horcmgr(_PAIR_HORCMGR) + + if all_split and svols: + utils.output_log( + MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'], + svol=', '.join(svols)) + raise exception.VSPBusy() + + def delete_pair_based_on_svol(self, pvol, svol_info): + """Disconnect all volume pairs to which the specified S-VOL belongs.""" + self._delete_pair_based_on_svol(pvol, svol_info) + + def _delete_pair_based_on_svol(self, pvol, svol_info, no_restart=False): + """Disconnect all volume pairs to which the specified S-VOL belongs.""" + do_restart = False + + if not svol_info['is_psus']: + utils.output_log(MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, + svol=svol_info['ldev']) + raise exception.VSPBusy() + + try: + self.delete_pair_from_storage( + pvol, svol_info['ldev'], svol_info['is_thin']) + do_restart = True + self._terminate_pair_connection(svol_info['ldev']) + self._terminate_pair_connection(pvol) + finally: + if not no_restart and do_restart: + self._restart_horcmgr(_PAIR_HORCMGR) + + def delete_pair_from_storage(self, pvol, svol, is_thin): + """Disconnect the volume pair that consists of the specified LDEVs.""" + interval = self.conf.vsp_async_copy_check_interval + if is_thin: + self._delete_thin_copy_pair(pvol, svol, interval) + else: + self._delete_full_copy_pair(pvol, svol, interval) + + def _delete_thin_copy_pair(self, pvol, svol, interval): + """Disconnect a THIN volume pair.""" + result = self.run_raidcom( + 'get', 'snapshot', '-ldev_id', svol) + if not result[1]: + return + mun = result[1].splitlines()[1].split()[5] + self.run_raidcom( + 'unmap', 'snapshot', '-ldev_id', svol, + success_code=ALL_EXIT_CODE) + self.run_raidcom( + 'delete', 'snapshot', '-ldev_id', pvol, '-mirror_id', mun) + self._wait_thin_copy_deleting(svol, interval=interval) + + def _wait_thin_copy_deleting(self, ldev, **kwargs): + """Wait until the LDEV is no longer in a THIN volume pair.""" + interval = kwargs.pop( + 'interval', self.conf.vsp_async_copy_check_interval) + + def _wait_for_thin_copy_smpl(start_time, ldev, **kwargs): + """Raise True if the LDEV is no longer in a THIN volume pair.""" + timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) + ldev_info = self.get_ldev_info( + ['sts', 'vol_attr'], '-ldev_id', ldev) + if (ldev_info['sts'] != NORMAL_STS or + THIN_ATTR not in ldev_info['vol_attr']): + raise loopingcall.LoopingCallDone() + if utils.check_timeout(start_time, timeout): + raise loopingcall.LoopingCallDone(False) + + loop = loopingcall.FixedIntervalLoopingCall( + _wait_for_thin_copy_smpl, timeutils.utcnow(), ldev, **kwargs) + if not loop.start(interval=interval).wait(): + msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) + raise exception.VSPError(msg) + + def _delete_full_copy_pair(self, pvol, svol, interval): + """Disconnect a FULL volume pair.""" + stdout = self._run_pairdisplay( + '-d', self.conf.vsp_storage_id, svol, 0) + if not stdout: + return + + copy_group = stdout.splitlines()[2].split()[0] + ldev_name = _LDEV_NAME % (pvol, svol) + + if stdout.splitlines()[1].split()[9] != 'P-VOL': + self._restart_horcmgr(_PAIR_HORCMGR) + try: + self._run_pair_cmd( + 'pairsplit', '-g', copy_group, '-d', ldev_name, '-S') + self._wait_full_copy(svol, set([SMPL]), interval=interval) + finally: + if self._is_smpl(svol): + 
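+                # The pair has returned to SMPL, so the LDEVs can be
+                # removed from the ShadowImage device groups that were
+                # created for this copy pair.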
+                self._delete_pair_config(pvol, svol, copy_group, ldev_name)
+
+    def _initialize_pair_connection(self, ldev):
+        """Initialize server-volume connection for volume copy."""
+        port, gid = None, None
+
+        for port, gid in self._pair_targets:
+            try:
+                targets = {
+                    'list': [(port, gid)],
+                    'lun': {},
+                }
+                return self.map_ldev(targets, ldev)
+            except exception.VSPError:
+                utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port,
+                                 id=gid, lun=None)
+
+        msg = utils.output_log(MSG.NO_MAPPING_FOR_LDEV, ldev=ldev)
+        raise exception.VSPError(msg)
+
+    def _terminate_pair_connection(self, ldev):
+        """Terminate server-volume connection for volume copy."""
+        targets = {
+            'list': [],
+        }
+        ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
+        if (ldev_info['sts'] == NORMAL_STS and
+                FULL_ATTR in ldev_info['vol_attr'] or
+                self._get_thin_copy_svol_status(ldev) != SMPL):
+            LOG.debug(
+                'The specified LDEV is in a pair, so the unmapping '
+                'operation was skipped. '
+                '(LDEV: %(ldev)s, vol_attr: %(info)s)',
+                {'ldev': ldev, 'info': ldev_info['vol_attr']})
+            return
+        self._find_mapped_targets_from_storage(
+            targets, ldev, self.storage_info['ports'], is_pair=True)
+        self.unmap_ldev(targets, ldev)
+
+    def check_param(self):
+        """Check parameter values and consistency among them."""
+        super(VSPHORCM, self).check_param()
+        utils.check_opts(self.conf, horcm_opts)
+        insts = self.conf.vsp_horcm_numbers
+        if len(insts) != 2 or insts[_HORCMGR] == insts[_PAIR_HORCMGR]:
+            msg = utils.output_log(MSG.INVALID_PARAMETER,
+                                   param='vsp_horcm_numbers')
+            raise exception.VSPError(msg)
+        utils.output_log(MSG.SET_CONFIG_VALUE, object='LDEV range',
+                         value=self.storage_info['ldev_range'])
+        for opt in _REQUIRED_HORCM_OPTS:
+            if not self.conf.safe_get(opt):
+                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
+                raise exception.VSPError(msg)
+
+    def _set_copy_groups(self, host_ip):
+        """Initialize an instance variable for ShadowImage copy groups."""
+        serial = self.conf.vsp_storage_id
+        inst = self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]
+
+        for mun in range(_MAX_MUNS):
+            copy_group = _COPY_GROUP % (host_ip, serial, inst, mun)
+            self._copy_groups[mun] = copy_group
+        utils.output_log(MSG.SET_CONFIG_VALUE, object='copy group list',
+                         value=self._copy_groups)
+
+    def connect_storage(self):
+        """Prepare for using the storage."""
+        self._set_copy_groups(CONF.my_ip)
+
+        if self.conf.vsp_horcm_add_conf:
+            self._create_horcm_conf()
+            self._create_horcm_conf(horcmgr=_PAIR_HORCMGR)
+        self._restart_horcmgr(_HORCMGR)
+        self._restart_horcmgr(_PAIR_HORCMGR)
+        self._run_raidcom_login()
+        super(VSPHORCM, self).connect_storage()
+
+        self._pattern['p_pool'] = re.compile(
+            (r"^%03d +\S+ +\d+ +\d+ +(?P<tp_cap>\d+) +\d+ +\d+ +\d+ +\w+ +"
+             r"\d+ +(?P<tl_cap>\d+)") % self.storage_info['pool_id'], re.M)
+        self._pattern['pool'] = re.compile(
+            r"^%03d +\S+ +\d+ +\S+ +\w+ +\d+ +\w+ +\d+ +(?P<vcap>\S+)" %
+            self.storage_info['pool_id'], re.M)
+
+    def _find_lun(self, ldev, port, gid):
+        """Return the LUN determined by the given arguments."""
+        result = self.run_raidcom(
+            'get', 'lun', '-port', '-'.join([port, gid]))
+        match = re.search(
+            r'^%(port)s +%(gid)s +\S+ +(?P<lun>\d+) +1 +%(ldev)s ' % {
+                'port': port, 'gid': gid, 'ldev': ldev}, result[1], re.M)
+        if match:
+            return match.group('lun')
+        return None
+
+    def _find_mapped_targets_from_storage(self, targets, ldev,
+                                          target_ports, is_pair=False):
+        """Update port-gid list for the specified LDEV."""
+        ldev_info = self.get_ldev_info(['ports'], '-ldev_id', ldev)
+        if not ldev_info['ports']:
+            return
+        for
ports_strings in ldev_info['ports']: + ports = ports_strings.split() + if _is_valid_target(ports[0], ports[2], target_ports, is_pair): + targets['list'].append(ports[0]) + + def find_mapped_targets_from_storage(self, targets, ldev, target_ports): + """Update port-gid list for the specified LDEV.""" + self._find_mapped_targets_from_storage(targets, ldev, target_ports) + + def get_unmap_targets_list(self, target_list, mapped_list): + """Return a list of IDs of ports that need to be disconnected.""" + unmap_list = [] + for mapping_info in mapped_list: + if (mapping_info[:utils.PORT_ID_LENGTH], + mapping_info.split('-')[2]) in target_list: + unmap_list.append(mapping_info) + return unmap_list + + def unmap_ldev(self, targets, ldev): + """Delete the LUN between the specified LDEV and port-gid.""" + interval = _LUN_RETRY_INTERVAL + success_code = HORCM_EXIT_CODE.union([EX_ENOOBJ]) + timeout = utils.DEFAULT_PROCESS_WAITTIME + for target in targets['list']: + self.run_raidcom( + 'delete', 'lun', '-port', target, '-ldev_id', ldev, + interval=interval, success_code=success_code, timeout=timeout) + LOG.debug( + 'Deleted logical unit path of the specified logical ' + 'device. (LDEV: %(ldev)s, host group: %(target)s)', + {'ldev': ldev, 'target': target}) + + def find_all_mapped_targets_from_storage(self, targets, ldev): + """Add all port-gids connected with the LDEV to the list.""" + ldev_info = self.get_ldev_info(['ports'], '-ldev_id', ldev) + if ldev_info['ports']: + for port in ldev_info['ports']: + targets['list'].append(port.split()[0]) + + def delete_target_from_storage(self, port, gid): + """Delete the host group from the port.""" + result = self.run_raidcom( + 'delete', 'host_grp', '-port', + '-'.join([port, gid]), do_raise=False) + if result[0]: + utils.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid) + + def _run_add_lun(self, ldev, port, gid, lun=None): + """Create a LUN between the specified LDEV and port-gid.""" + args = ['add', 'lun', '-port', '-'.join([port, gid]), '-ldev_id', ldev] + ignore_error = [_LU_PATH_DEFINED] + if lun: + args.extend(['-lun_id', lun]) + ignore_error = [_ANOTHER_LDEV_MAPPED] + result = self.run_raidcom( + *args, ignore_error=ignore_error, + interval=_LUN_RETRY_INTERVAL, timeout=_LUN_MAX_WAITTIME) + if not lun: + if result[0] == EX_CMDRJE: + lun = self._find_lun(ldev, port, gid) + LOG.debug( + 'A logical unit path has already been defined in the ' + 'specified logical device. (LDEV: %(ldev)s, ' + 'port: %(port)s, gid: %(gid)s, lun: %(lun)s)', + {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) + else: + lun = find_value(result[1], 'lun') + elif _ANOTHER_LDEV_MAPPED in result[2]: + utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid, + lun=lun) + return None + LOG.debug( + 'Created logical unit path to the specified logical device. 
' + '(LDEV: %(ldev)s, port: %(port)s, ' + 'gid: %(gid)s, lun: %(lun)s)', + {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) + return lun + + def map_ldev(self, targets, ldev): + """Create the path between the server and the LDEV and return LUN.""" + port, gid = targets['list'][0] + lun = self._run_add_lun(ldev, port, gid) + targets['lun'][port] = True + for port, gid in targets['list'][1:]: + try: + lun2 = self._run_add_lun(ldev, port, gid, lun=lun) + if lun2 is not None: + targets['lun'][port] = True + except exception.VSPError: + utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, + id=gid, lun=lun) + return lun + + def extend_ldev(self, ldev, old_size, new_size): + """Extend the specified LDEV to the specified new size.""" + timeout = _EXTEND_WAITTIME + self.run_raidcom('extend', 'ldev', '-ldev_id', ldev, '-capacity', + '%sG' % (new_size - old_size), timeout=timeout) + + def get_pool_info(self): + """Return the total and free capacity of the storage pool.""" + result = self.run_raidcom('get', 'dp_pool') + p_pool_match = self._pattern['p_pool'].search(result[1]) + + result = self.run_raidcom('get', 'pool', '-key', 'opt') + pool_match = self._pattern['pool'].search(result[1]) + + if not p_pool_match or not pool_match: + msg = utils.output_log(MSG.POOL_NOT_FOUND, + pool=self.storage_info['pool_id']) + raise exception.VSPError(msg) + + tp_cap = float(p_pool_match.group('tp_cap')) / units.Ki + tl_cap = float(p_pool_match.group('tl_cap')) / units.Ki + vcap = 'infinite' if pool_match.group('vcap') == _INFINITE else ( + int(pool_match.group('vcap'))) + + if vcap == 'infinite': + return 'unknown', 'unknown' + else: + total_gb = int(math.floor(tp_cap * (vcap / 100.0))) + free_gb = int(math.floor(total_gb - tl_cap)) + return total_gb, free_gb + + def discard_zero_page(self, volume): + """Return the volume's no-data pages to the storage pool.""" + ldev = utils.get_ldev(volume) + try: + self.run_raidcom( + 'modify', 'ldev', '-ldev_id', ldev, + '-status', 'discard_zero_page') + except exception.VSPError: + utils.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev) + + def wait_thin_copy(self, ldev, status, **kwargs): + """Wait until the S-VOL status changes to the specified status.""" + interval = kwargs.pop( + 'interval', self.conf.vsp_copy_check_interval) + + def _wait_for_thin_copy_status(start_time, ldev, status, **kwargs): + """Raise True if the S-VOL is in the specified status.""" + timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) + if self._get_thin_copy_svol_status(ldev) == status: + raise loopingcall.LoopingCallDone() + if utils.check_timeout(start_time, timeout): + raise loopingcall.LoopingCallDone(False) + + loop = loopingcall.FixedIntervalLoopingCall( + _wait_for_thin_copy_status, timeutils.utcnow(), + ldev, status, **kwargs) + if not loop.start(interval=interval).wait(): + msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) + raise exception.VSPError(msg) + + def _get_thin_copy_svol_status(self, ldev): + """Return the status of the S-VOL in a THIN volume pair.""" + result = self.run_raidcom( + 'get', 'snapshot', '-ldev_id', ldev) + if not result[1]: + return SMPL + return _STATUS_TABLE.get(result[1].splitlines()[1].split()[2], UNKN) + + def _create_horcm_conf(self, horcmgr=_HORCMGR): + """Create a CCI configuration file.""" + inst = self.conf.vsp_horcm_numbers[horcmgr] + serial = self.conf.vsp_storage_id + filename = '/etc/horcm%s.conf' % inst + port = _DEFAULT_PORT_BASE + inst + found = False + if not os.path.exists(filename): + file_str = """ 
+HORCM_MON +#ip_address service poll(10ms) timeout(10ms) +127.0.0.1 %16d 6000 3000 +HORCM_CMD +""" % port + else: + file_str = cinder_utils.read_file_as_root(filename) + if re.search(r'^\\\\.\\CMD-%s:/dev/sd$' % serial, file_str, re.M): + found = True + if not found: + repl_str = r'\1\\\\.\\CMD-%s:/dev/sd\n' % serial + file_str = CMD_PATTERN.sub(repl_str, file_str) + result = utils.execute('tee', filename, process_input=file_str) + if result[0]: + msg = utils.output_log( + MSG.CREATE_HORCM_CONF_FILE_FAILED, file=filename, + ret=result[0], err=result[2]) + raise exception.VSPError(msg) + + def init_cinder_hosts(self, **kwargs): + """Initialize server-storage connection.""" + targets = { + 'info': {}, + 'list': [], + } + super(VSPHORCM, self).init_cinder_hosts(targets=targets) + self._init_pair_targets(targets['info']) + + def _init_pair_targets(self, targets_info): + """Initialize server-storage connection for volume copy.""" + for port in targets_info.keys(): + if not targets_info[port]: + continue + result = self.run_raidcom('get', 'host_grp', '-port', port) + gid = find_value(result[1], 'pair_gid') + if not gid: + try: + connector = { + 'ip': _PAIR_TARGET_NAME_BODY, + 'wwpns': [_PAIR_TARGET_NAME_BODY], + } + target_name, gid = self.create_target_to_storage( + port, connector) + utils.output_log(MSG.OBJECT_CREATED, + object='a target for pair operation', + details='port: %(port)s, gid: %(gid)s, ' + 'target_name: %(target)s' % + {'port': port, 'gid': gid, + 'target': target_name}) + except exception.VSPError: + utils.output_log(MSG.CREATE_HOST_GROUP_FAILED, port=port) + continue + self._pair_targets.append((port, gid)) + + if not self._pair_targets: + msg = utils.output_log(MSG.ADD_PAIR_TARGET_FAILED) + raise exception.VSPError(msg) + self._pair_targets.sort(reverse=True) + utils.output_log(MSG.SET_CONFIG_VALUE, + object='port-gid list for pair operation', + value=self._pair_targets) + + @coordination.synchronized('{self.lock[create_ldev]}') + def delete_ldev_from_storage(self, ldev): + """Delete the specified LDEV from the storage.""" + self._delete_ldev_from_storage(ldev) + self._check_ldev_status(ldev, delete=True) + + def _delete_ldev_from_storage(self, ldev): + """Delete the specified LDEV from the storage.""" + result = self.run_raidcom( + 'get', 'ldev', '-ldev_id', ldev, *_LDEV_DELETED, do_raise=False) + if not result[0]: + utils.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev) + return + self.run_raidcom('delete', 'ldev', '-ldev_id', ldev) + + def _run_pairdisplay(self, *args): + """Execute ShadowImage pairdisplay command.""" + result = self._run_pair_cmd( + 'pairdisplay', '-CLI', *args, do_raise=False, + success_code=HORCM_EXIT_CODE.union(_NO_SUCH_DEVICE)) + return result[1] + + def _check_copy_grp(self, copy_group): + """Return the number of device groups in the specified copy group.""" + count = 0 + result = self.run_raidcom('get', 'copy_grp') + for line in result[1].splitlines()[1:]: + line = line.split() + if line[0] == copy_group: + count += 1 + if count == 2: + break + return count + + def _check_device_grp(self, group_name, ldev, ldev_name=None): + """Return True if the LDEV is in the device group, False otherwise.""" + result = self.run_raidcom( + 'get', 'device_grp', '-device_grp_name', group_name) + for line in result[1].splitlines()[1:]: + line = line.split() + if int(line[2]) == ldev: + if not ldev_name: + return True + else: + return line[1] == ldev_name + return False + + def _is_smpl(self, ldev): + """Return True if the status of the LDEV is SMPL, False otherwise.""" + 
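+        # The pairdisplay output is assumed to consist of a header line
+        # followed by one line per pair member; no output at all means no
+        # pair exists, and otherwise the status field on the third line
+        # decides whether the LDEV is simplex.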
+        stdout = self._run_pairdisplay(
+            '-d', self.conf.vsp_storage_id, ldev, 0)
+        if not stdout:
+            return True
+        return stdout.splitlines()[2].split()[9] in _SMPL_STAUS
+
+    def _get_full_copy_pair_info(self, ldev, mun):
+        """Return info of the ShadowImage volume pair."""
+        stdout = self._run_pairdisplay(
+            '-d', self.conf.vsp_storage_id, ldev, mun)
+        if not stdout:
+            return None
+        line = stdout.splitlines()[2].split()
+        if not line[8].isdigit() or not line[12].isdigit():
+            return None
+        pvol, svol = int(line[12]), int(line[8])
+        LOG.debug(
+            'Full copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, '
+            'status: %(status)s)',
+            {'pvol': pvol, 'svol': svol, 'status': line[10]})
+        return {
+            'pvol': pvol,
+            'svol_info': {
+                'ldev': svol,
+                'is_psus': line[10] == "SSUS",
+                'is_thin': False,
+            },
+        }
+
+    def _get_thin_copy_info(self, ldev):
+        """Return info of the Thin Image volume pair."""
+        result = self.run_raidcom(
+            'get', 'snapshot', '-ldev_id', ldev)
+        if not result[1]:
+            return (None, None)
+
+        line = result[1].splitlines()[1].split()
+        is_psus = _STATUS_TABLE.get(line[2]) == PSUS
+        if line[1] == "P-VOL":
+            pvol, svol = ldev, int(line[6])
+        else:
+            pvol, svol = int(line[6]), ldev
+        LOG.debug(
+            'Thin copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, '
+            'status: %(status)s)',
+            {'pvol': pvol, 'svol': svol, 'status': line[2]})
+        return (pvol, [{'ldev': svol, 'is_thin': True, 'is_psus': is_psus}])
+
+    def get_pair_info(self, ldev):
+        """Return info of the volume pair."""
+        pair_info = {}
+        ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
+        if ldev_info['sts'] != NORMAL_STS or _PAIR_ATTRS.isdisjoint(
+                ldev_info['vol_attr']):
+            return None
+
+        if FULL_ATTR in ldev_info['vol_attr']:
+            pvol, svol_info = self._get_full_copy_info(ldev)
+            # A P-VOL number of 0 is valid but evaluates as falsy, so the
+            # explicit 'is not None' check must be kept.
+            if pvol is not None:
+                pair_info['pvol'] = pvol
+                pair_info.setdefault('svol_info', [])
+                pair_info['svol_info'].extend(svol_info)
+
+        if THIN_ATTR in ldev_info['vol_attr']:
+            pvol, svol_info = self._get_thin_copy_info(ldev)
+            # A P-VOL number of 0 is valid but evaluates as falsy, so the
+            # explicit 'is not None' check must be kept.
+ if pvol is not None: + pair_info['pvol'] = pvol + pair_info.setdefault('svol_info', []) + pair_info['svol_info'].extend(svol_info) + + return pair_info + + def _add_pair_config(self, pvol, svol, copy_group, ldev_name, mun): + """Create device groups and a copy group for the SI volume pair.""" + pvol_group = copy_group + 'P' + svol_group = copy_group + 'S' + self.run_raidcom( + 'add', 'device_grp', '-device_grp_name', + pvol_group, ldev_name, '-ldev_id', pvol) + self.run_raidcom( + 'add', 'device_grp', '-device_grp_name', + svol_group, ldev_name, '-ldev_id', svol) + nr_copy_groups = self._check_copy_grp(copy_group) + if nr_copy_groups == 1: + self.run_raidcom( + 'delete', 'copy_grp', '-copy_grp_name', copy_group) + if nr_copy_groups != 2: + self.run_and_verify_storage_cli( + 'raidcom', 'add', 'copy_grp', '-copy_grp_name', + copy_group, pvol_group, svol_group, '-mirror_id', mun, + '-s', self.conf.vsp_storage_id, + '-IM%s' % self.conf.vsp_horcm_numbers[_HORCMGR], + success_code=HORCM_EXIT_CODE) + + def _delete_pair_config(self, pvol, svol, copy_group, ldev_name): + """Delete specified LDEVs from ShadowImage device groups.""" + pvol_group = copy_group + 'P' + svol_group = copy_group + 'S' + if self._check_device_grp(pvol_group, pvol, ldev_name=ldev_name): + self.run_raidcom( + 'delete', 'device_grp', '-device_grp_name', + pvol_group, '-ldev_id', pvol) + if self._check_device_grp(svol_group, svol, ldev_name=ldev_name): + self.run_raidcom( + 'delete', 'device_grp', '-device_grp_name', + svol_group, '-ldev_id', svol) + + def _wait_full_copy(self, ldev, status, **kwargs): + """Wait until the LDEV status changes to the specified status.""" + interval = kwargs.pop( + 'interval', self.conf.vsp_copy_check_interval) + + def _wait_for_full_copy_pair_status(start_time, ldev, + status, **kwargs): + """Raise True if the LDEV is in the specified status.""" + timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) + if self._run_pairevtwait(ldev) in status: + raise loopingcall.LoopingCallDone() + if utils.check_timeout(start_time, timeout): + raise loopingcall.LoopingCallDone(False) + + loop = loopingcall.FixedIntervalLoopingCall( + _wait_for_full_copy_pair_status, timeutils.utcnow(), + ldev, status, **kwargs) + if not loop.start(interval=interval).wait(): + msg = utils.output_log(MSG.SI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) + raise exception.VSPError(msg) + + def wait_full_copy_completion(self, pvol, svol): + """Wait until the ShadowImage volume copy has finished.""" + self._wait_full_copy(svol, set([PSUS, PSUE]), + timeout=utils.MAX_PROCESS_WAITTIME) + if self._run_pairevtwait(svol) == PSUE: + msg = utils.output_log(MSG.VOLUME_COPY_FAILED, + copy_method=utils.FULL, pvol=pvol, + svol=svol) + raise exception.VSPError(msg) + + def _run_pairevtwait(self, ldev): + """Execute ShadowImage pairevtwait command.""" + result = self._run_pair_cmd( + 'pairevtwait', '-d', self.conf.vsp_storage_id, + ldev, '-nowaits') + return result[0] + + def get_ldev_size_in_gigabyte(self, ldev, existing_ref): + """Return the size[GB] of the specified LDEV.""" + ldev_info = self.get_ldev_info( + _CHECK_KEYS, '-ldev_id', ldev, do_raise=False) + _check_ldev(ldev_info, ldev, existing_ref) + # Hitachi storage calculates volume sizes in a block unit, 512 bytes. 
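+        # Worked example with assumed numbers: a 10 GiB LDEV reports
+        # vol_size = 10 * units.Gi / 512 = 20971520 blocks, and
+        # 20971520 / GIGABYTE_PER_BLOCK_SIZE (units.Gi / 512 = 2097152)
+        # yields 10.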
+        return ldev_info['vol_size'] / utils.GIGABYTE_PER_BLOCK_SIZE
+
+    def get_pool_id(self):
+        """Return the pool number of vsp_pool."""
+        pool_id = super(VSPHORCM, self).get_pool_id()
+        if pool_id is None:
+            pool = self.conf.vsp_pool
+            result = self.run_raidcom('get', 'pool', '-key', 'opt')
+            for line in result[1].splitlines()[1:]:
+                line = line.split()
+                if line[3] == pool:
+                    return int(line[0])
+        return pool_id
+
+    def config_lock(self):
+        """Initialize lock resource names."""
+        for key in ['create_ldev', 'create_pair']:
+            self.lock[key] = '_'.join([key, self.conf.vsp_storage_id])
+        self.lock[_HORCMGR] = (
+            'horcmgr_%s' % self.conf.vsp_horcm_numbers[_HORCMGR])
+        self.lock[_PAIR_HORCMGR] = (
+            'horcmgr_%s' % self.conf.vsp_horcm_numbers[_PAIR_HORCMGR])
+
+    @horcmgr_synchronized
+    def _start_horcmgr(self, horcmgr):
+        """Start the CCI instance and return True if successful."""
+        inst = self.conf.vsp_horcm_numbers[horcmgr]
+        ret = 0
+        if _run_horcmgr(inst) != _HORCM_RUNNING:
+            ret = _run_horcmstart(inst)
+        if ret and ret != _HORCM_RUNNING:
+            utils.output_log(MSG.HORCM_START_FAILED, inst=inst)
+            return False
+        return True
+
+    def output_param_to_log(self):
+        """Output configuration parameter values to the log file."""
+        super(VSPHORCM, self).output_param_to_log()
+        utils.output_opts(self.conf, horcm_opts)
+
+    def get_storage_cli_info(self):
+        """Return a tuple of the storage CLI name and its version."""
+        version = 'N/A'
+        result = utils.execute('raidqry', '-h')
+        match = re.search(r'^Ver&Rev: +(?P<version>\S+)', result[1], re.M)
+        if match:
+            version = match.group('version')
+        return ('Command Control Interface', version)
+
+    def check_vvol(self, ldev):
+        """Return True if the specified LDEV is V-VOL, False otherwise."""
+        ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
+        if ldev_info['sts'] != NORMAL_STS:
+            return False
+        return VVOL_ATTR in ldev_info['vol_attr']
diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py b/cinder/volume/drivers/hitachi/vsp_horcm_fc.py
new file mode 100644
index 00000000000..3430c0cbff7
--- /dev/null
+++ b/cinder/volume/drivers/hitachi/vsp_horcm_fc.py
@@ -0,0 +1,183 @@
+# Copyright (C) 2016, Hitachi, Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""HORCM interface fibre channel module for Hitachi VSP Driver."""
+
+import re
+
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder.volume.drivers.hitachi import vsp_horcm as horcm
+from cinder.volume.drivers.hitachi import vsp_utils as utils
+from cinder.zonemanager import utils as fczm_utils
+
+_FC_LINUX_MODE_OPTS = ['-host_mode', 'LINUX']
+_HOST_GROUPS_PATTERN = re.compile(
+    r"^CL\w-\w+ +(?P<gid>\d+) +%s(?!pair00 )\S* +\d+ " % utils.TARGET_PREFIX,
+    re.M)
+_FC_PORT_PATTERN = re.compile(
+    (r"^(CL\w-\w)\w* +(?:FIBRE|FCoE) +TAR +\w+ +\w+ +\w +\w+ +Y +"
+     r"\d+ +\d+ +(\w{16})"), re.M)
+
+LOG = logging.getLogger(__name__)
+MSG = utils.VSPMsg
+
+
+class VSPHORCMFC(horcm.VSPHORCM):
+    """HORCM interface fibre channel class for Hitachi VSP Driver.
+
+    Version history:
+
+    .. code-block:: none
+
+        1.0.0 - Initial driver.
+
+    """
+
+    def __init__(self, conf, storage_protocol, db):
+        """Initialize instance variables."""
+        super(VSPHORCMFC, self).__init__(conf, storage_protocol, db)
+        self._lookup_service = fczm_utils.create_lookup_service()
+
+    def connect_storage(self):
+        """Prepare for using the storage."""
+        target_ports = self.conf.vsp_target_ports
+
+        super(VSPHORCMFC, self).connect_storage()
+        result = self.run_raidcom('get', 'port')
+        for port, wwn in _FC_PORT_PATTERN.findall(result[1]):
+            if target_ports and port in target_ports:
+                self.storage_info['ports'].append(port)
+                self.storage_info['wwns'][port] = wwn
+
+        self.check_ports_info()
+        utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
+                         value=self.storage_info['wwns'])
+
+    def create_target_to_storage(self, port, connector):
+        """Create a host group on the specified port."""
+        wwpns = self.get_hba_ids_from_connector(connector)
+        target_name = utils.TARGET_PREFIX + min(wwpns)
+        try:
+            result = self.run_raidcom(
+                'add', 'host_grp', '-port', port, '-host_grp_name',
+                target_name)
+        except exception.VSPError:
+            result = self.run_raidcom('get', 'host_grp', '-port', port)
+            hostgroup_pt = re.compile(
+                r"^CL\w-\w+ +(?P<gid>\d+) +%s +\d+ " %
+                target_name, re.M)
+            gid = hostgroup_pt.findall(result[1])
+            if gid:
+                return target_name, gid[0]
+            else:
+                raise
+        return target_name, horcm.find_value(result[1], 'gid')
+
+    def set_hba_ids(self, port, gid, hba_ids):
+        """Connect all specified HBAs with the specified port."""
+        registered_wwns = []
+        for wwn in hba_ids:
+            try:
+                self.run_raidcom(
+                    'add', 'hba_wwn', '-port',
+                    '-'.join([port, gid]), '-hba_wwn', wwn)
+                registered_wwns.append(wwn)
+            except exception.VSPError:
+                utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
+                                 wwn=wwn)
+        if not registered_wwns:
+            msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
+                                   gid=gid)
+            raise exception.VSPError(msg)
+
+    def set_target_mode(self, port, gid):
+        """Configure the host group to meet the environment."""
+        self.run_raidcom(
+            'modify', 'host_grp', '-port',
+            '-'.join([port, gid]), *_FC_LINUX_MODE_OPTS,
+            success_code=horcm.ALL_EXIT_CODE)
+
+    def find_targets_from_storage(self, targets, connector, target_ports):
+        """Find mapped ports, memorize them and return unmapped port count."""
+        nr_not_found = 0
+        old_target_name = None
+        if 'ip' in connector:
+            old_target_name = utils.TARGET_PREFIX + connector['ip']
+        success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ])
+        wwpns = self.get_hba_ids_from_connector(connector)
+        wwpns_pattern = re.compile(
+            r'^CL\w-\w+ +\d+ +\S+ +(%s) ' % '|'.join(wwpns), re.M | re.I)
+        target_name = utils.TARGET_PREFIX + min(wwpns)
+
+        for port in target_ports:
+            targets['info'][port] = False
+
+            result = self.run_raidcom(
+                'get', 'hba_wwn', '-port', port, target_name,
+                success_code=success_code)
+            wwpns = wwpns_pattern.findall(result[1])
+            if not wwpns and old_target_name:
+                result = self.run_raidcom(
+                    'get', 'hba_wwn', '-port', port, old_target_name,
+                    success_code=success_code)
+                wwpns = wwpns_pattern.findall(result[1])
+            if wwpns:
+                gid = result[1].splitlines()[1].split()[1]
+                targets['info'][port] = True
+                targets['list'].append((port, gid))
+                LOG.debug(
+                    'Found wwpns in host group immediately.
' + '(port: %(port)s, gid: %(gid)s, wwpns: %(wwpns)s)', + {'port': port, 'gid': gid, 'wwpns': wwpns}) + continue + + result = self.run_raidcom( + 'get', 'host_grp', '-port', port) + for gid in _HOST_GROUPS_PATTERN.findall(result[1]): + result = self.run_raidcom( + 'get', 'hba_wwn', '-port', '-'.join([port, gid])) + wwpns = wwpns_pattern.findall(result[1]) + if wwpns: + targets['info'][port] = True + targets['list'].append((port, gid)) + LOG.debug( + 'Found wwpns in host group. (port: %(port)s, ' + 'gid: %(gid)s, wwpns: %(wwpns)s)', + {'port': port, 'gid': gid, 'wwpns': wwpns}) + break + else: + nr_not_found += 1 + + return nr_not_found + + @fczm_utils.AddFCZone + def initialize_connection(self, volume, connector): + """Initialize connection between the server and the volume.""" + conn_info = super(VSPHORCMFC, self).initialize_connection( + volume, connector) + if self.conf.vsp_zoning_request: + utils.update_conn_info(conn_info, connector, self._lookup_service) + return conn_info + + @fczm_utils.RemoveFCZone + def terminate_connection(self, volume, connector): + """Terminate connection between the server and the volume.""" + conn_info = super(VSPHORCMFC, self).terminate_connection( + volume, connector) + if self.conf.vsp_zoning_request and ( + conn_info and conn_info['data']['target_wwn']): + utils.update_conn_info(conn_info, connector, self._lookup_service) + return conn_info diff --git a/cinder/volume/drivers/hitachi/vsp_utils.py b/cinder/volume/drivers/hitachi/vsp_utils.py new file mode 100644 index 00000000000..c9c74fac8cb --- /dev/null +++ b/cinder/volume/drivers/hitachi/vsp_utils.py @@ -0,0 +1,662 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""Utility module for Hitachi VSP Driver.""" + +import functools +import inspect +import logging as base_logging +import os +import re + +import enum +from oslo_concurrency import processutils as putils +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import importutils +from oslo_utils import strutils +from oslo_utils import timeutils +from oslo_utils import units +import six + +from cinder import exception +from cinder.i18n import _LE +from cinder.i18n import _LI +from cinder.i18n import _LW +from cinder import utils as cinder_utils + + +_DRIVER_DIR = 'cinder.volume.drivers.hitachi' + +_DRIVERS = { + 'HORCM': { + 'FC': 'vsp_horcm_fc.VSPHORCMFC', + }, +} + +DRIVER_PREFIX = 'VSP' +TARGET_PREFIX = 'HBSD-' +GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512 + +MAX_PROCESS_WAITTIME = 24 * 60 * 60 +DEFAULT_PROCESS_WAITTIME = 15 * 60 + +NORMAL_LDEV_TYPE = 'Normal' +NVOL_LDEV_TYPE = 'DP-VOL' + +FULL = 'Full copy' +THIN = 'Thin copy' + +INFO_SUFFIX = 'I' +WARNING_SUFFIX = 'W' +ERROR_SUFFIX = 'E' + +PORT_ID_LENGTH = 5 + + +@enum.unique +class VSPMsg(enum.Enum): + """messages for Hitachi VSP Driver.""" + + METHOD_START = { + 'msg_id': 0, + 'loglevel': base_logging.INFO, + 'msg': _LI('%(method)s starts. 
(config_group: %(config_group)s)'), + 'suffix': INFO_SUFFIX + } + OUTPUT_PARAMETER_VALUES = { + 'msg_id': 1, + 'loglevel': base_logging.INFO, + 'msg': _LI('The parameter of the storage backend. (config_group: ' + '%(config_group)s)'), + 'suffix': INFO_SUFFIX + } + METHOD_END = { + 'msg_id': 2, + 'loglevel': base_logging.INFO, + 'msg': _LI('%(method)s ended. (config_group: %(config_group)s)'), + 'suffix': INFO_SUFFIX + } + DRIVER_READY_FOR_USE = { + 'msg_id': 3, + 'loglevel': base_logging.INFO, + 'msg': _LI('The storage backend can be used. (config_group: ' + '%(config_group)s)'), + 'suffix': INFO_SUFFIX + } + DRIVER_INITIALIZATION_START = { + 'msg_id': 4, + 'loglevel': base_logging.INFO, + 'msg': _LI('Initialization of %(driver)s %(version)s started.'), + 'suffix': INFO_SUFFIX + } + SET_CONFIG_VALUE = { + 'msg_id': 5, + 'loglevel': base_logging.INFO, + 'msg': _LI('Set %(object)s to %(value)s.'), + 'suffix': INFO_SUFFIX + } + OBJECT_CREATED = { + 'msg_id': 6, + 'loglevel': base_logging.INFO, + 'msg': _LI('Created %(object)s. (%(details)s)'), + 'suffix': INFO_SUFFIX + } + INVALID_LDEV_FOR_UNMAPPING = { + 'msg_id': 302, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to specify a logical device for the volume ' + '%(volume_id)s to be unmapped.'), + 'suffix': WARNING_SUFFIX + } + INVALID_LDEV_FOR_DELETION = { + 'msg_id': 304, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to specify a logical device to be deleted. ' + '(method: %(method)s, id: %(id)s)'), + 'suffix': WARNING_SUFFIX + } + DELETE_TARGET_FAILED = { + 'msg_id': 306, + 'loglevel': base_logging.WARNING, + 'msg': _LW('A host group could not be deleted. (port: %(port)s, ' + 'gid: %(id)s)'), + 'suffix': WARNING_SUFFIX + } + CREATE_HOST_GROUP_FAILED = { + 'msg_id': 308, + 'loglevel': base_logging.WARNING, + 'msg': _LW('A host group could not be added. (port: %(port)s)'), + 'suffix': WARNING_SUFFIX + } + UNMAP_LDEV_FAILED = { + 'msg_id': 310, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to unmap a logical device. (LDEV: %(ldev)s)'), + 'suffix': WARNING_SUFFIX + } + DELETE_LDEV_FAILED = { + 'msg_id': 313, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to delete a logical device. (LDEV: %(ldev)s)'), + 'suffix': WARNING_SUFFIX + } + MAP_LDEV_FAILED = { + 'msg_id': 314, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to map a logical device. (LDEV: %(ldev)s, port: ' + '%(port)s, id: %(id)s, lun: %(lun)s)'), + 'suffix': WARNING_SUFFIX + } + DISCARD_ZERO_PAGE_FAILED = { + 'msg_id': 315, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to perform a zero-page reclamation. (LDEV: ' + '%(ldev)s)'), + 'suffix': WARNING_SUFFIX + } + ADD_HBA_WWN_FAILED = { + 'msg_id': 317, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, ' + 'wwn: %(wwn)s)'), + 'suffix': WARNING_SUFFIX + } + LDEV_NOT_EXIST = { + 'msg_id': 319, + 'loglevel': base_logging.WARNING, + 'msg': _LW('The logical device does not exist in the storage system. ' + '(LDEV: %(ldev)s)'), + 'suffix': WARNING_SUFFIX + } + HORCM_START_FAILED = { + 'msg_id': 320, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to start HORCM. (inst: %(inst)s)'), + 'suffix': WARNING_SUFFIX + } + HORCM_RESTART_FOR_SI_FAILED = { + 'msg_id': 322, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to reload the configuration of full copy pair. 
' + '(inst: %(inst)s)'), + 'suffix': WARNING_SUFFIX + } + HORCM_LOGIN_FAILED = { + 'msg_id': 323, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to perform user authentication of HORCM. ' + '(user: %(user)s)'), + 'suffix': WARNING_SUFFIX + } + DELETE_SI_PAIR_FAILED = { + 'msg_id': 324, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to delete full copy pair. (P-VOL: %(pvol)s, S-VOL: ' + '%(svol)s)'), + 'suffix': WARNING_SUFFIX + } + DELETE_TI_PAIR_FAILED = { + 'msg_id': 325, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to delete thin copy pair. (P-VOL: %(pvol)s, S-VOL: ' + '%(svol)s)'), + 'suffix': WARNING_SUFFIX + } + WAIT_SI_PAIR_STATUS_FAILED = { + 'msg_id': 326, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to change the status of full copy pair. (P-VOL: ' + '%(pvol)s, S-VOL: %(svol)s)'), + 'suffix': WARNING_SUFFIX + } + DELETE_DEVICE_GRP_FAILED = { + 'msg_id': 327, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to delete the configuration of full copy pair. ' + '(P-VOL: %(pvol)s, S-VOL: %(svol)s)'), + 'suffix': WARNING_SUFFIX + } + DISCONNECT_VOLUME_FAILED = { + 'msg_id': 329, + 'loglevel': base_logging.WARNING, + 'msg': _LW('Failed to detach the logical device. (LDEV: %(ldev)s, ' + 'reason: %(reason)s)'), + 'suffix': WARNING_SUFFIX + } + STORAGE_COMMAND_FAILED = { + 'msg_id': 600, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The command %(cmd)s failed. (ret: %(ret)s, stdout: ' + '%(out)s, stderr: %(err)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_PARAMETER = { + 'msg_id': 601, + 'loglevel': base_logging.ERROR, + 'msg': _LE('A parameter is invalid. (%(param)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_PARAMETER_VALUE = { + 'msg_id': 602, + 'loglevel': base_logging.ERROR, + 'msg': _LE('A parameter value is invalid. (%(meta)s)'), + 'suffix': ERROR_SUFFIX + } + HORCM_SHUTDOWN_FAILED = { + 'msg_id': 608, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to shutdown HORCM. (inst: %(inst)s)'), + 'suffix': ERROR_SUFFIX + } + HORCM_RESTART_FAILED = { + 'msg_id': 609, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to restart HORCM. (inst: %(inst)s)'), + 'suffix': ERROR_SUFFIX + } + SI_PAIR_STATUS_WAIT_TIMEOUT = { + 'msg_id': 610, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The status change of full copy pair could not be ' + 'completed. (S-VOL: %(svol)s)'), + 'suffix': ERROR_SUFFIX + } + TI_PAIR_STATUS_WAIT_TIMEOUT = { + 'msg_id': 611, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The status change of thin copy pair could not be ' + 'completed. (S-VOL: %(svol)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_STATUS_FOR_COPY = { + 'msg_id': 612, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The source logical device to be replicated does not exist ' + 'in the storage system. (LDEV: %(ldev)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_FOR_EXTENSION = { + 'msg_id': 613, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The volume %(volume_id)s to be extended was not found.'), + 'suffix': ERROR_SUFFIX + } + NO_HBA_WWN_ADDED_TO_HOST_GRP = { + 'msg_id': 614, + 'loglevel': base_logging.ERROR, + 'msg': _LE('No WWN is assigned. (port: %(port)s, gid: %(gid)s)'), + 'suffix': ERROR_SUFFIX + } + NO_AVAILABLE_MIRROR_UNIT = { + 'msg_id': 615, + 'loglevel': base_logging.ERROR, + 'msg': _LE('A pair could not be created. The maximum number of pair ' + 'is exceeded. 
(copy method: %(copy_method)s, P-VOL: ' + '%(pvol)s)'), + 'suffix': ERROR_SUFFIX + } + UNABLE_TO_DELETE_PAIR = { + 'msg_id': 616, + 'loglevel': base_logging.ERROR, + 'msg': _LE('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: ' + '%(svol)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_VOLUME_SIZE_FOR_COPY = { + 'msg_id': 617, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to create a volume from a %(type)s. The size of ' + 'the new volume must be equal to or greater than the size ' + 'of the original %(type)s. (new volume: %(volume_id)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_VOLUME_TYPE_FOR_EXTEND = { + 'msg_id': 618, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The volume %(volume_id)s could not be extended. The ' + 'volume type must be Normal.'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_FOR_CONNECTION = { + 'msg_id': 619, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The volume %(volume_id)s to be mapped was not found.'), + 'suffix': ERROR_SUFFIX + } + POOL_INFO_RETRIEVAL_FAILED = { + 'msg_id': 620, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to provide information about a pool. (pool: ' + '%(pool)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_VOLUME_SIZE_FOR_TI = { + 'msg_id': 621, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to create a volume from a %(type)s. The size of ' + 'the new volume must be equal to the size of the original ' + '%(type)s when the new volume is created by ' + '%(copy_method)s. (new volume: %(volume_id)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_FOR_VOLUME_COPY = { + 'msg_id': 624, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The %(type)s %(id)s source to be replicated was not ' + 'found.'), + 'suffix': ERROR_SUFFIX + } + CREATE_HORCM_CONF_FILE_FAILED = { + 'msg_id': 632, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to open a file. (file: %(file)s, ret: %(ret)s, ' + 'stderr: %(err)s)'), + 'suffix': ERROR_SUFFIX + } + CONNECT_VOLUME_FAILED = { + 'msg_id': 634, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to attach the logical device. (LDEV: %(ldev)s, ' + 'reason: %(reason)s)'), + 'suffix': ERROR_SUFFIX + } + CREATE_LDEV_FAILED = { + 'msg_id': 636, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to add the logical device.'), + 'suffix': ERROR_SUFFIX + } + ADD_PAIR_TARGET_FAILED = { + 'msg_id': 638, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to add the pair target.'), + 'suffix': ERROR_SUFFIX + } + NO_MAPPING_FOR_LDEV = { + 'msg_id': 639, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to map a logical device to any pair targets. ' + '(LDEV: %(ldev)s)'), + 'suffix': ERROR_SUFFIX + } + POOL_NOT_FOUND = { + 'msg_id': 640, + 'loglevel': base_logging.ERROR, + 'msg': _LE('A pool could not be found. (pool: %(pool)s)'), + 'suffix': ERROR_SUFFIX + } + NO_AVAILABLE_RESOURCE = { + 'msg_id': 648, + 'loglevel': base_logging.ERROR, + 'msg': _LE('There are no resources available for use. (resource: ' + '%(resource)s)'), + 'suffix': ERROR_SUFFIX + } + NO_CONNECTED_TARGET = { + 'msg_id': 649, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The host group was not found.'), + 'suffix': ERROR_SUFFIX + } + RESOURCE_NOT_FOUND = { + 'msg_id': 650, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The resource %(resource)s was not found.'), + 'suffix': ERROR_SUFFIX + } + LDEV_DELETION_WAIT_TIMEOUT = { + 'msg_id': 652, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to delete a logical device. 
(LDEV: %(ldev)s)'), + 'suffix': ERROR_SUFFIX + } + LDEV_CREATION_WAIT_TIMEOUT = { + 'msg_id': 653, + 'loglevel': base_logging.ERROR, + 'msg': _LE('The creation of a logical device could not be completed. ' + '(LDEV: %(ldev)s)'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_ATTR_FOR_MANAGE = { + 'msg_id': 702, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV ' + 'must be an unpaired %(ldevtype)s.'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_SIZE_FOR_MANAGE = { + 'msg_id': 703, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV ' + 'size must be expressed in gigabytes.'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_PORT_FOR_MANAGE = { + 'msg_id': 704, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV ' + 'must not be mapped.'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_TYPE_FOR_UNMANAGE = { + 'msg_id': 706, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to unmanage the volume %(volume_id)s. The volume ' + 'type must be %(volume_type)s.'), + 'suffix': ERROR_SUFFIX + } + INVALID_LDEV_FOR_MANAGE = { + 'msg_id': 707, + 'loglevel': base_logging.ERROR, + 'msg': _LE('No valid value is specified for "source-id". A valid LDEV ' + 'number must be specified in "source-id" to manage the ' + 'volume.'), + 'suffix': ERROR_SUFFIX + } + VOLUME_COPY_FAILED = { + 'msg_id': 722, + 'loglevel': base_logging.ERROR, + 'msg': _LE('Failed to copy a volume. (copy method: %(copy_method)s, ' + 'P-VOL: %(pvol)s, S-VOL: %(svol)s)'), + 'suffix': ERROR_SUFFIX + } + + def __init__(self, error_info): + """Initialize Enum attributes.""" + self.msg_id = error_info['msg_id'] + self.level = error_info['loglevel'] + self.msg = error_info['msg'] + self.suffix = error_info['suffix'] + + def output_log(self, **kwargs): + """Output the message to the log file and return the message.""" + msg = self.msg % kwargs + LOG.log(self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s", + {'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg}) + return msg + + +def output_log(msg_enum, **kwargs): + """Output the specified message to the log file and return the message.""" + return msg_enum.output_log(**kwargs) + +LOG = logging.getLogger(__name__) +MSG = VSPMsg + + +def output_start_end_log(func): + """Output the log of the start and the end of the method.""" + @functools.wraps(func) + def wrap(self, *args, **kwargs): + """Wrap the method to add logging function.""" + def _output_start_end_log(*_args, **_kwargs): + """Output the log of the start and the end of the method.""" + output_log(MSG.METHOD_START, + method=func.__name__, + config_group=self.configuration.config_group) + ret = func(*_args, **_kwargs) + output_log(MSG.METHOD_END, + method=func.__name__, + config_group=self.configuration.config_group) + return ret + return _output_start_end_log(self, *args, **kwargs) + return wrap + + +def get_ldev(obj): + """Get the LDEV number from the given object and return it as integer.""" + if not obj: + return None + ldev = obj.get('provider_location') + if not ldev or not ldev.isdigit(): + return None + return int(ldev) + + +def check_timeout(start_time, timeout): + """Return True if the specified time has passed, False otherwise.""" + return timeutils.is_older_than(start_time, timeout) + + +def mask_password(cmd): + """Return a string in which the password is masked.""" + if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login': + tmp = list(cmd) + tmp[3] = 
strutils.mask_dict_password({'password': ''}).get('password') + else: + tmp = cmd + return ' '.join([six.text_type(c) for c in tmp]) + + +def execute(*cmd, **kwargs): + """Run the specified command and return its results.""" + process_input = kwargs.pop('process_input', None) + run_as_root = kwargs.pop('run_as_root', True) + ret = 0 + try: + if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login': + stdout, stderr = cinder_utils.execute( + *cmd, process_input=process_input, run_as_root=run_as_root, + loglevel=base_logging.NOTSET)[:2] + else: + stdout, stderr = cinder_utils.execute( + *cmd, process_input=process_input, run_as_root=run_as_root)[:2] + except putils.ProcessExecutionError as ex: + ret = ex.exit_code + stdout = ex.stdout + stderr = ex.stderr + LOG.debug('cmd: %s', mask_password(cmd)) + LOG.debug('from: %s', inspect.stack()[2]) + LOG.debug('ret: %s', ret) + LOG.debug('stdout: %s', ' '.join(stdout.splitlines())) + LOG.debug('stderr: %s', ' '.join(stderr.splitlines())) + return ret, stdout, stderr + + +def import_object(conf, driver_info, db): + """Import a class and return an instance of it.""" + os.environ['LANG'] = 'C' + cli = _DRIVERS.get('HORCM') + return importutils.import_object( + '.'.join([_DRIVER_DIR, cli[driver_info['proto']]]), + conf, driver_info, db) + + +def check_ignore_error(ignore_error, stderr): + """Return True if ignore_error is in stderr, False otherwise.""" + if not ignore_error or not stderr: + return False + if not isinstance(ignore_error, six.string_types): + ignore_error = '|'.join(ignore_error) + + if re.search(ignore_error, stderr): + return True + return False + + +def check_opts(conf, opts): + """Check if the specified configuration is valid.""" + names = [] + for opt in opts: + names.append(opt.name) + check_opt_value(conf, names) + + +def check_opt_value(conf, names): + """Check if the parameter names and values are valid.""" + for name in names: + try: + getattr(conf, name) + except (cfg.NoSuchOptError, cfg.ConfigFileValueError): + with excutils.save_and_reraise_exception(): + output_log(MSG.INVALID_PARAMETER, param=name) + + +def output_storage_cli_info(name, version): + """Output storage CLI info to the log file.""" + LOG.info(_LI('\t%(name)-35s%(version)s'), + {'name': name + ' version: ', 'version': version}) + + +def output_opt_info(conf, names): + """Output parameter names and values to the log file.""" + for name in names: + LOG.info(_LI('\t%(name)-35s%(attr)s'), + {'name': name + ': ', 'attr': getattr(conf, name)}) + + +def output_opts(conf, opts): + """Output parameter names and values to the log file.""" + names = [opt.name for opt in opts if not opt.secret] + output_opt_info(conf, names) + + +def require_target_existed(targets): + """Check if the target list includes one or more members.""" + if not targets['list']: + msg = output_log(MSG.NO_CONNECTED_TARGET) + raise exception.VSPError(msg) + + +def get_volume_metadata(volume): + """Return a dictionary of the metadata of the specified volume.""" + volume_metadata = volume.get('volume_metadata', {}) + return {item['key']: item['value'] for item in volume_metadata} + + +def update_conn_info(conn_info, connector, lookup_service): + """Set wwn mapping list to the connection info.""" + init_targ_map = build_initiator_target_map( + connector, conn_info['data']['target_wwn'], lookup_service) + if init_targ_map: + conn_info['data']['initiator_target_map'] = init_targ_map + + +def build_initiator_target_map(connector, target_wwns, lookup_service): + """Return a dictionary mapping 
server WWNs to lists of storage WWNs."""
+    init_targ_map = {}
+    initiator_wwns = connector['wwpns']
+    if lookup_service:
+        dev_map = lookup_service.get_device_mapping_from_network(
+            initiator_wwns, target_wwns)
+        for fabric_name in dev_map:
+            fabric = dev_map[fabric_name]
+            for initiator in fabric['initiator_port_wwn_list']:
+                init_targ_map[initiator] = fabric['target_port_wwn_list']
+    else:
+        for initiator in initiator_wwns:
+            init_targ_map[initiator] = target_wwns
+    return init_targ_map
diff --git a/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml b/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml
new file mode 100644
index 00000000000..44d220c9b67
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - Added a new Hitachi VSP FC driver. The VSP driver supports the entire
+    Hitachi VSP family and HUS VM.