diff --git a/cinder/opts.py b/cinder/opts.py index 083ae453896..386c0b282a0 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -116,6 +116,8 @@ from cinder.volume.drivers.hitachi import vsp_fc as \ cinder_volume_drivers_hitachi_vspfc from cinder.volume.drivers.hitachi import vsp_horcm as \ cinder_volume_drivers_hitachi_vsphorcm +from cinder.volume.drivers.hitachi import vsp_iscsi as \ + cinder_volume_drivers_hitachi_vspiscsi from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ @@ -294,6 +296,7 @@ def list_opts(): cinder_volume_drivers_hitachi_vspcommon.common_opts, cinder_volume_drivers_hitachi_vspfc.fc_opts, cinder_volume_drivers_hitachi_vsphorcm.horcm_opts, + cinder_volume_drivers_hitachi_vspiscsi.iscsi_opts, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts, cinder_volume_drivers_hpe_hpexpopts.FC_VOLUME_OPTS, diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py new file mode 100644 index 00000000000..b8899bda21b --- /dev/null +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py @@ -0,0 +1,1746 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +"""Unit tests for Hitachi VSP Driver.""" + +import copy +import os + +import mock +from os_brick.initiator import connector as brick_connector +from oslo_concurrency import processutils +from oslo_config import cfg +from six.moves import range + +from cinder import context as cinder_context +from cinder import db +from cinder.db.sqlalchemy import api as sqlalchemy_api +from cinder import exception +from cinder.objects import snapshot as obj_snap +from cinder import test +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume import driver +from cinder.volume.drivers.hitachi import vsp_horcm +from cinder.volume.drivers.hitachi import vsp_iscsi +from cinder.volume.drivers.hitachi import vsp_utils +from cinder.volume import utils as volume_utils + +# Dummy return values +SUCCEED = 0 +STDOUT = "" +STDERR = "" +CMD_SUCCEED = (SUCCEED, STDOUT, STDERR) + +# Configuration parameter values +CONFIG_MAP = { + 'serial': '492015', + 'my_ip': '127.0.0.1', +} + +# CCI instance numbers +INST_NUMS = (200, 201) + +# ShadowImage copy group names +CG_MAP = {'cg%s' % x: vsp_horcm._COPY_GROUP % ( + CONFIG_MAP['my_ip'], CONFIG_MAP['serial'], INST_NUMS[1], x) + for x in range(3) +} + +# Map containing all maps for dummy response creation +DUMMY_RESPONSE_MAP = CONFIG_MAP.copy() +DUMMY_RESPONSE_MAP.update(CG_MAP) + +# cmd: raidcom get copy_grp +GET_COPY_GRP_RESULT = ( + "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" + "%(cg0)s %(cg0)sP 0 - %(serial)s\n" + "%(cg1)s %(cg1)sP 0 - %(serial)s\n" + "%(cg1)s %(cg1)sS - - %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get copy_grp +GET_COPY_GRP_RESULT2 = "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" + +# cmd: raidcom get copy_grp +GET_COPY_GRP_RESULT3 = ( + "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" + "%(cg0)s %(cg0)sP 0 - %(serial)s\n" + "%(cg0)s %(cg0)sS 0 - %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom 
get device_grp -device_grp_name VSP-127.0.0.14920150C91P +GET_DEVICE_GRP_MU1P_RESULT = ( + "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" + "%(cg1)sP VSP-LDEV-0-2 0 %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91S +GET_DEVICE_GRP_MU1S_RESULT = ( + "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" + "%(cg1)sS VSP-LDEV-0-2 2 %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get hba_iscsi -port CL1-A HBSD-127.0.0.1 +GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT = ( + "PORT GID GROUP_NAME IQN Serial# NICK_NAME\n" + "CL1-A 0 HBSD-127.0.0.1 iqn-initiator %(serial)s -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get dp_pool +GET_DP_POOL_RESULT = ( + "PID POLS U(%) AV_CAP(MB) TP_CAP(MB) W(%) H(%) Num LDEV# LCNT " + "TL_CAP(MB) BM TR_CAP(MB) RCNT\n" + "030 POLN 0 6006 6006 75 80 1 14860 32 167477 NB 0 0\n" +) + +# cmd: raidcom get dp_pool +GET_DP_POOL_ERROR_RESULT = ( + "PID POLS U(%) POOL_NAME Seq# Num LDEV# H(%) VCAP(%) TYPE PM PT\n" +) + +# cmd: raidcom get pool -key opt +GET_POOL_KEYOPT_RESULT = ( + "PID POLS U(%%) POOL_NAME Seq# Num LDEV# H(%%) VCAP(%%) TYPE PM PT\n" + "030 POLM 30 VSPPOOL %(serial)s 1 10000 80 - OPEN N HDP\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get hba_iscsi -port CL1-B-0 +GET_HBA_ISCSI_CL1B0_RESULT = ( + "PORT GID GROUP_NAME IQN Serial# NICK_NAME\n" + "CL1-B 0 HBSD-127.0.0.1 iqn-initiator %(serial)s -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get host_grp -port CL1-A +GET_HOST_GRP_CL1A_RESULT = ( + "PORT GID GROUP_NAME IQN AMD D Serial# HMD HMO_BITs\n" + "CL1-A 0 HBSD-127.0.0.1 iqn-initiator.hbsd-target BOTH S " + "%(serial)s LINUX/IRIX 83 91\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get host_grp -port CL1-B +GET_HOST_GRP_CL1B_RESULT = ( + "PORT GID GROUP_NAME IQN AMD D Serial# HMD HMO_BITs\n" + "CL1-B 0 HBSD-127.0.0.1 iqn-initiator.hbsd-target BOTH S " + "%(serial)s LINUX/IRIX 83 91\n" +) % DUMMY_RESPONSE_MAP + +# raidcom add host_grp -port CLx-y -host_grp_name HBSD-127.0.0.1 +ADD_HOSTGRP_RESULT = 
"raidcom: Host group ID 0(0x0) will be used for adding.\n" + +# raidcom add host_grp -port CLx-y -host_grp_name HBSD-pair00 +ADD_HOSTGRP_PAIR_RESULT = ( + "raidcom: Host group ID 2(0x2) will be used for adding.\n" +) + +# raidcom add lun -port CL1-A-0 -ldev_id x +ADD_LUN_LUN0_RESULT = "raidcom: LUN 0(0x0) will be used for adding.\n" + +# cmd: raidcom get ldev -ldev_list undefined -cnt 1 +GET_LDEV_LDEV_LIST_UNDEFINED = ( + "LDEV : 1 VIR_LDEV : 65534\n" + "VOL_TYPE : NOT DEFINED\n" +) + +# cmd: raidcom get ldev -ldev_id 0 -cnt 2 -key front_end (LDEV) +GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2 = ( + " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" + " Ports PORT_No:LU#:GRPNAME\n" + " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 1 - - NOT DEFINED - - - -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get ldev -ldev_id 0 -cnt 10 -key front_end (LDEV) +GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT = ( + " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" + " Ports PORT_No:LU#:GRPNAME\n" + " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 1 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 2 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 3 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 4 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 5 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 6 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 7 0 0 OPEN-V-CVS 2097152 - CVS 0\n" + " %(serial)s 8 - - NOT DEFINED - - - -\n" + " %(serial)s 9 - - NOT DEFINED - - - -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get ldev -ldev_id x -check_status NOT DEFINED +GET_LDEV_CHECKSTATUS_ERR = ( + "raidcom: testing condition has failed with exit(1).\n" +) + +# cmd: raidcom get ldev -ldev_id 0 +GET_LDEV_LDEV0_RESULT = """ +LDEV : 0 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 1 +GET_LDEV_LDEV1_RESULT = """ +LDEV : 1 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP 
+VOL_Capacity(BLK) : 268435456 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 3 +GET_LDEV_LDEV3_RESULT = """ +LDEV : 3 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : +""" + +# cmd: raidcom get ldev -ldev_id 4 +GET_LDEV_LDEV4_RESULT = """ +LDEV : 4 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : QS : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 5 +GET_LDEV_LDEV5_RESULT = """ +LDEV : 5 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP : VVOL +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 6 +GET_LDEV_LDEV6_RESULT = """ +LDEV : 6 +VOL_TYPE : OPEN-V-CVS +PORTs : CL1-A-0 0 HBSD-127.0.0.1 +VOL_ATTR : CVS : HDP +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 7 +GET_LDEV_LDEV7_RESULT = """ +LDEV : 7 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : QS : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 0 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 10 +GET_LDEV_LDEV10_RESULT = """ +LDEV : 10 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : MRCF : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 11 +GET_LDEV_LDEV11_RESULT = """ +LDEV : 11 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : QS : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 12 +GET_LDEV_LDEV12_RESULT = """ +LDEV : 12 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : MRCF : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : NML +""" + +# cmd: raidcom get ldev -ldev_id 13 +GET_LDEV_LDEV13_RESULT = """ +LDEV : 13 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : MRCF : HDP : HDT +VOL_Capacity(BLK) : 2097152 +NUM_PORT : 1 +STS : BLK +""" + +# cmd: raidcom get ldev -ldev_id 14 +GET_LDEV_LDEV14_RESULT = """ +LDEV : 14 +VOL_TYPE : OPEN-V-CVS +VOL_ATTR : CVS : HDP : HDT +VOL_Capacity(BLK) : 9999999 +NUM_PORT : 1 +STS : NML 
+""" + +# cmd: raidcom get lun -port CL1-A-0 +GET_LUN_CL1A0_RESULT = ( + "PORT GID HMD LUN NUM LDEV CM Serial# HMO_BITs\n" + "CL1-A 0 LINUX/IRIX 4 1 4 - %(serial)s\n" + "CL1-A 0 LINUX/IRIX 254 1 5 - %(serial)s\n" + "CL1-A 0 LINUX/IRIX 255 1 6 - %(serial)s\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get port +GET_PORT_RESULT = ( + "PORT TYPE ATTR SPD LPID FAB CONN SSW SL Serial# WWN PHY_PORT\n" + "CL1-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" + "CL1-B ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" + "CL3-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" + "CL3-B ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" + "CL4-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get port -port CL1-A -key opt +GET_PORT_CL1A_KEY_OPT_RESULT = ( + "PORT : CL1-A\n" + "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" + "TCP_MTU : 1500\n" + "WSZ : 64KB\n" + "KA_TIMER : 60\n" + "TCP_PORT : 3260\n" + "IPV4_ADDR : 11.22.33.44\n" + "IPV4_SMSK : 255.255.0.0\n" + "IPV4_GWAD : 0.0.0.0\n" + "IPV6_ADDR_INF : INV : AM : fe80::\n" + "IPV6_GADR_INF : INV : AM : ::\n" + "IPV6_GWAD_INF : INV : :: : ::\n" + "VLAN_ID : -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get port -port CL1-B -key opt +GET_PORT_CL1B_KEY_OPT_RESULT = ( + "PORT : CL1-B\n" + "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" + "TCP_MTU : 1500\n" + "WSZ : 64KB\n" + "KA_TIMER : 60\n" + "TCP_PORT : 3260\n" + "IPV4_ADDR : 11.22.33.44\n" + "IPV4_SMSK : 255.255.0.0\n" + "IPV4_GWAD : 0.0.0.0\n" + "IPV6_ADDR_INF : INV : AM : fe80::\n" + "IPV6_GADR_INF : INV : AM : ::\n" + "IPV6_GWAD_INF : INV : :: : ::\n" + "VLAN_ID : -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get port -port CL3-A -key opt +GET_PORT_CL3A_KEY_OPT_RESULT = ( + "PORT : CL3-A\n" + "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" + "TCP_MTU : 1500\n" + "WSZ : 64KB\n" + "KA_TIMER : 60\n" + "TCP_PORT : 3260\n" + "IPV4_ADDR : 11.22.33.44\n" + "IPV4_SMSK : 255.255.0.0\n" + "IPV4_GWAD : 0.0.0.0\n" + "IPV6_ADDR_INF : 
INV : AM : fe80::\n" + "IPV6_GADR_INF : INV : AM : ::\n" + "IPV6_GWAD_INF : INV : :: : ::\n" + "VLAN_ID : -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get port -port CL3-B -key opt +GET_PORT_CL3B_KEY_OPT_RESULT = ( + "PORT : CL3-B\n" + "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" + "TCP_MTU : 1500\n" + "WSZ : 64KB\n" + "KA_TIMER : 60\n" + "TCP_PORT : 3260\n" + "IPV4_ADDR : 11.22.33.44\n" + "IPV4_SMSK : 255.255.0.0\n" + "IPV4_GWAD : 0.0.0.0\n" + "IPV6_ADDR_INF : INV : AM : fe80::\n" + "IPV6_GADR_INF : INV : AM : ::\n" + "IPV6_GWAD_INF : INV : :: : ::\n" + "VLAN_ID : -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 4 +GET_SNAPSHOT_LDEV4_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 4 3 8 31 100 ---- 57db5cb0\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 4 4 9 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 7 +GET_SNAPSHOT_LDEV7_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 7 3 8 31 100 ---- 57db5cb0\n" + "VSP-SNAP0 P-VOL PSUS %(serial)s 7 4 9 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 8 +GET_SNAPSHOT_LDEV8_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 S-VOL SSUS %(serial)s 8 3 7 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidcom get snapshot -ldev_id 11 +GET_SNAPSHOT_LDEV11_RESULT = ( + "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " + "SPLT-TIME\n" + "VSP-SNAP0 S-VOL SSUS %(serial)s 11 3 7 31 100 ---- 57db5cb0\n" +) % DUMMY_RESPONSE_MAP + +# cmd: pairdisplay -CLI -d 492015 1 0 -IM201 +PAIRDISPLAY_LDEV0_1_RESULT = ( + "Group PairVol L/R Port# TID LU-M Seq# LDEV# " + "P/S Status Seq# P-LDEV# M\n" + "%(cg0)s VSP-LDEV-0-1 L CL1-A-0 0 0 0 %(serial)s 0 " + "P-VOL PSUS %(serial)s 1 W\n" + "%(cg0)s VSP-LDEV-0-1 R 
CL1-A-0 0 1 0 %(serial)s 1 " + "S-VOL SSUS - 0 -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: pairdisplay -CLI -d 492015 10 0 -IM201 +PAIRDISPLAY_LDEV7_10_RESULT = ( + "Group PairVol L/R Port# TID LU-M Seq# LDEV# " + "P/S Status Seq# P-LDEV# M\n" + "%(cg0)s VSP-LDEV-7-10 L CL1-A-1 0 0 0 %(serial)s 7 " + "P-VOL PSUS %(serial)s 10 W\n" + "%(cg0)s VSP-LDEV-7-10 R CL1-A-1 0 1 0 %(serial)s 10 " + "S-VOL SSUS - 7 -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: pairdisplay -CLI -d 492015 12 0 -IM201 +PAIRDISPLAY_LDEV7_12_RESULT = ( + "Group PairVol L/R Port# TID LU-M Seq# LDEV# " + "P/S Status Seq# P-LDEV# M\n" + "%(cg0)s VSP-LDEV-7-12 L CL1-A-1 0 0 0 %(serial)s 7 " + "P-VOL PSUS %(serial)s 12 W\n" + "%(cg0)s VSP-LDEV-7-12 R CL1-A-1 0 1 0 %(serial)s 12 " + "S-VOL SSUS - 7 -\n" +) % DUMMY_RESPONSE_MAP + +# cmd: raidqry -h +RAIDQRY_RESULT = ( + "Model : RAID-Manager/Linux/x64\n" + "Ver&Rev: 01-39-03/03\n" + "Usage : raidqry [options] for HORC[200]\n" + " -h Help/Usage\n" + " -I[#] Set to HORCMINST#\n" + " -IH[#] or -ITC[#] Set to HORC mode [and HORCMINST#]\n" + " -IM[#] or -ISI[#] Set to MRCF mode [and HORCMINST#]\n" + " -z Set to the interactive mode\n" + " -zx Set to the interactive mode and HORCM monitoring\n" + " -q Quit(Return to main())\n" + " -g Specify for getting all group name on local\n" + " -l Specify the local query\n" + " -lm Specify the local query with full micro version\n" + " -r Specify the remote query\n" + " -f Specify display for floatable host\n" +) + +EXECUTE_TABLE = { + ('add', 'hba_iscsi', '-port', 'CL3-A-0', '-hba_iscsi_name', + 'iqn-initiator'): (vsp_horcm.EX_INVARG, STDOUT, STDERR), + ('add', 'host_grp', '-port', 'CL1-A', '-host_grp_name', + 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL1-B', '-host_grp_name', + 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL3-A', '-host_grp_name', + 'HBSD-127.0.0.1', '-iscsi_name', 'iqn-initiator.hbsd-target'): ( + SUCCEED, 
ADD_HOSTGRP_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', + 'HBSD-127.0.0.1', '-iscsi_name', 'iqn-initiator.hbsd-target'): ( + SUCCEED, ADD_HOSTGRP_RESULT, STDERR), + ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', + 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 0): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 1): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 5): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 6): ( + vsp_horcm.EX_CMDRJE, STDOUT, vsp_horcm._LU_PATH_DEFINED), + ('add', 'lun', '-port', 'CL1-B-0', '-ldev_id', 0, '-lun_id', 0): ( + SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), + ('extend', 'ldev', '-ldev_id', 3, '-capacity', '128G'): ( + vsp_horcm.EX_CMDIOE, STDOUT, + "raidcom: [EX_CMDIOE] Control command I/O error"), + ('get', 'hba_iscsi', '-port', 'CL1-A', 'HBSD-127.0.0.1'): ( + SUCCEED, GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT, STDERR), + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT, STDERR), + ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'P'): ( + SUCCEED, GET_DEVICE_GRP_MU1P_RESULT, STDERR), + ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'S'): ( + SUCCEED, GET_DEVICE_GRP_MU1S_RESULT, STDERR), + ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_RESULT, STDERR), + ('get', 'pool', '-key', 'opt'): (SUCCEED, GET_POOL_KEYOPT_RESULT, STDERR), + ('get', 'hba_iscsi', '-port', 'CL1-B-0'): ( + SUCCEED, GET_HBA_ISCSI_CL1B0_RESULT, STDERR), + ('get', 'host_grp', '-port', 'CL1-A'): ( + SUCCEED, GET_HOST_GRP_CL1A_RESULT, STDERR), + ('get', 'host_grp', '-port', 'CL1-B'): ( + SUCCEED, GET_HOST_GRP_CL1B_RESULT, STDERR), + ('get', 'ldev', '-ldev_list', 'undefined', '-cnt', '1'): ( + SUCCEED, GET_LDEV_LDEV_LIST_UNDEFINED, STDERR), + ('get', 'ldev', '-ldev_id', 0, '-cnt', 2, '-key', 'front_end'): ( + SUCCEED, 
GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2, STDERR), + ('get', 'ldev', '-ldev_id', 0, '-cnt', 10, '-key', 'front_end'): ( + SUCCEED, GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 0, '-check_status', 'NOT', 'DEFINED'): ( + 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), + ('get', 'ldev', '-ldev_id', 0): (SUCCEED, GET_LDEV_LDEV0_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 1): (SUCCEED, GET_LDEV_LDEV1_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 3): (SUCCEED, GET_LDEV_LDEV3_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 4): (SUCCEED, GET_LDEV_LDEV4_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 5): (SUCCEED, GET_LDEV_LDEV5_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 6): (SUCCEED, GET_LDEV_LDEV6_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 7): (SUCCEED, GET_LDEV_LDEV7_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 10): (SUCCEED, GET_LDEV_LDEV10_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 11): (SUCCEED, GET_LDEV_LDEV11_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 12): (SUCCEED, GET_LDEV_LDEV12_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 13): (SUCCEED, GET_LDEV_LDEV13_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 14): (SUCCEED, GET_LDEV_LDEV14_RESULT, STDERR), + ('get', 'ldev', '-ldev_id', 15): (vsp_horcm.EX_COMERR, "", STDERR), + ('get', 'lun', '-port', 'CL1-A-0'): ( + SUCCEED, GET_LUN_CL1A0_RESULT, STDERR), + ('get', 'port'): (SUCCEED, GET_PORT_RESULT, STDERR), + ('get', 'port', '-port', 'CL1-A', '-key', 'opt'): ( + SUCCEED, GET_PORT_CL1A_KEY_OPT_RESULT, STDERR), + ('get', 'port', '-port', 'CL1-B', '-key', 'opt'): ( + SUCCEED, GET_PORT_CL1B_KEY_OPT_RESULT, STDERR), + ('get', 'port', '-port', 'CL3-A', '-key', 'opt'): ( + SUCCEED, GET_PORT_CL3A_KEY_OPT_RESULT, STDERR), + ('get', 'port', '-port', 'CL3-B', '-key', 'opt'): ( + SUCCEED, GET_PORT_CL3B_KEY_OPT_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 4): ( + SUCCEED, GET_SNAPSHOT_LDEV4_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 7): ( + SUCCEED, GET_SNAPSHOT_LDEV7_RESULT, 
STDERR), + ('get', 'snapshot', '-ldev_id', 8): ( + SUCCEED, GET_SNAPSHOT_LDEV8_RESULT, STDERR), + ('get', 'snapshot', '-ldev_id', 11): ( + SUCCEED, GET_SNAPSHOT_LDEV11_RESULT, STDERR), + ('modify', 'ldev', '-ldev_id', 3, '-status', 'discard_zero_page'): ( + vsp_horcm.EX_CMDIOE, STDOUT, STDERR), + ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 10, 0, + '-IM%s' % INST_NUMS[1]): ( + SUCCEED, PAIRDISPLAY_LDEV7_10_RESULT, STDERR), + ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 12, 0, + '-IM%s' % INST_NUMS[1]): ( + SUCCEED, PAIRDISPLAY_LDEV7_12_RESULT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 8, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 10, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 12, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), + ('raidqry', '-h'): (SUCCEED, RAIDQRY_RESULT, STDERR), + ('tee', '/etc/horcm501.conf'): (1, STDOUT, STDERR), + ('-login', 'user', 'pasword'): (SUCCEED, STDOUT, STDERR), + ('-login', 'userX', 'paswordX'): (vsp_horcm.EX_ENAUTH, STDOUT, STDERR), + ('-login', 'userY', 'paswordY'): (vsp_horcm.EX_COMERR, STDOUT, STDERR), +} + +EXECUTE_TABLE2 = EXECUTE_TABLE.copy() +EXECUTE_TABLE2.update({ + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT2, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUS, STDOUT, STDERR), +}) + +EXECUTE_TABLE3 = EXECUTE_TABLE2.copy() + +EXECUTE_TABLE4 = EXECUTE_TABLE.copy() +EXECUTE_TABLE4.update({ + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), +}) + +EXECUTE_TABLE5 = EXECUTE_TABLE.copy() 
+EXECUTE_TABLE5.update({ + ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), + ('get', 'ldev', '-ldev_id', 1, '-check_status', 'NOT', 'DEFINED'): ( + 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), + ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 1, 0, + '-IM%s' % INST_NUMS[1]): ( + SUCCEED, PAIRDISPLAY_LDEV0_1_RESULT, STDERR), + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), +}) + +ERROR_EXECUTE_TABLE = { + ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_ERROR_RESULT, STDERR), +} + +DEFAULT_CONNECTOR = { + 'host': 'host', + 'ip': CONFIG_MAP['my_ip'], + 'initiator': 'iqn-initiator', + 'multipath': False, +} + +CTXT = cinder_context.get_admin_context() + +TEST_VOLUME = [] +for i in range(14): + volume = {} + volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) + volume['name'] = 'test-volume{0:d}'.format(i) + volume['provider_location'] = None if i == 2 else '{0:d}'.format(i) + volume['size'] = 256 if i == 1 else 128 + if i == 2: + volume['status'] = 'creating' + elif i == 5: + volume['status'] = 'in-use' + else: + volume['status'] = 'available' + volume = fake_volume.fake_volume_obj(CTXT, **volume) + TEST_VOLUME.append(volume) + + +def _volume_get(context, volume_id): + """Return predefined volume info.""" + return TEST_VOLUME[int(volume_id.replace("-", ""))] + +TEST_SNAPSHOT = [] +for i in range(8): + snapshot = {} + snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) + snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) + snapshot['provider_location'] = None if i == 2 else '{0:d}'.format( + i if i < 5 else i + 5) + snapshot['size'] = 256 if i == 1 else 128 + snapshot['status'] = 'creating' if i == 2 else 'available' + snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format( + i if i < 5 else 7) + snapshot['volume'] = _volume_get(None, snapshot['volume_id']) + snapshot['volume_name'] = 'test-volume{0:d}'.format(i if i < 5 else 7) + snapshot['volume_size'] = 
256 if i == 1 else 128 + snapshot = obj_snap.Snapshot._from_db_object( + CTXT, obj_snap.Snapshot(), + fake_snapshot.fake_db_snapshot(**snapshot)) + TEST_SNAPSHOT.append(snapshot) + +# Flags that determine _fake_run_horcmstart() return values +run_horcmstart_returns_error = False +run_horcmstart_returns_error2 = False +run_horcmstart3_cnt = 0 + + +def _access(*args, **kargs): + """Assume access to the path is allowed.""" + return True + + +def _execute(*args, **kargs): + """Return predefined results for command execution.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) + return result + + +def _execute2(*args, **kargs): + """Return predefined results based on EXECUTE_TABLE2.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE2.get(cmd, CMD_SUCCEED) + return result + + +def _execute3(*args, **kargs): + """Change pairevtwait's dummy return value after it is called.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE3.get(cmd, CMD_SUCCEED) + if cmd == ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): + EXECUTE_TABLE3.update({ + ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', + '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), + }) + return result + + +def _execute4(*args, **kargs): + """Return predefined results based on EXECUTE_TABLE4.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE4.get(cmd, CMD_SUCCEED) + return result + + +def _execute5(*args, **kargs): + """Return predefined results based on EXECUTE_TABLE5.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) + return result + + +def _cinder_execute(*args, **kargs): + """Return predefined results or raise an exception.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + ret, stdout, stderr = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) + if ret == SUCCEED: + return 
stdout, stderr + else: + pee = processutils.ProcessExecutionError(exit_code=ret, + stdout=stdout, + stderr=stderr) + raise pee + + +def _error_execute(*args, **kargs): + """Return predefined error results.""" + cmd = args[1:-3] if args[0] == 'raidcom' else args + result = _execute(*args, **kargs) + ret = ERROR_EXECUTE_TABLE.get(cmd) + return ret if ret else result + + +def _brick_get_connector_properties(multipath=False, enforce_multipath=False): + """Return a predefined connector object.""" + return DEFAULT_CONNECTOR + + +def _brick_get_connector_properties_error(multipath=False, + enforce_multipath=False): + """Return an incomplete connector object.""" + connector = dict(DEFAULT_CONNECTOR) + del connector['initiator'] + return connector + + +def _connect_volume(*args, **kwargs): + """Return predefined volume info.""" + return {'path': u'/dev/disk/by-path/xxxx', 'type': 'block'} + + +def _disconnect_volume(*args, **kwargs): + """Return without doing anything.""" + pass + + +def _copy_volume(*args, **kwargs): + """Return without doing anything.""" + pass + + +def _volume_admin_metadata_get(context, volume_id): + """Return dummy admin metadata.""" + return {'fake_key': 'fake_value'} + + +def _snapshot_metadata_update(context, snapshot_id, metadata, delete): + """Return without doing anything.""" + pass + + +def _fake_is_smpl(*args): + """Assume the ShadowImage pair status is SMPL.""" + return True + + +def _fake_run_horcmgr(*args): + """Assume CCI is running.""" + return vsp_horcm._HORCM_RUNNING + + +def _fake_run_horcmstart(*args): + """Return a value based on a flag value.""" + return 0 if not run_horcmstart_returns_error else 3 + + +def _fake_run_horcmstart2(*args): + """Return a value based on a flag value.""" + return 0 if not run_horcmstart_returns_error2 else 3 + + +def _fake_run_horcmstart3(*args): + """Update a counter and return a value based on it.""" + global run_horcmstart3_cnt + run_horcmstart3_cnt = run_horcmstart3_cnt + 1 + return 0 if 
run_horcmstart3_cnt <= 1 else 3 + + +def _fake_check_ldev_status(*args, **kwargs): + """Assume LDEV status has changed as desired.""" + return None + + +def _fake_exists(path): + """Assume the path does not exist.""" + return False + + +class VSPHORCMISCSIDriverTest(test.TestCase): + """Unit test class for VSP HORCM interface iSCSI module.""" + + test_existing_ref = {'source-id': '0'} + test_existing_none_ldev_ref = {'source-id': '2'} + test_existing_invalid_ldev_ref = {'source-id': 'AAA'} + test_existing_value_error_ref = {'source-id': 'XX:XX:XX'} + test_existing_no_ldev_ref = {} + test_existing_invalid_sts_ldev = {'source-id': '13'} + test_existing_invalid_vol_attr = {'source-id': '12'} + test_existing_invalid_size = {'source-id': '14'} + test_existing_invalid_port_cnt = {'source-id': '6'} + test_existing_failed_to_start_horcmgr = {'source-id': '15'} + + def setUp(self): + """Set up the test environment.""" + super(VSPHORCMISCSIDriverTest, self).setUp() + + self.configuration = mock.Mock(conf.Configuration) + self.ctxt = cinder_context.get_admin_context() + self._setup_config() + self._setup_driver() + + def _setup_config(self): + """Set configuration parameter values.""" + self.configuration.config_group = "HORCM" + + self.configuration.volume_backend_name = "HORCMISCSI" + self.configuration.volume_driver = ( + "cinder.volume.drivers.hitachi.vsp_iscsi.VSPISCSIDriver") + self.configuration.reserved_percentage = "0" + self.configuration.use_multipath_for_image_xfer = False + self.configuration.enforce_multipath_for_image_xfer = False + self.configuration.num_volume_device_scan_tries = 3 + self.configuration.volume_dd_blocksize = "1000" + + self.configuration.vsp_storage_id = CONFIG_MAP['serial'] + self.configuration.vsp_pool = "30" + self.configuration.vsp_thin_pool = None + self.configuration.vsp_ldev_range = "0-1" + self.configuration.vsp_default_copy_method = 'FULL' + self.configuration.vsp_copy_speed = 3 + self.configuration.vsp_copy_check_interval = 1 + 
self.configuration.vsp_async_copy_check_interval = 1 + self.configuration.vsp_target_ports = "CL1-A" + self.configuration.vsp_group_request = True + + self.configuration.vsp_use_chap_auth = True + self.configuration.vsp_auth_user = "auth_user" + self.configuration.vsp_auth_password = "auth_password" + + self.configuration.vsp_horcm_numbers = INST_NUMS + self.configuration.vsp_horcm_user = "user" + self.configuration.vsp_horcm_password = "pasword" + self.configuration.vsp_horcm_add_conf = False + + self.configuration.safe_get = self._fake_safe_get + + CONF = cfg.CONF + CONF.my_ip = CONFIG_MAP['my_ip'] + + def _fake_safe_get(self, value): + """Retrieve a configuration value avoiding throwing an exception.""" + try: + val = getattr(self.configuration, value) + except AttributeError: + val = None + return val + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def _setup_driver(self, *args): + """Set up the driver environment.""" + self.driver = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self.driver.do_setup(None) + self.driver.check_for_setup_error() + self.driver.create_export(None, None, None) + self.driver.ensure_export(None, None) + self.driver.remove_export(None, None) + + # API test cases + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) + def test_do_setup(self, *args): + """Normal case: The host group exists beforehand.""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + + drv.do_setup(None) + self.assertEqual( + {'CL1-A': '11.22.33.44:3260'}, + drv.common.storage_info['portals']) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 
'execute', side_effect=_execute) + def test_do_setup_raidqry_h_invalid(self, *args): + """Error case: 'raidqry -h' returns nothing. This error is ignored.""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + + raidqry_h_original = EXECUTE_TABLE[('raidqry', '-h')] + EXECUTE_TABLE[('raidqry', '-h')] = (SUCCEED, "", STDERR) + drv.do_setup(None) + self.assertEqual( + {'CL1-A': '11.22.33.44:3260'}, + drv.common.storage_info['portals']) + EXECUTE_TABLE[('raidqry', '-h')] = raidqry_h_original + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_specify_pool_name(self, *args): + """Normal case: Specify pool name rather than pool number.""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_pool = "VSPPOOL" + + drv.do_setup(None) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_create_hostgrp(self, *args): + """Normal case: The host groups does not exist beforehand.""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_target_ports = "CL3-B" + + drv.do_setup(None) + + @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_create_hostgrp_error(self, *args): + """Error case: 'add hba_iscsi' fails(MSGID0309-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_target_ports = "CL3-A" + + 
self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_thin_pool_not_specified(self, *args): + """Error case: Parameter error(vsp_thin_pool).(MSGID0601-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_default_copy_method = 'THIN' + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_ldev_range_not_specified(self, *args): + """Normal case: Not specify LDEV range.""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_ldev_range = None + + drv.do_setup(None) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_storage_id_not_specified(self, *args): + """Error case: Parameter error(vsp_storage_id).(MSGID0601-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_storage_id = None + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_horcm_numbers_invalid(self, *args): + """Error case: Parameter error(vsp_horcm_numbers).(MSGID0601-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_numbers = (200, 200) + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_horcm_user_not_specified(self, *args): + """Error case: Parameter error(vsp_horcm_user).(MSGID0601-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + 
self._setup_config() + self.configuration.vsp_horcm_user = None + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(processutils, 'execute', side_effect=_execute) + @mock.patch.object(os.path, 'exists', side_effect=_fake_exists) + @mock.patch.object(os, 'access', side_effect=_access) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_failed_to_create_conf(self, *args): + """Error case: Writing into horcmxxx.conf fails.(MSGID0632-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_numbers = (500, 501) + self.configuration.vsp_horcm_add_conf = True + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_failed_to_login(self, *args): + """Error case: 'raidcom -login' fails with EX_ENAUTH(MSGID0600-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_user = "userX" + self.configuration.vsp_horcm_password = "paswordX" + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 2) + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_failed_to_command(self, *args): + """Error case: 'raidcom -login' fails with EX_COMERR(MSGID0600-E).""" + drv = 
vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_horcm_user = "userY" + self.configuration.vsp_horcm_password = "paswordY" + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 2) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm, '_run_horcmgr', side_effect=_fake_run_horcmgr) + def test_do_setup_failed_to_horcmshutdown(self, *args): + """Error case: CCI's status is always RUNNING(MSGID0608-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart) + def test_do_setup_failed_to_horcmstart(self, *args): + """Error case: _run_horcmstart() returns an error(MSGID0609-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + + global run_horcmstart_returns_error + run_horcmstart_returns_error = True + self.assertRaises(exception.VSPError, drv.do_setup, None) + run_horcmstart_returns_error = False + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties_error) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_initiator_not_found(self, *args): + """Error case: The connector does not have 'initiator'(MSGID0650-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + + self.assertRaises(exception.VSPError, drv.do_setup, 
None) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties_error) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_do_setup_port_not_found(self, *args): + """Error case: The target port does not exist(MSGID0650-E).""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_target_ports = ["CL4-A"] + + self.assertRaises(exception.VSPError, drv.do_setup, None) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume(self, *args): + """Normal case: Extend volume succeeds.""" + self.driver.extend_volume(TEST_VOLUME[0], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume_volume_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0613-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[2], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume_volume_ldev_is_vvol(self, *args): + """Error case: The volume is a V-VOL(MSGID0618-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[5], 256) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_extend_volume_volume_is_busy(self, *args): + """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[4], 256) + + @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) + @mock.patch.object(vsp_horcm, '_EXTEND_WAITTIME', 1) + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + def test_extend_volume_raidcom_error(self, *args): + """Error case: 'extend ldev' returns an error(MSGID0600-E).""" + self.assertRaises( + exception.VSPError, self.driver.extend_volume, TEST_VOLUME[3], 256) + + 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_get_volume_stats(self, *args): + """Normal case: Refreshing data required.""" + stats = self.driver.get_volume_stats(True) + self.assertEqual('Hitachi', stats['vendor_name']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_get_volume_stats_no_refresh(self, *args): + """Normal case: Refreshing data not required.""" + stats = self.driver.get_volume_stats() + self.assertEqual({}, stats) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_error_execute) + def test_get_volume_stats_failed_to_get_dp_pool(self, *args): + """Error case: The pool does not exist(MSGID0640-E, MSGID0620-E).""" + self.driver.common.storage_info['pool_id'] = 29 + + stats = self.driver.get_volume_stats(True) + self.assertEqual({}, stats) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume(self, *args): + """Normal case: Available LDEV range is 0-1.""" + ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) + self.assertEqual('1', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_free_ldev_not_found_on_storage(self, *args): + """Error case: No unused LDEV exists(MSGID0648-E).""" + self.driver.common.storage_info['ldev_range'] = [0, 0] + + self.assertRaises( + exception.VSPError, self.driver.create_volume, TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_no_setting_ldev_range(self, *args): + """Normal case: Available LDEV range is unlimited.""" + self.driver.common.storage_info['ldev_range'] = None + + ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) + self.assertEqual('1', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm.VSPHORCM, + '_check_ldev_status', side_effect=_fake_check_ldev_status) + def 
test_delete_volume(self, *args): + """Normal case: Delete a volume.""" + self.driver.delete_volume(TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_volume_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0304-W).""" + self.driver.delete_volume(TEST_VOLUME[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_volume_ldev_not_found_on_storage(self, *args): + """Unusual case: The volume's LDEV does not exist.(MSGID0319-W).""" + self.driver.delete_volume(TEST_VOLUME[3]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_volume_volume_is_busy(self, *args): + """Error case: The volume is a P-VOL of a THIN pair(MSGID0616-E).""" + self.assertRaises( + exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[4]) + + @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_full(self, *args): + """Normal case: copy_method=FULL.""" + self.driver.common.storage_info['ldev_range'] = [0, 9] + + ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) + self.assertEqual('8', ret['provider_location']) + + @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_thin(self, *args): + """Normal case: copy_method=THIN.""" + self.driver.common.storage_info['ldev_range'] = [0, 9] + self.configuration.vsp_thin_pool = 31 + self.configuration.vsp_default_copy_method = "THIN" + + ret = 
self.driver.create_snapshot(TEST_SNAPSHOT[7]) + self.assertEqual('8', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_provider_location_is_none(self, *args): + """Error case: Source vol's provider_location is None(MSGID0624-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) + def test_create_snapshot_ldev_not_found_on_storage(self, *args): + """Error case: The src-vol's LDEV does not exist.(MSGID0612-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[3]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_full(self, *args): + """Normal case: Delete a snapshot.""" + self.driver.delete_snapshot(TEST_SNAPSHOT[5]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm.VSPHORCM, '_is_smpl', side_effect=_fake_is_smpl) + def test_delete_snapshot_full_smpl(self, *args): + """Normal case: The LDEV in an SI volume pair becomes SMPL.""" + self.driver.delete_snapshot(TEST_SNAPSHOT[7]) + + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_vvol_timeout(self, *args): + """Error case: V-VOL is not deleted from a snapshot(MSGID0611-E).""" + self.assertRaises( + exception.VSPError, self.driver.delete_snapshot, TEST_SNAPSHOT[6]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_provider_location_is_none(self, *args): + """Error case: Snapshot's provider_location is None(MSGID0304-W).""" + self.driver.delete_snapshot(TEST_SNAPSHOT[2]) + + @mock.patch.object(vsp_utils, 'execute', 
side_effect=_execute) + def test_delete_snapshot_ldev_not_found_on_storage(self, *args): + """Unusual case: The snapshot's LDEV does not exist.(MSGID0319-W).""" + self.driver.delete_snapshot(TEST_SNAPSHOT[3]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_delete_snapshot_snapshot_is_busy(self, *args): + """Error case: The snapshot is a P-VOL of a THIN pair(MSGID0616-E).""" + self.assertRaises( + exception.SnapshotIsBusy, self.driver.delete_snapshot, + TEST_SNAPSHOT[4]) + + @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object( + utils, 'brick_get_connector', + side_effect=mock.MagicMock()) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + brick_connector.ISCSIConnector, + 'connect_volume', _connect_volume) + @mock.patch.object( + brick_connector.ISCSIConnector, + 'disconnect_volume', _disconnect_volume) + def test_create_cloned_volume_with_dd_same_size(self, *args): + """Normal case: The source volume is a V-VOL and copied by dd.""" + vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[5]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object( + utils, 'brick_get_connector', + side_effect=mock.MagicMock()) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + brick_connector.ISCSIConnector, + 'connect_volume', _connect_volume) + @mock.patch.object( + brick_connector.ISCSIConnector, + 'disconnect_volume', _disconnect_volume) + def test_create_cloned_volume_with_dd_extend_size(self, *args): + """Normal case: Copy with dd and extend the size afterward.""" + vol = 
self.driver.create_cloned_volume(TEST_VOLUME[1], TEST_VOLUME[5]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_cloned_volume_provider_location_is_none(self, *args): + """Error case: Source vol's provider_location is None(MSGID0624-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_cloned_volume, + TEST_VOLUME[0], TEST_VOLUME[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_cloned_volume_invalid_size(self, *args): + """Error case: src-size > clone-size(MSGID0617-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_cloned_volume, + TEST_VOLUME[0], TEST_VOLUME[1]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_cloned_volume_extend_size_thin(self, *args): + """Error case: clone > src and copy_method=THIN(MSGID0621-E).""" + self.configuration.vsp_thin_pool = 31 + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'THIN'}) + self.assertRaises( + exception.VSPError, self.driver.create_cloned_volume, + test_vol_obj, TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_same_size(self, *args): + """Normal case: Copy with ShadowImage.""" + vol = self.driver.create_volume_from_snapshot( + TEST_VOLUME[0], TEST_SNAPSHOT[0]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute2) + def test_create_volume_from_snapshot_full_extend_normal(self, *args): + """Normal case: Copy with ShadowImage and extend the size afterward.""" + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + vol = self.driver.create_volume_from_snapshot( + test_vol_obj, TEST_SNAPSHOT[0]) + self.assertEqual('1', vol['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute3) 
+ def test_create_volume_from_snapshot_full_extend_PSUE(self, *args): + """Error case: SI copy -> pair status: PSUS -> PSUE(MSGID0722-E).""" + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute4) + def test_create_volume_from_snapshot_full_PSUE(self, *args): + """Error case: SI copy -> pair status becomes PSUE(MSGID0610-E).""" + test_vol_obj = copy.copy(TEST_VOLUME[0]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object( + vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart3) + @mock.patch.object(vsp_horcm, '_LDEV_STATUS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute5) + def test_create_volume_from_snapshot_full_SMPL(self, *args): + """Error case: SI copy -> pair status becomes SMPL(MSGID0610-E).""" + test_vol_obj = copy.copy(TEST_VOLUME[0]) + test_vol_obj.metadata.update({'copy_method': 'FULL'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_invalid_size(self, *args): + """Error case: volume-size < snapshot-size(MSGID0617-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + TEST_VOLUME[0], TEST_SNAPSHOT[1]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_thin_extend(self, *args): + """Error case: volume > snapshot and copy_method=THIN(MSGID0621-E).""" + 
self.configuration.vsp_thin_pool = 31 + test_vol_obj = copy.copy(TEST_VOLUME[1]) + test_vol_obj.metadata.update({'copy_method': 'THIN'}) + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + test_vol_obj, TEST_SNAPSHOT[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_create_volume_from_snapshot_provider_location_is_none( + self, *args): + """Error case: Snapshot's provider_location is None(MSGID0624-E).""" + self.assertRaises( + exception.VSPError, self.driver.create_volume_from_snapshot, + TEST_VOLUME[0], TEST_SNAPSHOT[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'volume_admin_metadata_get', + side_effect=_volume_admin_metadata_get) + def test_initialize_connection(self, *args): + """Normal case: Initialize connection.""" + ret = self.driver.initialize_connection( + TEST_VOLUME[0], DEFAULT_CONNECTOR) + self.assertEqual('iscsi', ret['driver_volume_type']) + self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) + self.assertEqual('iqn-initiator.hbsd-target', + ret['data']['target_iqn']) + self.assertEqual('CHAP', ret['data']['auth_method']) + self.assertEqual('auth_user', ret['data']['auth_username']) + self.assertEqual('auth_password', ret['data']['auth_password']) + self.assertEqual(0, ret['data']['target_lun']) + + @mock.patch.object( + utils, 'brick_get_connector_properties', + side_effect=_brick_get_connector_properties) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'volume_admin_metadata_get', + side_effect=_volume_admin_metadata_get) + def test_initialize_connection_multipath(self, *args): + """Normal case: Initialize connection in multipath environment.""" + drv = vsp_iscsi.VSPISCSIDriver( + configuration=self.configuration, db=db) + self._setup_config() + self.configuration.vsp_target_ports = ["CL1-A", "CL1-B"] + drv.do_setup(None) + multipath_connector = 
copy.copy(DEFAULT_CONNECTOR) + multipath_connector['multipath'] = True + ret = drv.initialize_connection(TEST_VOLUME[0], multipath_connector) + self.assertEqual('iscsi', ret['driver_volume_type']) + self.assertEqual(['11.22.33.44:3260', '11.22.33.44:3260'], + ret['data']['target_portals']) + self.assertEqual(['iqn-initiator.hbsd-target', + 'iqn-initiator.hbsd-target'], + ret['data']['target_iqns']) + self.assertEqual('CHAP', ret['data']['auth_method']) + self.assertEqual('auth_user', ret['data']['auth_username']) + self.assertEqual('auth_password', ret['data']['auth_password']) + self.assertEqual([0, 0], ret['data']['target_luns']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_initialize_connection_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0619-E).""" + self.assertRaises( + exception.VSPError, self.driver.initialize_connection, + TEST_VOLUME[2], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + db, 'volume_admin_metadata_get', + side_effect=_volume_admin_metadata_get) + def test_initialize_connection_already_attached(self, *args): + """Unusual case: 'add lun' returns 'already defined' error.""" + ret = self.driver.initialize_connection( + TEST_VOLUME[6], DEFAULT_CONNECTOR) + self.assertEqual('iscsi', ret['driver_volume_type']) + self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) + self.assertEqual('iqn-initiator.hbsd-target', + ret['data']['target_iqn']) + self.assertEqual('CHAP', ret['data']['auth_method']) + self.assertEqual('auth_user', ret['data']['auth_username']) + self.assertEqual('auth_password', ret['data']['auth_password']) + self.assertEqual(255, ret['data']['target_lun']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection(self, *args): + """Normal case: Terminate connection.""" + self.driver.terminate_connection(TEST_VOLUME[6], 
DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection_provider_location_is_none(self, *args): + """Unusual case: Volume's provider_location is None(MSGID0302-W).""" + self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection_no_port_mapped_to_ldev(self, *args): + """Unusual case: No port is mapped to the LDEV.""" + self.driver.terminate_connection(TEST_VOLUME[3], DEFAULT_CONNECTOR) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_terminate_connection_initiator_iqn_not_found(self, *args): + """Error case: The connector does not have 'initiator'(MSGID0650-E).""" + connector = dict(DEFAULT_CONNECTOR) + del connector['initiator'] + + self.assertRaises( + exception.VSPError, self.driver.terminate_connection, + TEST_VOLUME[0], connector) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_copy_volume_to_image(self, *args): + """Normal case: Copy a volume to an image.""" + image_service = 'fake_image_service' + image_meta = 'fake_image_meta' + + with mock.patch.object(driver.VolumeDriver, 'copy_volume_to_image') \ + as mock_copy_volume_to_image: + self.driver.copy_volume_to_image( + self.ctxt, TEST_VOLUME[0], image_service, image_meta) + + mock_copy_volume_to_image.assert_called_with( + self.ctxt, TEST_VOLUME[0], image_service, image_meta) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing(self, *args): + """Normal case: Bring an existing volume under Cinder's control.""" + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual('0', ret['provider_location']) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_normal(self, *args): + """Normal case: Return an existing LDEV's size.""" + 
self.driver.manage_existing_get_size( + TEST_VOLUME[0], self.test_existing_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_none_ldev_ref(self, *args): + """Error case: Source LDEV's properties do not exist(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_none_ldev_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_ldev_ref(self, *args): + """Error case: Source LDEV's ID is an invalid decimal(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_ldev_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_value_error_ref(self, *args): + """Error case: Source LDEV's ID is an invalid hex(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_value_error_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_no_ldev_ref(self, *args): + """Error case: Source LDEV's ID is not specified(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_no_ldev_ref) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_sts_ldev(self, *args): + """Error case: Source LDEV's STS is invalid(MSGID0707-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_sts_ldev) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def 
test_manage_existing_get_size_invalid_vol_attr(self, *args): + """Error case: Source LDEV's VOL_ATTR is invalid(MSGID0702-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_vol_attr) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_size_ref(self, *args): + """Error case: Source LDEV's VOL_Capacity is invalid(MSGID0703-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_size) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_manage_existing_get_size_invalid_port_cnt(self, *args): + """Error case: Source LDEV's NUM_PORT is invalid(MSGID0704-E).""" + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_invalid_port_cnt) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + @mock.patch.object( + vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart2) + def test_manage_existing_get_size_failed_to_start_horcmgr(self, *args): + """Error case: _start_horcmgr() returns an error(MSGID0320-W).""" + global run_horcmstart_returns_error2 + run_horcmstart_returns_error2 = True + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, TEST_VOLUME[0], + self.test_existing_failed_to_start_horcmgr) + run_horcmstart_returns_error2 = False + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage(self, *args): + """Normal case: Take out a volume from Cinder's control.""" + self.driver.unmanage(TEST_VOLUME[0]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage_provider_location_is_none(self, *args): + """Error case: The volume's provider_location is None(MSGID0304-W).""" + 
self.driver.unmanage(TEST_VOLUME[2]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage_volume_invalid_sts_ldev(self, *args): + """Unusual case: The volume's STS is BLK.""" + self.driver.unmanage(TEST_VOLUME[13]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_unmanage_volume_is_busy(self, *args): + """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" + self.assertRaises( + exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[4]) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_copy_image_to_volume(self, *args): + """Normal case: Copy an image to a volume.""" + image_service = 'fake_image_service' + image_id = 'fake_image_id' + self.configuration.vsp_horcm_numbers = (400, 401) + + with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ + as mock_copy_image: + self.driver.copy_image_to_volume( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + + mock_copy_image.assert_called_with( + self.ctxt, TEST_VOLUME[0], image_service, image_id) + + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_restore_backup(self, *args): + """Normal case: Restore a backup volume.""" + backup = 'fake_backup' + backup_service = 'fake_backup_service' + + with mock.patch.object(driver.VolumeDriver, 'restore_backup') \ + as mock_restore_backup: + self.driver.restore_backup( + self.ctxt, backup, TEST_VOLUME[0], backup_service) + + mock_restore_backup.assert_called_with( + self.ctxt, backup, TEST_VOLUME[0], backup_service) + + @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) + def test_update_migrated_volume_success(self, *args): + """Normal case: 'modify ldev -status discard_zero_page' succeeds.""" + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[2], + "available") + + @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) + 
@mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 1) + @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) + def test_update_migrated_volume_error(self, *args): + """Error case: 'modify ldev' fails(MSGID0315-W).""" + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[3], + "available") + + def test_get_ldev_volume_is_none(self, *args): + """Error case: The volume is None.""" + self.assertIsNone(vsp_utils.get_ldev(None)) + + def test_check_ignore_error_string(self, *args): + """Normal case: ignore_error is a string.""" + ignore_error = 'SSB=0xB980,0xB902' + stderr = ('raidcom: [EX_CMDRJE] An order to the control/command device' + ' was rejected\nIt was rejected due to SKEY=0x05, ASC=0x26, ' + 'ASCQ=0x00, SSB=0xB980,0xB902 on Serial#(400003)\nCAUSE : ' + 'The specified port can not be operated.') + self.assertTrue(vsp_utils.check_ignore_error(ignore_error, stderr)) + + def test_check_opts_parameter_specified(self, *args): + """Normal case: A valid parameter is specified.""" + cfg.CONF.paramAAA = 'aaa' + vsp_utils.check_opts(conf.Configuration(None), + [cfg.StrOpt('paramAAA')]) + + def test_check_opt_value_parameter_not_set(self, *args): + """Error case: A parameter is not set(MSGID0601-E).""" + self.assertRaises(cfg.NoSuchOptError, + vsp_utils.check_opt_value, + conf.Configuration(None), + ['paramCCC']) + + def test_build_initiator_target_map_no_lookup_service(self, *args): + """Normal case: None is specified for lookup_service.""" + connector = {'wwpns': ['0000000000000000', '1111111111111111']} + target_wwns = ['2222222222222222', '3333333333333333'] + init_target_map = vsp_utils.build_initiator_target_map(connector, + target_wwns, + None) + self.assertEqual( + {'0000000000000000': ['2222222222222222', '3333333333333333'], + '1111111111111111': ['2222222222222222', '3333333333333333']}, + init_target_map) + + def test_update_conn_info_not_update_conn_info(self, *args): + 
"""Normal case: Not update connection info.""" + vsp_utils.update_conn_info(dict({'data': dict({'target_wwn': []})}), + dict({'wwpns': []}), + None) diff --git a/cinder/volume/drivers/hitachi/vsp_common.py b/cinder/volume/drivers/hitachi/vsp_common.py index dbe08a1cb0b..cbef0a7f5cd 100644 --- a/cinder/volume/drivers/hitachi/vsp_common.py +++ b/cinder/volume/drivers/hitachi/vsp_common.py @@ -95,8 +95,8 @@ common_opts = [ cfg.BoolOpt( 'vsp_group_request', default=False, - help='If True, the driver will create host groups on storage ports ' - 'as needed.'), + help='If True, the driver will create host groups or iSCSI targets on ' + 'storage ports as needed.'), ] _REQUIRED_COMMON_OPTS = [ @@ -151,6 +151,7 @@ class VSPCommon(object): 'ldev_range': [], 'ports': [], 'wwns': {}, + 'portals': {}, 'output_first': True, } @@ -627,6 +628,20 @@ class VSPCommon(object): if not self.conf.safe_get(opt): msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt) raise exception.VSPError(msg) + if self.storage_info['protocol'] == 'iSCSI': + self.check_param_iscsi() + + def check_param_iscsi(self): + """Check iSCSI-related parameter values and consistency among them.""" + if self.conf.vsp_use_chap_auth: + if not self.conf.vsp_auth_user: + msg = utils.output_log(MSG.INVALID_PARAMETER, + param='vsp_auth_user') + raise exception.VSPError(msg) + if not self.conf.vsp_auth_password: + msg = utils.output_log(MSG.INVALID_PARAMETER, + param='vsp_auth_password') + raise exception.VSPError(msg) def _range2list(self, param): """Analyze a 'xxx-xxx' string and return a list of two integers.""" @@ -674,7 +689,7 @@ class VSPCommon(object): def init_cinder_hosts(self, **kwargs): """Initialize server-storage connection.""" - targets = kwargs.pop('targets', {'info': {}, 'list': []}) + targets = kwargs.pop('targets', {'info': {}, 'list': [], 'iqns': {}}) connector = cinder_utils.brick_get_connector_properties( multipath=self.conf.use_multipath_for_image_xfer, 
enforce_multipath=self.conf.enforce_multipath_for_image_xfer) @@ -719,8 +734,9 @@ class VSPCommon(object): raise exception.VSPError(msg) def _create_target(self, targets, port, connector, hba_ids): - """Create a host group for the specified storage port.""" - target_name, gid = self.create_target_to_storage(port, connector) + """Create a host group or an iSCSI target on the storage port.""" + target_name, gid = self.create_target_to_storage(port, connector, + hba_ids) utils.output_log(MSG.OBJECT_CREATED, object='a target', details='port: %(port)s, gid: %(gid)s, target_name: ' '%(target)s' % @@ -735,13 +751,13 @@ class VSPCommon(object): targets['list'].append((port, gid)) @abc.abstractmethod - def create_target_to_storage(self, port, connector): - """Create a host group on the specified port.""" + def create_target_to_storage(self, port, connector, hba_ids): + """Create a host group or an iSCSI target on the specified port.""" raise NotImplementedError() @abc.abstractmethod def set_target_mode(self, port, gid): - """Configure the host group to meet the environment.""" + """Configure the target to meet the environment.""" raise NotImplementedError() @abc.abstractmethod @@ -751,7 +767,7 @@ class VSPCommon(object): @abc.abstractmethod def delete_target_from_storage(self, port, gid): - """Delete the host group from the port.""" + """Delete the host group or the iSCSI target from the port.""" raise NotImplementedError() def output_param_to_log(self): @@ -777,6 +793,7 @@ class VSPCommon(object): 'info': {}, 'list': [], 'lun': {}, + 'iqns': {}, } ldev = utils.get_ldev(volume) # When 'ldev' is 0, it should be true. 
@@ -813,10 +830,18 @@ class VSPCommon(object): multipath = connector.get('multipath', False) if self.storage_info['protocol'] == 'FC': data = self.get_properties_fc(targets) + elif self.storage_info['protocol'] == 'iSCSI': + data = self.get_properties_iscsi(targets, multipath) if target_lun is not None: data['target_discovered'] = False if not multipath or self.storage_info['protocol'] == 'FC': data['target_lun'] = target_lun + else: + target_luns = [] + for target in targets['list']: + if targets['lun'][target[0]]: + target_luns.append(target_lun) + data['target_luns'] = target_luns return data def get_properties_fc(self, targets): @@ -827,6 +852,27 @@ class VSPCommon(object): if targets['lun'][target[0]]] return data + def get_properties_iscsi(self, targets, multipath): + """Return iSCSI-specific server-LDEV connection info.""" + data = {} + primary_target = targets['list'][0] + if not multipath: + data['target_portal'] = self.storage_info[ + 'portals'][primary_target[0]] + data['target_iqn'] = targets['iqns'][primary_target] + else: + data['target_portals'] = [ + self.storage_info['portals'][target[0]] for target in + targets['list'] if targets['lun'][target[0]]] + data['target_iqns'] = [ + targets['iqns'][target] for target in targets['list'] + if targets['lun'][target[0]]] + if self.conf.vsp_use_chap_auth: + data['auth_method'] = 'CHAP' + data['auth_username'] = self.conf.vsp_auth_user + data['auth_password'] = self.conf.vsp_auth_password + return data + @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' '{connector[host]}') def terminate_connection(self, volume, connector): @@ -834,6 +880,7 @@ class VSPCommon(object): targets = { 'info': {}, 'list': [], + 'iqns': {}, } mapped_targets = { 'list': [], @@ -857,11 +904,12 @@ class VSPCommon(object): unmap_targets['list'].sort(reverse=True) self.unmap_ldev(unmap_targets, ldev) - target_wwn = [ - self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]] - for port_gid in unmap_targets['list']] - 
return {'driver_volume_type': self.driver_info['volume_type'], - 'data': {'target_wwn': target_wwn}} + if self.storage_info['protocol'] == 'FC': + target_wwn = [ + self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]] + for port_gid in unmap_targets['list']] + return {'driver_volume_type': self.driver_info['volume_type'], + 'data': {'target_wwn': target_wwn}} @abc.abstractmethod def find_mapped_targets_from_storage(self, targets, ldev, target_ports): diff --git a/cinder/volume/drivers/hitachi/vsp_horcm.py b/cinder/volume/drivers/hitachi/vsp_horcm.py index 1dd5d0d4380..cc122ade918 100644 --- a/cinder/volume/drivers/hitachi/vsp_horcm.py +++ b/cinder/volume/drivers/hitachi/vsp_horcm.py @@ -238,7 +238,7 @@ def horcmgr_synchronized(func): def _is_valid_target(target, target_name, target_ports, is_pair): - """Return True if the specified host group is valid, False otherwise.""" + """Return True if the specified target is valid, False otherwise.""" if is_pair: return (target[:utils.PORT_ID_LENGTH] in target_ports and target_name == _PAIR_TARGET_NAME) @@ -957,7 +957,7 @@ class VSPHORCM(common.VSPCommon): interval=interval, success_code=success_code, timeout=timeout) LOG.debug( 'Deleted logical unit path of the specified logical ' - 'device. (LDEV: %(ldev)s, host group: %(target)s)', + 'device. 
(LDEV: %(ldev)s, target: %(target)s)', {'ldev': ldev, 'target': target}) def find_all_mapped_targets_from_storage(self, targets, ldev): @@ -968,7 +968,7 @@ class VSPHORCM(common.VSPCommon): targets['list'].append(port.split()[0]) def delete_target_from_storage(self, port, gid): - """Delete the host group from the port.""" + """Delete the host group or the iSCSI target from the port.""" result = self.run_raidcom( 'delete', 'host_grp', '-port', '-'.join([port, gid]), do_raise=False) @@ -1123,6 +1123,7 @@ HORCM_CMD targets = { 'info': {}, 'list': [], + 'iqns': {}, } super(VSPHORCM, self).init_cinder_hosts(targets=targets) self._init_pair_targets(targets['info']) @@ -1141,7 +1142,7 @@ HORCM_CMD 'wwpns': [_PAIR_TARGET_NAME_BODY], } target_name, gid = self.create_target_to_storage( - port, connector) + port, connector, None) utils.output_log(MSG.OBJECT_CREATED, object='a target for pair operation', details='port: %(port)s, gid: %(gid)s, ' diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py b/cinder/volume/drivers/hitachi/vsp_horcm_fc.py index 3430c0cbff7..fb9e6443661 100644 --- a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py +++ b/cinder/volume/drivers/hitachi/vsp_horcm_fc.py @@ -66,7 +66,7 @@ class VSPHORCMFC(horcm.VSPHORCM): utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list', value=self.storage_info['wwns']) - def create_target_to_storage(self, port, connector): + def create_target_to_storage(self, port, connector, hba_ids): """Create a host group on the specified port.""" wwpns = self.get_hba_ids_from_connector(connector) target_name = utils.TARGET_PREFIX + min(wwpns) diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py b/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py new file mode 100644 index 00000000000..c6d2f367818 --- /dev/null +++ b/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py @@ -0,0 +1,184 @@ +# Copyright (C) 2016, Hitachi, Ltd. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +"""HORCM interface iSCSI module for Hitachi VSP Driver.""" + +import re + +from oslo_log import log as logging + +from cinder import exception +from cinder.volume.drivers.hitachi import vsp_horcm as horcm +from cinder.volume.drivers.hitachi import vsp_utils as utils + +_ISCSI_LINUX_MODE_OPTS = ['-host_mode', 'LINUX'] +_ISCSI_HOST_MODE_OPT = '-host_mode_opt' +_ISCSI_HMO_REPORT_FULL_PORTAL = 83 +_ISCSI_TARGETS_PATTERN = re.compile( + (r"^CL\w-\w+ +(?P<gid>\d+) +%s(?!pair00 )\S* +(?P<iqn>\S+) +" + r"\w+ +\w +\d+ ") % utils.TARGET_PREFIX, re.M) +_ISCSI_PORT_PATTERN = re.compile( + r"^(CL\w-\w)\w* +ISCSI +TAR +\w+ +\w+ +\w +\w+ +Y ", re.M) +_ISCSI_IPV4_ADDR_PATTERN = re.compile( + r"^IPV4_ADDR +: +(?P<ipv4_addr>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$", re.M) +_ISCSI_TCP_PORT_PATTERN = re.compile( + r'^TCP_PORT\ +:\ +(?P<tcp_port>\d+)$', re.M) + +LOG = logging.getLogger(__name__) +MSG = utils.VSPMsg + + +class VSPHORCMISCSI(horcm.VSPHORCM): + """HORCM interface iscsi class for Hitachi VSP Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. 
+ + """ + + def connect_storage(self): + """Prepare for using the storage.""" + target_ports = self.conf.vsp_target_ports + + super(VSPHORCMISCSI, self).connect_storage() + result = self.run_raidcom('get', 'port') + for port in _ISCSI_PORT_PATTERN.findall(result[1]): + if (target_ports and port in target_ports and + self._set_target_portal(port)): + self.storage_info['ports'].append(port) + + self.check_ports_info() + utils.output_log(MSG.SET_CONFIG_VALUE, + object='port-<IP address:port> list', + value=self.storage_info['portals']) + + def _set_target_portal(self, port): + """Get port info and store it in an instance variable.""" + ipv4_addr = None + tcp_port = None + result = self.run_raidcom( + 'get', 'port', '-port', port, '-key', 'opt') + match = _ISCSI_IPV4_ADDR_PATTERN.search(result[1]) + if match: + ipv4_addr = match.group('ipv4_addr') + match = _ISCSI_TCP_PORT_PATTERN.search(result[1]) + if match: + tcp_port = match.group('tcp_port') + if not ipv4_addr or not tcp_port: + return False + self.storage_info['portals'][port] = ':'.join( + [ipv4_addr, tcp_port]) + return True + + def create_target_to_storage(self, port, connector, hba_ids): + """Create an iSCSI target on the specified port.""" + target_name = utils.TARGET_PREFIX + connector['ip'] + args = [ + 'add', 'host_grp', '-port', port, '-host_grp_name', target_name] + if hba_ids: + args.extend(['-iscsi_name', hba_ids + utils.TARGET_IQN_SUFFIX]) + try: + result = self.run_raidcom(*args) + except exception.VSPError: + result = self.run_raidcom('get', 'host_grp', '-port', port) + hostgroup_pt = re.compile( + r"^CL\w-\w+ +(?P<gid>\d+) +%s +\S+ " % + target_name.replace('.', r'\.'), re.M) + gid = hostgroup_pt.findall(result[1]) + if gid: + return target_name, gid[0] + else: + raise + return target_name, horcm.find_value(result[1], 'gid') + + def set_hba_ids(self, port, gid, hba_ids): + """Connect the specified HBA with the specified port.""" + self.run_raidcom( + 'add', 'hba_iscsi', '-port', '-'.join([port, gid]), + 
'-hba_iscsi_name', hba_ids) + + def set_target_mode(self, port, gid): + """Configure the iSCSI target to meet the environment.""" + hostmode_setting = [] + hostmode_setting[:] = _ISCSI_LINUX_MODE_OPTS + hostmode_setting.append(_ISCSI_HOST_MODE_OPT) + hostmode_setting.append(_ISCSI_HMO_REPORT_FULL_PORTAL) + self.run_raidcom( + 'modify', 'host_grp', '-port', + '-'.join([port, gid]), *hostmode_setting) + + def find_targets_from_storage(self, targets, connector, target_ports): + """Find mapped ports, memorize them and return unmapped port count.""" + nr_not_found = 0 + target_name = utils.TARGET_PREFIX + connector['ip'] + success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ]) + iqn = self.get_hba_ids_from_connector(connector) + iqn_pattern = re.compile( + r'^CL\w-\w+ +\d+ +\S+ +%s ' % iqn, re.M) + + for port in target_ports: + targets['info'][port] = False + + result = self.run_raidcom( + 'get', 'hba_iscsi', '-port', port, target_name, + success_code=success_code) + if iqn_pattern.search(result[1]): + gid = result[1].splitlines()[1].split()[1] + targets['info'][port] = True + targets['list'].append((port, gid)) + continue + + result = self.run_raidcom( + 'get', 'host_grp', '-port', port) + for gid, iqn in _ISCSI_TARGETS_PATTERN.findall(result[1]): + result = self.run_raidcom( + 'get', 'hba_iscsi', '-port', '-'.join([port, gid])) + if iqn_pattern.search(result[1]): + targets['info'][port] = True + targets['list'].append((port, gid)) + targets['iqns'][(port, gid)] = iqn + break + else: + nr_not_found += 1 + + return nr_not_found + + def get_properties_iscsi(self, targets, multipath): + """Check if specified iSCSI targets exist and store their IQNs.""" + if not multipath: + target_list = targets['list'][:1] + else: + target_list = targets['list'][:] + + for target in target_list: + if target not in targets['iqns']: + port, gid = target + result = self.run_raidcom('get', 'host_grp', '-port', port) + match = re.search( + r"^CL\w-\w+ +%s +\S+ +(?P<iqn>\S+) +\w+ +\w +\d+ " 
% gid, + result[1], re.M) + if not match: + msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, + resource='Target IQN') + raise exception.VSPError(msg) + targets['iqns'][target] = match.group('iqn') + LOG.debug('Found iqn of the iSCSI target. (port: %(port)s, ' + 'gid: %(gid)s, target iqn: %(iqn)s)', + {'port': port, 'gid': gid, + 'iqn': match.group('iqn')}) + return super(VSPHORCMISCSI, self).get_properties_iscsi( + targets, multipath) diff --git a/cinder/volume/drivers/hitachi/vsp_iscsi.py b/cinder/volume/drivers/hitachi/vsp_iscsi.py new file mode 100644 index 00000000000..f4ab8e0de35 --- /dev/null +++ b/cinder/volume/drivers/hitachi/vsp_iscsi.py @@ -0,0 +1,185 @@ +# Copyright (C) 2016, Hitachi, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +"""iSCSI module for Hitachi VSP Driver.""" + +from oslo_config import cfg + +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.hitachi import vsp_common as common +from cinder.volume.drivers.hitachi import vsp_utils as utils + +iscsi_opts = [ + cfg.BoolOpt( + 'vsp_use_chap_auth', + default=False, + help='If True, CHAP authentication will be applied to communication ' + 'between hosts and any of the iSCSI targets on the storage ports.'), + cfg.StrOpt( + 'vsp_auth_user', + help='Name of the user used for CHAP authentication performed in ' + 'communication between hosts and iSCSI targets on the storage ports.'), + cfg.StrOpt( + 'vsp_auth_password', + secret=True, + help='Password corresponding to vsp_auth_user.'), +] + +MSG = utils.VSPMsg + +_DRIVER_INFO = { + 'proto': 'iSCSI', + 'hba_id': 'initiator', + 'hba_id_type': 'iSCSI initiator IQN', + 'msg_id': { + 'target': MSG.CREATE_ISCSI_TARGET_FAILED, + }, + 'volume_backend_name': utils.DRIVER_PREFIX + 'iSCSI', + 'volume_opts': iscsi_opts, + 'volume_type': 'iscsi', +} + +CONF = cfg.CONF +CONF.register_opts(iscsi_opts) + + +@interface.volumedriver +class VSPISCSIDriver(driver.ISCSIDriver): + """iSCSI class for Hitachi VSP Driver. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver. 
+ + """ + + VERSION = common.VERSION + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Hitachi_VSP_CI" + + def __init__(self, *args, **kwargs): + """Initialize instance variables.""" + utils.output_log(MSG.DRIVER_INITIALIZATION_START, + driver=self.__class__.__name__, + version=self.get_version()) + super(VSPISCSIDriver, self).__init__(*args, **kwargs) + + self.configuration.append_config_values(common.common_opts) + self.configuration.append_config_values(iscsi_opts) + self.common = utils.import_object( + self.configuration, _DRIVER_INFO, kwargs.get('db')) + + def check_for_setup_error(self): + """Error are checked in do_setup() instead of this method.""" + pass + + @utils.output_start_end_log + def create_volume(self, volume): + """Create a volume and return its properties.""" + return self.common.create_volume(volume) + + @utils.output_start_end_log + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot and return its properties.""" + return self.common.create_volume_from_snapshot(volume, snapshot) + + @utils.output_start_end_log + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume and return its properties.""" + return self.common.create_cloned_volume(volume, src_vref) + + @utils.output_start_end_log + def delete_volume(self, volume): + """Delete the specified volume.""" + self.common.delete_volume(volume) + + @utils.output_start_end_log + def create_snapshot(self, snapshot): + """Create a snapshot from a volume and return its properties.""" + return self.common.create_snapshot(snapshot) + + @utils.output_start_end_log + def delete_snapshot(self, snapshot): + """Delete the specified snapshot.""" + self.common.delete_snapshot(snapshot) + + def get_volume_stats(self, refresh=False): + """Return properties, capabilities and current states of the driver.""" + return self.common.get_volume_stats(refresh) + + @utils.output_start_end_log + def update_migrated_volume( + self, ctxt, 
volume, new_volume, original_volume_status): + """Do any remaining jobs after migration.""" + self.common.discard_zero_page(new_volume) + super(VSPISCSIDriver, self).update_migrated_volume( + ctxt, volume, new_volume, original_volume_status) + + @utils.output_start_end_log + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + super(VSPISCSIDriver, self).copy_image_to_volume( + context, volume, image_service, image_id) + self.common.discard_zero_page(volume) + + @utils.output_start_end_log + def extend_volume(self, volume, new_size): + """Extend the specified volume to the specified size.""" + self.common.extend_volume(volume, new_size) + + @utils.output_start_end_log + def manage_existing(self, volume, existing_ref): + """Return volume properties which Cinder needs to manage the volume.""" + return self.common.manage_existing(existing_ref) + + @utils.output_start_end_log + def manage_existing_get_size(self, volume, existing_ref): + """Return the size[GB] of the specified volume.""" + return self.common.manage_existing_get_size(existing_ref) + + @utils.output_start_end_log + def unmanage(self, volume): + """Prepare the volume for removing it from Cinder management.""" + self.common.unmanage(volume) + + @utils.output_start_end_log + def do_setup(self, context): + """Prepare for the startup of the driver.""" + self.common.do_setup(context) + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def create_export(self, context, volume, connector): + """Export the volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + @utils.output_start_end_log + def initialize_connection(self, volume, connector): + """Initialize connection between the server and the volume.""" + return self.common.initialize_connection(volume, connector) + + @utils.output_start_end_log + def 
terminate_connection(self, volume, connector, **kwargs): + """Terminate connection between the server and the volume.""" + self.common.terminate_connection(volume, connector) diff --git a/cinder/volume/drivers/hitachi/vsp_utils.py b/cinder/volume/drivers/hitachi/vsp_utils.py index c9c74fac8cb..5978b499256 100644 --- a/cinder/volume/drivers/hitachi/vsp_utils.py +++ b/cinder/volume/drivers/hitachi/vsp_utils.py @@ -43,11 +43,13 @@ _DRIVER_DIR = 'cinder.volume.drivers.hitachi' _DRIVERS = { 'HORCM': { 'FC': 'vsp_horcm_fc.VSPHORCMFC', + 'iSCSI': 'vsp_horcm_iscsi.VSPHORCMISCSI', }, } DRIVER_PREFIX = 'VSP' TARGET_PREFIX = 'HBSD-' +TARGET_IQN_SUFFIX = '.hbsd-target' GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512 MAX_PROCESS_WAITTIME = 24 * 60 * 60 @@ -131,8 +133,8 @@ class VSPMsg(enum.Enum): DELETE_TARGET_FAILED = { 'msg_id': 306, 'loglevel': base_logging.WARNING, - 'msg': _LW('A host group could not be deleted. (port: %(port)s, ' - 'gid: %(id)s)'), + 'msg': _LW('A host group or an iSCSI target could not be deleted. ' + '(port: %(port)s, gid: %(id)s)'), 'suffix': WARNING_SUFFIX } CREATE_HOST_GROUP_FAILED = { @@ -141,6 +143,12 @@ class VSPMsg(enum.Enum): 'msg': _LW('A host group could not be added. (port: %(port)s)'), 'suffix': WARNING_SUFFIX } + CREATE_ISCSI_TARGET_FAILED = { + 'msg_id': 309, + 'loglevel': base_logging.WARNING, + 'msg': _LW('An iSCSI target could not be added. 
(port: %(port)s)'), + 'suffix': WARNING_SUFFIX + } UNMAP_LDEV_FAILED = { 'msg_id': 310, 'loglevel': base_logging.WARNING, @@ -408,7 +416,7 @@ class VSPMsg(enum.Enum): NO_CONNECTED_TARGET = { 'msg_id': 649, 'loglevel': base_logging.ERROR, - 'msg': _LE('The host group was not found.'), + 'msg': _LE('The host group or iSCSI target was not found.'), 'suffix': ERROR_SUFFIX } RESOURCE_NOT_FOUND = { diff --git a/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml b/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml new file mode 100644 index 00000000000..f2055424b22 --- /dev/null +++ b/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds new Hitachi VSP iSCSI Driver.