diff --git a/cinder/exception.py b/cinder/exception.py
index defa800c21d..48edd2ffd5c 100644
--- a/cinder/exception.py
+++ b/cinder/exception.py
@@ -1128,45 +1128,6 @@ class InvalidGroupSnapshotStatus(Invalid):
     message = _("Invalid GroupSnapshot Status: %(reason)s")
 
 
-# Hitachi Block Storage Driver
-class HBSDError(VolumeDriverException):
-    message = _("HBSD error occurs.")
-
-
-class HBSDCmdError(HBSDError):
-
-    def __init__(self, message=None, ret=None, err=None):
-        self.ret = ret
-        self.stderr = err
-
-        super(HBSDCmdError, self).__init__(message=message)
-
-
-class HBSDBusy(HBSDError):
-    message = "Device or resource is busy."
-
-
-class HBSDNotFound(NotFound):
-    message = _("Storage resource could not be found.")
-
-
-class HBSDVolumeIsBusy(VolumeIsBusy):
-    message = _("Volume %(volume_name)s is busy.")
-
-
-# Hitachi VSP Driver
-class VSPError(VolumeDriverException):
-    message = _("VSP error occurred. %(message)s")
-
-
-class VSPBusy(VSPError):
-    message = _("Device or resource is busy.")
-
-
-class VSPNotSupported(VSPError):
-    message = _("The function on the storage is not supported.")
-
-
 # Datera driver
 class DateraAPIException(VolumeBackendAPIException):
     message = _("Bad response from Datera API")
@@ -1297,11 +1258,6 @@ class NotSupportedOperation(Invalid):
     code = 405
 
 
-# Hitachi HNAS drivers
-class HNASConnError(VolumeDriverException):
-    message = "%(message)s"
-
-
 # NexentaStor driver exception
 class NexentaException(VolumeDriverException):
     message = "%(message)s"
diff --git a/cinder/opts.py b/cinder/opts.py
index d5a55532c99..167370b8634 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -99,26 +99,6 @@ from cinder.volume.drivers.fujitsu import eternus_dx_common as \
 from cinder.volume.drivers.fusionstorage import dsware as \
     cinder_volume_drivers_fusionstorage_dsware
 from cinder.volume.drivers import hgst as cinder_volume_drivers_hgst
-from cinder.volume.drivers.hitachi import hbsd_common as \
-    cinder_volume_drivers_hitachi_hbsdcommon
-from cinder.volume.drivers.hitachi import hbsd_fc as \
-    cinder_volume_drivers_hitachi_hbsdfc
-from cinder.volume.drivers.hitachi import hbsd_horcm as \
-    cinder_volume_drivers_hitachi_hbsdhorcm
-from cinder.volume.drivers.hitachi import hbsd_iscsi as \
-    cinder_volume_drivers_hitachi_hbsdiscsi
-from cinder.volume.drivers.hitachi import hnas_nfs as \
-    cinder_volume_drivers_hitachi_hnasnfs
-from cinder.volume.drivers.hitachi import hnas_utils as \
-    cinder_volume_drivers_hitachi_hnasutils
-from cinder.volume.drivers.hitachi import vsp_common as \
-    cinder_volume_drivers_hitachi_vspcommon
-from cinder.volume.drivers.hitachi import vsp_fc as \
-    cinder_volume_drivers_hitachi_vspfc
-from cinder.volume.drivers.hitachi import vsp_horcm as \
-    cinder_volume_drivers_hitachi_vsphorcm
-from cinder.volume.drivers.hitachi import vsp_iscsi as \
-    cinder_volume_drivers_hitachi_vspiscsi
 from cinder.volume.drivers.hpe import hpe_3par_common as \
     cinder_volume_drivers_hpe_hpe3parcommon
 from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \
@@ -296,16 +276,6 @@ def list_opts():
                 FJ_ETERNUS_DX_OPT_opts,
                 cinder_volume_drivers_fusionstorage_dsware.volume_opts,
                 cinder_volume_drivers_hgst.hgst_opts,
-                cinder_volume_drivers_hitachi_hbsdcommon.volume_opts,
-                cinder_volume_drivers_hitachi_hbsdfc.volume_opts,
-                cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts,
-                cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts,
-                cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS,
-                cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts,
-                cinder_volume_drivers_hitachi_vspcommon.common_opts,
-                cinder_volume_drivers_hitachi_vspfc.fc_opts,
-                cinder_volume_drivers_hitachi_vsphorcm.horcm_opts,
-                cinder_volume_drivers_hitachi_vspiscsi.iscsi_opts,
                 cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
                 cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts,
                 cinder_volume_drivers_huawei_huaweidriver.huawei_opts,
diff --git a/cinder/tests/unit/volume/drivers/hitachi/__init__.py b/cinder/tests/unit/volume/drivers/hitachi/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py
deleted file mode 100644
index d8117d7747d..00000000000
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py
+++ /dev/null
@@ -1,1034 +0,0 @@
-# Copyright (C) 2014, 2015, Hitachi, Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""
-Self test for Hitachi Block Storage Driver
-"""
-
-import mock
-
-from cinder import exception
-from cinder import test
-from cinder import utils
-from cinder.volume import configuration as conf
-from cinder.volume.drivers.hitachi import hbsd_basiclib
-from cinder.volume.drivers.hitachi import hbsd_common
-from cinder.volume.drivers.hitachi import hbsd_fc
-from cinder.volume.drivers.hitachi import hbsd_horcm
-
-
-def _exec_raidcom(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_vals.get(args)
-
-
-def _exec_raidcom_get_ldev_no_stdout(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_no_stdout.get(args)
-
-
-def _exec_raidcom_get_ldev_no_nml(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_no_nml.get(args)
-
-
-def _exec_raidcom_get_ldev_no_open_v(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_no_open_v.get(args)
-
-
-def _exec_raidcom_get_ldev_no_hdp(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_no_hdp.get(args)
-
-
-def _exec_raidcom_get_ldev_pair(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_pair.get(args)
-
-
-def _exec_raidcom_get_ldev_permit(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_permit.get(args)
-
-
-def _exec_raidcom_get_ldev_invalid_size(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_invalid_size.get(args)
-
-
-def _exec_raidcom_get_ldev_num_port(*args, **kargs):
-    return HBSDHORCMFCDriverTest.horcm_get_ldev_num_port.get(args)
-
-
-class HBSDHORCMFCDriverTest(test.TestCase):
-    """Test HBSDHORCMFCDriver."""
-
-    raidqry_result = "DUMMY\n\
-Ver&Rev: 01-31-03/06"
-
-    raidcom_get_host_grp_result = "DUMMY\n\
-CL1-A 0 HBSD-127.0.0.1 None -\n\
-CL1-A 1 - None -"
-
-    raidcom_get_result = "LDEV : 0\n\
-VOL_TYPE : OPEN-V-CVS\n\
-LDEV : 1\n\
-VOL_TYPE : NOT DEFINED"
-
-    raidcom_get_result2 = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : CVS : HDP\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 0\n\
-STS : NML"
-
-    raidcom_get_result3 = "Serial# : 210944\n\
-LDEV : 0\n\
-SL : 0\n\
-CL : 0\n\
-VOL_TYPE : NOT DEFINED\n\
-VOL_Capacity(BLK) : 2098560\n\
-NUM_LDEV : 1\n\
-LDEVs : 0\n\
-NUM_PORT : 3\n\
-PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\
-F_POOLID : NONE\n\
-VOL_ATTR : CVS\n\
-RAID_LEVEL : RAID5\n\
-RAID_TYPE : 3D+1P\n\
-NUM_GROUP : 1\n\
-RAID_GROUPs : 01-01\n\
-DRIVE_TYPE : DKR5C-J600SS\n\
-DRIVE_Capa : 1143358736\n\
-LDEV_NAMING : test\n\
-STS : NML\n\
-OPE_TYPE : NONE\n\
-OPE_RATE : 100\n\
-MP# : 0\n\
-SSID : 0004"
-
-    raidcom_get_command_status_result = "HANDLE SSB1 SSB2 ERR_CNT\
- Serial# Description\n\
-00d4 - - 0 210944 -"
-
-    raidcom_get_result4 = "Serial# : 210944\n\
-LDEV : 0\n\
-SL : 0\n\
-CL : 0\n\
-VOL_TYPE : DEFINED\n\
-VOL_Capacity(BLK) : 2098560\n\
-NUM_LDEV : 1\n\
-LDEVs : 0\n\
-NUM_PORT : 3\n\
-PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\
-F_POOLID : NONE\n\
-VOL_ATTR : CVS\n\
-RAID_LEVEL : RAID5\n\
-RAID_TYPE : 3D+1P\n\
-NUM_GROUP : 1\n\
-RAID_GROUPs : 01-01\n\
-DRIVE_TYPE : DKR5C-J600SS\n\
-DRIVE_Capa : 1143358736\n\
-LDEV_NAMING : test\n\
-STS : NML\n\
-OPE_TYPE : NONE\n\
-OPE_RATE : 100\n\
-MP# : 0\n\
-SSID : 0004"
-
-    raidcom_get_copy_grp_result = "DUMMY\n\
-HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31P - - None\n\
-HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31S - - None"
-
-    raidcom_get_device_grp_result1 = "DUMMY\n\
-HBSD-127.0.0.1None1A31P HBSD-ldev-0-2 0 None"
-
-    raidcom_get_device_grp_result2 = "DUMMY\n\
-HBSD-127.0.0.1None1A31S HBSD-ldev-0-2 2 None"
-
-    raidcom_get_snapshot_result = "DUMMY\n\
-HBSD-sanp P-VOL PSUS None 0 3 3 18 100 G--- 53ee291f\n\
-HBSD-sanp P-VOL PSUS None 0 4 4 18 100 G--- 53ee291f"
-
-    raidcom_dp_pool_result = "DUMMY \n\
-030 POLN 0 6006 6006 75 80 1 14860 32 167477"
-
-    raidcom_port_result = "DUMMY\n\
-CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 None 50060E801053C2E0 -"
-
-    raidcom_port_result2 = "DUMMY\n\
-CL1-A 12345678912345aa None -\n\
-CL1-A 12345678912345bb None -"
-
-    raidcom_host_grp_result = "DUMMY\n\
-CL1-A 0 HBSD-127.0.0.1 None LINUX/IRIX"
-
-    raidcom_hba_wwn_result = "DUMMY\n\
-CL1-A 0 HBSD-127.0.0.1 12345678912345aa None -"
-
-    raidcom_get_lun_result = "DUMMY\n\
-CL1-A 0 LINUX/IRIX 254 1 5 - None"
-
-    pairdisplay_result = "DUMMY\n\
-HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 L CL1-A-0 0 0 0 None 0 P-VOL PSUS None 2\
- -\n\
-HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 R CL1-A-0 0 0 0 None 2 S-VOL SSUS - 0 -"
-
-    pairdisplay_result2 = "DUMMY\n\
-HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 L CL1-A-1 0 0 0 None 1 P-VOL PAIR None 1\
- -\n\
-HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 R CL1-A-1 0 0 0 None 1 S-VOL PAIR - 1 -"
-
-    horcm_vals = {
-        ('raidqry', u'-h'):
-        [0, "%s" % raidqry_result, ""],
-        ('raidcom', '-login user pasword'):
-        [0, "", ""],
-        ('raidcom', u'get host_grp -port CL1-A -key host_grp'):
-        [0, "%s" % raidcom_get_host_grp_result, ""],
-        ('raidcom', u'add host_grp -port CL1-A-1 -host_grp_name HBSD-pair00'):
-        [0, "", ""],
-        ('raidcom',
-         u'add host_grp -port CL1-A-1 -host_grp_name HBSD-127.0.0.2'):
-        [0, "", ""],
-        ('raidcom', u'delete host_grp -port CL1-A-1 HBSD-127.0.0.2'):
-        [1, "", ""],
-        ('raidcom', 'get ldev -ldev_id 0 -cnt 2'):
-        [0, "%s" % raidcom_get_result, ""],
-        ('raidcom',
-         'add ldev -pool 30 -ldev_id 1 -capacity 128G -emulation OPEN-V'):
-        [0, "", ""],
-        ('raidcom',
-         'add ldev -pool 30 -ldev_id 1 -capacity 256G -emulation OPEN-V'):
-        [1, "", "SSB=0x2E22,0x0001"],
-        ('raidcom', 'get command_status'):
-        [0, "%s" % raidcom_get_command_status_result, ""],
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_result2, ""],
-        ('raidcom', 'get ldev -ldev_id 1 -check_status NML -time 120'):
-        [0, "", ""],
-        ('raidcom', 'get snapshot -ldev_id 0'):
-        [0, "", ""],
-        ('raidcom', 'get snapshot -ldev_id 1'):
-        [0, "%s" % raidcom_get_snapshot_result, ""],
-        ('raidcom', 'get snapshot -ldev_id 2'):
-        [0, "", ""],
-        ('raidcom', 'get snapshot -ldev_id 3'):
-        [0, "", ""],
-        ('raidcom', 'get copy_grp'):
-        [0, "%s" % raidcom_get_copy_grp_result, ""],
-        ('raidcom', 'delete ldev -ldev_id 0'):
-        [0, "", ""],
-        ('raidcom', 'delete ldev -ldev_id 1'):
-        [0, "", ""],
-        ('raidcom', 'delete ldev -ldev_id 2'):
-        [1, "", "error"],
-        ('raidcom', 'delete ldev -ldev_id 3'):
-        [1, "", "SSB=0x2E20,0x0000"],
-        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30P'):
-        [0, "", ""],
-        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30S'):
-        [0, "", ""],
-        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31P'):
-        [0, "%s" % raidcom_get_device_grp_result1, ""],
-        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31S'):
-        [0, "%s" % raidcom_get_device_grp_result2, ""],
-        ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -CLI'):
-        [0, "", ""],
-        ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -CLI'):
-        [0, "", ""],
-        ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -CLI'):
-        [0, "%s" % pairdisplay_result, ""],
-        ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -CLI'):
-        [0, "%s" % pairdisplay_result, ""],
-        ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -CLI'):
-        [0, "%s" % pairdisplay_result2, ""],
-        ('raidcom',
-         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \
-HBSD-ldev-0-1 -ldev_id 0'):
-        [0, "", ""],
-        ('raidcom',
-         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \
-HBSD-ldev-0-1 -ldev_id 1'):
-        [0, "", ""],
-        ('raidcom',
-         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \
-HBSD-ldev-1-1 -ldev_id 1'):
-        [0, "", ""],
-        ('raidcom',
-         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \
-HBSD-ldev-1-1 -ldev_id 1'):
-        [0, "", ""],
-        ('raidcom',
-         'add copy_grp -copy_grp_name HBSD-127.0.0.1None1A30 \
-HBSD-127.0.0.1None1A30P HBSD-127.0.0.1None1A30S -mirror_id 0'):
-        [0, "", ""],
-        ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 \
--split -fq quick -c 3 -vl'):
-        [0, "", ""],
-        ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 \
--split -fq quick -c 3 -vl'):
-        [0, "", ""],
-        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowait'):
-        [4, "", ""],
-        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowaits'):
-        [4, "", ""],
-        ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowait'):
-        [1, "", ""],
-        ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowaits'):
-        [1, "", ""],
-        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowait'):
-        [4, "", ""],
-        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowaits'):
-        [200, "", ""],
-        ('pairsplit', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -S'):
-        [0, "", ""],
-        ('raidcom', 'extend ldev -ldev_id 0 -capacity 128G'):
-        [0, "", ""],
-        ('raidcom', 'get dp_pool'):
-        [0, "%s" % raidcom_dp_pool_result, ""],
-        ('raidcom', 'get port'):
-        [0, "%s" % raidcom_port_result, ""],
-        ('raidcom', 'get port -port CL1-A'):
-        [0, "%s" % raidcom_port_result2, ""],
-        ('raidcom', 'get host_grp -port CL1-A'):
-        [0, "%s" % raidcom_host_grp_result, ""],
-        ('raidcom', 'get hba_wwn -port CL1-A-0'):
-        [0, "%s" % raidcom_hba_wwn_result, ""],
-        ('raidcom', 'get hba_wwn -port CL1-A-1'):
-        [0, "", ""],
-        ('raidcom', 'add hba_wwn -port CL1-A-0 -hba_wwn 12345678912345bb'):
-        [0, "", ""],
-        ('raidcom', 'add hba_wwn -port CL1-A-1 -hba_wwn 12345678912345bb'):
-        [1, "", ""],
-        ('raidcom', u'get lun -port CL1-A-0'):
-        [0, "%s" % raidcom_get_lun_result, ""],
-        ('raidcom', u'get lun -port CL1-A-1'):
-        [0, "", ""],
-        ('raidcom', u'add lun -port CL1-A-0 -ldev_id 0 -lun_id 0'):
-        [0, "", ""],
-        ('raidcom', u'add lun -port CL1-A-0 -ldev_id 1 -lun_id 0'):
-        [0, "", ""],
-        ('raidcom', u'add lun -port CL1-A-1 -ldev_id 0 -lun_id 0'):
-        [0, "", ""],
-        ('raidcom', u'add lun -port CL1-A-1 -ldev_id 1 -lun_id 0'):
-        [0, "", ""],
-        ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 0'):
-        [0, "", ""],
-        ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 1'):
-        [0, "", ""],
-        ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 0'):
-        [0, "", ""],
-        ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 2'):
-        [0, "", ""],
-        ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 1'):
-        [1, "", ""]}
-
-    horcm_get_ldev_no_stdout = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "", ""]}
-
-    raidcom_get_ldev_no_nml = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : CVS : HDP\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 0\n\
-STS :"
-
-    horcm_get_ldev_no_nml = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_no_nml, ""]}
-
-    raidcom_get_ldev_no_open_v = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : CVS\n\
-VOL_ATTR : CVS : HDP\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 0\n\
-STS : NML"
-
-    horcm_get_ldev_no_open_v = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_no_open_v, ""]}
-
-    raidcom_get_ldev_no_hdp = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : CVS :\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 0\n\
-STS : NML"
-
-    horcm_get_ldev_no_hdp = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_no_hdp, ""]}
-
-    raidcom_get_ldev_pair = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : HORC : HDP\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 0\n\
-STS : NML"
-
-    horcm_get_ldev_pair = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_pair, ""]}
-
-    raidcom_get_ldev_permit = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : XXX : HDP\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 0\n\
-STS : NML"
-
-    horcm_get_ldev_permit = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_permit, ""]}
-
-    raidcom_get_ldev_invalid_size = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : CVS : HDP\n\
-VOL_Capacity(BLK) : 2097151\n\
-NUM_PORT : 0\n\
-STS : NML"
-
-    horcm_get_ldev_invalid_size = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_invalid_size, ""]}
-
-    raidcom_get_ldev_num_port = "DUMMY\n\
-LDEV : 1\n\
-DUMMY\n\
-DUMMY\n\
-VOL_TYPE : OPEN-V-CVS\n\
-VOL_ATTR : CVS : HDP\n\
-VOL_Capacity(BLK) : 2097152\n\
-NUM_PORT : 1\n\
-STS : NML"
-
-    horcm_get_ldev_num_port = {
-        ('raidcom', 'get ldev -ldev_id 1'):
-        [0, "%s" % raidcom_get_ldev_num_port, ""]}
-
-# The following information is passed on to tests, when creating a volume
-
-    _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
-               'provider_location': '0', 'name': 'test',
-               'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
-
-    test_volume = {'name': 'test_volume', 'size': 128,
-                   'id': 'test-volume',
-                   'provider_location': '1', 'status': 'available'}
-
-    test_volume_larger = {'name': 'test_volume', 'size': 256,
-                          'id': 'test-volume',
-                          'provider_location': '1', 'status': 'available'}
-
-    test_volume_error = {'name': 'test_volume', 'size': 256,
-                         'id': 'test-volume',
-                         'status': 'creating'}
-
-    test_volume_error2 = {'name': 'test_volume2', 'size': 128,
-                          'id': 'test-volume2',
-                          'provider_location': '1', 'status': 'available'}
-
-    test_volume_error3 = {'name': 'test_volume3', 'size': 128,
-                          'id': 'test-volume3',
-                          'volume_metadata': [{'key': 'type',
-                                               'value': 'V-VOL'}],
-                          'provider_location': '1', 'status': 'available'}
-
-    test_volume_error4 = {'name': 'test_volume4', 'size': 128,
-                          'id': 'test-volume2',
-                          'provider_location': '3', 'status': 'available'}
-
-    test_volume_error5 = {'name': 'test_volume', 'size': 256,
-                          'id': 'test-volume',
-                          'provider_location': '1', 'status': 'available'}
-
-    test_snapshot = {'volume_name': 'test', 'size': 128,
-                     'volume_size': 128, 'name': 'test-snap',
-                     'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
-                     'provider_location': '0', 'status': 'available'}
-
-    test_snapshot_error = {'volume_name': 'test', 'size': 128,
-                           'volume_size': 128, 'name': 'test-snap',
-                           'volume_id': 0, 'id': 'test-snap-0',
-                           'volume': _VOLUME,
-                           'provider_location': '2', 'status': 'available'}
-
-    test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
-                            'volume_size': 128, 'name': 'test-snap',
-                            'volume_id': 0, 'id': 'test-snap-0',
-                            'volume': _VOLUME,
-                            'provider_location': '1', 'status': 'available'}
-
-    SERIAL_NUM = '210944'
-    test_existing_ref = {'ldev': '1', 'serial_number': SERIAL_NUM}
-    test_existing_none_ldev_ref = {'ldev': None,
-                                   'serial_number': SERIAL_NUM}
-    test_existing_invalid_ldev_ref = {'ldev': 'AAA',
-                                      'serial_number': SERIAL_NUM}
-    test_existing_no_ldev_ref = {'serial_number': SERIAL_NUM}
-    test_existing_none_serial_ref = {'ldev': '1', 'serial_number': None}
-    test_existing_invalid_serial_ref = {'ldev': '1', 'serial_number': '999999'}
-    test_existing_no_serial_ref = {'ldev': '1'}
-
-    def __init__(self, *args, **kwargs):
-        super(HBSDHORCMFCDriverTest, self).__init__(*args, **kwargs)
-
-    @mock.patch.object(utils, 'brick_get_connector_properties',
-                       return_value={'ip': '127.0.0.1',
-                                     'wwpns': ['12345678912345aa']})
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(utils, 'execute',
-                       return_value=['%s' % raidqry_result, ''])
-    def setUp(self, arg1, arg2, arg3, arg4):
-        super(HBSDHORCMFCDriverTest, self).setUp()
-        self._setup_config()
-        self._setup_driver()
-        self.driver.check_param()
-        self.driver.common.pair_flock = hbsd_basiclib.NopLock()
-        self.driver.common.command = hbsd_horcm.HBSDHORCM(self.configuration)
-        self.driver.common.command.horcmgr_flock = hbsd_basiclib.NopLock()
-        self.driver.common.create_lock_file()
-        self.driver.common.command.connect_storage()
-        self.driver.max_hostgroups = \
-            self.driver.common.command.get_max_hostgroups()
-        self.driver.add_hostgroup()
-        self.driver.output_param_to_log()
-        self.driver.do_setup_status.set()
-
-    def _setup_config(self):
-        self.configuration = mock.Mock(conf.Configuration)
-        self.configuration.hitachi_pool_id = 30
-        self.configuration.hitachi_thin_pool_id = 31
-        self.configuration.hitachi_target_ports = "CL1-A"
-        self.configuration.hitachi_debug_level = 0
-        self.configuration.hitachi_serial_number = "None"
-        self.configuration.hitachi_unit_name = None
-        self.configuration.hitachi_group_request = True
-        self.configuration.hitachi_group_range = None
-        self.configuration.hitachi_zoning_request = False
-        self.configuration.config_group = "None"
-        self.configuration.hitachi_ldev_range = "0-1"
-        self.configuration.hitachi_default_copy_method = 'FULL'
-        self.configuration.hitachi_copy_check_interval = 1
-        self.configuration.hitachi_async_copy_check_interval = 1
-        self.configuration.hitachi_copy_speed = 3
-        self.configuration.hitachi_horcm_add_conf = True
-        self.configuration.hitachi_horcm_numbers = "409,419"
-        self.configuration.hitachi_horcm_user = "user"
-        self.configuration.hitachi_horcm_password = "pasword"
-        self.configuration.hitachi_horcm_resource_lock_timeout = 600
-
-    def _setup_driver(self):
-        self.driver = hbsd_fc.HBSDFCDriver(
-            configuration=self.configuration)
-        context = None
-        db = None
-        self.driver.common = hbsd_common.HBSDCommon(
-            self.configuration, self.driver, context, db)
-
-# API test cases
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_create_volume(self, arg1, arg2, arg3):
-        """test create_volume."""
-        ret = self.driver.create_volume(self._VOLUME)
-        vol = self._VOLUME.copy()
-        vol['provider_location'] = ret['provider_location']
-        self.assertEqual('1', vol['provider_location'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_create_volume_error(self, arg1, arg2, arg3):
-        """test create_volume."""
-        self.assertRaises(exception.HBSDError, self.driver.create_volume,
-                          self.test_volume_error)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_get_volume_stats(self, arg1, arg2):
-        """test get_volume_stats."""
-        stats = self.driver.get_volume_stats(True)
-        self.assertEqual('Hitachi', stats['vendor_name'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_get_volume_stats_error(self, arg1, arg2):
-        """test get_volume_stats."""
-        self.configuration.hitachi_pool_id = 29
-        stats = self.driver.get_volume_stats(True)
-        self.assertEqual({}, stats)
-        self.configuration.hitachi_pool_id = 30
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_extend_volume(self, arg1, arg2, arg3, arg4):
-        """test extend_volume."""
-        self.driver.extend_volume(self._VOLUME, 256)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_extend_volume_error(self, arg1, arg2, arg3, arg4):
-        """test extend_volume."""
-        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
-                          self.test_volume_error3, 256)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_delete_volume(self, arg1, arg2, arg3, arg4):
-        """test delete_volume."""
-        self.driver.delete_volume(self._VOLUME)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_delete_volume_error(self, arg1, arg2, arg3, arg4):
-        """test delete_volume."""
-        self.driver.delete_volume(self.test_volume_error4)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
-                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
-        """test create_snapshot."""
-        ret = self.driver.create_volume(self._VOLUME)
-        ret = self.driver.create_snapshot(self.test_snapshot)
-        self.assertEqual('1', ret['provider_location'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
-                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5, arg6,
-                                   arg7):
-        """test create_snapshot."""
-        ret = self.driver.create_volume(self.test_volume)
-        ret = self.driver.create_snapshot(self.test_snapshot_error)
-        self.assertEqual('1', ret['provider_location'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_delete_snapshot(self, arg1, arg2, arg3, arg4):
-        """test delete_snapshot."""
-        self.driver.delete_snapshot(self.test_snapshot)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_delete_snapshot_error(self, arg1, arg2, arg3, arg4):
-        """test delete_snapshot."""
-        self.assertRaises(exception.HBSDCmdError,
-                          self.driver.delete_snapshot,
-                          self.test_snapshot_error)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_volume_from_snapshot(self, arg1, arg2, arg3, arg4, arg5):
-        """test create_volume_from_snapshot."""
-        vol = self.driver.create_volume_from_snapshot(self.test_volume,
-                                                      self.test_snapshot)
-        self.assertIsNotNone(vol)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3, arg4,
-                                               arg5):
-        """test create_volume_from_snapshot."""
-        self.assertRaises(exception.HBSDError,
-                          self.driver.create_volume_from_snapshot,
-                          self.test_volume_error5, self.test_snapshot_error2)
-        return
-
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4, arg5, arg6):
-        """test create_cloned_volume."""
-        vol = self.driver.create_cloned_volume(self.test_volume,
-                                               self._VOLUME)
-        self.assertEqual('1', vol['provider_location'])
-        return
-
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume')
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_cloned_volume_larger_size(self, arg1, arg2, arg3, arg4,
-                                              arg5, arg6, arg7):
-        """test create_cloned_volume."""
-        vol = self.driver.create_cloned_volume(self.test_volume_larger,
-                                               self._VOLUME)
-        self.assertEqual('1', vol['provider_location'])
-        arg5.assert_called_once_with(self.test_volume_larger,
-                                     self.test_volume_larger['size'])
-        return
-
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4, arg5,
-                                        arg6):
-        """test create_cloned_volume."""
-        self.assertRaises(exception.HBSDCmdError,
-                          self.driver.create_cloned_volume,
-                          self.test_volume, self.test_volume_error2)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_initialize_connection(self, arg1, arg2):
-        """test initialize connection."""
-        connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
-                     'ip': '127.0.0.1'}
-        rc = self.driver.initialize_connection(self._VOLUME, connector)
-        self.assertEqual('fibre_channel', rc['driver_volume_type'])
-        self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
-        self.assertEqual(0, rc['data']['target_lun'])
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_initialize_connection_error(self, arg1, arg2):
-        """test initialize connection."""
-        connector = {'wwpns': ['12345678912345bb'], 'ip': '127.0.0.2'}
-        self.assertRaises(exception.HBSDError,
-                          self.driver.initialize_connection,
-                          self._VOLUME, connector)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_terminate_connection(self, arg1, arg2):
-        """test terminate connection."""
-        connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
-                     'ip': '127.0.0.1'}
-        rc = self.driver.terminate_connection(self._VOLUME, connector)
-        self.assertEqual('fibre_channel', rc['driver_volume_type'])
-        self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_terminate_connection_error(self, arg1, arg2):
-        """test terminate connection."""
-        connector = {'ip': '127.0.0.1'}
-        self.assertRaises(exception.HBSDError,
-                          self.driver.terminate_connection,
-                          self._VOLUME, connector)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_manage_existing(self, arg1, arg2):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref)
-        self.assertEqual(1, rc['provider_location'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        size = self.driver.manage_existing_get_size(self._VOLUME,
-                                                    self.test_existing_ref)
-        self.assertEqual(1, size)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_none_ldev_ref(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_none_ldev_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_invalid_ldev_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_no_ldev_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_none_serial_ref(self, arg1, arg2,
-                                                      arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_none_serial_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_invalid_serial_ref(self, arg1, arg2,
-                                                         arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_invalid_serial_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_no_serial_ref(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_no_serial_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
-                       return_value=[0, "", ""])
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_unmanage(self, arg1, arg2, arg3, arg4):
-        self.driver.unmanage(self._VOLUME)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom)
-    def test_unmanage_busy(self, arg1, arg2):
-        self.assertRaises(exception.HBSDVolumeIsBusy,
-                          self.driver.unmanage, self.test_volume_error3)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_no_stdout)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_no_stdout(self, arg1, arg2,
-                                                         arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_no_nml)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_no_nml(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_no_open_v)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_no_open_v(self, arg1, arg2,
-                                                         arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_no_hdp)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_no_hdp(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_pair)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_pair(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_permit)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_permit(self, arg1, arg2, arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_invalid_size)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_invalid_size(self, arg1, arg2,
-                                                            arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
-                       side_effect=_exec_raidcom_get_ldev_num_port)
-    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
-    def test_manage_existing_get_size_get_ldev_num_port(self, arg1, arg2,
-                                                        arg3):
-        self.configuration.hitachi_serial_number = self.SERIAL_NUM
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size, self._VOLUME,
-                          self.test_existing_ref)
-
-    def test_invalid_resource_lock_timeout_below_limit(self):
-        self.configuration.hitachi_horcm_resource_lock_timeout = -1
-        self.assertRaises(exception.HBSDError, self.driver.check_param)
-
-    def test_invalid_resource_lock_timeout_over_limit(self):
-        self.configuration.hitachi_horcm_resource_lock_timeout = 7201
-        self.assertRaises(exception.HBSDError, self.driver.check_param)
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py
deleted file mode 100644
index 147d6ba33de..00000000000
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py
+++ /dev/null
@@ -1,618 +0,0 @@
-# Copyright (C) 2014, Hitachi, Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""
-Self test for Hitachi Block Storage Driver
-"""
-
-import mock
-
-from cinder import exception
-from cinder import test
-from cinder.volume import configuration as conf
-from cinder.volume.drivers.hitachi import hbsd_basiclib
-from cinder.volume.drivers.hitachi import hbsd_common
-from cinder.volume.drivers.hitachi import hbsd_fc
-from cinder.volume.drivers.hitachi import hbsd_snm2
-
-
-def _exec_hsnm(*args, **kargs):
-    return HBSDSNM2FCDriverTest.hsnm_vals.get(args)
-
-
-def _exec_hsnm_get_lu_ret_err(*args, **kargs):
-    return HBSDSNM2FCDriverTest.hsnm_get_lu_ret_err.get(args)
-
-
-def _exec_hsnm_get_lu_vol_type_err(*args, **kargs):
-    return HBSDSNM2FCDriverTest.hsnm_get_lu_vol_type_err.get(args)
-
-
-def _exec_hsnm_get_lu_dppool_err(*args, **kargs):
-    return HBSDSNM2FCDriverTest.hsnm_get_lu_dppool_err.get(args)
-
-
-def _exec_hsnm_get_lu_size_err(*args, **kargs):
-    return HBSDSNM2FCDriverTest.hsnm_get_lu_size_err.get(args)
-
-
-def _exec_hsnm_get_lu_num_port_err(*args, **kargs):
-    return HBSDSNM2FCDriverTest.hsnm_get_lu_num_port_err.get(args)
-
-
-class HBSDSNM2FCDriverTest(test.TestCase):
-    """Test HBSDSNM2FCDriver."""
-
-    audppool_result = " DP RAID \
- Current Utilization Current Over Replication\
- Available Current Replication Rotational \
- \
- Stripe \
- Needing Preparation\n\
- Pool Tier Mode Level Total Capacity Consumed Capacity \
- Percent Provisioning Percent Capacity \
-Utilization Percent Type Speed Encryption Status \
- \
-Reconstruction Progress Size Capacity\n\
- 30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
- 1% 24835% 532.0 GB \
- 1% SAS 10000rpm N/A Normal \
- N/A \
- 256KB 0.0 GB"
-
-    aureplicationlocal_result = "Pair Name LUN Pair \
-LUN Status Copy Type Group \
- Point-in-Time MU Number\n\
- 0 10 0 Split( 99%) \
-ShadowImage ---:Ungrouped N/A\
- "
-
-    auluref_result = " Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097152 blocks 256KB 0 0 Enable 0 Normal"
-
-    auluref_result1 = " Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
-
-    auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
-Name Port Name Host Group\n\
-HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
- Assigned WWN\n Name Port Name \
-Host Group\n abcdefg 10000000C97BCE7A \
-001:HBSD-01"
-
-    aufibre1_result = "Port Information\n\
- Port Address\n CTL Port\
- Node Name Port Name Setting Current\n 0 0 \
-50060E801053C2E0 50060E801053C2E0 0000EF 272700"
-
-    auhgmap_result = "Mapping Mode = ON\nPort Group \
- H-LUN LUN\n 00 001:HBSD-00 0 1000"
-
-    hsnm_vals = {
-        ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
-        ('aureplicationlocal',
-         '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
-        [0, "", ""],
-        ('aureplicationlocal',
-         '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
-        [1, "", ""],
-        ('aureplicationlocal', '-unit None -refer -pvol 1'):
-        [0, "%s" % aureplicationlocal_result, ""],
-        ('aureplicationlocal', '-unit None -refer -pvol 3'):
-        [1, "", "DMEC002015"],
-        ('aureplicationlocal', '-unit None -refer -svol 3'):
-        [1, "", "DMEC002015"],
-        ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
-        [0, "", ""],
-        ('auluchgsize', '-unit None -lu 1 -size 256g'):
-        [0, "", ""],
-        ('auludel', '-unit None -lu 1 -f'): [0, 0, ""],
-        ('auludel', '-unit None -lu 3 -f'): [1, 0, ""],
-        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""],
-        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
-        ('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
-        ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""],
-        ('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""],
-        ('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""],
-        ('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""],
-        ('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]}
-
-    auluref_ret_err = "Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097152 blocks 256KB 0 0 Enable 0 Normal"
-
-    hsnm_get_lu_ret_err = {
-        ('auluref', '-unit None -lu 0'): [1, "%s" % auluref_ret_err, ""],
-    }
-
-    auluref_vol_type_err = "Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
-
-    hsnm_get_lu_vol_type_err = {
-        ('auluref', '-unit None -lu 0'):
-        [0, "%s" % auluref_vol_type_err, ""],
-    }
-
-    auluref_dppool_err = "Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097152 blocks 256KB 0 N/A Enable 0 Normal"
-
-    hsnm_get_lu_dppool_err = {
-        ('auluref', '-unit None -lu 0'):
-        [0, "%s" % auluref_dppool_err, ""],
-    }
-
-    auluref_size_err = "Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097151 blocks 256KB N/A 0 Enable 0 Normal"
-    hsnm_get_lu_size_err = {
-        ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_size_err, ""],
-    }
-
-    auluref_num_port_err = "Stripe RAID DP Tier \
- RAID Rotational Number\n\
- LU Capacity Size Group Pool Mode Level Type\
- Speed of Paths Status\n\
- 0 2097152 blocks 256KB 0 0 Enable 1 Normal"
-
-    hsnm_get_lu_num_port_err = {
-        ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_num_port_err, ""],
-    }
-
-# The following information is passed on to tests, when creating a volume
-
-    _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
-               'provider_location': '1', 'name': 'test',
-               'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
-
-    test_volume = {'name': 'test_volume', 'size': 128,
-                   'id': 'test-volume-0',
-                   'provider_location': '1', 'status': 'available'}
-
-    test_volume_larger = {'name': 'test_volume', 'size': 256,
-                          'id': 'test-volume-0',
-                          'provider_location': '1', 'status': 'available'}
-
-    test_volume_error = {'name': 'test_volume_error', 'size': 256,
-                         'id': 'test-volume-error',
-                         'provider_location': '3', 'status': 'available'}
-
-    test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
-                          'id': 'test-volume-error',
-                          'provider_location': None, 'status': 'available'}
-
-    test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
-                          'id': 'test-volume-error',
-                          'provider_location': '1', 'status': 'available'}
-
-    test_volume_error3 = {'name': 'test_volume3', 'size': 128,
-                          'id': 'test-volume3',
-                          'volume_metadata': [{'key': 'type',
-                                               'value': 'V-VOL'}],
-                          'provider_location': '1', 'status': 'available'}
-
-    test_volume_error4 = {'name': 'test_volume4', 'size': 128,
-                          'id': 'test-volume2',
-                          'provider_location': '3', 'status': 'available'}
-
-    test_snapshot = {'volume_name': 'test', 'size': 128,
-                     'volume_size': 128, 'name': 'test-snap',
-                     'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
-                     'provider_location': '1', 'status': 'available'}
-
-    test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
-                            'volume_size': 128, 'name': 'test-snap',
-                            'volume_id': 0, 'id': 'test-snap-0',
-                            'volume': test_volume_error,
-                            'provider_location': None, 'status': 'available'}
-
-    UNIT_NAME = 'HUS110_91122819'
-    test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME}
-    test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME}
-    test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME}
-    test_existing_no_ldev_ref = {'unit_name': UNIT_NAME}
-    test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None}
-    test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'}
-    test_existing_no_unit_ref = {'ldev': '0'}
-
-    def __init__(self, *args, **kwargs):
-        super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs)
-
-    def setUp(self):
-        super(HBSDSNM2FCDriverTest, self).setUp()
-        self._setup_config()
-        self._setup_driver()
-
-    def _setup_config(self):
-        self.configuration = mock.Mock(conf.Configuration)
-        self.configuration.hitachi_pool_id = 30
-        self.configuration.hitachi_target_ports = "00"
-        self.configuration.hitachi_debug_level = 0
-        self.configuration.hitachi_serial_number = "None"
-        self.configuration.hitachi_unit_name = "None"
-        self.configuration.hitachi_group_request = False
-        self.configuration.hitachi_zoning_request = False
-        self.configuration.config_group = "None"
-        self.configuration.hitachi_ldev_range = [0, 100]
-        self.configuration.hitachi_default_copy_method = 'SI'
-        self.configuration.hitachi_copy_check_interval = 1
-        self.configuration.hitachi_copy_speed = 3
-
-    def _setup_driver(self):
-        self.driver = hbsd_fc.HBSDFCDriver(
-            configuration=self.configuration)
-        context = None
-        db = None
-        self.driver.common = hbsd_common.HBSDCommon(
-            self.configuration, self.driver, context, db)
-        self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
-        self.driver.common.pair_flock = \
-            self.driver.common.command.set_pair_flock()
-        self.driver.common.horcmgr_flock = \
-            self.driver.common.command.set_horcmgr_flock()
-        self.driver.do_setup_status.set()
-
-# API test cases
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_create_volume(self, arg1, arg2, arg3):
-        """test create_volume."""
-        ret = self.driver.create_volume(self._VOLUME)
-        vol = self._VOLUME.copy()
-        vol['provider_location'] = ret['provider_location']
-        self.assertEqual('1', vol['provider_location'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_create_volume_error(self, arg1, arg2, arg3):
-        """test create_volume."""
-        self.assertRaises(exception.HBSDCmdError,
-                          self.driver.create_volume,
-                          self.test_volume_error)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_get_volume_stats(self, arg1, arg2):
-        """test get_volume_stats."""
-        stats = self.driver.get_volume_stats(True)
-        self.assertEqual('Hitachi', stats['vendor_name'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_get_volume_stats_error(self, arg1, arg2):
-        """test get_volume_stats."""
-        self.configuration.hitachi_pool_id = 29
-        stats = self.driver.get_volume_stats(True)
-        self.assertEqual({}, stats)
-        self.configuration.hitachi_pool_id = 30
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_extend_volume(self, arg1, arg2):
-        """test extend_volume."""
-        self.driver.extend_volume(self._VOLUME, 256)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_extend_volume_error(self, arg1, arg2):
-        """test extend_volume."""
-        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
-                          self.test_volume_error3, 256)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_delete_volume(self, arg1, arg2):
-        """test delete_volume."""
-        self.driver.delete_volume(self._VOLUME)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_delete_volume_error(self, arg1, arg2):
-        """test delete_volume."""
-        self.assertRaises(exception.HBSDCmdError,
-                          self.driver.delete_volume,
-                          self.test_volume_error4)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
-                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
-        """test create_snapshot."""
-        ret = self.driver.create_volume(self._VOLUME)
-        ret = self.driver.create_snapshot(self.test_snapshot)
-        self.assertEqual('1', ret['provider_location'])
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
-                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=test_volume_error)
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
-        """test create_snapshot."""
-        self.assertRaises(exception.HBSDCmdError,
-                          self.driver.create_snapshot,
-                          self.test_snapshot_error2)
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_delete_snapshot(self, arg1, arg2):
-        """test delete_snapshot."""
-        self.driver.delete_snapshot(self.test_snapshot)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_delete_snapshot_error(self, arg1, arg2):
-        """test delete_snapshot."""
-        self.driver.delete_snapshot(self.test_snapshot_error2)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
-        """test create_volume_from_snapshot."""
-        vol = self.driver.create_volume_from_snapshot(self._VOLUME,
-                                                      self.test_snapshot)
-        self.assertIsNotNone(vol)
-        return
-
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
-        """test create_volume_from_snapshot."""
-        self.assertRaises(exception.HBSDError,
-                          self.driver.create_volume_from_snapshot,
-                          self.test_volume_error2, self.test_snapshot)
-        return
-
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
-        """test create_cloned_volume."""
-        vol = self.driver.create_cloned_volume(self._VOLUME,
-                                               self.test_volume)
-        self.assertIsNotNone(vol)
-        return
-
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
-                       return_value=_VOLUME)
-    @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume')
-    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
-    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
-    def test_create_cloned_volume_larger(self, arg1, arg2, arg3, arg4, arg5):
-        """test create_cloned_volume."""
-        vol = self.driver.create_cloned_volume(self.test_volume_larger,
-                                               self._VOLUME)
-        self.assertIsNotNone(vol)
-        arg3.assert_called_once_with(self.test_volume_larger,
-                                     self.test_volume_larger['size'])
-        return
-
-    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
-                       return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error1) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - self.assertRaises(exception.HBSDError, - self.driver.create_cloned_volume, - self._VOLUME, self.test_volume_error1) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection(self, arg1, arg2): - """test initialize connection.""" - connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'} - rc = self.driver.initialize_connection(self._VOLUME, connector) - self.assertEqual('fibre_channel', rc['driver_volume_type']) - self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) - self.assertEqual(1, rc['data']['target_lun']) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection_error(self, arg1, arg2): - """test initialize connection.""" - connector = {'wwpns': 'x', 'ip': '0xc0a80100'} - self.assertRaises(exception.HBSDError, - self.driver.initialize_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection(self, arg1): - """test terminate connection.""" - connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'} - rc = self.driver.terminate_connection(self._VOLUME, connector) - self.assertEqual('fibre_channel', rc['driver_volume_type']) - self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection_error(self, arg1): - """test terminate connection.""" - connector = {'ip': '0xc0a80100'} - self.assertRaises(exception.HBSDError, - self.driver.terminate_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_manage_existing(self, arg1, arg2): - rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref) - self.assertEqual(0, rc['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - size = self.driver.manage_existing_get_size(self._VOLUME, - self.test_existing_ref) - self.assertEqual(1, size) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - 
def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_ret_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_ret_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_vol_type_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_vol_type_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_dppool_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_dppool_err(self, arg1, arg2, arg3): - 
self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_size_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_size_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_num_port_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_num_port_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage(self, arg1, arg2): - self.driver.unmanage(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage_busy(self, arg1, arg2): - self.assertRaises(exception.HBSDVolumeIsBusy, - self.driver.unmanage, self.test_volume_error3) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py deleted file mode 100644 index 28d64e3f951..00000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py +++ /dev/null @@ -1,607 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Self test for Hitachi Block Storage Driver -""" - -import mock - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hbsd_basiclib -from cinder.volume.drivers.hitachi import hbsd_common -from cinder.volume.drivers.hitachi import hbsd_iscsi -from cinder.volume.drivers.hitachi import hbsd_snm2 - - -def _exec_hsnm(*args, **kargs): - return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args) - - -def _exec_hsnm_init(*args, **kargs): - return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args) - - -class HBSDSNM2ISCSIDriverTest(test.TestCase): - """Test HBSDSNM2ISCSIDriver.""" - - audppool_result = " DP RAID \ - Current Utilization Current Over Replication\ - Available Current Replication Rotational \ - \ - Stripe \ - Needing Preparation\n\ - Pool Tier Mode Level Total Capacity Consumed Capacity \ - Percent Provisioning Percent Capacity \ -Utilization Percent Type Speed Encryption Status \ - \ -Reconstruction Progress Size Capacity\n\ - 30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \ - 1% 24835% 532.0 GB \ - 1% SAS 10000rpm N/A Normal \ - N/A \ - 256KB 0.0 GB" - - aureplicationlocal_result = "Pair Name LUN Pair \ -LUN Status Copy Type Group \ - Point-in-Time MU Number\n\ - 0 10 0 Split( 99%) \ - ShadowImage ---:Ungrouped N/A\ - " - - auluref_result = " Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 0 Normal" - - auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \ -Name Port Name Host Group\n\ -HBSD-00 10000000C97BCE7A 001:HBSD-01\n\ - Assigned WWN\n Name Port Name \ -Host Group\n abcdefg 10000000C97BCE7A \ -001:HBSD-01" - - autargetini_result = "Port 00 Target Security ON\n\ - Target Name \ -iSCSI Name\n\ - 001:HBSD-01 \ -iqn" - - autargetini_result2 = "Port 00 Target Security ON\n\ - Target Name \ -iSCSI Name" - - autargetmap_result = "Mapping Mode = ON\n\ -Port Target H-LUN LUN\n\ - 00 001:HBSD-01 0 1000" - - auiscsi_result = "Port 00\n\ - Port Number : 3260\n\ - Keep Alive Timer[sec.] 
: 60\n\ - MTU : 1500\n\ - Transfer Rate : 1Gbps\n\ - Link Status : Link Up\n\ - Ether Address : 00:00:87:33:D1:3E\n\ - IPv4\n\ - IPv4 Address : 192.168.0.1\n\ - IPv4 Subnet Mask : 255.255.252.0\n\ - IPv4 Default Gateway : 0.0.0.0\n\ - IPv6 Status : Disable\n\ - Connecting Hosts : 0\n\ - Result : Normal\n\ - VLAN Status : Disable\n\ - VLAN ID : N/A\n\ - Header Digest : Enable\n\ - Data Digest : Enable\n\ - Window Scale : Disable" - - autargetdef_result = "Port 00\n\ - Authentication Mutual\n\ - Target Method CHAP Algorithm \ -Authentication\n\ - 001:T000 None --- ---\n\ - User Name : ---\n\ - iSCSI Name : iqn-target" - - hsnm_vals = { - ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): - [0, "", ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'): - [1, "", ""], - ('aureplicationlocal', '-unit None -refer -pvol 1'): - [0, "%s" % aureplicationlocal_result, ""], - ('aureplicationlocal', '-unit None -refer -pvol 3'): - [1, "", "DMEC002015"], - ('aureplicationlocal', '-unit None -refer -svol 3'): - [1, "", "DMEC002015"], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): - [0, "", ""], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'): - [1, "", ""], - ('auluchgsize', '-unit None -lu 1 -size 256g'): - [0, "", ""], - ('auludel', '-unit None -lu 1 -f'): [0, "", ""], - ('auludel', '-unit None -lu 3 -f'): [1, "", ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""], - ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], - ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""], - ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""], - ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""], - ('autargetini', '-unit None -refer'): - [0, "%s" % autargetini_result, ""], - ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'): - [0, "", ""], - ('autargetmap', '-unit None -refer'): - [0, "%s" % autargetmap_result, ""], - ('autargetdef', - '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \ --authmethod None'): - [0, "", ""], - ('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \ --iname iqnX.target -authmethod None'): - [1, "", ""], - ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \ --ReportFullPortalList enable'): - [0, "", ""], - ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""], - ('autargetdef', '-unit None -refer'): - [0, "%s" % autargetdef_result, ""]} - - hsnm_vals_init = { - ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): - [0, 0, ""], - ('aureplicationlocal', '-unit None -refer -pvol 1'): - [0, "%s" % aureplicationlocal_result, ""], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): - [0, 0, ""], - ('auluchgsize', '-unit None -lu 1 -size 256g'): - [0, 0, ""], - ('auludel', '-unit None -lu 1 -f'): [0, "", ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""], - ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], - ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""], - ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""], - ('autargetini', '-unit None -refer'): - [0, "%s" % autargetini_result2, ""], - ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'): 
- [0, "", ""], - ('autargetmap', '-unit None -refer'): - [0, "%s" % autargetmap_result, ""], - ('autargetdef', - '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \ --authmethod None'): - [0, "", ""], - ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \ --ReportFullPortalList enable'): - [0, "", ""], - ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""], - ('autargetdef', '-unit None -refer'): - [0, "%s" % autargetdef_result, ""], - ('auman', '-help'): - [0, "Version 27.50", ""]} - -# The following information is passed on to tests, when creating a volume - - _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', - 'provider_location': '1', 'name': 'test', - 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} - - test_volume = {'name': 'test_volume', 'size': 128, - 'id': 'test-volume-0', - 'provider_location': '1', 'status': 'available'} - - test_volume_larger = {'name': 'test_volume', 'size': 256, - 'id': 'test-volume-0', - 'provider_location': '1', 'status': 'available'} - - test_volume_error = {'name': 'test_volume_error', 'size': 256, - 'id': 'test-volume-error', - 'provider_location': '3', 'status': 'available'} - - test_volume_error1 = {'name': 'test_volume_error', 'size': 128, - 'id': 'test-volume-error', - 'provider_location': None, 'status': 'available'} - - test_volume_error2 = {'name': 'test_volume_error', 'size': 256, - 'id': 'test-volume-error', - 'provider_location': '1', 'status': 'available'} - - test_volume_error3 = {'name': 'test_volume3', 'size': 128, - 'id': 'test-volume3', - 'volume_metadata': [{'key': 'type', - 'value': 'V-VOL'}], - 'provider_location': '1', 'status': 'available'} - - test_volume_error4 = {'name': 'test_volume4', 'size': 128, - 'id': 'test-volume2', - 'provider_location': '3', 'status': 'available'} - - test_snapshot = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, - 'provider_location': '1', 'status': 'available'} - - test_snapshot_error2 = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', - 'volume': test_volume_error, - 'provider_location': None, 'status': 'available'} - - UNIT_NAME = 'HUS110_91122819' - test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME} - test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME} - test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME} - test_existing_no_ldev_ref = {'unit_name': UNIT_NAME} - test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None} - test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'} - test_existing_no_unit_ref = {'ldev': '0'} - - def __init__(self, *args, **kwargs): - super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs) - - @mock.patch.object(utils, 'brick_get_connector_properties', - return_value={'ip': '0.0.0.0', - 'initiator': 'iqn'}) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_init) - @mock.patch.object(utils, 'execute', - return_value=['', '']) - def setUp(self, args1, arg2, arg3, arg4): - super(HBSDSNM2ISCSIDriverTest, self).setUp() - self._setup_config() - self._setup_driver() - self.driver.check_param() - self.driver.common.create_lock_file() - self.driver.common.command.connect_storage() - self.driver.max_hostgroups = \ - self.driver.common.command.get_max_hostgroups() - self.driver.add_hostgroup() - 
self.driver.output_param_to_log() - self.driver.do_setup_status.set() - - def _setup_config(self): - self.configuration = mock.Mock(conf.Configuration) - self.configuration.hitachi_pool_id = 30 - self.configuration.hitachi_thin_pool_id = 31 - self.configuration.hitachi_target_ports = "00" - self.configuration.hitachi_debug_level = 0 - self.configuration.hitachi_serial_number = None - self.configuration.hitachi_unit_name = "None" - self.configuration.hitachi_group_request = True - self.configuration.hitachi_group_range = "0-1" - self.configuration.config_group = "None" - self.configuration.hitachi_ldev_range = "0-100" - self.configuration.hitachi_default_copy_method = 'FULL' - self.configuration.hitachi_copy_check_interval = 1 - self.configuration.hitachi_async_copy_check_interval = 1 - self.configuration.hitachi_copy_speed = 3 - self.configuration.hitachi_auth_method = None - self.configuration.hitachi_auth_user = "HBSD-CHAP-user" - self.configuration.hitachi_auth_password = "HBSD-CHAP-password" - self.configuration.hitachi_add_chap_user = "False" - - def _setup_driver(self): - self.driver = hbsd_iscsi.HBSDISCSIDriver( - configuration=self.configuration) - context = None - db = None - self.driver.common = hbsd_common.HBSDCommon( - self.configuration, self.driver, context, db) - self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration) - self.driver.common.horcmgr_flock = \ - self.driver.common.command.set_horcmgr_flock() - -# API test cases - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume(self, arg1, arg2, arg3): - """test create_volume.""" - ret = self.driver.create_volume(self._VOLUME) - vol = self._VOLUME.copy() - vol['provider_location'] = ret['provider_location'] - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_error(self, arg1, arg2, arg3): - """test create_volume.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.create_volume, - self.test_volume_error) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_get_volume_stats(self, arg1, arg2): - """test get_volume_stats.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_get_volume_stats_error(self, arg1, arg2): - """test get_volume_stats.""" - self.configuration.hitachi_pool_id = 29 - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - self.configuration.hitachi_pool_id = 30 - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_extend_volume(self, arg1, arg2): - """test extend_volume.""" - self.driver.extend_volume(self._VOLUME, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_extend_volume_error(self, arg1, arg2): - """test extend_volume.""" - self.assertRaises(exception.HBSDError, self.driver.extend_volume, - 
self.test_volume_error3, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_volume(self, arg1, arg2): - """test delete_volume.""" - self.driver.delete_volume(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_volume_error(self, arg1, arg2): - """test delete_volume.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.delete_volume, - self.test_volume_error4) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5): - """test create_snapshot.""" - ret = self.driver.create_volume(self._VOLUME) - ret = self.driver.create_snapshot(self.test_snapshot) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5): - """test create_snapshot.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.create_snapshot, - self.test_snapshot_error2) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_snapshot(self, arg1, arg2): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_snapshot_error(self, arg1, arg2): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot_error2) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_from_snapshot(self, arg1, arg2, arg3): - """test create_volume_from_snapshot.""" - vol = self.driver.create_volume_from_snapshot(self._VOLUME, - self.test_snapshot) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3): - """test create_volume_from_snapshot.""" - self.assertRaises(exception.HBSDError, - self.driver.create_volume_from_snapshot, - self.test_volume_error2, self.test_snapshot) - return - - 
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self._VOLUME, - self.test_snapshot) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume_larger(self, arg1, arg2, arg3, arg4, arg5): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self.test_volume_larger, - self._VOLUME) - self.assertIsNotNone(vol) - arg3.assert_called_once_with(self.test_volume_larger, - self.test_volume_larger['size']) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error1) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - self.assertRaises(exception.HBSDError, - self.driver.create_cloned_volume, - self._VOLUME, self.test_volume_error1) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection(self, arg1, arg2): - """test initialize connection.""" - connector = { - 'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': - 'iqn'} - rc = self.driver.initialize_connection(self._VOLUME, connector) - self.assertEqual('iscsi', rc['driver_volume_type']) - self.assertEqual('iqn-target', rc['data']['target_iqn']) - self.assertEqual(1, rc['data']['target_lun']) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection_error(self, arg1, arg2): - """test initialize connection.""" - connector = { - 'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': - 'iqnX'} - self.assertRaises(exception.HBSDError, - self.driver.initialize_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection(self, arg1): - """test terminate connection.""" - connector = { - 'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': - 'iqn'} - self.driver.terminate_connection(self._VOLUME, connector) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection_error(self, arg1): - """test terminate connection.""" - connector = {'ip': '0.0.0.0'} - self.assertRaises(exception.HBSDError, - self.driver.terminate_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def 
test_manage_existing(self, arg1, arg2): - rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref) - self.assertEqual(0, rc['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - size = self.driver.manage_existing_get_size(self._VOLUME, - self.test_existing_ref) - self.assertEqual(1, size) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_unit_ref) - - 
@mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage(self, arg1, arg2): - self.driver.unmanage(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage_busy(self, arg1, arg2): - self.assertRaises(exception.HBSDVolumeIsBusy, - self.driver.unmanage, self.test_volume_error3) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py deleted file mode 100644 index 3285c2c8d67..00000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock -import os -import paramiko -import time - -from oslo_concurrency import processutils as putils - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume.drivers.hitachi import hnas_backend - - -evsfs_list = "\n\ -FS ID FS Label FS Permanent ID EVS ID EVS Label\n\ ------ ----------- ------------------ ------ ---------\n\ - 1026 gold 0xaadee0e035cfc0b7 1 EVS-Manila\n\ - 1029 test_hdp 0xaadee09634acfcac 1 EVS-Manila\n\ - 1030 fs-cinder 0xaadfcf742fba644e 2 EVS-Cinder\n\ - 1031 cinder2 0xaadfcf7e0769a6bc 3 EVS-Test\n\ - 1024 fs02-husvm 0xaac8715e2e9406cd 3 EVS-Test\n\ -\n" - -cluster_getmac = "cluster MAC: 83-68-96-AA-DA-5D" - -version = "\n\ -Model: HNAS 4040 \n\n\ -Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\n\ -Hardware: NAS Platform (M2SEKW1339109) \n\n\ -board MMB1 \n\ -mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\n\ -board MFB1 \n\ -mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \ - RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \ - WD v00E2 DI v001A FC v0002 \n\ -Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\n\ -board MCP \n\ -Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\ -\n" - -evsipaddr = "\n\ -EVS Type Label IP Address Mask Port \n\ ----------- --------------- ------------------ --------------- ------\n\ -admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\ -admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\ -evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ -evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ -evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\ -\n" - -df_f = "\n\ -ID Label EVS Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- --- ------ ------------ --------- ------- ------------ \ ----- -------- --------- --------------------\n\ -1025 fs-cinder 2 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -df_f_tb = "\n\ -ID Label EVS Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- --- ------ ------------ --------- ------- ------------ \ ----- -------- --------- 
--------------------\n\ -1025 fs-cinder 2 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -nfs_export = "\n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: fs-cinder \n\ -File system size: 250 GB \n\ -File system free space: 228 GB \n\ -File system state: \n\ -formatted = Yes \n\ -mounted = Yes \n\ -failed = No \n\ -thin provisioned = No \n\ -Access snapshots: Yes \n\ -Display snapshots: Yes \n\ -Read Caching: Disabled \n\ -Disaster recovery setting: \n\ -Recovered = No \n\ -Transfer setting = Use file system default \n\n\ -Export configuration: \n\ -127.0.0.1 \n\ -\n" - -df_f_single_evs = "\n\ -ID Label Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- ------ ------------ --------- ------- ------------ \ ----- -------- --------- --------------------\n\ -1025 fs-cinder 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -nfs_export_tb = "\n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: fs-cinder \n\ -File system size: 250 TB \n\ -File system free space: 228 TB \n\ -\n" - -nfs_export_not_available = "\n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: fs-cinder \n\ - *** not available *** \n\ -\n" - -evs_list = "\n\ -Node EVS ID Type Label Enabled Status IP Address Port \n\ ----- ------ ------- --------------- ------- ------ ------------------- ---- \n\ - 1 Cluster hnas4040 Yes Online 192.0.2.200 eth1 \n\ - 1 0 Admin hnas4040 Yes Online 192.0.2.2 eth1 \n\ - 172.24.44.15 eth0 \n\ - 172.24.49.101 ag2 \n\ - 1 1 Service EVS-Manila Yes Online 172.24.49.32 ag2 \n\ - 172.24.48.32 ag4 \n\ - 1 2 Service EVS-Cinder Yes Online 172.24.49.21 ag2 \n\ - 1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\ -\n" - -lu_list = "Name : cinder-lu \n\ -Comment: \n\ -Path : /.cinder/cinder-lu \n\ -Size : 2 GB \n\ -File System : fs-cinder \n\ -File System Mounted : YES \n\ -Logical Unit Mounted: No" - -lu_list_tb = "Name : test-lu \n\ -Comment: \n\ -Path : /.cinder/test-lu \n\ -Size : 2 TB \n\ -File System : fs-cinder \n\ -File System Mounted : YES \n\ -Logical Unit Mounted: No" - -hnas_fs_list = "%(l1)s\n\n%(l2)s\n\n " % {'l1': lu_list, - 'l2': lu_list_tb} - -add_targetsecret = "Target created successfully." - -backend_opts = {'mgmt_ip0': '0.0.0.0', - 'cluster_admin_ip0': None, - 'ssh_port': '22', - 'username': 'supervisor', - 'password': 'supervisor', - 'ssh_private_key': 'test_key'} - -target_chap_disable = "\n\ -Alias : cinder-default \n\ -Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\ -Comment : \n\ -Secret : \n\ -Authentication : Disabled \n\ -Logical units : No logical units. \n\ -\n\ - LUN Logical Unit \n\ - ---- -------------------------------- \n\ - 0 cinder-lu \n\ - 1 volume-99da7ae7-1e7f-4d57-8bf... 
\n\ -\n\ -Access configuration: \n\ -" - -file_clone_stat = "Clone: /nfs_cinder/cinder-lu \n\ - SnapshotFile: FileHandle[00000000004010000d20116826ffffffffffffff] \n\ -\n\ - SnapshotFile: FileHandle[00000000004029000d81f26826ffffffffffffff] \n\ -" - -file_clone_stat_snap_file1 = "\ -FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\ -References: \n\ - Clone: /nfs_cinder/cinder-lu \n\ - Clone: /nfs_cinder/snapshot-lu-1 \n\ - Clone: /nfs_cinder/snapshot-lu-2 \n\ -" - -file_clone_stat_snap_file2 = "\ -FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\ -References: \n\ - Clone: /nfs_cinder/volume-not-used \n\ - Clone: /nfs_cinder/snapshot-1 \n\ - Clone: /nfs_cinder/snapshot-2 \n\ -" - -not_a_clone = "\ -file-clone-stat: failed to get predecessor snapshot-files: File is not a clone" - -file_relatives =\ - [' /nfs_cinder/snapshot-lu-1 ', - ' /nfs_cinder/snapshot-lu-2 ', - ' /nfs_cinder/volume-not-used ', - ' /nfs_cinder/snapshot-1 ', - ' /nfs_cinder/snapshot-2 '] - - -class HDSHNASBackendTest(test.TestCase): - - def __init__(self, *args, **kwargs): - super(HDSHNASBackendTest, self).__init__(*args, **kwargs) - - def setUp(self): - super(HDSHNASBackendTest, self).setUp() - self.hnas_backend = hnas_backend.HNASSSHBackend(backend_opts) - - def test_run_cmd(self): - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'execute') - self.mock_object(time, 'sleep') - self.mock_object(paramiko, 'SSHClient') - self.mock_object(paramiko.RSAKey, 'from_private_key_file') - self.mock_object(putils, 'ssh_execute', - return_value=(df_f, '')) - - out, err = self.hnas_backend._run_cmd('ssh', '0.0.0.0', - 'supervisor', 'supervisor', - 'df', '-a') - - self.assertIn('fs-cinder', out) - self.assertIn('WFS-2,128 DSBs', out) - - def test_run_cmd_retry_exception(self): - self.hnas_backend.cluster_admin_ip0 = '172.24.44.11' - - exceptions = [putils.ProcessExecutionError(stderr='Connection reset'), - putils.ProcessExecutionError(stderr='Failed to establish' - ' SSC connection'), - putils.ProcessExecutionError(stderr='Connection reset'), - putils.ProcessExecutionError(stderr='Connection reset'), - putils.ProcessExecutionError(stderr='Connection reset')] - - self.mock_object(os.path, 'isfile', - return_value=True) - self.mock_object(utils, 'execute') - self.mock_object(time, 'sleep') - self.mock_object(paramiko, 'SSHClient') - self.mock_object(paramiko.RSAKey, 'from_private_key_file') - self.mock_object(putils, 'ssh_execute', - side_effect=exceptions) - - self.assertRaises(exception.HNASConnError, self.hnas_backend._run_cmd, - 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', - '-a') - - def test_run_cmd_exception_without_retry(self): - self.mock_object(os.path, 'isfile', - return_value=True) - self.mock_object(utils, 'execute') - self.mock_object(time, 'sleep') - self.mock_object(paramiko, 'SSHClient') - self.mock_object(paramiko.RSAKey, 'from_private_key_file') - self.mock_object(putils, 'ssh_execute', - side_effect=putils.ProcessExecutionError( - stderr='Error')) - - self.assertRaises(putils.ProcessExecutionError, - self.hnas_backend._run_cmd, 'ssh', '0.0.0.0', - 'supervisor', 'supervisor', 'df', '-a') - - def test_get_version(self): - expected_out = { - 'hardware': 'NAS Platform (M2SEKW1339109)', - 'mac': '83-68-96-AA-DA-5D', - 'version': '11.2.3319.14', - 'model': 'HNAS 4040', - 'serial': 'B1339745' - } - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(cluster_getmac, ''), (version, '')]) - - out = self.hnas_backend.get_version() - - 
self.assertEqual(expected_out, out) - - def test_get_evs(self): - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsfs_list, '')) - - out = self.hnas_backend.get_evs('fs-cinder') - - self.assertEqual('2', out) - - def test_get_export_list(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(nfs_export, ''), - (evsfs_list, ''), - (evs_list, '')]) - - out = self.hnas_backend.get_export_list() - - self.assertEqual('fs-cinder', out[0]['fs']) - self.assertEqual(250.0, out[0]['size']) - self.assertEqual(228.0, out[0]['free']) - self.assertEqual('/export01-husvm', out[0]['path']) - - def test_get_export_list_data_not_available(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(nfs_export_not_available, ''), - (evsfs_list, ''), - (evs_list, '')]) - - out = self.hnas_backend.get_export_list() - - self.assertEqual('fs-cinder', out[0]['fs']) - self.assertEqual('/export01-husvm', out[0]['path']) - self.assertEqual(-1, out[0]['size']) - self.assertEqual(-1, out[0]['free']) - - def test_get_export_list_tb(self): - size = float(250 * 1024) - free = float(228 * 1024) - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(nfs_export_tb, ''), - (evsfs_list, ''), - (evs_list, '')]) - - out = self.hnas_backend.get_export_list() - - self.assertEqual('fs-cinder', out[0]['fs']) - self.assertEqual(size, out[0]['size']) - self.assertEqual(free, out[0]['free']) - self.assertEqual('/export01-husvm', out[0]['path']) - - def test_file_clone(self): - path1 = '/.cinder/path1' - path2 = '/.cinder/path2' - - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsfs_list, '')) - - self.hnas_backend.file_clone('fs-cinder', path1, path2) - - calls = [mock.call('evsfs', 'list'), mock.call('console-context', - '--evs', '2', - 'file-clone-create', - '-f', 'fs-cinder', - path1, path2)] - self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) - - def test_file_clone_wrong_fs(self): - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsfs_list, '')) - - self.assertRaises(exception.InvalidParameterValue, - self.hnas_backend.file_clone, 'fs-fake', 'src', - 'dst') - - def test_get_evs_info(self): - expected_out = {'evs_number': '1'} - expected_out2 = {'evs_number': '2'} - - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsipaddr, '')) - - out = self.hnas_backend.get_evs_info() - - self.hnas_backend._run_cmd.assert_called_with('evsipaddr', '-l') - self.assertEqual(expected_out, out['10.0.0.20']) - self.assertEqual(expected_out, out['172.24.44.20']) - self.assertEqual(expected_out2, out['172.24.44.21']) - - def test_get_fs_info(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('2', out['evs_id']) - self.assertEqual('fs-cinder', out['label']) - self.assertEqual('228', out['available_size']) - self.assertEqual('250', out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_fs_empty_return(self): - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=('Not mounted', '')) - - out = self.hnas_backend.get_fs_info('fs-cinder') - self.assertEqual({}, out) - - def test_get_fs_info_single_evs(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f_single_evs, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('fs-cinder', out['label']) - 
self.assertEqual('228', out['available_size']) - self.assertEqual('250', out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_fs_tb(self): - available_size = float(228 * 1024 ** 2) - total_size = float(250 * 1024 ** 2) - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f_tb, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('fs-cinder', out['label']) - self.assertEqual(str(available_size), out['available_size']) - self.assertEqual(str(total_size), out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_fs_single_evs_tb(self): - available_size = float(228 * 1024 ** 2) - total_size = float(250 * 1024 ** 2) - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f_tb, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('fs-cinder', out['label']) - self.assertEqual(str(available_size), out['available_size']) - self.assertEqual(str(total_size), out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_cloned_file_relatives(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), (file_clone_stat, ''), - (file_clone_stat_snap_file1, ''), - (file_clone_stat_snap_file2, '')]) - out = self.hnas_backend.get_cloned_file_relatives('cinder-lu', - 'fs-cinder') - self.assertEqual(file_relatives, out) - self.hnas_backend._run_cmd.assert_called_with('console-context', - '--evs', '2', - 'file-clone-stat-' - 'snapshot-file', - '-f', 'fs-cinder', - '00000000004029000d81' - 'f26826ffffffffffffff]') - - def test_get_cloned_file_relatives_not_clone_except(self): - exc = putils.ProcessExecutionError(stderr='File is not a clone') - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), exc]) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.hnas_backend.get_cloned_file_relatives, - 'cinder-lu', 'fs-cinder', True) - - def test_get_cloned_file_relatives_not_clone_no_except(self): - exc = putils.ProcessExecutionError(stderr='File is not a clone') - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), exc]) - - out = self.hnas_backend.get_cloned_file_relatives('cinder-lu', - 'fs-cinder') - - self.assertEqual([], out) - - def test_check_snapshot_parent_true(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), - (file_clone_stat, ''), - (file_clone_stat_snap_file1, ''), - (file_clone_stat_snap_file2, '')]) - out = self.hnas_backend.check_snapshot_parent('cinder-lu', - 'snapshot-lu-1', - 'fs-cinder') - - self.assertTrue(out) - - def test_check_snapshot_parent_false(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), - (file_clone_stat, ''), - (file_clone_stat_snap_file1, ''), - (file_clone_stat_snap_file2, '')]) - out = self.hnas_backend.check_snapshot_parent('cinder-lu', - 'snapshot-lu-3', - 'fs-cinder') - - self.assertFalse(out) - - def test_get_export_path(self): - export_out = '/export01-husvm' - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), (nfs_export, '')]) - - out = self.hnas_backend.get_export_path(export_out, 'fs-cinder') - - self.assertEqual(export_out, out) - self.hnas_backend._run_cmd.assert_called_with('console-context', - '--evs', '2', - 'nfs-export', 'list', - export_out) diff --git 
a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py deleted file mode 100644 index f94aea7f842..00000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py +++ /dev/null @@ -1,834 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock -import os - -from oslo_concurrency import processutils as putils -import socket - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hnas_backend as backend -from cinder.volume.drivers.hitachi import hnas_nfs as nfs -from cinder.volume.drivers.hitachi import hnas_utils -from cinder.volume.drivers import nfs as base_nfs -from cinder.volume import utils as vutils - -_VOLUME = {'name': 'cinder-volume', - 'id': fake.VOLUME_ID, - 'size': 128, - 'host': 'host1@hnas-nfs-backend#default', - 'volume_type': 'default', - 'provider_location': 'hnas'} - -_SNAPSHOT = { - 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', - 'id': fake.SNAPSHOT_ID, - 'size': 128, - 'volume_type': None, - 'provider_location': 'hnas', - 'volume_size': 128, - 'volume': _VOLUME, - 'volume_name': _VOLUME['name'], - 'host': 'host1@hnas-iscsi-backend#silver', - 'volume_type_id': fake.VOLUME_TYPE_ID, -} - - -class HNASNFSDriverTest(test.TestCase): - """Test HNAS NFS volume driver.""" - - def __init__(self, *args, **kwargs): - super(HNASNFSDriverTest, self).__init__(*args, **kwargs) - - def instantiate_snapshot(self, snap): - snap = snap.copy() - snap['volume'] = fake_volume.fake_volume_obj( - None, **snap['volume']) - snapshot = fake_snapshot.fake_snapshot_obj( - None, expected_attrs=['volume'], **snap) - return snapshot - - def setUp(self): - super(HNASNFSDriverTest, self).setUp() - self.context = context.get_admin_context() - - self.volume = fake_volume.fake_volume_obj( - self.context, - **_VOLUME) - - self.snapshot = self.instantiate_snapshot(_SNAPSHOT) - - self.volume_type = fake_volume.fake_volume_type_obj( - None, - **{'name': 'silver'} - ) - self.clone = fake_volume.fake_volume_obj( - None, - **{'id': fake.VOLUME2_ID, - 'size': 128, - 'host': 'host1@hnas-nfs-backend#default', - 'volume_type': 'default', - 'provider_location': 'hnas'}) - - # xml parsed from utils - self.parsed_xml = { - 'username': 'supervisor', - 'password': 'supervisor', - 'hnas_cmd': 'ssc', - 'ssh_port': '22', - 'services': { - 'default': { - 'hdp': '172.24.49.21:/fs-cinder', - 'pool_name': 'default', - 'label': 'svc_0', - 'ctl': '1', - 'export': { - 'fs': 'fs-cinder', - 'path': '/export-cinder/volume' - } - }, - }, - 'cluster_admin_ip0': None, - 'ssh_private_key': None, - 'chap_enabled': 'True', - 
'mgmt_ip0': '172.17.44.15', - 'ssh_enabled': None - } - - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.hds_hnas_nfs_config_file = 'fake.xml' - - self.mock_object(hnas_utils, 'read_cinder_conf', - return_value=self.parsed_xml) - - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.max_over_subscription_ratio = 20.0 - self.configuration.reserved_percentage = 0 - self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml' - self.configuration.nfs_shares_config = 'fake_nfs_share.xml' - self.configuration.num_shell_tries = 2 - self.configuration.nfs_mount_point_base = '%state_path/mnt' - self.configuration.nfs_mount_options = None - - self.driver = nfs.HNASNFSDriver(configuration=self.configuration) - - def test_check_pool_and_share_no_default_configured(self): - nfs_shares = '172.24.49.21:/fs-cinder' - - self.mock_object(hnas_utils, 'get_pool', return_value='default') - - self.driver.config['services'] = { - 'silver': { - 'hdp': 'fs3', - 'iscsi_ip': '172.17.39.133', - 'iscsi_port': '3260', - 'port': '22', - 'volume_type': 'silver', - 'label': 'svc_1', - 'evs': '2', - 'tgt': { - 'alias': 'iscsi-test', - 'secret': 'itEpgB5gPefGhW2' - } - } - } - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver._check_pool_and_share, self.volume, - nfs_shares) - - def test_check_pool_and_share_mismatch_exception(self): - # passing a share that does not exist in config should raise an - # exception - nfs_shares = '172.24.49.21:/nfs_share' - - self.mock_object(hnas_utils, 'get_pool', return_value='default') - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver._check_pool_and_share, self.volume, - nfs_shares) - - def test_check_pool_and_share_type_mismatch_exception(self): - nfs_shares = '172.24.49.21:/fs-cinder' - self.volume.host = 'host1@hnas-nfs-backend#gold' - - # returning a pool different from 'default' should raise an exception - self.mock_object(hnas_utils, 'get_pool', return_value='default') - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver._check_pool_and_share, self.volume, - nfs_shares) - - def test_do_setup(self): - version_info = { - 'mac': '83-68-96-AA-DA-5D', - 'model': 'HNAS 4040', - 'version': '12.4.3924.11', - 'hardware': 'NAS Platform', - 'serial': 'B1339109', - } - export_list = [ - {'fs': 'fs-cinder', - 'name': '/fs-cinder', - 'free': 228.0, - 'path': '/fs-cinder', - 'evs': ['172.24.49.21'], - 'size': 250.0} - ] - - showmount = "Export list for 172.24.49.21: \n\ -/fs-cinder * \n\ -/shares/9bcf0bcc-8cc8-437e38bcbda9 127.0.0.1,10.1.0.5,172.24.44.141 \n\ -" - - self.mock_object(backend.HNASSSHBackend, 'get_version', - return_value=version_info) - self.mock_object(self.driver, '_load_shares_config') - self.mock_object(backend.HNASSSHBackend, 'get_export_list', - return_value=export_list) - self.mock_object(self.driver, '_execute', return_value=(showmount, '')) - - self.driver.do_setup(None) - - self.driver._execute.assert_called_with('showmount', '-e', - '172.24.49.21') - self.assertTrue(backend.HNASSSHBackend.get_export_list.called) - - def test_do_setup_execute_exception(self): - version_info = { - 'mac': '83-68-96-AA-DA-5D', - 'model': 'HNAS 4040', - 'version': '12.4.3924.11', - 'hardware': 'NAS Platform', - 'serial': 'B1339109', - } - - export_list = [ - {'fs': 'fs-cinder', - 'name': '/fs-cinder', - 'free': 228.0, - 'path': '/fs-cinder', - 'evs': ['172.24.49.21'], - 'size': 250.0} - ] - - self.mock_object(backend.HNASSSHBackend, 'get_version', - 
return_value=version_info) - self.mock_object(self.driver, '_load_shares_config') - self.mock_object(backend.HNASSSHBackend, 'get_export_list', - return_value=export_list) - self.mock_object(self.driver, '_execute', - side_effect=putils.ProcessExecutionError) - - self.assertRaises(putils.ProcessExecutionError, self.driver.do_setup, - None) - - def test_do_setup_missing_export(self): - version_info = { - 'mac': '83-68-96-AA-DA-5D', - 'model': 'HNAS 4040', - 'version': '12.4.3924.11', - 'hardware': 'NAS Platform', - 'serial': 'B1339109', - } - export_list = [ - {'fs': 'fs-cinder', - 'name': '/wrong-fs', - 'free': 228.0, - 'path': '/fs-cinder', - 'evs': ['172.24.49.21'], - 'size': 250.0} - ] - - showmount = "Export list for 172.24.49.21: \n\ -/fs-cinder * \n\ -" - - self.mock_object(backend.HNASSSHBackend, 'get_version', - return_value=version_info) - self.mock_object(self.driver, '_load_shares_config') - self.mock_object(backend.HNASSSHBackend, 'get_export_list', - return_value=export_list) - self.mock_object(self.driver, '_execute', return_value=(showmount, '')) - - self.assertRaises(exception.InvalidParameterValue, - self.driver.do_setup, None) - - def test_create_volume(self): - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(self.driver, '_do_create_volume') - - out = self.driver.create_volume(self.volume) - - self.assertEqual('172.24.49.21:/fs-cinder', out['provider_location']) - self.assertTrue(self.driver._ensure_shares_mounted.called) - - def test_create_volume_exception(self): - # pool 'original' doesnt exists in services - self.volume.host = 'host1@hnas-nfs-backend#original' - - self.mock_object(self.driver, '_ensure_shares_mounted') - - self.assertRaises(exception.ParameterNotFound, - self.driver.create_volume, self.volume) - - def test_create_cloned_volume(self): - self.volume.size = 150 - - self.mock_object(self.driver, 'extend_volume') - self.mock_object(backend.HNASSSHBackend, 'file_clone') - - out = self.driver.create_cloned_volume(self.volume, self.clone) - - self.assertEqual('hnas', out['provider_location']) - - def test_create_cloned_volume_invalid_volume_type(self): - self.volume.volume_type_id = fake.VOLUME_TYPE_ID - self.clone.volume_type_id = fake.VOLUME_TYPE2_ID - - self.mock_object(self.driver, 'extend_volume') - self.mock_object(backend.HNASSSHBackend, 'file_clone') - - self.assertRaises(exception.InvalidVolumeType, - self.driver.create_cloned_volume, self.volume, - self.clone) - - def test_get_volume_stats(self): - self.driver.pools = [{'pool_name': 'default', - 'service_label': 'default', - 'fs': '172.24.49.21:/easy-stack'}, - {'pool_name': 'cinder_svc', - 'service_label': 'cinder_svc', - 'fs': '172.24.49.26:/MNT-CinderTest2'}] - - self.mock_object(self.driver, '_update_volume_stats') - self.mock_object(self.driver, '_get_capacity_info', - return_value=(150, 50, 100)) - - out = self.driver.get_volume_stats() - - self.assertEqual('6.0.0', out['driver_version']) - self.assertEqual('Hitachi', out['vendor_name']) - self.assertEqual('NFS', out['storage_protocol']) - - def test_create_volume_from_snapshot(self): - expected_out = {'provider_location': 'hnas'} - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=False)) - self.mock_object(backend.HNASSSHBackend, 'file_clone') - result = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - - self.assertEqual(expected_out, result) - - def test_create_volume_from_snapshot_legacy(self): - expected_out = {'provider_location': 'hnas'} - - 
self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=True)) - self.mock_object(backend.HNASSSHBackend, 'file_clone') - result = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - - self.assertEqual(expected_out, result) - - def test_create_snapshot(self): - expected_out = {'provider_location': 'hnas'} - self.mock_object(backend.HNASSSHBackend, 'file_clone') - result = self.driver.create_snapshot(self.snapshot) - - self.assertEqual(expected_out, result) - - def test_delete_snapshot(self): - nfs_mount = "/opt/stack/data/cinder/mnt/" - path = nfs_mount + self.driver._get_snapshot_name(self.snapshot) - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=False)) - - self.mock_object(self.driver, '_get_file_path', - mock.Mock(return_value=path)) - self.mock_object(self.driver, '_execute') - - self.driver.delete_snapshot(self.snapshot) - - self.driver._execute.assert_called_with('rm', path, run_as_root=True) - - def test_delete_snapshot_legacy(self): - nfs_mount = "/opt/stack/data/cinder/mnt/" - legacy_path = nfs_mount + self.snapshot.name - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=True)) - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=False)) - self.mock_object(self.driver, '_get_file_path', - mock.Mock(return_value=legacy_path)) - self.mock_object(self.driver, '_execute') - - self.driver.delete_snapshot(self.snapshot) - - self.driver._execute.assert_called_with('rm', legacy_path, - run_as_root=True) - - def test_extend_volume(self): - share_mount_point = '/fs-cinder' - data = image_utils.imageutils.QemuImgInfo - data.virtual_size = 200 * 1024 ** 3 - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=share_mount_point) - self.mock_object(image_utils, 'qemu_img_info', return_value=data) - - self.driver.extend_volume(self.volume, 200) - - self.driver._get_mount_point_for_share.assert_called_with('hnas') - - def test_extend_volume_resizing_exception(self): - share_mount_point = '/fs-cinder' - data = image_utils.imageutils.QemuImgInfo - data.virtual_size = 2048 ** 3 - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=share_mount_point) - self.mock_object(image_utils, 'qemu_img_info', return_value=data) - - self.mock_object(image_utils, 'resize_image') - - self.assertRaises(exception.InvalidResults, - self.driver.extend_volume, self.volume, 200) - - def test_manage_existing(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value='/fs-cinder/cinder-volume') - self.mock_object(utils, 'resolve_hostname', - return_value='172.24.49.21') - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(self.driver, '_execute') - - out = self.driver.manage_existing(self.volume, existing_vol_ref) - - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - self.assertEqual(loc, out) - - os.path.isfile.assert_called_once_with('/fs-cinder/cinder-volume/') - self.driver._get_mount_point_for_share.assert_called_once_with( - '172.24.49.21:/fs-cinder') - utils.resolve_hostname.assert_called_with('172.24.49.21') - self.driver._ensure_shares_mounted.assert_called_once_with() - - def test_manage_existing_name_matches(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = 
{'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=('172.24.49.21:/fs-cinder', - '/mnt/silver', - self.volume.name)) - - out = self.driver.manage_existing(self.volume, existing_vol_ref) - - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - self.assertEqual(loc, out) - - def test_manage_existing_exception(self): - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=('172.24.49.21:/fs-cinder', - '/mnt/silver', - 'cinder-volume')) - self.mock_object(self.driver, '_execute', - side_effect=putils.ProcessExecutionError) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_missing_source_name(self): - # empty source-name should raise an exception - existing_vol_ref = {} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_already_managed(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - expected_size = 1 - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/silver') - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'get_file_size', return_value=expected_size) - - self.mock_object(vutils, 'check_already_managed_volume', - return_value=True) - - self.assertRaises(exception.ManageExistingAlreadyManaged, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_missing_volume_in_backend(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - side_effect=['172.24.49.21', '172.24.49.22']) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_get_size(self): - existing_vol_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', - } - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - expected_size = 1 - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - return_value='172.24.49.21') - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/silver') - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'get_file_size', return_value=expected_size) - - out = self.driver.manage_existing_get_size(self.volume, - existing_vol_ref) - - self.assertEqual(1, out) - utils.get_file_size.assert_called_once_with( - '/mnt/silver/cinder-volume') - utils.resolve_hostname.assert_called_with('172.24.49.21') - - def test_manage_existing_get_size_exception(self): - existing_vol_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', - } - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=('172.24.49.21:/fs-cinder', - '/mnt/silver', - 'cinder-volume')) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, self.volume, - existing_vol_ref) 
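The manage_existing error-path tests above all follow the same stub-and-assert pattern: replace the driver's low-level command runner with a mock whose side_effect raises, then assert that the public method translates the failure into a Cinder API exception instead of leaking the raw error. Below is a minimal, self-contained sketch of that pattern using plain unittest.mock; FakeDriver, CommandError, and BackendAPIError are hypothetical stand-ins for the real HNAS driver class, processutils.ProcessExecutionError, and exception.VolumeBackendAPIException, not part of the deleted code.

import unittest
from unittest import mock


class CommandError(Exception):
    """Stand-in for processutils.ProcessExecutionError."""


class BackendAPIError(Exception):
    """Stand-in for exception.VolumeBackendAPIException."""


class FakeDriver(object):
    """Hypothetical driver with the same error-translation contract."""

    def _execute(self, *cmd):
        # Replaced by mock.patch.object in the test below.
        raise NotImplementedError

    def manage_existing(self, existing_ref):
        # Run a backend command and translate low-level failures into
        # the API-level exception, as the tests above assert.
        try:
            self._execute('mv', existing_ref['source-name'])
        except CommandError:
            raise BackendAPIError(existing_ref['source-name'])
        return {'provider_location': existing_ref['source-name']}


class FakeDriverTest(unittest.TestCase):
    def test_manage_existing_exception(self):
        driver = FakeDriver()
        # side_effect makes the stubbed command runner raise; assertRaises
        # verifies the public method translates rather than leaks it.
        with mock.patch.object(driver, '_execute',
                               side_effect=CommandError):
            self.assertRaises(BackendAPIError, driver.manage_existing,
                              {'source-name': '172.24.49.21:/fs-cinder'})


if __name__ == '__main__':
    unittest.main()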
- - def test_manage_existing_get_size_resolving_hostname_exception(self): - existing_vol_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', - } - - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - side_effect=socket.gaierror) - - self.assertRaises(socket.gaierror, - self.driver.manage_existing_get_size, self.volume, - existing_vol_ref) - - def test_unmanage(self): - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - vol_str = 'volume-' + self.volume.id - vol_path = os.path.join(path, vol_str) - new_path = os.path.join(path, 'unmanage-' + vol_str) - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=path) - self.mock_object(self.driver, '_execute') - - self.driver.unmanage(self.volume) - - self.driver._execute.assert_called_with('mv', vol_path, new_path, - run_as_root=False, - check_exit_code=True) - self.driver._get_mount_point_for_share.assert_called_with( - self.volume.provider_location) - - def test_unmanage_volume_exception(self): - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=path) - self.mock_object(self.driver, '_execute', side_effect=ValueError) - - self.driver.unmanage(self.volume) - - def test_manage_existing_snapshot(self): - nfs_share = "172.24.49.21:/fs-cinder" - nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID - path = "unmanage-%s.%s" % (self.snapshot.volume.name, self.snapshot.id) - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - existing_ref = {'source-name': '172.24.49.21:/fs-cinder/' - + fake.SNAPSHOT_ID} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=(nfs_share, nfs_mount, path)) - self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', - return_value=True) - self.mock_object(self.driver, '_execute') - self.mock_object(backend.HNASSSHBackend, 'get_export_path', - return_value='fs-cinder') - - out = self.driver.manage_existing_snapshot(self.snapshot, - existing_ref) - - self.assertEqual(loc, out) - - def test_manage_existing_snapshot_legacy(self): - nfs_share = "172.24.49.21:/fs-cinder" - nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID - path = "unmanage-snapshot-%s" % self.snapshot.id - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - existing_ref = { - 'source-name': '172.24.49.21:/fs-cinder/' + fake.SNAPSHOT_ID} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=(nfs_share, nfs_mount, path)) - self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', - return_value=True) - self.mock_object(self.driver, '_execute') - self.mock_object(backend.HNASSSHBackend, 'get_export_path', - return_value='fs-cinder') - - out = self.driver.manage_existing_snapshot(self.snapshot, existing_ref) - - self.assertEqual(loc, out) - - def test_manage_existing_snapshot_not_parent_exception(self): - nfs_share = "172.24.49.21:/fs-cinder" - nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID - path = "unmanage-%s.%s" % (fake.VOLUME_ID, self.snapshot.id) - - existing_ref = {'source-name': '172.24.49.21:/fs-cinder/' - + fake.SNAPSHOT_ID} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=(nfs_share, nfs_mount, path)) - self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', - return_value=False) - 
self.mock_object(backend.HNASSSHBackend, 'get_export_path', - return_value='fs-cinder') - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, self.snapshot, - existing_ref) - - def test_manage_existing_snapshot_get_size(self): - existing_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-snapshot', - } - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - expected_size = 1 - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - return_value='172.24.49.21') - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/silver') - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'get_file_size', return_value=expected_size) - - out = self.driver.manage_existing_snapshot_get_size( - self.snapshot, existing_ref) - - self.assertEqual(1, out) - utils.get_file_size.assert_called_once_with( - '/mnt/silver/cinder-snapshot') - utils.resolve_hostname.assert_called_with('172.24.49.21') - - def test_unmanage_snapshot(self): - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - snapshot_name = "%s.%s" % (self.snapshot.volume.name, self.snapshot.id) - old_path = os.path.join(path, snapshot_name) - new_path = os.path.join(path, 'unmanage-' + snapshot_name) - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=path) - self.mock_object(self.driver, '_execute') - - self.driver.unmanage_snapshot(self.snapshot) - - self.driver._execute.assert_called_with('mv', old_path, new_path, - run_as_root=False, - check_exit_code=True) - self.driver._get_mount_point_for_share.assert_called_with( - self.snapshot.provider_location) - - def test_get_manageable_volumes_not_safe(self): - manageable_vol = [{'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': - '172.24.49.21:/fs-cinder/volume-1e5177e7-' - '95e5-4a0f-b170-e45f4b469f6a'}, - 'safe_to_manage': False, - 'size': 128}] - - rsrc = [self.volume] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [self.volume.name] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - - out = self.driver._get_manageable_resource_info( - rsrc, "volume", None, 1000, 0, ['reference'], ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_file_size.assert_called_with('%s/%s' % ( - path, self.volume.name)) - self.driver._get_mount_point_for_share(self.volume.provider_location) - - self.assertEqual(out, manageable_vol) - - def test_get_manageable_volumes(self): - manageable_vol = [{ - 'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': '172.24.49.21:/fs-cinder/' - 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'}, - 'safe_to_manage': False, - 'size': 128}] - - rsrc = [self.volume] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [fake.VOLUME_NAME] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - 
return_value=self.volume.size) - - out = self.driver._get_manageable_resource_info(rsrc, "volume", None, - 1000, 0, ['reference'], - ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_file_size.assert_called_with( - '%s/%s' % (path, self.volume.name)) - self.driver._get_mount_point_for_share(self.volume.provider_location) - - self.assertEqual(out, manageable_vol) - - def test_get_manageable_snapshots(self): - manageable_snap = [{ - 'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': '172.24.49.21:/fs-cinder/' - 'snapshot-253b2878-ec60-4793-' - 'ad19-e65496ec7aab'}, - 'safe_to_manage': False, - 'size': 128, - 'source_reference': {'id': '1'}}] - - rsrc = [self.snapshot] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [fake.SNAPSHOT_NAME] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives', - return_value=[' /nfs_cinder/volume-1', - '/nfs_cinder/snapshot2']) - - out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None, - 1000, 0, ['reference'], - ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_file_size.assert_called_with( - '%s/%s' % (path, self.snapshot.name)) - self.driver._get_mount_point_for_share(self.snapshot.provider_location) - - self.assertEqual(out, manageable_snap) - - def test_get_manageable_snapshots_unknown_origin(self): - manageable_snap = [{ - 'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab', - 'extra_info': 'Could not determine the volume that owns ' - 'the snapshot', - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': '172.24.49.21:/fs-cinder/' - 'snapshot-253b2878-ec60-4793-' - 'ad19-e65496ec7aab'}, - 'safe_to_manage': False, - 'size': 128, - 'source_reference': {'id': 'unknown'}}] - - rsrc = [self.snapshot] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [fake.SNAPSHOT_NAME] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives', - return_value=[' /nfs_cinder/volume-1', - ' /nfs_cinder/volume-2', - '/nfs_cinder/snapshot2']) - - out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None, - 1000, 0, ['reference'], - ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_mount_point_for_share(self.snapshot.provider_location) - self.driver._get_file_size.assert_called_with('%s/%s' % ( - path, self.snapshot.name)) - self.assertEqual(out, manageable_snap) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py deleted file mode 100644 index 730e0197186..00000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright (c) 2016 Hitachi Data Systems, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import ddt -import os - -from xml.etree import ElementTree as ETree - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hnas_utils -from cinder.volume import volume_types - -_VOLUME = {'name': 'cinder-volume', - 'id': fake_constants.VOLUME_ID, - 'size': 128, - 'host': 'host1@hnas-nfs-backend#default', - 'volume_type': 'default', - 'provider_location': 'hnas'} - -service_parameters = ['volume_type', 'hdp'] -optional_parameters = ['ssc_cmd', 'cluster_admin_ip0'] - -config_from_cinder_conf = { - 'username': 'supervisor', - 'fs': {'easy-stack': 'easy-stack', - 'silver': 'silver'}, - 'ssh_port': 22, - 'cluster_admin_ip0': None, - 'ssh_private_key': None, - 'mgmt_ip0': '172.24.44.15', - 'ssc_cmd': 'ssc', - 'services': { - 'default': { - 'label': u'svc_0', - 'pool_name': 'default', - 'hdp': 'easy-stack'}, - 'FS-CinderDev1': { - 'label': u'svc_1', - 'pool_name': 'FS-CinderDev1', - 'hdp': 'silver'}}, - 'password': 'supervisor'} - -valid_XML_str = ''' - - 172.24.44.15 - supervisor - supervisor - False - /home/ubuntu/.ssh/id_rsa - - default - easy-stack - - - silver - FS-CinderDev1 - - -''' - -XML_no_authentication = ''' - - 172.24.44.15 - supervisor - False - -''' - -XML_empty_authentication_param = ''' - - 172.24.44.15 - supervisor - - False - - - default - easy-stack - - -''' - -# missing mgmt_ip0 -XML_without_mandatory_params = ''' - - supervisor - supervisor - False - - default - easy-stack - - -''' - -XML_no_services_configured = ''' - - 172.24.44.15 - supervisor - supervisor - 10 - False - /home/ubuntu/.ssh/id_rsa - -''' - -parsed_xml = {'username': 'supervisor', 'password': 'supervisor', - 'ssc_cmd': 'ssc', 'ssh_port': 22, - 'fs': {'easy-stack': 'easy-stack', - 'FS-CinderDev1': 'FS-CinderDev1'}, - 'cluster_admin_ip0': None, - 'ssh_private_key': '/home/ubuntu/.ssh/id_rsa', - 'services': { - 'default': {'hdp': 'easy-stack', 'pool_name': 'default', - 'label': 'svc_0'}, - 'silver': {'hdp': 'FS-CinderDev1', 'pool_name': 'silver', - 'label': 'svc_1'}}, - 'mgmt_ip0': '172.24.44.15'} - -valid_XML_etree = ETree.XML(valid_XML_str) -invalid_XML_etree_no_authentication = ETree.XML(XML_no_authentication) -invalid_XML_etree_empty_parameter = ETree.XML(XML_empty_authentication_param) -invalid_XML_etree_no_mandatory_params = ETree.XML(XML_without_mandatory_params) -invalid_XML_etree_no_service = ETree.XML(XML_no_services_configured) - - -@ddt.ddt -class HNASUtilsTest(test.TestCase): - - def __init__(self, *args, **kwargs): - super(HNASUtilsTest, self).__init__(*args, **kwargs) - - def setUp(self): - super(HNASUtilsTest, self).setUp() - - self.fake_conf = conf.Configuration(hnas_utils.drivers_common_opts, - conf.SHARED_CONF_GROUP) - - self.override_config('hnas_username', 'supervisor', - conf.SHARED_CONF_GROUP) - 
self.override_config('hnas_password', 'supervisor', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_mgmt_ip0', '172.24.44.15', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc0_pool_name', 'default', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc0_hdp', 'easy-stack', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc1_pool_name', 'FS-CinderDev1', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc1_hdp', 'silver', - conf.SHARED_CONF_GROUP) - - self.context = context.get_admin_context() - self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME) - self.volume_type = (fake_volume.fake_volume_type_obj(None, **{ - 'id': fake_constants.VOLUME_TYPE_ID, 'name': 'silver'})) - - def test_read_xml_config(self): - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=valid_XML_etree) - - xml_path = 'xml_file_found' - out = hnas_utils.read_xml_config(xml_path, - service_parameters, - optional_parameters) - - self.assertEqual(parsed_xml, out) - - def test_read_xml_config_parser_error(self): - xml_file = 'hnas_nfs.xml' - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', side_effect=ETree.ParseError) - - self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config, - xml_file, service_parameters, optional_parameters) - - def test_read_xml_config_not_found(self): - self.mock_object(os, 'access', return_value=False) - - xml_path = 'xml_file_not_found' - self.assertRaises(exception.NotFound, hnas_utils.read_xml_config, - xml_path, service_parameters, optional_parameters) - - def test_read_xml_config_without_services_configured(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_no_service) - - self.assertRaises(exception.ParameterNotFound, - hnas_utils.read_xml_config, xml_file, - service_parameters, optional_parameters) - - def test_read_xml_config_empty_authentication_parameter(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_empty_parameter) - - self.assertRaises(exception.ParameterNotFound, - hnas_utils.read_xml_config, xml_file, - service_parameters, optional_parameters) - - def test_read_xml_config_mandatory_parameters_missing(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_no_mandatory_params) - - self.assertRaises(exception.ParameterNotFound, - hnas_utils.read_xml_config, xml_file, - service_parameters, optional_parameters) - - def test_read_config_xml_without_authentication_parameter(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_no_authentication) - - self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config, - xml_file, service_parameters, optional_parameters) - - def test_get_pool_with_vol_type(self): - 
self.mock_object(volume_types, 'get_volume_type_extra_specs', - return_value={'service_label': 'silver'}) - - self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID - self.volume.volume_type = self.volume_type - - out = hnas_utils.get_pool(parsed_xml, self.volume) - - self.assertEqual('silver', out) - - def test_get_pool_with_vol_type_id_none(self): - self.volume.volume_type_id = None - self.volume.volume_type = self.volume_type - - out = hnas_utils.get_pool(parsed_xml, self.volume) - - self.assertEqual('default', out) - - def test_get_pool_with_missing_service_label(self): - self.mock_object(volume_types, 'get_volume_type_extra_specs', - return_value={'service_label': 'gold'}) - - self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID - self.volume.volume_type = self.volume_type - - out = hnas_utils.get_pool(parsed_xml, self.volume) - - self.assertEqual('default', out) - - def test_get_pool_without_vol_type(self): - out = hnas_utils.get_pool(parsed_xml, self.volume) - self.assertEqual('default', out) - - def test_read_cinder_conf_nfs(self): - out = hnas_utils.read_cinder_conf(self.fake_conf) - - self.assertEqual(config_from_cinder_conf, out) - - def test_read_cinder_conf_break(self): - self.override_config('hnas_username', None, conf.SHARED_CONF_GROUP) - self.override_config('hnas_password', None, conf.SHARED_CONF_GROUP) - self.override_config('hnas_mgmt_ip0', None, conf.SHARED_CONF_GROUP) - out = hnas_utils.read_cinder_conf(self.fake_conf) - self.assertIsNone(out) - - @ddt.data('hnas_username', 'hnas_password', - 'hnas_mgmt_ip0', 'hnas_svc0_pool_name', - 'hnas_svc0_hdp', ) - def test_init_invalid_conf_parameters(self, attr_name): - self.override_config(attr_name, None, conf.SHARED_CONF_GROUP) - - self.assertRaises(exception.InvalidParameterValue, - hnas_utils.read_cinder_conf, self.fake_conf) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py deleted file mode 100644 index 4fef1bfad1d..00000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py +++ /dev/null @@ -1,1806 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Unit tests for Hitachi VSP Driver.""" - -import copy -import os - -import mock -from os_brick.initiator import connector as brick_connector -from oslo_concurrency import processutils -from oslo_config import cfg -from six.moves import range - -from cinder import context as cinder_context -from cinder import db -from cinder.db.sqlalchemy import api as sqlalchemy_api -from cinder import exception -from cinder.objects import snapshot as obj_snap -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_fc -from cinder.volume.drivers.hitachi import vsp_horcm -from cinder.volume.drivers.hitachi import vsp_utils -from cinder.volume import utils as volume_utils - -# Dummy return values -SUCCEED = 0 -STDOUT = "" -STDERR = "" -CMD_SUCCEED = (SUCCEED, STDOUT, STDERR) - -# Configuration parameter values -CONFIG_MAP = { - 'serial': '492015', - 'my_ip': '127.0.0.1', -} - -# CCI instance numbers -INST_NUMS = (200, 201) - -# Shadow Image copy group names -CG_MAP = {'cg%s' % x: vsp_horcm._COPY_GROUP % ( - CONFIG_MAP['my_ip'], CONFIG_MAP['serial'], INST_NUMS[1], x) - for x in range(3) -} - -# Map containing all maps for dummy response creation -DUMMY_RESPONSE_MAP = CONFIG_MAP.copy() -DUMMY_RESPONSE_MAP.update(CG_MAP) - -# Dummy response for FC zoning device mapping -DEVICE_MAP = { - 'fabric_name': { - 'initiator_port_wwn_list': ['123456789abcdee', '123456789abcdef'], - 'target_port_wwn_list': ['111111112345678']}} - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sS - - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT2 = "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT3 = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg0)s %(cg0)sS 0 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91P -GET_DEVICE_GRP_MU1P_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sP VSP-LDEV-0-2 0 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91S -GET_DEVICE_GRP_MU1S_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sS VSP-LDEV-0-2 2 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_wwn -port CL1-A HBSD-0123456789abcdef -GET_HBA_WWN_CL1A_HOSTGRP_RESULT = ( - "PORT GID GROUP_NAME HWWN Serial# NICK_NAME\n" - "CL1-A 0 HBSD-0123456789abcdef 0123456789abcdef %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get dp_pool -GET_DP_POOL_RESULT = ( - "PID POLS U(%) AV_CAP(MB) TP_CAP(MB) W(%) H(%) Num LDEV# LCNT " - "TL_CAP(MB) BM TR_CAP(MB) RCNT\n" - "030 POLN 0 6006 6006 75 80 1 14860 32 167477 NB 0 0\n" -) - -# cmd: raidcom get dp_pool -GET_DP_POOL_ERROR_RESULT = ( - "PID POLS U(%) POOL_NAME Seq# Num LDEV# H(%) VCAP(%) TYPE PM PT\n" -) - -# cmd: raidcom get pool -key opt -GET_POOL_KEYOPT_RESULT = ( - "PID POLS U(%%) POOL_NAME Seq# Num LDEV# H(%%) VCAP(%%) TYPE PM PT\n" - "030 POLM 30 VSPPOOL %(serial)s 1 10000 80 - OPEN N HDP\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_wwn -port CL1-B-0 -GET_HBA_WWN_CL1B0_RESULT = ( - "PORT GID GROUP_NAME HWWN Serial# NICK_NAME\n" - "CL1-B 0 
HBSD-0123456789abcdef 0123456789abcdef %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-A -GET_HOST_GRP_CL1A_RESULT = ( - "PORT GID GROUP_NAME Serial# HMD HMO_BITs\n" - "CL1-A 0 HBSD-0123456789abcdef %(serial)s LINUX/IRIX 91\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-B -GET_HOST_GRP_CL1B_RESULT = ( - "PORT GID GROUP_NAME Serial# HMD HMO_BITs\n" - "CL1-B 0 HBSD-0123456789abcdef %(serial)s LINUX/IRIX 91\n" -) % DUMMY_RESPONSE_MAP - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-0123456789abcdef -ADD_HOSTGRP_RESULT = "raidcom: Host group ID 0(0x0) will be used for adding.\n" - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-pair00 -ADD_HOSTGRP_PAIR_RESULT = ( - "raidcom: Host group ID 2(0x2) will be used for adding.\n" -) - -# raidcom add lun -port CL1-A-0 -ldev_id x -ADD_LUN_LUN0_RESULT = "raidcom: LUN 0(0x0) will be used for adding.\n" - -# cmd: raidcom get ldev -ldev_list undefined -cnt 1 -GET_LDEV_LDEV_LIST_UNDEFINED = ( - "LDEV : 1 VIR_LDEV : 65534\n" - "VOL_TYPE : NOT DEFINED\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -cnt 2 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2 = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id 0 -cnt 10 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 2 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 3 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 4 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 5 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 6 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 7 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 8 - - NOT DEFINED - - - -\n" - " %(serial)s 9 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id x -check_status NOT DEFINED -GET_LDEV_CHECKSTATUS_ERR = ( - "raidcom: testing condition has failed with exit(1).\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -GET_LDEV_LDEV0_RESULT = """ -LDEV : 0 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 1 -GET_LDEV_LDEV1_RESULT = """ -LDEV : 1 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 268435456 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 3 -GET_LDEV_LDEV3_RESULT = """ -LDEV : 3 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : -""" - -# cmd: raidcom get ldev -ldev_id 4 -GET_LDEV_LDEV4_RESULT = """ -LDEV : 4 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 5 -GET_LDEV_LDEV5_RESULT = """ -LDEV : 5 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : VVOL -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 6 -GET_LDEV_LDEV6_RESULT = """ -LDEV : 6 -VOL_TYPE : OPEN-V-CVS -PORTs : CL1-A-0 0 HBSD-0123456789abcdef -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 7 -GET_LDEV_LDEV7_RESULT = """ -LDEV : 7 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT 
-VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 10 -GET_LDEV_LDEV10_RESULT = """ -LDEV : 10 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 11 -GET_LDEV_LDEV11_RESULT = """ -LDEV : 11 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 12 -GET_LDEV_LDEV12_RESULT = """ -LDEV : 12 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 13 -GET_LDEV_LDEV13_RESULT = """ -LDEV : 13 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : BLK -""" - -# cmd: raidcom get ldev -ldev_id 14 -GET_LDEV_LDEV14_RESULT = """ -LDEV : 14 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : HDT -VOL_Capacity(BLK) : 9999999 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get lun -port CL1-A-0 -GET_LUN_CL1A0_RESULT = ( - "PORT GID HMD LUN NUM LDEV CM Serial# HMO_BITs\n" - "CL1-A 0 LINUX/IRIX 4 1 4 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 254 1 5 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 255 1 6 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -GET_PORT_RESULT = ( - "PORT TYPE ATTR SPD LPID FAB CONN SSW SL Serial# WWN PHY_PORT\n" - "CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" - "CL1-B FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" - "CL3-A FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" - "CL3-B FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 4 -GET_SNAPSHOT_LDEV4_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 7 -GET_SNAPSHOT_LDEV7_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 8 -GET_SNAPSHOT_LDEV8_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 8 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 11 -GET_SNAPSHOT_LDEV11_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 11 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 1 0 -IM201 -PAIRDISPLAY_LDEV0_1_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-0-1 L CL1-A-0 0 0 0 %(serial)s 0 " - "P-VOL PSUS %(serial)s 1 W\n" - "%(cg0)s VSP-LDEV-0-1 R CL1-A-0 0 1 0 %(serial)s 1 " - "S-VOL SSUS - 0 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 10 0 -IM201 -PAIRDISPLAY_LDEV7_10_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-10 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 10 W\n" - "%(cg0)s VSP-LDEV-7-10 R CL1-A-1 0 1 0 %(serial)s 10 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# 
cmd: pairdisplay -CLI -d 492015 12 0 -IM201 -PAIRDISPLAY_LDEV7_12_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-12 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 12 W\n" - "%(cg0)s VSP-LDEV-7-12 R CL1-A-1 0 1 0 %(serial)s 12 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidqry -h -RAIDQRY_RESULT = ( - "Model : RAID-Manager/Linux/x64\n" - "Ver&Rev: 01-39-03/03\n" - "Usage : raidqry [options] for HORC[200]\n" - " -h Help/Usage\n" - " -I[#] Set to HORCMINST#\n" - " -IH[#] or -ITC[#] Set to HORC mode [and HORCMINST#]\n" - " -IM[#] or -ISI[#] Set to MRCF mode [and HORCMINST#]\n" - " -z Set to the interactive mode\n" - " -zx Set to the interactive mode and HORCM monitoring\n" - " -q Quit(Return to main())\n" - " -g Specify for getting all group name on local\n" - " -l Specify the local query\n" - " -lm Specify the local query with full micro version\n" - " -r Specify the remote query\n" - " -f Specify display for floatable host\n" -) - -EXECUTE_TABLE = { - ('add', 'hba_wwn', '-port', 'CL3-A-0', '-hba_wwn', '0123456789abcdef'): ( - vsp_horcm.EX_INVARG, STDOUT, STDERR), - ('add', 'host_grp', '-port', 'CL1-A', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL1-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-A', '-host_grp_name', - 'HBSD-0123456789abcdef'): (SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-0123456789abcdef'): (SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 1): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 5): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 6): ( - vsp_horcm.EX_CMDRJE, STDOUT, vsp_horcm._LU_PATH_DEFINED), - ('add', 'lun', '-port', 'CL1-B-0', '-ldev_id', 0, '-lun_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('extend', 'ldev', '-ldev_id', 3, '-capacity', '128G'): ( - vsp_horcm.EX_CMDIOE, STDOUT, - "raidcom: [EX_CMDIOE] Control command I/O error"), - ('get', 'hba_wwn', '-port', 'CL1-A', 'HBSD-0123456789abcdef'): ( - SUCCEED, GET_HBA_WWN_CL1A_HOSTGRP_RESULT, STDERR), - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'P'): ( - SUCCEED, GET_DEVICE_GRP_MU1P_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'S'): ( - SUCCEED, GET_DEVICE_GRP_MU1S_RESULT, STDERR), - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_RESULT, STDERR), - ('get', 'pool', '-key', 'opt'): (SUCCEED, GET_POOL_KEYOPT_RESULT, STDERR), - ('get', 'hba_wwn', '-port', 'CL1-B-0'): ( - SUCCEED, GET_HBA_WWN_CL1B0_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-A'): ( - SUCCEED, GET_HOST_GRP_CL1A_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-B'): ( - SUCCEED, GET_HOST_GRP_CL1B_RESULT, STDERR), - ('get', 'ldev', '-ldev_list', 'undefined', '-cnt', '1'): ( - SUCCEED, GET_LDEV_LDEV_LIST_UNDEFINED, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 2, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 10, '-key', 
'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('get', 'ldev', '-ldev_id', 0): (SUCCEED, GET_LDEV_LDEV0_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 1): (SUCCEED, GET_LDEV_LDEV1_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 3): (SUCCEED, GET_LDEV_LDEV3_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 4): (SUCCEED, GET_LDEV_LDEV4_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 5): (SUCCEED, GET_LDEV_LDEV5_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 6): (SUCCEED, GET_LDEV_LDEV6_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 7): (SUCCEED, GET_LDEV_LDEV7_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 10): (SUCCEED, GET_LDEV_LDEV10_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 11): (SUCCEED, GET_LDEV_LDEV11_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 12): (SUCCEED, GET_LDEV_LDEV12_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 13): (SUCCEED, GET_LDEV_LDEV13_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 14): (SUCCEED, GET_LDEV_LDEV14_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 15): (vsp_horcm.EX_COMERR, "", STDERR), - ('get', 'lun', '-port', 'CL1-A-0'): ( - SUCCEED, GET_LUN_CL1A0_RESULT, STDERR), - ('get', 'port'): (SUCCEED, GET_PORT_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 4): ( - SUCCEED, GET_SNAPSHOT_LDEV4_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 7): ( - SUCCEED, GET_SNAPSHOT_LDEV7_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 8): ( - SUCCEED, GET_SNAPSHOT_LDEV8_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 11): ( - SUCCEED, GET_SNAPSHOT_LDEV11_RESULT, STDERR), - ('modify', 'ldev', '-ldev_id', 3, '-status', 'discard_zero_page'): ( - vsp_horcm.EX_CMDIOE, STDOUT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 10, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_10_RESULT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 12, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_12_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 8, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 10, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 12, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('raidqry', '-h'): (SUCCEED, RAIDQRY_RESULT, STDERR), - ('tee', '/etc/horcm501.conf'): (1, STDOUT, STDERR), - ('-login', 'user', 'pasword'): (SUCCEED, STDOUT, STDERR), - ('-login', 'userX', 'paswordX'): (vsp_horcm.EX_ENAUTH, STDOUT, STDERR), - ('-login', 'userY', 'paswordY'): (vsp_horcm.EX_COMERR, STDOUT, STDERR), -} - -EXECUTE_TABLE2 = EXECUTE_TABLE.copy() -EXECUTE_TABLE2.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT2, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUS, STDOUT, STDERR), -}) - -EXECUTE_TABLE3 = EXECUTE_TABLE2.copy() - -EXECUTE_TABLE4 = EXECUTE_TABLE.copy() -EXECUTE_TABLE4.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), -}) - -EXECUTE_TABLE5 = EXECUTE_TABLE.copy() -EXECUTE_TABLE5.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, 
STDERR), - ('get', 'ldev', '-ldev_id', 1, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 1, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV0_1_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), -}) - -ERROR_EXECUTE_TABLE = { - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_ERROR_RESULT, STDERR), -} - -DEFAULT_CONNECTOR = { - 'host': 'host', - 'ip': CONFIG_MAP['my_ip'], - 'wwpns': ['0123456789abcdef'], - 'multipath': False, -} - -CTXT = cinder_context.get_admin_context() - -TEST_VOLUME = [] -for i in range(14): - volume = {} - volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) - volume['name'] = 'test-volume{0:d}'.format(i) - volume['provider_location'] = None if i == 2 else '{0:d}'.format(i) - volume['size'] = 256 if i == 1 else 128 - if i == 2: - volume['status'] = 'creating' - elif i == 5: - volume['status'] = 'in-use' - else: - volume['status'] = 'available' - volume = fake_volume.fake_volume_obj(CTXT, **volume) - TEST_VOLUME.append(volume) - - -def _volume_get(context, volume_id): - """Return predefined volume info.""" - return TEST_VOLUME[int(volume_id.replace("-", ""))] - -TEST_SNAPSHOT = [] -for i in range(8): - snapshot = {} - snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) - snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) - snapshot['provider_location'] = None if i == 2 else '{0:d}'.format( - i if i < 5 else i + 5) - snapshot['status'] = 'creating' if i == 2 else 'available' - snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format( - i if i < 5 else 7) - snapshot['volume'] = _volume_get(None, snapshot['volume_id']) - snapshot['volume_name'] = 'test-volume{0:d}'.format(i if i < 5 else 7) - snapshot['volume_size'] = 256 if i == 1 else 128 - snapshot = obj_snap.Snapshot._from_db_object( - CTXT, obj_snap.Snapshot(), - fake_snapshot.fake_db_snapshot(**snapshot)) - TEST_SNAPSHOT.append(snapshot) - -# Flags that determine _fake_run_horcmstart() return values -run_horcmstart_returns_error = False -run_horcmstart_returns_error2 = False -run_horcmstart3_cnt = 0 - - -def _access(*args, **kargs): - """Assume access to the path is allowed.""" - return True - - -def _execute(*args, **kargs): - """Return predefined results for command execution.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - return result - - -def _execute2(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE2.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE2.get(cmd, CMD_SUCCEED) - return result - - -def _execute3(*args, **kargs): - """Change pairevtwait's dummy return value after it is called.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE3.get(cmd, CMD_SUCCEED) - if cmd == ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): - EXECUTE_TABLE3.update({ - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), - }) - return result - - -def _execute4(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE4.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE4.get(cmd, CMD_SUCCEED) - return result - - -def _execute5(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE5.""" - cmd = args[1:-3] if args[0] == 'raidcom' 
else args - result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) - return result - - -def _cinder_execute(*args, **kargs): - """Return predefined results or raise an exception.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - ret, stdout, stderr = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - if ret == SUCCEED: - return stdout, stderr - else: - pee = processutils.ProcessExecutionError(exit_code=ret, - stdout=stdout, - stderr=stderr) - raise pee - - -def _error_execute(*args, **kargs): - """Return predefined error results.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = _execute(*args, **kargs) - ret = ERROR_EXECUTE_TABLE.get(cmd) - return ret if ret else result - - -def _brick_get_connector_properties(multipath=False, enforce_multipath=False): - """Return a predefined connector object.""" - return DEFAULT_CONNECTOR - - -def _brick_get_connector_properties_error(multipath=False, - enforce_multipath=False): - """Return an incomplete connector object.""" - connector = dict(DEFAULT_CONNECTOR) - del connector['wwpns'] - return connector - - -def _connect_volume(*args, **kwargs): - """Return predefined volume info.""" - return {'path': u'/dev/disk/by-path/xxxx', 'type': 'block'} - - -def _disconnect_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _copy_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _volume_admin_metadata_get(context, volume_id): - """Return dummy admin metadata.""" - return {'fake_key': 'fake_value'} - - -def _snapshot_metadata_update(context, snapshot_id, metadata, delete): - """Return without doing anything.""" - pass - - -def _fake_is_smpl(*args): - """Assume the Shadow Image pair status is SMPL.""" - return True - - -def _fake_run_horcmgr(*args): - """Assume CCI is running.""" - return vsp_horcm._HORCM_RUNNING - - -def _fake_run_horcmstart(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error else 3 - - -def _fake_run_horcmstart2(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error2 else 3 - - -def _fake_run_horcmstart3(*args): - """Update a counter and return a value based on it.""" - global run_horcmstart3_cnt - run_horcmstart3_cnt = run_horcmstart3_cnt + 1 - return 0 if run_horcmstart3_cnt <= 1 else 3 - - -def _fake_check_ldev_status(*args, **kwargs): - """Assume LDEV status has changed as desired.""" - return None - - -def _fake_exists(path): - """Assume the path does not exist.""" - return False - - -class FakeLookupService(object): - """Dummy FC zoning mapping lookup service class.""" - - def get_device_mapping_from_network(self, initiator_wwns, target_wwns): - """Return predefined FC zoning mapping.""" - return DEVICE_MAP - - -class VSPHORCMFCDriverTest(test.TestCase): - """Unit test class for VSP HORCM interface fibre channel module.""" - - test_existing_ref = {'source-id': '0'} - test_existing_none_ldev_ref = {'source-id': '2'} - test_existing_invalid_ldev_ref = {'source-id': 'AAA'} - test_existing_value_error_ref = {'source-id': 'XX:XX:XX'} - test_existing_no_ldev_ref = {} - test_existing_invalid_sts_ldev = {'source-id': '13'} - test_existing_invalid_vol_attr = {'source-id': '12'} - test_existing_invalid_size = {'source-id': '14'} - test_existing_invalid_port_cnt = {'source-id': '6'} - test_existing_failed_to_start_horcmgr = {'source-id': '15'} - - def setUp(self): - """Set up the test environment.""" - super(VSPHORCMFCDriverTest, self).setUp() - - self.configuration = 
mock.Mock(conf.Configuration) - self.ctxt = cinder_context.get_admin_context() - self._setup_config() - self._setup_driver() - - def _setup_config(self): - """Set configuration parameter values.""" - self.configuration.config_group = "HORCM" - - self.configuration.volume_backend_name = "HORCMFC" - self.configuration.volume_driver = ( - "cinder.volume.drivers.hitachi.vsp_fc.VSPFCDriver") - self.configuration.reserved_percentage = "0" - self.configuration.use_multipath_for_image_xfer = False - self.configuration.enforce_multipath_for_image_xfer = False - self.configuration.num_volume_device_scan_tries = 3 - self.configuration.volume_dd_blocksize = "1000" - - self.configuration.vsp_storage_id = CONFIG_MAP['serial'] - self.configuration.vsp_pool = "30" - self.configuration.vsp_thin_pool = None - self.configuration.vsp_ldev_range = "0-1" - self.configuration.vsp_default_copy_method = 'FULL' - self.configuration.vsp_copy_speed = 3 - self.configuration.vsp_copy_check_interval = 1 - self.configuration.vsp_async_copy_check_interval = 1 - self.configuration.vsp_target_ports = "CL1-A" - self.configuration.vsp_compute_target_ports = "CL1-A" - self.configuration.vsp_horcm_pair_target_ports = "CL1-A" - self.configuration.vsp_group_request = True - - self.configuration.vsp_zoning_request = False - - self.configuration.vsp_horcm_numbers = INST_NUMS - self.configuration.vsp_horcm_user = "user" - self.configuration.vsp_horcm_password = "pasword" - self.configuration.vsp_horcm_add_conf = False - - self.configuration.safe_get = self._fake_safe_get - - CONF = cfg.CONF - CONF.my_ip = CONFIG_MAP['my_ip'] - - def _fake_safe_get(self, value): - """Retrieve a configuration value avoiding throwing an exception.""" - try: - val = getattr(self.configuration, value) - except AttributeError: - val = None - return val - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def _setup_driver(self, execute, brick_get_connector_properties): - """Set up the driver environment.""" - self.driver = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self.driver.do_setup(None) - self.driver.check_for_setup_error() - self.driver.create_export(None, None, None) - self.driver.ensure_export(None, None) - self.driver.remove_export(None, None) - - # API test cases - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_do_setup(self, execute, brick_get_connector_properties): - """Normal case: The host group exists beforehand.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '0123456789abcdef'}, - drv.common.storage_info['wwns']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_raidqry_h_invalid( - self, execute, brick_get_connector_properties): - """Error case: 'raidqry -h' returns nothing. 
This error is ignored.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - raidqry_h_original = EXECUTE_TABLE[('raidqry', '-h')] - EXECUTE_TABLE[('raidqry', '-h')] = (SUCCEED, "", STDERR) - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '0123456789abcdef'}, - drv.common.storage_info['wwns']) - EXECUTE_TABLE[('raidqry', '-h')] = raidqry_h_original - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_specify_pool_name( - self, execute, brick_get_connector_properties): - """Normal case: Specify pool name rather than pool number.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_pool = "VSPPOOL" - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp( - self, execute, brick_get_connector_properties): - """Normal case: The host group does not exist beforehand.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-B" - - drv.do_setup(None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp_error( - self, execute, brick_get_connector_properties): - """Error case: 'add hba_wwn' fails(MSGID0614-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-A" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_thin_pool_not_specified(self, execute): - """Error case: Parameter error(vsp_thin_pool).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_default_copy_method = 'THIN' - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_ldev_range_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: LDEV range is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_ldev_range = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_storage_id_not_specified(self, execute): - """Error case: Parameter error(vsp_storage_id).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_storage_id = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_numbers_invalid(self, execute): - """Error case: Parameter error(vsp_horcm_numbers).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - 
self.configuration.vsp_horcm_numbers = (200, 200) - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_user_not_specified(self, execute): - """Error case: Parameter error(vsp_horcm_user).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only target_ports is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_compute_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only compute_target_ports is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_pair_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only pair_target_ports is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_pair_target_ports = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_specified(self, execute): - """Error case: Parameter error(compute_target_ports).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_specified(self, execute): - """Error case: Parameter error(pair_target_ports).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(processutils, 'execute', side_effect=_execute) - @mock.patch.object(os.path, 'exists', side_effect=_fake_exists) - @mock.patch.object(os, 'access', side_effect=_access) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_create_conf( - self, vsp_utils_execute, access, exists, processutils_execute, - brick_get_connector_properties): - 
"""Error case: Writing into horcmxxx.conf fails.(MSGID0632-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (500, 501) - self.configuration.vsp_horcm_add_conf = True - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_login( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_ENAUTH(MSGID0600-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userX" - self.configuration.vsp_horcm_password = "paswordX" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 2) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_command( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_COMERR(MSGID0600-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userY" - self.configuration.vsp_horcm_password = "paswordY" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 2) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmgr', side_effect=_fake_run_horcmgr) - def test_do_setup_failed_to_horcmshutdown( - self, _run_horcmgr, execute, brick_get_connector_properties): - """Error case: CCI's status is always RUNNING(MSGID0608-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart) - def test_do_setup_failed_to_horcmstart( - self, _run_horcmstart, execute, brick_get_connector_properties): - """Error case: _run_horcmstart() returns an error(MSGID0609-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - global run_horcmstart_returns_error - run_horcmstart_returns_error = True - self.assertRaises(exception.VSPError, drv.do_setup, None) - run_horcmstart_returns_error = False - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties_error) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_wwn_not_found( - self, execute, brick_get_connector_properties): - """Error case: The connector does not have 'wwpns'(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_port_not_found(self, execute): - """Error case: The target port does not exist(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_found(self, execute): - """Error case: Compute target port does not exist(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_found(self, execute): - """Error case: Pair target port does not exist(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = ["CL5-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume(self, execute): - """Normal case: Extend volume succeeds.""" - self.driver.extend_volume(TEST_VOLUME[0], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0613-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[2], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_ldev_is_vvol(self, execute): - """Error case: The volume is a V-VOL(MSGID0618-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[5], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[4], 256) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - @mock.patch.object(vsp_horcm, '_EXTEND_WAITTIME', 1) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - def test_extend_volume_raidcom_error(self, execute): - """Error case: 'extend ldev' returns an error(MSGID0600-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[3], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats(self, execute): - """Normal case: Refreshing data required.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - self.assertFalse(stats['multiattach']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats_no_refresh(self, execute): - """Normal case: Refreshing data not required.""" - stats = self.driver.get_volume_stats() - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_error_execute) - def test_get_volume_stats_failed_to_get_dp_pool(self, execute): - """Error case: The pool does not exist(MSGID0640-E, MSGID0620-E).""" - 
self.driver.common.storage_info['pool_id'] = 29 - - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume(self, execute): - """Normal case: Available LDEV range is 0-1.""" - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_free_ldev_not_found_on_storage(self, execute): - """Error case: No unused LDEV exists(MSGID0648-E).""" - self.driver.common.storage_info['ldev_range'] = [0, 0] - - self.assertRaises( - exception.VSPError, self.driver.create_volume, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_no_setting_ldev_range(self, execute): - """Normal case: Available LDEV range is unlimited.""" - self.driver.common.storage_info['ldev_range'] = None - - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, - '_check_ldev_status', side_effect=_fake_check_ldev_status) - def test_delete_volume(self, _check_ldev_status, execute): - """Normal case: Delete a volume.""" - self.driver.delete_volume(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.delete_volume(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_ldev_not_found_on_storage(self, execute): - """Unusual case: The volume's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_volume(TEST_VOLUME[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_volume_is_busy(self, execute): - """Error case: The volume is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[4]) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_full( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=FULL.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_thin( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=THIN.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - self.configuration.vsp_thin_pool = 31 - self.configuration.vsp_default_copy_method = "THIN" - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - 
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_provider_location_is_none( - self, volume_get, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_ldev_not_found_on_storage( - self, volume_get, execute): - """Error case: The src-vol's LDEV does not exist.(MSGID0612-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_full(self, execute): - """Normal case: Delete a snapshot.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[5]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, '_is_smpl', side_effect=_fake_is_smpl) - def test_delete_snapshot_full_smpl(self, _is_smpl, execute): - """Normal case: The LDEV in an SI volume pair becomes SMPL.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[7]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_vvol_timeout(self, execute): - """Error case: V-VOL is not deleted from a snapshot(MSGID0611-E).""" - self.assertRaises( - exception.VSPError, self.driver.delete_snapshot, TEST_SNAPSHOT[6]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_provider_location_is_none(self, execute): - """Error case: Snapshot's provider_location is None(MSGID0304-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_ldev_not_found_on_storage(self, execute): - """Unusual case: The snapshot's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_snapshot_is_busy(self, execute): - """Error case: The snapshot is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.SnapshotIsBusy, self.driver.delete_snapshot, - TEST_SNAPSHOT[4]) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_same_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: The source volume is a V-VOL and copied by dd.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_extend_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: Copy with dd and extend the size afterward.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[1], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_provider_location_is_none(self, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_invalid_size(self, execute): - """Error case: src-size > clone-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_extend_size_thin(self, execute): - """Error case: clone > src and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - test_vol_obj, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_same_size(self, execute): - """Normal case: Copy with Shadow Image.""" - vol = self.driver.create_volume_from_snapshot( - TEST_VOLUME[0], TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute2) - def test_create_volume_from_snapshot_full_extend_normal(self, execute): - """Normal case: Copy with Shadow Image and extend the size.""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - vol = self.driver.create_volume_from_snapshot( - test_vol_obj, TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute3) - def test_create_volume_from_snapshot_full_extend_PSUE(self, execute): - """Error case: SI copy -> pair status: PSUS -> PSUE(MSGID0722-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute4) - def test_create_volume_from_snapshot_full_PSUE(self, execute): - """Error case: SI copy -> pair status becomes PSUE(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart3) - @mock.patch.object(vsp_horcm, '_LDEV_STATUS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute5) - def test_create_volume_from_snapshot_full_SMPL( - self, execute, _run_horcmstart): - """Error case: SI copy -> pair status becomes SMPL(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_invalid_size(self, execute): - """Error case: volume-size < snapshot-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_thin_extend(self, execute): - """Error case: volume > snapshot and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_provider_location_is_none( - self, execute): - """Error case: Snapshot's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection(self, volume_admin_metadata_get, execute): - """Normal case: Initialize connection.""" - self.configuration.vsp_zoning_request = True - self.driver.common._lookup_service = FakeLookupService() - - ret = self.driver.initialize_connection( - TEST_VOLUME[0], DEFAULT_CONNECTOR) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_multipath( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: Initialize connection in multipath environment.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL1-A", "CL1-B"] - drv.do_setup(None) - multipath_connector = copy.copy(DEFAULT_CONNECTOR) - multipath_connector['multipath'] = True - ret = drv.initialize_connection(TEST_VOLUME[0], multipath_connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef', '0123456789abcdef'], - ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_initialize_connection_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0619-E).""" - self.assertRaises( - exception.VSPError, self.driver.initialize_connection, - TEST_VOLUME[2], DEFAULT_CONNECTOR) 
- - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_already_attached( - self, volume_admin_metadata_get, execute): - """Unusual case: 'add lun' returns 'already defined' error.""" - ret = self.driver.initialize_connection( - TEST_VOLUME[6], DEFAULT_CONNECTOR) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(255, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_target_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_compute_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: compute_target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection(self, execute): - """Normal case: Terminate connection.""" - self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_provider_location_is_none(self, execute): - """Unusual case: Volume's provider_location is None(MSGID0302-W).""" - self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_no_port_mapped_to_ldev(self, execute): - """Unusual case: No port is mapped to the LDEV.""" - self.driver.terminate_connection(TEST_VOLUME[3], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_initiator_iqn_not_found(self, execute): - """Error case: The connector does not have 'wwpns'(MSGID0650-E).""" - connector = dict(DEFAULT_CONNECTOR) - 
del connector['wwpns'] - - self.assertRaises( - exception.VSPError, self.driver.terminate_connection, - TEST_VOLUME[0], connector) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_copy_volume_to_image(self, execute): - """Normal case: Copy a volume to an image.""" - image_service = 'fake_image_service' - image_meta = 'fake_image_meta' - - with mock.patch.object(driver.VolumeDriver, 'copy_volume_to_image') \ - as mock_copy_volume_to_image: - self.driver.copy_volume_to_image( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - mock_copy_volume_to_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing(self, execute): - """Normal case: Bring an existing volume under Cinder's control.""" - ret = self.driver.manage_existing( - TEST_VOLUME[0], self.test_existing_ref) - self.assertEqual('0', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_normal(self, execute): - """Normal case: Return an existing LDEV's size.""" - self.driver.manage_existing_get_size( - TEST_VOLUME[0], self.test_existing_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_none_ldev_ref(self, execute): - """Error case: Source LDEV's properties do not exist(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_none_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_ldev_ref(self, execute): - """Error case: Source LDEV's ID is an invalid decimal(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_value_error_ref(self, execute): - """Error case: Source LDEV's ID is an invalid hex(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_value_error_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_no_ldev_ref(self, execute): - """Error case: Source LDEV's ID is not specified(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_no_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_sts_ldev(self, execute): - """Error case: Source LDEV's STS is invalid(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_sts_ldev) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_vol_attr(self, execute): - """Error case: Source LDEV's VOL_ATTR is invalid(MSGID0702-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_vol_attr) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_size_ref(self, execute): - 
"""Error case: Source LDEV's VOL_Capacity is invalid(MSGID0703-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_size) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_port_cnt(self, execute): - """Error case: Source LDEV's NUM_PORT is invalid(MSGID0704-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_port_cnt) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart2) - def test_manage_existing_get_size_failed_to_start_horcmgr( - self, _run_horcmstart, execute): - """Error case: _start_horcmgr() returns an error(MSGID0320-W).""" - global run_horcmstart_returns_error2 - run_horcmstart_returns_error2 = True - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_failed_to_start_horcmgr) - run_horcmstart_returns_error2 = False - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage(self, execute): - """Normal case: Take out a volume from Cinder's control.""" - self.driver.unmanage(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.unmanage(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_invalid_sts_ldev(self, execute): - """Unusual case: The volume's STS is BLK.""" - self.driver.unmanage(TEST_VOLUME[13]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[4]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_copy_image_to_volume(self, execute): - """Normal case: Copy an image to a volume.""" - image_service = 'fake_image_service' - image_id = 'fake_image_id' - self.configuration.vsp_horcm_numbers = (400, 401) - - with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ - as mock_copy_image: - self.driver.copy_image_to_volume( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - mock_copy_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_update_migrated_volume_success(self, execute): - """Normal case: 'modify ldev -status discard_zero_page' succeeds.""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[2], - "available") - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_update_migrated_volume_error(self, execute): - """Error case: 'modify ldev' fails(MSGID0315-W).""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[3], - "available") - - def test_get_ldev_volume_is_none(self): - """Error case: The volume is 
None.""" - self.assertIsNone(vsp_utils.get_ldev(None)) - - def test_check_ignore_error_string(self): - """Normal case: ignore_error is a string.""" - ignore_error = 'SSB=0xB980,0xB902' - stderr = ('raidcom: [EX_CMDRJE] An order to the control/command device' - ' was rejected\nIt was rejected due to SKEY=0x05, ASC=0x26, ' - 'ASCQ=0x00, SSB=0xB980,0xB902 on Serial#(400003)\nCAUSE : ' - 'The specified port can not be operated.') - self.assertTrue(vsp_utils.check_ignore_error(ignore_error, stderr)) - - def test_check_opts_parameter_specified(self): - """Normal case: A valid parameter is specified.""" - cfg.CONF.paramAAA = 'aaa' - vsp_utils.check_opts(conf.Configuration(None), - [cfg.StrOpt('paramAAA')]) - - def test_check_opt_value_parameter_not_set(self): - """Error case: A parameter is not set(MSGID0601-E).""" - self.assertRaises(cfg.NoSuchOptError, - vsp_utils.check_opt_value, - conf.Configuration(None), - ['paramCCC']) - - def test_build_initiator_target_map_no_lookup_service(self): - """Normal case: None is specified for lookup_service.""" - connector = {'wwpns': ['0000000000000000', '1111111111111111']} - target_wwns = ['2222222222222222', '3333333333333333'] - init_target_map = vsp_utils.build_initiator_target_map(connector, - target_wwns, - None) - self.assertEqual( - {'0000000000000000': ['2222222222222222', '3333333333333333'], - '1111111111111111': ['2222222222222222', '3333333333333333']}, - init_target_map) - - def test_update_conn_info_not_update_conn_info(self): - """Normal case: Connection info is not updated.""" - vsp_utils.update_conn_info(dict({'data': dict({'target_wwn': []})}), - dict({'wwpns': []}), - None) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py deleted file mode 100644 index ff1ccaa2dd9..00000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py +++ /dev/null @@ -1,1900 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Unit tests for Hitachi VSP Driver.""" - -import copy -import os - -import mock -from os_brick.initiator import connector as brick_connector -from oslo_concurrency import processutils -from oslo_config import cfg -from six.moves import range - -from cinder import context as cinder_context -from cinder import db -from cinder.db.sqlalchemy import api as sqlalchemy_api -from cinder import exception -from cinder.objects import snapshot as obj_snap -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_horcm -from cinder.volume.drivers.hitachi import vsp_iscsi -from cinder.volume.drivers.hitachi import vsp_utils -from cinder.volume import utils as volume_utils - -# Dummy return values -SUCCEED = 0 -STDOUT = "" -STDERR = "" -CMD_SUCCEED = (SUCCEED, STDOUT, STDERR) - -# Configuration parameter values -CONFIG_MAP = { - 'serial': '492015', - 'my_ip': '127.0.0.1', -} - -# CCI instance numbers -INST_NUMS = (200, 201) - -# Shadow Image copy group names -CG_MAP = {'cg%s' % x: vsp_horcm._COPY_GROUP % ( - CONFIG_MAP['my_ip'], CONFIG_MAP['serial'], INST_NUMS[1], x) - for x in range(3) -} - -# Map containing all maps for dummy response creation -DUMMY_RESPONSE_MAP = CONFIG_MAP.copy() -DUMMY_RESPONSE_MAP.update(CG_MAP) - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sS - - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT2 = "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT3 = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg0)s %(cg0)sS 0 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91P -GET_DEVICE_GRP_MU1P_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sP VSP-LDEV-0-2 0 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91S -GET_DEVICE_GRP_MU1S_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sS VSP-LDEV-0-2 2 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_iscsi -port CL1-A HBSD-127.0.0.1 -GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT = ( - "PORT GID GROUP_NAME IQN Serial# NICK_NAME\n" - "CL1-A 0 HBSD-127.0.0.1 iqn-initiator %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get dp_pool -GET_DP_POOL_RESULT = ( - "PID POLS U(%) AV_CAP(MB) TP_CAP(MB) W(%) H(%) Num LDEV# LCNT " - "TL_CAP(MB) BM TR_CAP(MB) RCNT\n" - "030 POLN 0 6006 6006 75 80 1 14860 32 167477 NB 0 0\n" -) - -# cmd: raidcom get dp_pool -GET_DP_POOL_ERROR_RESULT = ( - "PID POLS U(%) POOL_NAME Seq# Num LDEV# H(%) VCAP(%) TYPE PM PT\n" -) - -# cmd: raidcom get pool -key opt -GET_POOL_KEYOPT_RESULT = ( - "PID POLS U(%%) POOL_NAME Seq# Num LDEV# H(%%) VCAP(%%) TYPE PM PT\n" - "030 POLM 30 VSPPOOL %(serial)s 1 10000 80 - OPEN N HDP\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_iscsi -port CL1-B-0 -GET_HBA_ISCSI_CL1B0_RESULT = ( - "PORT GID GROUP_NAME IQN Serial# NICK_NAME\n" - "CL1-B 0 HBSD-127.0.0.1 iqn-initiator %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-A -GET_HOST_GRP_CL1A_RESULT = ( - "PORT GID GROUP_NAME IQN AMD D Serial# HMD HMO_BITs\n" - "CL1-A 0 
HBSD-127.0.0.1 iqn-initiator.hbsd-target BOTH S " - "%(serial)s LINUX/IRIX 83 91\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-B -GET_HOST_GRP_CL1B_RESULT = ( - "PORT GID GROUP_NAME IQN AMD D Serial# HMD HMO_BITs\n" - "CL1-B 0 HBSD-127.0.0.1 iqn-initiator.hbsd-target BOTH S " - "%(serial)s LINUX/IRIX 83 91\n" -) % DUMMY_RESPONSE_MAP - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-127.0.0.1 -ADD_HOSTGRP_RESULT = "raidcom: Host group ID 0(0x0) will be used for adding.\n" - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-pair00 -ADD_HOSTGRP_PAIR_RESULT = ( - "raidcom: Host group ID 2(0x2) will be used for adding.\n" -) - -# raidcom add lun -port CL1-A-0 -ldev_id x -ADD_LUN_LUN0_RESULT = "raidcom: LUN 0(0x0) will be used for adding.\n" - -# cmd: raidcom get ldev -ldev_list undefined -cnt 1 -GET_LDEV_LDEV_LIST_UNDEFINED = ( - "LDEV : 1 VIR_LDEV : 65534\n" - "VOL_TYPE : NOT DEFINED\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -cnt 2 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2 = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id 0 -cnt 10 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 2 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 3 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 4 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 5 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 6 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 7 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 8 - - NOT DEFINED - - - -\n" - " %(serial)s 9 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id x -check_status NOT DEFINED -GET_LDEV_CHECKSTATUS_ERR = ( - "raidcom: testing condition has failed with exit(1).\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -GET_LDEV_LDEV0_RESULT = """ -LDEV : 0 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 1 -GET_LDEV_LDEV1_RESULT = """ -LDEV : 1 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 268435456 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 3 -GET_LDEV_LDEV3_RESULT = """ -LDEV : 3 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : -""" - -# cmd: raidcom get ldev -ldev_id 4 -GET_LDEV_LDEV4_RESULT = """ -LDEV : 4 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 5 -GET_LDEV_LDEV5_RESULT = """ -LDEV : 5 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : VVOL -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 6 -GET_LDEV_LDEV6_RESULT = """ -LDEV : 6 -VOL_TYPE : OPEN-V-CVS -PORTs : CL1-A-0 0 HBSD-127.0.0.1 -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 7 -GET_LDEV_LDEV7_RESULT = """ -LDEV : 7 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 10 -GET_LDEV_LDEV10_RESULT = """ -LDEV : 10 -VOL_TYPE 
: OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 11 -GET_LDEV_LDEV11_RESULT = """ -LDEV : 11 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 12 -GET_LDEV_LDEV12_RESULT = """ -LDEV : 12 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 13 -GET_LDEV_LDEV13_RESULT = """ -LDEV : 13 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : BLK -""" - -# cmd: raidcom get ldev -ldev_id 14 -GET_LDEV_LDEV14_RESULT = """ -LDEV : 14 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : HDT -VOL_Capacity(BLK) : 9999999 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get lun -port CL1-A-0 -GET_LUN_CL1A0_RESULT = ( - "PORT GID HMD LUN NUM LDEV CM Serial# HMO_BITs\n" - "CL1-A 0 LINUX/IRIX 4 1 4 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 254 1 5 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 255 1 6 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -GET_PORT_RESULT = ( - "PORT TYPE ATTR SPD LPID FAB CONN SSW SL Serial# WWN PHY_PORT\n" - "CL1-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL1-B ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL3-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL3-B ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL4-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL1-A -key opt -GET_PORT_CL1A_KEY_OPT_RESULT = ( - "PORT : CL1-A\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL1-B -key opt -GET_PORT_CL1B_KEY_OPT_RESULT = ( - "PORT : CL1-B\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL3-A -key opt -GET_PORT_CL3A_KEY_OPT_RESULT = ( - "PORT : CL3-A\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL3-B -key opt -GET_PORT_CL3B_KEY_OPT_RESULT = ( - "PORT : CL3-B\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 4 
-GET_SNAPSHOT_LDEV4_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 7 -GET_SNAPSHOT_LDEV7_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 8 -GET_SNAPSHOT_LDEV8_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 8 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 11 -GET_SNAPSHOT_LDEV11_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 11 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 1 0 -IM201 -PAIRDISPLAY_LDEV0_1_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-0-1 L CL1-A-0 0 0 0 %(serial)s 0 " - "P-VOL PSUS %(serial)s 1 W\n" - "%(cg0)s VSP-LDEV-0-1 R CL1-A-0 0 1 0 %(serial)s 1 " - "S-VOL SSUS - 0 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 10 0 -IM201 -PAIRDISPLAY_LDEV7_10_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-10 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 10 W\n" - "%(cg0)s VSP-LDEV-7-10 R CL1-A-1 0 1 0 %(serial)s 10 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 12 0 -IM201 -PAIRDISPLAY_LDEV7_12_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-12 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 12 W\n" - "%(cg0)s VSP-LDEV-7-12 R CL1-A-1 0 1 0 %(serial)s 12 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidqry -h -RAIDQRY_RESULT = ( - "Model : RAID-Manager/Linux/x64\n" - "Ver&Rev: 01-39-03/03\n" - "Usage : raidqry [options] for HORC[200]\n" - " -h Help/Usage\n" - " -I[#] Set to HORCMINST#\n" - " -IH[#] or -ITC[#] Set to HORC mode [and HORCMINST#]\n" - " -IM[#] or -ISI[#] Set to MRCF mode [and HORCMINST#]\n" - " -z Set to the interactive mode\n" - " -zx Set to the interactive mode and HORCM monitoring\n" - " -q Quit(Return to main())\n" - " -g Specify for getting all group name on local\n" - " -l Specify the local query\n" - " -lm Specify the local query with full micro version\n" - " -r Specify the remote query\n" - " -f Specify display for floatable host\n" -) - -EXECUTE_TABLE = { - ('add', 'hba_iscsi', '-port', 'CL3-A-0', '-hba_iscsi_name', - 'iqn-initiator'): (vsp_horcm.EX_INVARG, STDOUT, STDERR), - ('add', 'host_grp', '-port', 'CL1-A', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL1-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-A', '-host_grp_name', - 'HBSD-127.0.0.1', '-iscsi_name', 'iqn-initiator.hbsd-target'): ( - SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-127.0.0.1', '-iscsi_name', 'iqn-initiator.hbsd-target'): ( - SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 
'CL3-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 1): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 5): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 6): ( - vsp_horcm.EX_CMDRJE, STDOUT, vsp_horcm._LU_PATH_DEFINED), - ('add', 'lun', '-port', 'CL1-B-0', '-ldev_id', 0, '-lun_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('extend', 'ldev', '-ldev_id', 3, '-capacity', '128G'): ( - vsp_horcm.EX_CMDIOE, STDOUT, - "raidcom: [EX_CMDIOE] Control command I/O error"), - ('get', 'hba_iscsi', '-port', 'CL1-A', 'HBSD-127.0.0.1'): ( - SUCCEED, GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT, STDERR), - ('get', 'hba_iscsi', '-port', 'CL1-A', 'HBSD-127.0.0.2'): ( - SUCCEED, GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT, STDERR), - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'P'): ( - SUCCEED, GET_DEVICE_GRP_MU1P_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'S'): ( - SUCCEED, GET_DEVICE_GRP_MU1S_RESULT, STDERR), - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_RESULT, STDERR), - ('get', 'pool', '-key', 'opt'): (SUCCEED, GET_POOL_KEYOPT_RESULT, STDERR), - ('get', 'hba_iscsi', '-port', 'CL1-B-0'): ( - SUCCEED, GET_HBA_ISCSI_CL1B0_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-A'): ( - SUCCEED, GET_HOST_GRP_CL1A_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-B'): ( - SUCCEED, GET_HOST_GRP_CL1B_RESULT, STDERR), - ('get', 'ldev', '-ldev_list', 'undefined', '-cnt', '1'): ( - SUCCEED, GET_LDEV_LDEV_LIST_UNDEFINED, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 2, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 10, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('get', 'ldev', '-ldev_id', 0): (SUCCEED, GET_LDEV_LDEV0_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 1): (SUCCEED, GET_LDEV_LDEV1_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 3): (SUCCEED, GET_LDEV_LDEV3_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 4): (SUCCEED, GET_LDEV_LDEV4_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 5): (SUCCEED, GET_LDEV_LDEV5_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 6): (SUCCEED, GET_LDEV_LDEV6_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 7): (SUCCEED, GET_LDEV_LDEV7_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 10): (SUCCEED, GET_LDEV_LDEV10_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 11): (SUCCEED, GET_LDEV_LDEV11_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 12): (SUCCEED, GET_LDEV_LDEV12_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 13): (SUCCEED, GET_LDEV_LDEV13_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 14): (SUCCEED, GET_LDEV_LDEV14_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 15): (vsp_horcm.EX_COMERR, "", STDERR), - ('get', 'lun', '-port', 'CL1-A-0'): ( - SUCCEED, GET_LUN_CL1A0_RESULT, STDERR), - ('get', 'port'): (SUCCEED, GET_PORT_RESULT, STDERR), - ('get', 'port', '-port', 'CL1-A', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL1A_KEY_OPT_RESULT, STDERR), - ('get', 'port', '-port', 'CL1-B', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL1B_KEY_OPT_RESULT, STDERR), - ('get', 'port', '-port', 'CL3-A', '-key', 'opt'): ( - 
SUCCEED, GET_PORT_CL3A_KEY_OPT_RESULT, STDERR), - ('get', 'port', '-port', 'CL3-B', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL3B_KEY_OPT_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 4): ( - SUCCEED, GET_SNAPSHOT_LDEV4_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 7): ( - SUCCEED, GET_SNAPSHOT_LDEV7_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 8): ( - SUCCEED, GET_SNAPSHOT_LDEV8_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 11): ( - SUCCEED, GET_SNAPSHOT_LDEV11_RESULT, STDERR), - ('modify', 'ldev', '-ldev_id', 3, '-status', 'discard_zero_page'): ( - vsp_horcm.EX_CMDIOE, STDOUT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 10, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_10_RESULT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 12, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_12_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 8, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 10, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 12, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('raidqry', '-h'): (SUCCEED, RAIDQRY_RESULT, STDERR), - ('tee', '/etc/horcm501.conf'): (1, STDOUT, STDERR), - ('-login', 'user', 'pasword'): (SUCCEED, STDOUT, STDERR), - ('-login', 'userX', 'paswordX'): (vsp_horcm.EX_ENAUTH, STDOUT, STDERR), - ('-login', 'userY', 'paswordY'): (vsp_horcm.EX_COMERR, STDOUT, STDERR), -} - -EXECUTE_TABLE2 = EXECUTE_TABLE.copy() -EXECUTE_TABLE2.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT2, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUS, STDOUT, STDERR), -}) - -EXECUTE_TABLE3 = EXECUTE_TABLE2.copy() - -EXECUTE_TABLE4 = EXECUTE_TABLE.copy() -EXECUTE_TABLE4.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), -}) - -EXECUTE_TABLE5 = EXECUTE_TABLE.copy() -EXECUTE_TABLE5.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('get', 'ldev', '-ldev_id', 1, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 1, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV0_1_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), -}) - -ERROR_EXECUTE_TABLE = { - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_ERROR_RESULT, STDERR), -} - -DEFAULT_CONNECTOR = { - 'host': 'host', - 'ip': CONFIG_MAP['my_ip'], - 'initiator': 'iqn-initiator', - 'multipath': False, -} - -CTXT = cinder_context.get_admin_context() - -TEST_VOLUME = [] -for i in range(14): - volume = {} - volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) - volume['name'] = 'test-volume{0:d}'.format(i) - volume['provider_location'] = None if i == 2 else '{0:d}'.format(i) - volume['size'] = 256 if i == 1 else 128 - if i == 2: - volume['status'] = 'creating' - elif i == 5: - volume['status'] = 'in-use' - else: - volume['status'] = 'available' - volume = fake_volume.fake_volume_obj(CTXT, **volume) - TEST_VOLUME.append(volume) - - -def 
_volume_get(context, volume_id): - """Return predefined volume info.""" - return TEST_VOLUME[int(volume_id.replace("-", ""))] - -TEST_SNAPSHOT = [] -for i in range(8): - snapshot = {} - snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) - snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) - snapshot['provider_location'] = None if i == 2 else '{0:d}'.format( - i if i < 5 else i + 5) - snapshot['status'] = 'creating' if i == 2 else 'available' - snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format( - i if i < 5 else 7) - snapshot['volume'] = _volume_get(None, snapshot['volume_id']) - snapshot['volume_name'] = 'test-volume{0:d}'.format(i if i < 5 else 7) - snapshot['volume_size'] = 256 if i == 1 else 128 - snapshot = obj_snap.Snapshot._from_db_object( - CTXT, obj_snap.Snapshot(), - fake_snapshot.fake_db_snapshot(**snapshot)) - TEST_SNAPSHOT.append(snapshot) - -# Flags that determine _fake_run_horcmstart() return values -run_horcmstart_returns_error = False -run_horcmstart_returns_error2 = False -run_horcmstart3_cnt = 0 - - -def _access(*args, **kargs): - """Assume access to the path is allowed.""" - return True - - -def _execute(*args, **kargs): - """Return predefined results for command execution.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - return result - - -def _execute2(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE2.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE2.get(cmd, CMD_SUCCEED) - return result - - -def _execute3(*args, **kargs): - """Change pairevtwait's dummy return value after it is called.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE3.get(cmd, CMD_SUCCEED) - if cmd == ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): - EXECUTE_TABLE3.update({ - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), - }) - return result - - -def _execute4(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE4.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE4.get(cmd, CMD_SUCCEED) - return result - - -def _execute5(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE5.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) - return result - - -def _cinder_execute(*args, **kargs): - """Return predefined results or raise an exception.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - ret, stdout, stderr = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - if ret == SUCCEED: - return stdout, stderr - else: - pee = processutils.ProcessExecutionError(exit_code=ret, - stdout=stdout, - stderr=stderr) - raise pee - - -def _error_execute(*args, **kargs): - """Return predefined error results.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = _execute(*args, **kargs) - ret = ERROR_EXECUTE_TABLE.get(cmd) - return ret if ret else result - - -def _brick_get_connector_properties(multipath=False, enforce_multipath=False): - """Return a predefined connector object.""" - return DEFAULT_CONNECTOR - - -def _brick_get_connector_properties_error(multipath=False, - enforce_multipath=False): - """Return an incomplete connector object.""" - connector = dict(DEFAULT_CONNECTOR) - del connector['initiator'] - return connector - - -def _connect_volume(*args, **kwargs): - """Return predefined volume info.""" - return 
{'path': u'/dev/disk/by-path/xxxx', 'type': 'block'} - - -def _disconnect_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _copy_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _volume_admin_metadata_get(context, volume_id): - """Return dummy admin metadata.""" - return {'fake_key': 'fake_value'} - - -def _snapshot_metadata_update(context, snapshot_id, metadata, delete): - """Return without doing anything.""" - pass - - -def _fake_is_smpl(*args): - """Assume the Shadow Image pair status is SMPL.""" - return True - - -def _fake_run_horcmgr(*args): - """Assume CCI is running.""" - return vsp_horcm._HORCM_RUNNING - - -def _fake_run_horcmstart(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error else 3 - - -def _fake_run_horcmstart2(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error2 else 3 - - -def _fake_run_horcmstart3(*args): - """Update a counter and return a value based on it.""" - global run_horcmstart3_cnt - run_horcmstart3_cnt = run_horcmstart3_cnt + 1 - return 0 if run_horcmstart3_cnt <= 1 else 3 - - -def _fake_check_ldev_status(*args, **kwargs): - """Assume LDEV status has changed as desired.""" - return None - - -def _fake_exists(path): - """Assume the path does not exist.""" - return False - - -class VSPHORCMISCSIDriverTest(test.TestCase): - """Unit test class for VSP HORCM interface iSCSI module.""" - - test_existing_ref = {'source-id': '0'} - test_existing_none_ldev_ref = {'source-id': '2'} - test_existing_invalid_ldev_ref = {'source-id': 'AAA'} - test_existing_value_error_ref = {'source-id': 'XX:XX:XX'} - test_existing_no_ldev_ref = {} - test_existing_invalid_sts_ldev = {'source-id': '13'} - test_existing_invalid_vol_attr = {'source-id': '12'} - test_existing_invalid_size = {'source-id': '14'} - test_existing_invalid_port_cnt = {'source-id': '6'} - test_existing_failed_to_start_horcmgr = {'source-id': '15'} - - def setUp(self): - """Set up the test environment.""" - super(VSPHORCMISCSIDriverTest, self).setUp() - - self.configuration = mock.Mock(conf.Configuration) - self.ctxt = cinder_context.get_admin_context() - self._setup_config() - self._setup_driver() - - def _setup_config(self): - """Set configuration parameter values.""" - self.configuration.config_group = "HORCM" - - self.configuration.volume_backend_name = "HORCMISCSI" - self.configuration.volume_driver = ( - "cinder.volume.drivers.hitachi.vsp_iscsi.VSPISCSIDriver") - self.configuration.reserved_percentage = "0" - self.configuration.use_multipath_for_image_xfer = False - self.configuration.enforce_multipath_for_image_xfer = False - self.configuration.num_volume_device_scan_tries = 3 - self.configuration.volume_dd_blocksize = "1000" - - self.configuration.vsp_storage_id = CONFIG_MAP['serial'] - self.configuration.vsp_pool = "30" - self.configuration.vsp_thin_pool = None - self.configuration.vsp_ldev_range = "0-1" - self.configuration.vsp_default_copy_method = 'FULL' - self.configuration.vsp_copy_speed = 3 - self.configuration.vsp_copy_check_interval = 1 - self.configuration.vsp_async_copy_check_interval = 1 - self.configuration.vsp_target_ports = "CL1-A" - self.configuration.vsp_compute_target_ports = "CL1-A" - self.configuration.vsp_horcm_pair_target_ports = "CL1-A" - self.configuration.vsp_group_request = True - - self.configuration.vsp_use_chap_auth = True - self.configuration.vsp_auth_user = "auth_user" - self.configuration.vsp_auth_password = 
"auth_password" - - self.configuration.vsp_horcm_numbers = INST_NUMS - self.configuration.vsp_horcm_user = "user" - self.configuration.vsp_horcm_password = "pasword" - self.configuration.vsp_horcm_add_conf = False - - self.configuration.safe_get = self._fake_safe_get - - CONF = cfg.CONF - CONF.my_ip = CONFIG_MAP['my_ip'] - - def _fake_safe_get(self, value): - """Retrieve a configuration value avoiding throwing an exception.""" - try: - val = getattr(self.configuration, value) - except AttributeError: - val = None - return val - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def _setup_driver(self, execute, brick_get_connector_properties): - """Set up the driver environment.""" - self.driver = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self.driver.do_setup(None) - self.driver.check_for_setup_error() - self.driver.create_export(None, None, None) - self.driver.ensure_export(None, None) - self.driver.remove_export(None, None) - - # API test cases - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_do_setup(self, execute, brick_get_connector_properties): - """Normal case: The host group exists beforehand.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '11.22.33.44:3260'}, - drv.common.storage_info['portals']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_raidqry_h_invalid( - self, execute, brick_get_connector_properties): - """Error case: 'raidqry -h' returns nothing. 
This error is ignored.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - raidqry_h_original = EXECUTE_TABLE[('raidqry', '-h')] - EXECUTE_TABLE[('raidqry', '-h')] = (SUCCEED, "", STDERR) - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '11.22.33.44:3260'}, - drv.common.storage_info['portals']) - EXECUTE_TABLE[('raidqry', '-h')] = raidqry_h_original - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_specify_pool_name( - self, execute, brick_get_connector_properties): - """Normal case: Specify pool name rather than pool number.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_pool = "VSPPOOL" - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp( - self, execute, brick_get_connector_properties): - """Normal case: The host group does not exist beforehand.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-B" - - drv.do_setup(None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp_error( - self, execute, brick_get_connector_properties): - """Error case: 'add hba_iscsi' fails(MSGID0309-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-A" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_thin_pool_not_specified(self, execute): - """Error case: Parameter error(vsp_thin_pool).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_default_copy_method = 'THIN' - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_ldev_range_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: LDEV range is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_ldev_range = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_storage_id_not_specified(self, execute): - """Error case: Parameter error(vsp_storage_id).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_storage_id = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_numbers_invalid(self, execute): - """Error case: Parameter error(vsp_horcm_numbers).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - 
configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (200, 200) - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_user_not_specified(self, execute): - """Error case: Parameter error(vsp_horcm_user).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only target_ports is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_compute_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only compute_target_ports is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_pair_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only pair_target_ports is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_pair_target_ports = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_specified(self, execute): - """Error case: Parameter error(compute_target_ports).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_specified(self, execute): - """Error case: Parameter error(pair_target_ports).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(processutils, 'execute', side_effect=_execute) - @mock.patch.object(os.path, 'exists', side_effect=_fake_exists) - @mock.patch.object(os, 'access', side_effect=_access) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_create_conf( - 
self, vsp_utils_execute, access, exists, processutils_execute, - brick_get_connector_properties): - """Error case: Writing into horcmxxx.conf fails.(MSGID0632-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (500, 501) - self.configuration.vsp_horcm_add_conf = True - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_login( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_ENAUTH(MSGID0600-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userX" - self.configuration.vsp_horcm_password = "paswordX" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 2) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_command( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_COMERR(MSGID0600-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userY" - self.configuration.vsp_horcm_password = "paswordY" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 2) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmgr', side_effect=_fake_run_horcmgr) - def test_do_setup_failed_to_horcmshutdown( - self, _run_horcmgr, execute, brick_get_connector_properties): - """Error case: CCI's status is always RUNNING(MSGID0608-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart) - def test_do_setup_failed_to_horcmstart( - self, _run_horcmstart, execute, brick_get_connector_properties): - """Error case: _run_horcmstart() returns an error(MSGID0609-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - global run_horcmstart_returns_error - run_horcmstart_returns_error = True - self.assertRaises(exception.VSPError, drv.do_setup, None) - run_horcmstart_returns_error = False - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties_error) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_initiator_not_found( - self, execute, brick_get_connector_properties): - """Error case: The connector does not have 'initiator'(MSGID0650-E).""" - drv = 
vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_port_not_found(self, execute): - """Error case: The target port does not exist(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_found(self, execute): - """Error case: Compute target port does not exist(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_found(self, execute): - """Error case: Pair target port does not exist(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = ["CL5-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume(self, execute): - """Normal case: Extend volume succeeds.""" - self.driver.extend_volume(TEST_VOLUME[0], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0613-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[2], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_ldev_is_vvol(self, execute): - """Error case: The volume is a V-VOL(MSGID0618-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[5], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[4], 256) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - @mock.patch.object(vsp_horcm, '_EXTEND_WAITTIME', 1) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - def test_extend_volume_raidcom_error(self, execute,): - """Error case: 'extend ldev' returns an error(MSGID0600-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[3], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats(self, execute): - """Normal case: Refreshing data required.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - self.assertFalse(stats['multiattach']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats_no_refresh(self, execute): - """Normal case: Refreshing data not required.""" - stats = self.driver.get_volume_stats() - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', 
side_effect=_error_execute) - def test_get_volume_stats_failed_to_get_dp_pool(self, execute): - """Error case: The pool does not exist(MSGID0640-E, MSGID0620-E).""" - self.driver.common.storage_info['pool_id'] = 29 - - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume(self, execute): - """Normal case: Available LDEV range is 0-1.""" - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_free_ldev_not_found_on_storage(self, execute): - """Error case: No unused LDEV exists(MSGID0648-E).""" - self.driver.common.storage_info['ldev_range'] = [0, 0] - - self.assertRaises( - exception.VSPError, self.driver.create_volume, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_no_setting_ldev_range(self, execute): - """Normal case: Available LDEV range is unlimited.""" - self.driver.common.storage_info['ldev_range'] = None - - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, - '_check_ldev_status', side_effect=_fake_check_ldev_status) - def test_delete_volume(self, _check_ldev_status, execute): - """Normal case: Delete a volume.""" - self.driver.delete_volume(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.delete_volume(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_ldev_not_found_on_storage(self, execute): - """Unusual case: The volume's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_volume(TEST_VOLUME[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_volume_is_busy(self, execute): - """Error case: The volume is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[4]) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_full( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=FULL.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_thin( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=THIN.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - self.configuration.vsp_thin_pool = 31 - self.configuration.vsp_default_copy_method = "THIN" - - ret = 
self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_provider_location_is_none( - self, volume_get, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_ldev_not_found_on_storage( - self, volume_get, execute): - """Error case: The src-vol's LDEV does not exist.(MSGID0612-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_full(self, execute): - """Normal case: Delete a snapshot.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[5]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, '_is_smpl', side_effect=_fake_is_smpl) - def test_delete_snapshot_full_smpl(self, _is_smpl, execute): - """Normal case: The LDEV in an SI volume pair becomes SMPL.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[7]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_vvol_timeout(self, execute): - """Error case: V-VOL is not deleted from a snapshot(MSGID0611-E).""" - self.assertRaises( - exception.VSPError, self.driver.delete_snapshot, TEST_SNAPSHOT[6]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_provider_location_is_none(self, execute): - """Error case: Snapshot's provider_location is None(MSGID0304-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_ldev_not_found_on_storage(self, execute): - """Unusual case: The snapshot's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_snapshot_is_busy(self, execute): - """Error case: The snapshot is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.SnapshotIsBusy, self.driver.delete_snapshot, - TEST_SNAPSHOT[4]) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_same_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: The source volume is a V-VOL and copied by dd.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 
'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_extend_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: Copy with dd and extend the size afterward.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[1], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_provider_location_is_none(self, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_invalid_size(self, execute): - """Error case: src-size > clone-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_extend_size_thin(self, execute): - """Error case: clone > src and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - test_vol_obj, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_same_size(self, execute): - """Normal case: Copy with Shadow Image.""" - vol = self.driver.create_volume_from_snapshot( - TEST_VOLUME[0], TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute2) - def test_create_volume_from_snapshot_full_extend_normal(self, execute): - """Normal case: Copy with Shadow Image and extend the size.""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - vol = self.driver.create_volume_from_snapshot( - test_vol_obj, TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute3) - def test_create_volume_from_snapshot_full_extend_PSUE(self, execute): - """Error case: SI copy -> pair status: PSUS -> PSUE(MSGID0722-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute4) - def test_create_volume_from_snapshot_full_PSUE(self, execute): - """Error case: SI copy -> pair status becomes PSUE(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object( - vsp_horcm, '_run_horcmstart', 
side_effect=_fake_run_horcmstart3) - @mock.patch.object(vsp_horcm, '_LDEV_STATUS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute5) - def test_create_volume_from_snapshot_full_SMPL( - self, execute, _run_horcmstart): - """Error case: SI copy -> pair status becomes SMPL(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_invalid_size(self, execute): - """Error case: volume-size < snapshot-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_thin_extend(self, execute): - """Error case: volume > snapshot and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_provider_location_is_none( - self, execute): - """Error case: Snapshot's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection(self, volume_admin_metadata_get, execute): - """Normal case: Initialize connection.""" - ret = self.driver.initialize_connection( - TEST_VOLUME[0], DEFAULT_CONNECTOR) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_multipath( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: Initialize connection in multipath environment.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL1-A", "CL1-B"] - drv.do_setup(None) - multipath_connector = copy.copy(DEFAULT_CONNECTOR) - multipath_connector['multipath'] = True - ret = drv.initialize_connection(TEST_VOLUME[0], multipath_connector) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual(['11.22.33.44:3260', '11.22.33.44:3260'], - ret['data']['target_portals']) - self.assertEqual(['iqn-initiator.hbsd-target', - 
'iqn-initiator.hbsd-target'], - ret['data']['target_iqns']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual([0, 0], ret['data']['target_luns']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_initialize_connection_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0619-E).""" - self.assertRaises( - exception.VSPError, self.driver.initialize_connection, - TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_already_attached( - self, volume_admin_metadata_get, execute): - """Unusual case: 'add lun' returns 'already defined' error.""" - ret = self.driver.initialize_connection( - TEST_VOLUME[6], DEFAULT_CONNECTOR) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(255, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_target_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_compute_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: compute_target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('iscsi', ret['driver_volume_type']) 
- self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection(self, execute): - """Normal case: Terminate connection.""" - self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_provider_location_is_none(self, execute): - """Unusual case: Volume's provider_location is None(MSGID0302-W).""" - self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_no_port_mapped_to_ldev(self, execute): - """Unusual case: No port is mapped to the LDEV.""" - self.driver.terminate_connection(TEST_VOLUME[3], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_initiator_iqn_not_found(self, execute): - """Error case: The connector does not have 'initiator'(MSGID0650-E).""" - connector = dict(DEFAULT_CONNECTOR) - del connector['initiator'] - - self.assertRaises( - exception.VSPError, self.driver.terminate_connection, - TEST_VOLUME[0], connector) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_copy_volume_to_image(self, execute): - """Normal case: Copy a volume to an image.""" - image_service = 'fake_image_service' - image_meta = 'fake_image_meta' - - with mock.patch.object(driver.VolumeDriver, 'copy_volume_to_image') \ - as mock_copy_volume_to_image: - self.driver.copy_volume_to_image( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - mock_copy_volume_to_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing(self, execute): - """Normal case: Bring an existing volume under Cinder's control.""" - ret = self.driver.manage_existing( - TEST_VOLUME[0], self.test_existing_ref) - self.assertEqual('0', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_normal(self, execute): - """Normal case: Return an existing LDEV's size.""" - self.driver.manage_existing_get_size( - TEST_VOLUME[0], self.test_existing_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_none_ldev_ref(self, execute): - """Error case: Source LDEV's properties do not exist(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_none_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_ldev_ref(self, execute): - """Error case: Source LDEV's ID is an invalid decimal(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_value_error_ref(self, execute): - """Error case: Source LDEV's ID is an 
invalid hex(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_value_error_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_no_ldev_ref(self, execute): - """Error case: Source LDEV's ID is not specified(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_no_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_sts_ldev(self, execute): - """Error case: Source LDEV's STS is invalid(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_sts_ldev) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_vol_attr(self, execute): - """Error case: Source LDEV's VOL_ATTR is invalid(MSGID0702-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_vol_attr) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_size_ref(self, execute): - """Error case: Source LDEV's VOL_Capacity is invalid(MSGID0703-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_size) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_port_cnt(self, execute): - """Error case: Source LDEV's NUM_PORT is invalid(MSGID0704-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_port_cnt) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart2) - def test_manage_existing_get_size_failed_to_start_horcmgr( - self, _run_horcmstart, execute): - """Error case: _start_horcmgr() returns an error(MSGID0320-W).""" - global run_horcmstart_returns_error2 - run_horcmstart_returns_error2 = True - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_failed_to_start_horcmgr) - run_horcmstart_returns_error2 = False - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage(self, execute): - """Normal case: Take out a volume from Cinder's control.""" - self.driver.unmanage(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.unmanage(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_invalid_sts_ldev(self, execute): - """Unusual case: The volume's STS is BLK.""" - self.driver.unmanage(TEST_VOLUME[13]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[4]) - - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_copy_image_to_volume(self, execute): - """Normal case: Copy an image to a volume.""" - image_service = 'fake_image_service' - image_id = 'fake_image_id' - self.configuration.vsp_horcm_numbers = (400, 401) - - with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ - as mock_copy_image: - self.driver.copy_image_to_volume( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - mock_copy_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_update_migrated_volume_success(self, execute): - """Normal case: 'modify ldev -status discard_zero_page' succeeds.""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[2], - "available") - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_update_migrated_volume_error(self, execute): - """Error case: 'modify ldev' fails(MSGID0315-W).""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[3], - "available") - - def test_get_ldev_volume_is_none(self): - """Error case: The volume is None.""" - self.assertIsNone(vsp_utils.get_ldev(None)) - - def test_check_ignore_error_string(self): - """Normal case: ignore_error is a string.""" - ignore_error = 'SSB=0xB980,0xB902' - stderr = ('raidcom: [EX_CMDRJE] An order to the control/command device' - ' was rejected\nIt was rejected due to SKEY=0x05, ASC=0x26, ' - 'ASCQ=0x00, SSB=0xB980,0xB902 on Serial#(400003)\nCAUSE : ' - 'The specified port can not be operated.') - self.assertTrue(vsp_utils.check_ignore_error(ignore_error, stderr)) - - def test_check_opts_parameter_specified(self): - """Normal case: A valid parameter is specified.""" - cfg.CONF.paramAAA = 'aaa' - vsp_utils.check_opts(conf.Configuration(None), - [cfg.StrOpt('paramAAA')]) - - def test_check_opt_value_parameter_not_set(self): - """Error case: A parameter is not set(MSGID0601-E).""" - self.assertRaises(cfg.NoSuchOptError, - vsp_utils.check_opt_value, - conf.Configuration(None), - ['paramCCC']) - - def test_build_initiator_target_map_no_lookup_service(self): - """Normal case: None is specified for lookup_service.""" - connector = {'wwpns': ['0000000000000000', '1111111111111111']} - target_wwns = ['2222222222222222', '3333333333333333'] - init_target_map = vsp_utils.build_initiator_target_map(connector, - target_wwns, - None) - self.assertEqual( - {'0000000000000000': ['2222222222222222', '3333333333333333'], - '1111111111111111': ['2222222222222222', '3333333333333333']}, - init_target_map) - - def test_update_conn_info_not_update_conn_info(self): - """Normal case: Not update connection info.""" - vsp_utils.update_conn_info(dict({'data': dict({'target_wwn': []})}), - dict({'wwpns': []}), - None) diff --git a/cinder/volume/drivers/hitachi/__init__.py b/cinder/volume/drivers/hitachi/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cinder/volume/drivers/hitachi/hbsd_basiclib.py b/cinder/volume/drivers/hitachi/hbsd_basiclib.py deleted file mode 100644 index 9b22e21739c..00000000000 --- a/cinder/volume/drivers/hitachi/hbsd_basiclib.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import os -import shlex - -from oslo_concurrency import lockutils -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils - -SMPL = 1 -COPY = 2 -PAIR = 3 -PSUS = 4 -PSUE = 5 -UNKN = 0xff - -FULL = 'Full copy' -THIN = 'Thin copy' - -DEFAULT_TRY_RANGE = range(3) -MAX_PROCESS_WAITTIME = 86400 -DEFAULT_PROCESS_WAITTIME = 900 - -GETSTORAGEARRAY_ONCE = 100 - -WARNING_ID = 300 - -DEFAULT_GROUP_RANGE = [0, 65535] - -NAME_PREFIX = 'HBSD-' - -NORMAL_VOLUME_TYPE = 'Normal' - -LOCK_DIR = '/var/lock/hbsd/' - -LOG = logging.getLogger(__name__) - -HBSD_INFO_MSG = { - 1: _('The parameter of the storage backend. ' - '(config_group: %(config_group)s)'), - 3: _('The storage backend can be used. (config_group: %(config_group)s)'), - 4: _('The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)'), - 5: _('The volume %(volume_id)s is unmanaged successfully. ' - '(LDEV: %(ldev)s)'), -} - -HBSD_WARN_MSG = { - 301: _('A LUN (HLUN) was not found. (LDEV: %(ldev)s)'), - 302: _('Failed to specify a logical device for the volume ' - '%(volume_id)s to be unmapped.'), - 303: _('An iSCSI CHAP user could not be deleted. (username: %(user)s)'), - 304: _('Failed to specify a logical device to be deleted. ' - '(method: %(method)s, id: %(id)s)'), - 305: _('The logical device for specified %(type)s %(id)s ' - 'was already deleted.'), - 306: _('A host group could not be deleted. (port: %(port)s, ' - 'gid: %(gid)s, name: %(name)s)'), - 307: _('An iSCSI target could not be deleted. (port: %(port)s, ' - 'tno: %(tno)s, alias: %(alias)s)'), - 308: _('A host group could not be added. (port: %(port)s, ' - 'name: %(name)s)'), - 309: _('An iSCSI target could not be added. ' - '(port: %(port)s, alias: %(alias)s, reason: %(reason)s)'), - 310: _('Failed to unmap a logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)'), - 311: _('A free LUN (HLUN) was not found. Add a different host' - ' group. (LDEV: %(ldev)s)'), - 312: _('Failed to get a storage resource. The system will attempt ' - 'to get the storage resource again. (resource: %(resource)s)'), - 313: _('Failed to delete a logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)'), - 314: _('Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, ' - 'port: %(port)s, id: %(id)s)'), - 315: _('Failed to perform a zero-page reclamation. ' - '(LDEV: %(ldev)s, reason: %(reason)s)'), - 316: _('Failed to assign the iSCSI initiator IQN. (port: %(port)s, ' - 'reason: %(reason)s)'), -} - -HBSD_ERR_MSG = { - 600: _('The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, ' - 'stderr: %(err)s)'), - 601: _('A parameter is invalid. (%(param)s)'), - 602: _('A parameter value is invalid. (%(meta)s)'), - 603: _('Failed to acquire a resource lock. 
(serial: %(serial)s, ' - 'inst: %(inst)s, ret: %(ret)s, stderr: %(err)s)'), - 604: _('Cannot set both hitachi_serial_number and hitachi_unit_name.'), - 605: _('Either hitachi_serial_number or hitachi_unit_name is required.'), - 615: _('A pair could not be created. The maximum number of pairs is ' - 'exceeded. (copy method: %(copy_method)s, P-VOL: %(pvol)s)'), - 616: _('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)'), - 617: _('The specified operation is not supported. The volume size ' - 'must be the same as the source %(type)s. (volume: %(volume_id)s)'), - 618: _('The volume %(volume_id)s could not be extended. ' - 'The volume type must be Normal.'), - 619: _('The volume %(volume_id)s to be mapped was not found.'), - 624: _('The %(type)s %(id)s source to be replicated was not found.'), - 631: _('Failed to create a file. (file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)'), - 632: _('Failed to open a file. (file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)'), - 633: _('%(file)s: Permission denied.'), - 636: _('Failed to add the logical device.'), - 637: _('The method %(method)s timed out. (timeout value: %(timeout)s)'), - 640: _('A pool could not be found. (pool id: %(pool_id)s)'), - 641: _('The host group or iSCSI target could not be added.'), - 642: _('An iSCSI CHAP user could not be added. (username: %(user)s)'), - 643: _('The iSCSI CHAP user %(user)s does not exist.'), - 648: _('There are no resources available for use. ' - '(resource: %(resource)s)'), - 649: _('The host group or iSCSI target was not found.'), - 650: _('The resource %(resource)s was not found.'), - 651: _('The IP Address was not found.'), - 653: _('The creation of a logical device could not be ' - 'completed. (LDEV: %(ldev)s)'), - 654: _('A volume status is invalid. (status: %(status)s)'), - 655: _('A snapshot status is invalid. (status: %(status)s)'), - 659: _('A host group is invalid. (host group: %(gid)s)'), - 660: _('The specified %(desc)s is busy.'), - 700: _('There is no designation of the %(param)s. ' - 'The specified storage is essential to manage the volume.'), - 701: _('There is no designation of the ldev. ' - 'The specified ldev is essential to manage the volume.'), - 702: _('The specified ldev %(ldev)s could not be managed. ' - 'The volume type must be DP-VOL.'), - 703: _('The specified ldev %(ldev)s could not be managed. ' - 'The ldev size must be in multiples of gigabytes.'), - 704: _('The specified ldev %(ldev)s could not be managed. ' - 'The ldev must not be mapped.'), - 705: _('The specified ldev %(ldev)s could not be managed. ' - 'The ldev must not be paired.'), - 706: _('The volume %(volume_id)s could not be unmanaged. 
' - 'The volume type must be %(volume_type)s.'), -} - - -def set_msg(msg_id, **kwargs): - if msg_id < WARNING_ID: - msg_header = 'MSGID%04d-I:' % msg_id - msg_body = HBSD_INFO_MSG.get(msg_id) - else: - msg_header = 'MSGID%04d-W:' % msg_id - msg_body = HBSD_WARN_MSG.get(msg_id) - - return '%(header)s %(body)s' % {'header': msg_header, - 'body': msg_body % kwargs} - - -def output_err(msg_id, **kwargs): - msg = HBSD_ERR_MSG.get(msg_id) % kwargs - - LOG.error("MSGID%(id)04d-E: %(msg)s", {'id': msg_id, 'msg': msg}) - - return msg - - -def get_process_lock(file): - if not os.access(file, os.W_OK): - msg = output_err(633, file=file) - raise exception.HBSDError(message=msg) - return lockutils.InterProcessLock(file) - - -def create_empty_file(filename): - if not os.path.exists(filename): - try: - utils.execute('touch', filename) - except putils.ProcessExecutionError as ex: - msg = output_err( - 631, file=filename, ret=ex.exit_code, err=ex.stderr) - raise exception.HBSDError(message=msg) - - -class FileLock(lockutils.InterProcessLock): - - def __init__(self, name, lock_object): - self.lock_object = lock_object - - super(FileLock, self).__init__(name) - - def __enter__(self): - self.lock_object.acquire() - - try: - ret = super(FileLock, self).__enter__() - except Exception: - with excutils.save_and_reraise_exception(): - self.lock_object.release() - - return ret - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - super(FileLock, self).__exit__(exc_type, exc_val, exc_tb) - finally: - self.lock_object.release() - - -class NopLock(object): - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class HBSDBasicLib(object): - - def __init__(self, conf=None): - self.conf = conf - - def exec_command(self, cmd, args=None, printflag=True): - if printflag: - if args: - LOG.debug('cmd: %(cmd)s, args: %(args)s', - {'cmd': cmd, 'args': args}) - else: - LOG.debug('cmd: %s', cmd) - - cmd = [cmd] - - if args: - if six.PY2 and isinstance(args, six.text_type): - cmd += shlex.split(args.encode()) - else: - cmd += shlex.split(args) - - try: - stdout, stderr = utils.execute(*cmd, run_as_root=True) - ret = 0 - except putils.ProcessExecutionError as e: - ret = e.exit_code - stdout = e.stdout - stderr = e.stderr - - LOG.debug('cmd: %s', cmd) - LOG.debug('from: %s', inspect.stack()[2]) - LOG.debug('ret: %d', ret) - LOG.debug('stdout: %s', stdout.replace(os.linesep, ' ')) - LOG.debug('stderr: %s', stderr.replace(os.linesep, ' ')) - - return ret, stdout, stderr - - def set_pair_flock(self): - return NopLock() - - def set_horcmgr_flock(self): - return NopLock() - - def discard_zero_page(self, ldev): - pass - - def output_param_to_log(self, conf): - pass - - def connect_storage(self): - pass - - def get_max_hostgroups(self): - pass - - def restart_pair_horcm(self): - pass diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py deleted file mode 100644 index 66b4692d3d8..00000000000 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ /dev/null @@ -1,835 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Common class for Hitachi storage drivers. - -""" - -import re -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib -from cinder.volume.drivers.hitachi import hbsd_horcm as horcm -from cinder.volume.drivers.hitachi import hbsd_snm2 as snm2 -from cinder.volume import utils as volume_utils - -""" -Version history: - 1.0.0 - Initial driver - 1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods -""" -VERSION = '1.1.0' - -PARAM_RANGE = { - 'hitachi_copy_check_interval': {'min': 1, 'max': 600}, - 'hitachi_async_copy_check_interval': {'min': 1, 'max': 600}, - 'hitachi_copy_speed': {'min': 1, 'max': 15}, -} - -DEFAULT_LDEV_RANGE = [0, 65535] - -COPY_METHOD = ('FULL', 'THIN') -VALID_DP_VOLUME_STATUS = ['available', 'in-use'] -VALID_V_VOLUME_STATUS = ['available'] -SYSTEM_LOCK_FILE = basic_lib.LOCK_DIR + 'system' -SERVICE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'service_' -STORAGE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'storage_' - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.StrOpt('hitachi_serial_number', - help='Serial number of storage system'), - cfg.StrOpt('hitachi_unit_name', - help='Name of an array unit'), - cfg.IntOpt('hitachi_pool_id', - help='Pool ID of storage system'), - cfg.IntOpt('hitachi_thin_pool_id', - help='Thin pool ID of storage system'), - cfg.StrOpt('hitachi_ldev_range', - help='Range of logical device of storage system'), - cfg.StrOpt('hitachi_default_copy_method', - default='FULL', - help='Default copy method of storage system'), - cfg.IntOpt('hitachi_copy_speed', - default=3, - help='Copy speed of storage system'), - cfg.IntOpt('hitachi_copy_check_interval', - default=3, - help='Interval to check copy'), - cfg.IntOpt('hitachi_async_copy_check_interval', - default=10, - help='Interval to check copy asynchronously'), - cfg.StrOpt('hitachi_target_ports', - help='Control port names for HostGroup or iSCSI Target'), - cfg.StrOpt('hitachi_group_range', - help='Range of group number'), - cfg.BoolOpt('hitachi_group_request', - default=False, - secret=True, - help='Request for creating HostGroup or iSCSI Target'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -class TryLock(object): - - def __init__(self): - self.lock = threading.RLock() - self.desc = None - - def set_desc(self, description): - self.desc = description - - def __enter__(self): - if not self.lock.acquire(False): - msg = basic_lib.output_err(660, desc=self.desc) - raise exception.HBSDError(message=msg) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.lock.release() - - -class HBSDCommon(object): - - def __init__(self, conf, parent, context, db): - self.configuration = conf - self.generated_from = parent - self.context = context - self.db = db - - self.system_lock_file = SYSTEM_LOCK_FILE - self.service_lock_file = '%s%s' % (SERVICE_LOCK_PATH_BASE, - 
conf.config_group) - if conf.hitachi_serial_number: - self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE, - six.text_type( - conf.hitachi_serial_number)) - elif conf.hitachi_unit_name: - self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE, - six.text_type( - conf.hitachi_unit_name)) - - self.storage_obj_lock = threading.Lock() - self.volinfo_lock = threading.Lock() - self.volume_info = {} - self.output_first = True - - def get_volume(self, volume_id): - return self.db.volume_get(self.context, volume_id) - - def get_volume_metadata(self, volume_id): - return self.db.volume_metadata_get(self.context, volume_id) - - def get_snapshot_metadata(self, snapshot_id): - return self.db.snapshot_metadata_get(self.context, snapshot_id) - - def _update_volume_metadata(self, volume_id, volume_metadata): - self.db.volume_metadata_update(self.context, volume_id, - volume_metadata, False) - - def get_ldev(self, obj): - if not obj: - return None - - ldev = obj.get('provider_location') - if not ldev or not ldev.isdigit(): - return None - else: - return int(ldev) - - def get_value(self, obj, name, key): - if not obj: - return None - - if obj.get(name): - if isinstance(obj[name], dict): - return obj[name].get(key) - else: - for i in obj[name]: - if i['key'] == key: - return i['value'] - return None - - def get_is_vvol(self, obj, name): - return self.get_value(obj, name, 'type') == 'V-VOL' - - def get_volume_is_vvol(self, volume): - return self.get_is_vvol(volume, 'volume_metadata') - - def get_snapshot_is_vvol(self, snapshot): - return self.get_is_vvol(snapshot, 'metadata') - - def get_copy_method(self, volume): - method = self.get_value(volume, 'volume_metadata', 'copy_method') - if method: - if method not in COPY_METHOD: - msg = basic_lib.output_err(602, meta='copy_method') - raise exception.HBSDError(message=msg) - elif (method == 'THIN' - and self.configuration.hitachi_thin_pool_id is None): - msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') - raise exception.HBSDError(message=msg) - else: - method = self.configuration.hitachi_default_copy_method - return method - - def _string2int(self, num): - if not num: - return None - if num.isdigit(): - return int(num, 10) - if not re.match(r'\w\w:\w\w:\w\w', num): - return None - - try: - num = int(num.replace(':', ''), 16) - except ValueError: - return None - - return num - - def _range2list(self, conf, param): - str = getattr(conf, param) - lists = str.split('-') - if len(lists) != 2: - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - - first_type = None - for i in range(len(lists)): - if lists[i].isdigit(): - lists[i] = int(lists[i], 10) - if first_type == 'hex': - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - first_type = 'dig' - else: - if (first_type == 'dig' - or not re.match(r'\w\w:\w\w:\w\w', lists[i])): - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - try: - lists[i] = int(lists[i].replace(':', ''), 16) - first_type = 'hex' - except Exception: - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - if lists[0] > lists[1]: - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - return lists - - def output_param_to_log(self, storage_protocol): - essential_inherited_param = ['volume_backend_name', 'volume_driver'] - conf = self.configuration - - LOG.info(basic_lib.set_msg(1, config_group=conf.config_group)) - version = 
self.command.get_comm_version() - if conf.hitachi_unit_name: - prefix = 'HSNM2 version' - else: - prefix = 'RAID Manager version' - LOG.info('\t%(prefix)-35s : %(version)s', - {'prefix': prefix, 'version': version}) - for param in essential_inherited_param: - value = conf.safe_get(param) - LOG.info('\t%(param)-35s : %(value)s', - {'param': param, 'value': value}) - for opt in volume_opts: - if not opt.secret: - value = getattr(conf, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - - if storage_protocol == 'iSCSI': - value = getattr(conf, 'hitachi_group_request') - LOG.info('\t%(request)-35s : %(value)s', - {'request': 'hitachi_group_request', 'value': value}) - - def check_param(self): - conf = self.configuration - - if conf.hitachi_unit_name and conf.hitachi_serial_number: - msg = basic_lib.output_err(604) - raise exception.HBSDError(message=msg) - - if not conf.hitachi_unit_name and not conf.hitachi_serial_number: - msg = basic_lib.output_err(605) - raise exception.HBSDError(message=msg) - - if conf.hitachi_pool_id is None: - msg = basic_lib.output_err(601, param='hitachi_pool_id') - raise exception.HBSDError(message=msg) - - for param in PARAM_RANGE.keys(): - _value = getattr(conf, param) - if (_value and - (not PARAM_RANGE[param]['min'] <= _value <= - PARAM_RANGE[param]['max'])): - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - - if conf.hitachi_default_copy_method not in COPY_METHOD: - msg = basic_lib.output_err(601, - param='hitachi_default_copy_method') - raise exception.HBSDError(message=msg) - - if (conf.hitachi_default_copy_method == 'THIN' - and conf.hitachi_thin_pool_id is None): - msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') - raise exception.HBSDError(message=msg) - - for param in ('hitachi_ldev_range', 'hitachi_group_range'): - if not getattr(conf, param): - continue - else: - _value = self._range2list(conf, param) - setattr(conf, param, _value) - - if conf.hitachi_target_ports: - conf.hitachi_target_ports = conf.hitachi_target_ports.split(',') - - for opt in volume_opts: - getattr(conf, opt.name) - - if conf.hitachi_unit_name: - self.command = snm2.HBSDSNM2(conf) - else: - conf.append_config_values(horcm.volume_opts) - self.command = horcm.HBSDHORCM(conf) - self.command.check_param() - self.pair_flock = self.command.set_pair_flock() - self.horcmgr_flock = self.command.set_horcmgr_flock() - - def create_lock_file(self): - basic_lib.create_empty_file(self.system_lock_file) - basic_lib.create_empty_file(self.service_lock_file) - basic_lib.create_empty_file(self.storage_lock_file) - self.command.create_lock_file() - - def _add_ldev(self, volume_num, capacity, pool_id, is_vvol): - self.command.comm_add_ldev(pool_id, volume_num, capacity, is_vvol) - - def _get_unused_volume_num(self, ldev_range): - return self.command.get_unused_ldev(ldev_range) - - def add_volinfo(self, ldev, id=None, type='volume'): - with self.volinfo_lock: - if ldev not in self.volume_info: - self.init_volinfo(self.volume_info, ldev) - if id: - desc = '%s %s' % (type, id) - self.volume_info[ldev]['in_use'].set_desc(desc) - - def delete_pair(self, ldev, all_split=True, is_vvol=None): - paired_info = self.command.get_paired_info(ldev) - LOG.debug('paired_info: %s', paired_info) - pvol = paired_info['pvol'] - svols = paired_info['svol'] - driver = self.generated_from - restart = False - svol_list = [] - try: - if pvol is None: - return - elif pvol == ldev: - for svol in svols[:]: - if svol['is_vvol'] or 
svol['status'] != basic_lib.PSUS: - continue - - self.command.delete_pair(pvol, svol['lun'], False) - restart = True - driver.pair_terminate_connection(svol['lun']) - svols.remove(svol) - - if all_split and svols: - svol_list.append(six.text_type(svols[0]['lun'])) - for svol in svols[1:]: - svol_list.append(', %d' % svol['lun']) - - msg = basic_lib.output_err(616, pvol=pvol, - svol=''.join(svol_list)) - raise exception.HBSDBusy(message=msg) - - if not svols: - driver.pair_terminate_connection(pvol) - - else: - self.add_volinfo(pvol) - if not self.volume_info[pvol]['in_use'].lock.acquire(False): - desc = self.volume_info[pvol]['in_use'].desc - msg = basic_lib.output_err(660, desc=desc) - raise exception.HBSDBusy(message=msg) - try: - paired_info = self.command.get_paired_info(ldev) - if paired_info['pvol'] is None: - return - svol = paired_info['svol'][0] - if svol['status'] != basic_lib.PSUS: - msg = basic_lib.output_err(616, pvol=pvol, svol=ldev) - raise exception.HBSDBusy(message=msg) - - self.command.delete_pair(pvol, ldev, svol['is_vvol']) - if not svol['is_vvol']: - restart = True - driver.pair_terminate_connection(ldev) - paired_info = self.command.get_paired_info(pvol) - if paired_info['pvol'] is None: - driver.pair_terminate_connection(pvol) - finally: - self.volume_info[pvol]['in_use'].lock.release() - except Exception: - with excutils.save_and_reraise_exception(): - if restart: - try: - self.command.restart_pair_horcm() - except Exception as e: - LOG.warning('Failed to restart horcm: %s', e) - else: - if (all_split or is_vvol) and restart: - try: - self.command.restart_pair_horcm() - except Exception as e: - LOG.warning('Failed to restart horcm: %s', e) - - def copy_async_data(self, pvol, svol, is_vvol): - path_list = [] - driver = self.generated_from - try: - with self.pair_flock: - self.delete_pair(pvol, all_split=False, is_vvol=is_vvol) - paired_info = self.command.get_paired_info(pvol) - if paired_info['pvol'] is None: - driver.pair_initialize_connection(pvol) - path_list.append(pvol) - driver.pair_initialize_connection(svol) - path_list.append(svol) - self.command.comm_create_pair(pvol, svol, is_vvol) - except Exception: - with excutils.save_and_reraise_exception(): - for ldev in path_list: - try: - driver.pair_terminate_connection(ldev) - except Exception as ex: - LOG.warning(basic_lib.set_msg(310, ldev=ldev, - reason=ex)) - - def copy_sync_data(self, src_ldev, dest_ldev, size): - src_vol = {'provider_location': six.text_type(src_ldev), - 'id': 'src_vol'} - dest_vol = {'provider_location': six.text_type(dest_ldev), - 'id': 'dest_vol'} - properties = utils.brick_get_connector_properties() - driver = self.generated_from - src_info = None - dest_info = None - try: - dest_info = driver._attach_volume(self.context, dest_vol, - properties) - src_info = driver._attach_volume(self.context, src_vol, - properties) - volume_utils.copy_volume(src_info['device']['path'], - dest_info['device']['path'], size * 1024, - self.configuration.volume_dd_blocksize) - finally: - if dest_info: - driver._detach_volume(self.context, dest_info, - dest_vol, properties) - if src_info: - driver._detach_volume(self.context, src_info, - src_vol, properties) - self.command.discard_zero_page(dest_ldev) - - def copy_data(self, pvol, size, p_is_vvol, method): - type = 'Normal' - is_vvol = method == 'THIN' - svol = self._create_volume(size, is_vvol=is_vvol) - try: - if p_is_vvol: - self.copy_sync_data(pvol, svol, size) - else: - if is_vvol: - type = 'V-VOL' - self.copy_async_data(pvol, svol, is_vvol) - except 
Exception: - with excutils.save_and_reraise_exception(): - try: - self.delete_ldev(svol, is_vvol) - except Exception as ex: - LOG.warning(basic_lib.set_msg(313, ldev=svol, - reason=ex)) - - return six.text_type(svol), type - - def add_lun(self, command, hostgroups, ldev, is_once=False): - lock = basic_lib.get_process_lock(self.storage_lock_file) - with lock: - self.command.comm_add_lun(command, hostgroups, ldev, is_once) - - def create_ldev(self, size, ldev_range, pool_id, is_vvol): - LOG.debug('create start (normal)') - for i in basic_lib.DEFAULT_TRY_RANGE: - LOG.debug('Try number: %(tries)s / %(max_tries)s', - {'tries': i + 1, - 'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)}) - new_ldev = self._get_unused_volume_num(ldev_range) - try: - self._add_ldev(new_ldev, size, pool_id, is_vvol) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(312, resource='LDEV')) - continue - else: - break - else: - msg = basic_lib.output_err(636) - raise exception.HBSDError(message=msg) - LOG.debug('create end (normal: %s)', new_ldev) - self.init_volinfo(self.volume_info, new_ldev) - return new_ldev - - def _create_volume(self, size, is_vvol=False): - ldev_range = self.configuration.hitachi_ldev_range - if not ldev_range: - ldev_range = DEFAULT_LDEV_RANGE - pool_id = self.configuration.hitachi_pool_id - - lock = basic_lib.get_process_lock(self.storage_lock_file) - with self.storage_obj_lock, lock: - ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol) - return ldev - - def create_volume(self, volume): - volume_metadata = self.get_volume_metadata(volume['id']) - volume_metadata['type'] = 'Normal' - - size = volume['size'] - ldev = self._create_volume(size) - volume_metadata['ldev'] = six.text_type(ldev) - - return {'provider_location': six.text_type(ldev), - 'metadata': volume_metadata} - - def delete_ldev(self, ldev, is_vvol): - LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)', - {'ldev': ldev, 'vvol': is_vvol}) - with self.pair_flock: - self.delete_pair(ldev) - self.command.comm_delete_ldev(ldev, is_vvol) - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - LOG.debug('delete_ldev is finished ' - '(LDEV: %(ldev)d, is_vvol: %(vvol)s)', - {'ldev': ldev, 'vvol': is_vvol}) - - def delete_volume(self, volume): - ldev = self.get_ldev(volume) - if ldev is None: - LOG.warning(basic_lib.set_msg(304, method='delete_volume', - id=volume['id'])) - return - self.add_volinfo(ldev, volume['id']) - if not self.volume_info[ldev]['in_use'].lock.acquire(False): - desc = self.volume_info[ldev]['in_use'].desc - basic_lib.output_err(660, desc=desc) - raise exception.VolumeIsBusy(volume_name=volume['name']) - try: - is_vvol = self.get_volume_is_vvol(volume) - try: - self.delete_ldev(ldev, is_vvol) - except exception.HBSDNotFound: - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - LOG.warning(basic_lib.set_msg( - 305, type='volume', id=volume['id'])) - except exception.HBSDBusy: - raise exception.VolumeIsBusy(volume_name=volume['name']) - finally: - if ldev in self.volume_info: - self.volume_info[ldev]['in_use'].lock.release() - - def check_volume_status(self, volume, is_vvol): - if not is_vvol: - status = VALID_DP_VOLUME_STATUS - else: - status = VALID_V_VOLUME_STATUS - if volume['status'] not in status: - msg = basic_lib.output_err(654, status=volume['status']) - raise exception.HBSDError(message=msg) - - def create_snapshot(self, snapshot): - src_ref = self.get_volume(snapshot['volume_id']) - pvol = self.get_ldev(src_ref) - 
if pvol is None: - msg = basic_lib.output_err(624, type='volume', id=src_ref['id']) - raise exception.HBSDError(message=msg) - - self.add_volinfo(pvol, src_ref['id']) - with self.volume_info[pvol]['in_use']: - is_vvol = self.get_volume_is_vvol(src_ref) - self.check_volume_status(src_ref, is_vvol) - size = snapshot['volume_size'] - snap_metadata = snapshot.get('metadata') - method = None if is_vvol else self.get_copy_method(src_ref) - - svol, type = self.copy_data(pvol, size, is_vvol, method) - - if type == 'V-VOL': - snap_metadata['type'] = type - snap_metadata['ldev'] = svol - - return {'provider_location': svol, - 'metadata': snap_metadata} - - def delete_snapshot(self, snapshot): - ldev = self.get_ldev(snapshot) - if ldev is None: - LOG.warning(basic_lib.set_msg( - 304, method='delete_snapshot', id=snapshot['id'])) - return - self.add_volinfo(ldev, id=snapshot['id'], type='snapshot') - if not self.volume_info[ldev]['in_use'].lock.acquire(False): - desc = self.volume_info[ldev]['in_use'].desc - basic_lib.output_err(660, desc=desc) - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - try: - is_vvol = self.get_snapshot_is_vvol(snapshot) - try: - self.delete_ldev(ldev, is_vvol) - except exception.HBSDNotFound: - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - LOG.warning(basic_lib.set_msg( - 305, type='snapshot', id=snapshot['id'])) - except exception.HBSDBusy: - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - finally: - if ldev in self.volume_info: - self.volume_info[ldev]['in_use'].lock.release() - - def create_cloned_volume(self, volume, src_vref): - pvol = self.get_ldev(src_vref) - if pvol is None: - msg = basic_lib.output_err(624, type='volume', id=src_vref['id']) - raise exception.HBSDError(message=msg) - - self.add_volinfo(pvol, src_vref['id']) - with self.volume_info[pvol]['in_use']: - is_vvol = self.get_volume_is_vvol(src_vref) - self.check_volume_status(self.get_volume(src_vref['id']), is_vvol) - size = volume['size'] - src_size = src_vref['size'] - if size < src_size: - msg = basic_lib.output_err(617, type='volume', - volume_id=volume['id']) - raise exception.HBSDError(message=msg) - - metadata = self.get_volume_metadata(volume['id']) - method = None if is_vvol else self.get_copy_method(volume) - - svol, type = self.copy_data(pvol, src_size, is_vvol, method) - - if size > src_size: - self.extend_volume(volume, size) - - metadata['type'] = type - metadata['volume'] = src_vref['id'] - metadata['ldev'] = svol - - return {'provider_location': svol, 'metadata': metadata} - - def create_volume_from_snapshot(self, volume, snapshot): - pvol = self.get_ldev(snapshot) - if pvol is None: - msg = basic_lib.output_err(624, type='snapshot', id=snapshot['id']) - raise exception.HBSDError(message=msg) - - self.add_volinfo(pvol, id=snapshot['id'], type='snapshot') - with self.volume_info[pvol]['in_use']: - is_vvol = self.get_snapshot_is_vvol(snapshot) - if snapshot['status'] != 'available': - msg = basic_lib.output_err(655, status=snapshot['status']) - raise exception.HBSDError(message=msg) - - size = volume['size'] - src_size = snapshot['volume_size'] - if size != src_size: - msg = basic_lib.output_err(617, type='snapshot', - volume_id=volume['id']) - raise exception.HBSDError(message=msg) - - metadata = self.get_volume_metadata(volume['id']) - method = None if is_vvol else self.get_copy_method(volume) - svol, type = self.copy_data(pvol, size, is_vvol, method) - - metadata['type'] = type - metadata['snapshot'] = snapshot['id'] 
- metadata['ldev'] = svol - - return {'provider_location': svol, 'metadata': metadata} - - def _extend_volume(self, ldev, old_size, new_size): - with self.pair_flock: - self.delete_pair(ldev) - self.command.comm_extend_ldev(ldev, old_size, new_size) - - def extend_volume(self, volume, new_size): - pvol = self.get_ldev(volume) - self.add_volinfo(pvol, volume['id']) - with self.volume_info[pvol]['in_use']: - if self.get_volume_is_vvol(volume): - msg = basic_lib.output_err(618, volume_id=volume['id']) - raise exception.HBSDError(message=msg) - self._extend_volume(pvol, volume['size'], new_size) - - def output_backend_available_once(self): - if self.output_first: - self.output_first = False - LOG.warning(basic_lib.set_msg( - 3, config_group=self.configuration.config_group)) - - def update_volume_stats(self, storage_protocol): - data = {} - total_gb = None - free_gb = None - data['volume_backend_name'] = self.configuration.safe_get( - 'volume_backend_name') or 'HBSD%s' % storage_protocol - data['vendor_name'] = 'Hitachi' - data['driver_version'] = VERSION - data['storage_protocol'] = storage_protocol - - try: - total_gb, free_gb = self.command.comm_get_dp_pool( - self.configuration.hitachi_pool_id) - except Exception as ex: - LOG.error('Failed to update volume status: %s', ex) - return None - - data['total_capacity_gb'] = total_gb - data['free_capacity_gb'] = free_gb - data['reserved_percentage'] = self.configuration.safe_get( - 'reserved_percentage') - data['QoS_support'] = False - - LOG.debug('Updating volume status (%s)', data) - - return data - - def init_volinfo(self, vol_info, ldev): - vol_info[ldev] = {'in_use': TryLock(), 'lock': threading.Lock()} - - def manage_existing(self, volume, existing_ref): - """Manage an existing Hitachi storage volume. - - existing_ref is a dictionary of the form: - - For HUS 100 Family: - - .. code-block:: default - - { - 'ldev': <ldev number>, - 'unit_name': <unit name> - } - - For VSP G1000/VSP/HUS VM: - - ..
code-block:: default - - { - 'ldev': <ldev number>, - 'serial_number': <serial number> - } - - """ - - ldev = self._string2int(existing_ref.get('ldev')) - - LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev)) - - return {'provider_location': ldev} - - def _manage_existing_get_size(self, volume, existing_ref): - """Return size of volume for manage_existing.""" - - ldev = self._string2int(existing_ref.get('ldev')) - if ldev is None: - msg = basic_lib.output_err(701) - raise exception.HBSDError(data=msg) - - size = self.command.get_ldev_size_in_gigabyte(ldev, existing_ref) - - metadata = {'type': basic_lib.NORMAL_VOLUME_TYPE, 'ldev': ldev} - self._update_volume_metadata(volume['id'], metadata) - - return size - - def manage_existing_get_size(self, volume, existing_ref): - try: - return self._manage_existing_get_size(volume, existing_ref) - except exception.HBSDError as ex: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=six.text_type(ex)) - - def _unmanage(self, volume, ldev): - with self.horcmgr_flock: - self.delete_pair(ldev) - - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - - def unmanage(self, volume): - """Remove the specified volume from Cinder management.""" - - ldev = self.get_ldev(volume) - - if ldev is None: - return - - self.add_volinfo(ldev, volume['id']) - if not self.volume_info[ldev]['in_use'].lock.acquire(False): - desc = self.volume_info[ldev]['in_use'].desc - basic_lib.output_err(660, desc=desc) - raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) - - is_vvol = self.get_volume_is_vvol(volume) - if is_vvol: - basic_lib.output_err(706, volume_id=volume['id'], - volume_type=basic_lib.NORMAL_VOLUME_TYPE) - raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) - try: - self._unmanage(volume, ldev) - except exception.HBSDBusy: - raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) - else: - LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev)) - finally: - if ldev in self.volume_info: - self.volume_info[ldev]['in_use'].lock.release() diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py deleted file mode 100644 index aaf581ab437..00000000000 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Fibre channel Cinder volume driver for Hitachi storage.
- -""" - -import os -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -import cinder.volume.driver -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib -from cinder.volume.drivers.hitachi import hbsd_common as common -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.BoolOpt('hitachi_zoning_request', - default=False, - help='Request for FC Zone creating HostGroup'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver): - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"] - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - os.environ['LANG'] = 'C' - super(HBSDFCDriver, self).__init__(*args, **kwargs) - self.db = kwargs.get('db') - self.common = None - self.configuration.append_config_values(common.volume_opts) - self._stats = {} - self.context = None - self.max_hostgroups = None - self.pair_hostgroups = [] - self.pair_hostnum = 0 - self.do_setup_status = threading.Event() - - def _check_param(self): - self.configuration.append_config_values(volume_opts) - for opt in volume_opts: - getattr(self.configuration, opt.name) - - def check_param(self): - try: - self.common.check_param() - self._check_param() - except exception.HBSDError: - raise - except Exception as ex: - msg = basic_lib.output_err(601, param=six.text_type(ex)) - raise exception.HBSDError(message=msg) - - def output_param_to_log(self): - lock = basic_lib.get_process_lock(self.common.system_lock_file) - - with lock: - self.common.output_param_to_log('FC') - for opt in volume_opts: - if not opt.secret: - value = getattr(self.configuration, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - self.common.command.output_param_to_log(self.configuration) - - def _add_wwn(self, hgs, port, gid, wwns): - for wwn in wwns: - wwn = six.text_type(wwn) - self.common.command.comm_add_hbawwn(port, gid, wwn) - detected = self.common.command.is_detected(port, wwn) - hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn, - 'detected': detected}) - LOG.debug('Create host group for %s', hgs) - - def _add_lun(self, hostgroups, ldev): - if hostgroups is self.pair_hostgroups: - is_once = True - else: - is_once = False - self.common.add_lun('auhgmap', hostgroups, ldev, is_once) - - def _delete_lun(self, hostgroups, ldev): - try: - self.common.command.comm_delete_lun(hostgroups, ldev) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(301, ldev=ldev)) - - def _get_hgname_gid(self, port, host_grp_name): - return self.common.command.get_hgname_gid(port, host_grp_name) - - def _get_unused_gid(self, port): - group_range = self.configuration.hitachi_group_range - if not group_range: - group_range = basic_lib.DEFAULT_GROUP_RANGE - return self.common.command.get_unused_gid(group_range, port) - - def _get_hostgroup_info(self, hgs, wwns, login=True): - target_ports = self.configuration.hitachi_target_ports - return self.common.command.comm_get_hostgroup_info( - hgs, wwns, target_ports, login=login) - - def _fill_group(self, hgs, port, host_grp_name, wwns): - 
added_hostgroup = False - LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s ' - 'name: %(name)s wwns: %(wwns)s)', - {'hgs': hgs, 'port': port, - 'name': host_grp_name, 'wwns': wwns}) - gid = self._get_hgname_gid(port, host_grp_name) - if gid is None: - for retry_cnt in basic_lib.DEFAULT_TRY_RANGE: - try: - gid = self._get_unused_gid(port) - self._add_hostgroup(port, gid, host_grp_name) - added_hostgroup = True - except exception.HBSDNotFound: - gid = None - LOG.warning(basic_lib.set_msg(312, resource='GID')) - continue - else: - LOG.debug('Completed to add host target' - '(port: %(port)s gid: %(gid)d)', - {'port': port, 'gid': gid}) - break - else: - msg = basic_lib.output_err(641) - raise exception.HBSDError(message=msg) - - try: - if wwns: - self._add_wwn(hgs, port, gid, wwns) - else: - hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None, - 'detected': True}) - except Exception: - with excutils.save_and_reraise_exception(): - if added_hostgroup: - self._delete_hostgroup(port, gid, host_grp_name) - - def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports): - target_ports = self.configuration.hitachi_target_ports - group_request = self.configuration.hitachi_group_request - wwns = [] - for wwn in master_wwns: - wwns.append(wwn.lower()) - if target_ports and group_request: - host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip) - for port in security_ports: - wwns_copy = wwns[:] - for hostgroup in hgs: - if (hostgroup['port'] == port and - hostgroup['initiator_wwn'].lower() in wwns_copy): - wwns_copy.remove(hostgroup['initiator_wwn'].lower()) - if wwns_copy: - try: - self._fill_group(hgs, port, host_grp_name, wwns_copy) - except Exception as ex: - LOG.warning('Failed to add host group: %s', ex) - LOG.warning(basic_lib.set_msg( - 308, port=port, name=host_grp_name)) - - if not hgs: - raise exception.HBSDError(message=basic_lib.output_err(649)) - - def add_hostgroup_pair(self, pair_hostgroups): - if self.configuration.hitachi_unit_name: - return - - properties = utils.brick_get_connector_properties() - if 'wwpns' not in properties: - msg = basic_lib.output_err(650, resource='HBA') - raise exception.HBSDError(message=msg) - hostgroups = [] - self._get_hostgroup_info(hostgroups, properties['wwpns'], - login=False) - host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX, - self.pair_hostnum) - for hostgroup in hostgroups: - gid = self._get_hgname_gid(hostgroup['port'], - host_grp_name) - - # When 'gid' is 0, it should be true. - # So, it cannot remove 'is not None'. 
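A minimal illustration of the falsy-zero pitfall the comment above describes (a sketch for the reader, not a line from the removed file): host group ID 0 is a valid group, so the code must test 'is not None' rather than truthiness.

    # Sketch only: why "if gid:" would mis-handle host group 0.
    gid = 0                  # GID 0 names a real, reusable host group
    if gid:                  # wrong: bool(0) is False, so group 0 is skipped
        print('reuse existing host group')
    if gid is not None:      # right: only an absent group (None) is skipped
        print('reuse existing host group')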
- if gid is not None: - pair_hostgroups.append({'port': hostgroup['port'], - 'gid': gid, 'initiator_wwn': None, - 'detected': True}) - break - - if not pair_hostgroups: - for hostgroup in hostgroups: - pair_port = hostgroup['port'] - try: - self._fill_group(pair_hostgroups, pair_port, - host_grp_name, None) - except Exception: - if hostgroup is hostgroups[-1]: - raise - else: - break - - def add_hostgroup(self): - properties = utils.brick_get_connector_properties() - if 'wwpns' not in properties: - msg = basic_lib.output_err(650, resource='HBA') - raise exception.HBSDError(message=msg) - LOG.debug("wwpns: %s", properties['wwpns']) - - hostgroups = [] - security_ports = self._get_hostgroup_info( - hostgroups, properties['wwpns'], login=False) - self.add_hostgroup_master(hostgroups, properties['wwpns'], - properties['ip'], security_ports) - self.add_hostgroup_pair(self.pair_hostgroups) - - def _get_target_wwn(self, port): - target_wwns = self.common.command.comm_set_target_wwns( - self.configuration.hitachi_target_ports) - return target_wwns[port] - - def _add_hostgroup(self, port, gid, host_grp_name): - self.common.command.comm_add_hostgrp(port, gid, host_grp_name) - - def _delete_hostgroup(self, port, gid, host_grp_name): - try: - self.common.command.comm_del_hostgrp(port, gid, host_grp_name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.warning(basic_lib.set_msg( - 306, port=port, gid=gid, name=host_grp_name)) - - def _check_volume_mapping(self, hostgroup): - port = hostgroup['port'] - gid = hostgroup['gid'] - if self.common.command.get_hostgroup_luns(port, gid): - return True - else: - return False - - def _build_initiator_target_map(self, hostgroups, terminate=False): - target_wwns = [] - init_targ_map = {} - - target_ports = self.configuration.hitachi_target_ports - zoning_request = self.configuration.hitachi_zoning_request - - for hostgroup in hostgroups: - target_wwn = self._get_target_wwn(hostgroup['port']) - - if target_wwn not in target_wwns: - target_wwns.append(target_wwn) - - if target_ports and zoning_request: - if terminate and self._check_volume_mapping(hostgroup): - continue - - initiator_wwn = hostgroup['initiator_wwn'] - if initiator_wwn not in init_targ_map: - init_targ_map[initiator_wwn] = [] - - init_targ_map[initiator_wwn].append(target_wwn) - - return target_wwns, init_targ_map - - def _get_properties(self, volume, hostgroups, terminate=False): - properties = {} - - target_wwns, init_targ_map = self._build_initiator_target_map( - hostgroups, terminate) - - properties['target_wwn'] = target_wwns - - if init_targ_map: - properties['initiator_target_map'] = init_targ_map - - if not terminate: - properties['target_lun'] = hostgroups[0]['lun'] - - return properties - - def do_setup(self, context): - self.context = context - self.common = common.HBSDCommon(self.configuration, self, - context, self.db) - msg = _("The HBSD FC driver is deprecated and " - "will be removed in P release.") - versionutils.report_deprecated_feature(LOG, msg) - - self.check_param() - - self.common.create_lock_file() - - self.common.command.connect_storage() - self.max_hostgroups = self.common.command.get_max_hostgroups() - - lock = basic_lib.get_process_lock(self.common.service_lock_file) - with lock: - self.add_hostgroup() - - self.output_param_to_log() - self.do_setup_status.set() - - def check_for_setup_error(self): - pass - - def extend_volume(self, volume, new_size): - self.do_setup_status.wait() - self.common.extend_volume(volume, new_size) - - def 
get_volume_stats(self, refresh=False): - if refresh: - if self.do_setup_status.isSet(): - self.common.output_backend_available_once() - _stats = self.common.update_volume_stats("FC") - if _stats: - self._stats = _stats - return self._stats - - def create_volume(self, volume): - self.do_setup_status.wait() - metadata = self.common.create_volume(volume) - return metadata - - def delete_volume(self, volume): - self.do_setup_status.wait() - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_snapshot(snapshot) - return metadata - - def delete_snapshot(self, snapshot): - self.do_setup_status.wait() - self.common.delete_snapshot(snapshot) - - def create_cloned_volume(self, volume, src_vref): - self.do_setup_status.wait() - metadata = self.common.create_cloned_volume(volume, src_vref) - return metadata - - def create_volume_from_snapshot(self, volume, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_volume_from_snapshot(volume, snapshot) - return metadata - - def _initialize_connection(self, ldev, connector, src_hgs=None): - LOG.debug("Call _initialize_connection " - "(config_group: %(group)s ldev: %(ldev)d)", - {'group': self.configuration.config_group, 'ldev': ldev}) - if src_hgs is self.pair_hostgroups: - hostgroups = src_hgs - else: - hostgroups = [] - security_ports = self._get_hostgroup_info( - hostgroups, connector['wwpns'], login=True) - self.add_hostgroup_master(hostgroups, connector['wwpns'], - connector['ip'], security_ports) - - if src_hgs is self.pair_hostgroups: - try: - self._add_lun(hostgroups, ldev) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(311, ldev=ldev)) - for i in range(self.max_hostgroups + 1): - self.pair_hostnum += 1 - pair_hostgroups = [] - try: - self.add_hostgroup_pair(pair_hostgroups) - self.pair_hostgroups.extend(pair_hostgroups) - except exception.HBSDNotFound: - if i >= self.max_hostgroups: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - else: - break - self.pair_initialize_connection(ldev) - else: - self._add_lun(hostgroups, ldev) - - return hostgroups - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - msg = basic_lib.output_err(619, volume_id=volume['id']) - raise exception.HBSDError(message=msg) - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - hostgroups = self._initialize_connection(ldev, connector) - properties = self._get_properties(volume, hostgroups) - LOG.debug('Initialize volume_info: %s', - self.common.volume_info) - - LOG.debug('HFCDrv: properties=%s', properties) - return { - 'driver_volume_type': 'fibre_channel', - 'data': properties - } - - def _terminate_connection(self, ldev, connector, src_hgs): - LOG.debug("Call _terminate_connection(config_group: %s)", - self.configuration.config_group) - hostgroups = src_hgs[:] - self._delete_lun(hostgroups, ldev) - LOG.debug("*** _terminate_ ***") - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - LOG.warning(basic_lib.set_msg(302, volume_id=volume['id'])) - return - - if 'wwpns' not in connector: - msg = basic_lib.output_err(650, resource='HBA') - raise exception.HBSDError(message=msg) - - hostgroups = 
[] - self._get_hostgroup_info(hostgroups, - connector['wwpns'], login=False) - if not hostgroups: - msg = basic_lib.output_err(649) - raise exception.HBSDError(message=msg) - - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - self._terminate_connection(ldev, connector, hostgroups) - properties = self._get_properties(volume, hostgroups, - terminate=True) - LOG.debug('Terminate volume_info: %s', self.common.volume_info) - - return { - 'driver_volume_type': 'fibre_channel', - 'data': properties - } - - def pair_initialize_connection(self, ldev): - if self.configuration.hitachi_unit_name: - return - self._initialize_connection(ldev, None, self.pair_hostgroups) - - def pair_terminate_connection(self, ldev): - if self.configuration.hitachi_unit_name: - return - self._terminate_connection(ldev, None, self.pair_hostgroups) - - def discard_zero_page(self, volume): - self.common.command.discard_zero_page(self.common.get_ldev(volume)) - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def copy_image_to_volume(self, context, volume, image_service, image_id): - self.do_setup_status.wait() - super(HBSDFCDriver, self).copy_image_to_volume(context, volume, - image_service, - image_id) - self.discard_zero_page(volume) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - self.do_setup_status.wait() - if volume['volume_attachment']: - desc = 'volume %s' % volume['id'] - msg = basic_lib.output_err(660, desc=desc) - raise exception.HBSDError(message=msg) - super(HBSDFCDriver, self).copy_volume_to_image(context, volume, - image_service, - image_meta) - - def before_volume_copy(self, context, src_vol, dest_vol, remote=None): - """Driver-specific actions before copyvolume data. - - This method will be called before _copy_volume_data during volume - migration - """ - self.do_setup_status.wait() - - def after_volume_copy(self, context, src_vol, dest_vol, remote=None): - """Driver-specific actions after copyvolume data. - - This method will be called after _copy_volume_data during volume - migration - """ - self.discard_zero_page(dest_vol) - - def manage_existing(self, volume, existing_ref): - return self.common.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - self.do_setup_status.wait() - return self.common.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - self.do_setup_status.wait() - self.common.unmanage(volume) diff --git a/cinder/volume/drivers/hitachi/hbsd_horcm.py b/cinder/volume/drivers/hitachi/hbsd_horcm.py deleted file mode 100644 index d27f59ec54d..00000000000 --- a/cinder/volume/drivers/hitachi/hbsd_horcm.py +++ /dev/null @@ -1,1502 +0,0 @@ -# Copyright (C) 2014, 2015, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import functools -import os -import re -import shlex -import threading -import time - -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib - -GETSTORAGEARRAY_ONCE = 100 -MAX_SNAPSHOT_COUNT = 1021 -SNAP_LAST_PATH_SSB = '0xB958,0x020A' -HOST_IO_SSB = '0xB958,0x0233' -INVALID_LUN_SSB = '0x2E20,0x0000' -INTERCEPT_LDEV_SSB = '0x2E22,0x0001' -HOSTGROUP_INSTALLED = '0xB956,0x3173' -RESOURCE_LOCKED = 'SSB=0x2E11,0x2205' - -LDEV_STATUS_WAITTIME = 120 -LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME -LUN_DELETE_INTERVAL = 3 -EXEC_MAX_WAITTIME = 30 -EXEC_RETRY_INTERVAL = 5 -HORCM_WAITTIME = 1 -PAIR_TYPE = ('HORC', 'MRCF', 'QS') -PERMITTED_TYPE = ('CVS', 'HDP', 'HDT') - -RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_' -HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_' -RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_' - -STATUS_TABLE = { - 'SMPL': basic_lib.SMPL, - 'COPY': basic_lib.COPY, - 'RCPY': basic_lib.COPY, - 'PAIR': basic_lib.PAIR, - 'PFUL': basic_lib.PAIR, - 'PSUS': basic_lib.PSUS, - 'PFUS': basic_lib.PSUS, - 'SSUS': basic_lib.PSUS, - 'PSUE': basic_lib.PSUE, -} -NOT_SET = '-' -HORCM_RUNNING = 1 -COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d' -SNAP_NAME = basic_lib.NAME_PREFIX + 'snap' -LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d' -MAX_MUNS = 3 - -EX_ENAUTH = 202 -EX_ENOOBJ = 205 -EX_CMDRJE = 221 -EX_CMDIOE = 237 -EX_INVCMD = 240 -EX_INVMOD = 241 -EX_ENODEV = 246 -EX_ENOENT = 247 -EX_OPTINV = 248 -EX_ATTDBG = 250 -EX_ATTHOR = 251 -EX_COMERR = 255 - -NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT) - -COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV) - -HORCM_ERROR = (EX_ATTDBG, EX_ATTHOR, EX_COMERR) - -MAX_HOSTGROUPS = 254 -MAX_HLUN = 2047 - -DEFAULT_PORT_BASE = 31000 - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.StrOpt('hitachi_horcm_numbers', - default='200,201', - help='Instance numbers for HORCM'), - cfg.StrOpt('hitachi_horcm_user', - help='Username of storage system for HORCM'), - cfg.StrOpt('hitachi_horcm_password', - help='Password of storage system for HORCM', - secret=True), - cfg.BoolOpt('hitachi_horcm_add_conf', - default=True, - help='Add to HORCM configuration'), - cfg.IntOpt('hitachi_horcm_resource_lock_timeout', - default=600, - help='Timeout until a resource lock is released, in seconds. 
' - 'The value must be between 0 and 7200.'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -def horcm_synchronized(function): - @functools.wraps(function) - def wrapper(*args, **kargs): - if len(args) == 1: - inst = args[0].conf.hitachi_horcm_numbers[0] - raidcom_obj_lock = args[0].raidcom_lock - else: - inst = args[1] - raidcom_obj_lock = args[0].raidcom_pair_lock - raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) - lock = basic_lib.get_process_lock(raidcom_lock_file) - with raidcom_obj_lock, lock: - return function(*args, **kargs) - return wrapper - - -def storage_synchronized(function): - @functools.wraps(function) - def wrapper(*args, **kargs): - serial = args[0].conf.hitachi_serial_number - resource_lock = args[0].resource_lock - resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial) - lock = basic_lib.get_process_lock(resource_lock_file) - with resource_lock, lock: - return function(*args, **kargs) - return wrapper - - -class HBSDHORCM(basic_lib.HBSDBasicLib): - - def __init__(self, conf): - super(HBSDHORCM, self).__init__(conf=conf) - - self.copy_groups = [None] * MAX_MUNS - self.raidcom_lock = threading.Lock() - self.raidcom_pair_lock = threading.Lock() - self.horcmgr_lock = threading.Lock() - self.horcmgr_flock = None - self.resource_lock = threading.Lock() - - def check_param(self): - numbers = self.conf.hitachi_horcm_numbers.split(',') - if len(numbers) != 2: - msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') - raise exception.HBSDError(message=msg) - for i in numbers: - if not i.isdigit(): - msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') - raise exception.HBSDError(message=msg) - self.conf.hitachi_horcm_numbers = [int(num) for num in numbers] - inst = self.conf.hitachi_horcm_numbers[0] - pair_inst = self.conf.hitachi_horcm_numbers[1] - if inst == pair_inst: - msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') - raise exception.HBSDError(message=msg) - for param in ('hitachi_horcm_user', 'hitachi_horcm_password'): - if not getattr(self.conf, param): - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id: - msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') - raise exception.HBSDError(message=msg) - resource_lock_timeout = self.conf.hitachi_horcm_resource_lock_timeout - if not ((resource_lock_timeout >= 0) and - (resource_lock_timeout <= 7200)): - msg = basic_lib.output_err( - 601, param='hitachi_horcm_resource_lock_timeout') - raise exception.HBSDError(message=msg) - for opt in volume_opts: - getattr(self.conf, opt.name) - - def set_copy_groups(self, host_ip): - serial = self.conf.hitachi_serial_number - inst = self.conf.hitachi_horcm_numbers[1] - - for mun in range(MAX_MUNS): - copy_group = COPY_GROUP % (host_ip, serial, inst, mun) - self.copy_groups[mun] = copy_group - - def set_pair_flock(self): - inst = self.conf.hitachi_horcm_numbers[1] - name = '%s%d' % (HORCMGR_LOCK_FILE, inst) - self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock) - return self.horcmgr_flock - - def check_horcm(self, inst): - args = 'HORCMINST=%d horcmgr -check' % inst - ret, _stdout, _stderr = self.exec_command('env', args=args, - printflag=False) - return ret - - def shutdown_horcm(self, inst): - ret, stdout, stderr = self.exec_command( - 'horcmshutdown.sh', args=six.text_type(inst), printflag=False) - return ret - - def start_horcm(self, inst): - return 
self.exec_command('horcmstart.sh', args=six.text_type(inst), - printflag=False) - - def _wait_for_horcm_shutdown(self, inst): - if self.check_horcm(inst) != HORCM_RUNNING: - raise loopingcall.LoopingCallDone() - - if self.shutdown_horcm(inst): - LOG.error("Failed to shutdown horcm.") - raise loopingcall.LoopingCallDone() - - @horcm_synchronized - def restart_horcm(self, inst=None): - if inst is None: - inst = self.conf.hitachi_horcm_numbers[0] - - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_horcm_shutdown, inst) - - loop.start(interval=HORCM_WAITTIME).wait() - - ret, stdout, stderr = self.start_horcm(inst) - if ret: - msg = basic_lib.output_err( - 600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def restart_pair_horcm(self): - inst = self.conf.hitachi_horcm_numbers[1] - self.restart_horcm(inst=inst) - - def setup_horcmgr(self, host_ip): - pair_inst = self.conf.hitachi_horcm_numbers[1] - self.set_copy_groups(host_ip) - if self.conf.hitachi_horcm_add_conf: - self.create_horcmconf() - self.create_horcmconf(inst=pair_inst) - self.restart_horcm() - with self.horcmgr_flock: - self.restart_pair_horcm() - ret, stdout, stderr = self.comm_login() - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def _wait_for_exec_horcm(self, cmd, args, printflag, start): - if cmd == 'raidcom': - serial = self.conf.hitachi_serial_number - inst = self.conf.hitachi_horcm_numbers[0] - raidcom_obj_lock = self.raidcom_lock - args = '%s -s %s -I%d' % (args, serial, inst) - else: - inst = self.conf.hitachi_horcm_numbers[1] - raidcom_obj_lock = self.raidcom_pair_lock - args = '%s -ISI%d' % (args, inst) - user = self.conf.hitachi_horcm_user - passwd = self.conf.hitachi_horcm_password - raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) - lock = basic_lib.get_process_lock(raidcom_lock_file) - - with raidcom_obj_lock, lock: - ret, stdout, stderr = self.exec_command(cmd, args=args, - printflag=printflag) - - # The resource group may be locked by other software. - # Therefore, wait until the lock is released. 
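The comment above is the heart of this retry loop: _wait_for_exec_horcm is driven by oslo.service's FixedIntervalLoopingCall (see exec_raidcom below), so returning None retries after EXEC_RETRY_INTERVAL seconds, while raising LoopingCallDone(result) stops the loop and hands result back through wait(). A runnable sketch of that pattern, where still_locked is a hypothetical stand-in for the "RESOURCE_LOCKED in stderr" test:

    import time

    from oslo_service import loopingcall

    EXEC_RETRY_INTERVAL = 5  # seconds, mirroring the removed module's constant

    def still_locked():
        # Hypothetical probe standing in for "RESOURCE_LOCKED in stderr".
        return False

    def _poll(start, timeout):
        if still_locked() and time.time() - start < timeout:
            return  # None -> FixedIntervalLoopingCall calls _poll again
        # LoopingCallDone stops the loop; wait() returns its argument.
        raise loopingcall.LoopingCallDone('result')

    loop = loopingcall.FixedIntervalLoopingCall(_poll, time.time(), 600)
    print(loop.start(interval=EXEC_RETRY_INTERVAL).wait())  # -> 'result'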
- if (RESOURCE_LOCKED in stderr and - (time.time() - start < - self.conf.hitachi_horcm_resource_lock_timeout)): - return - - if not ret or ret <= 127: - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if time.time() - start >= EXEC_MAX_WAITTIME: - LOG.error("horcm command timeout.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if (ret == EX_ENAUTH and - not re.search("-login %s %s" % (user, passwd), args)): - _ret, _stdout, _stderr = self.comm_login() - if _ret: - LOG.error("Failed to authenticate user.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - elif ret in HORCM_ERROR: - _ret = 0 - with raidcom_obj_lock, lock: - if self.check_horcm(inst) != HORCM_RUNNING: - _ret, _stdout, _stderr = self.start_horcm(inst) - if _ret and _ret != HORCM_RUNNING: - LOG.error("Failed to start horcm.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - elif ret not in COMMAND_IO_TO_RAID: - LOG.error("Unexpected error occurs in horcm.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - def exec_raidcom(self, cmd, args, printflag=True): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_exec_horcm, cmd, args, printflag, time.time()) - - return loop.start(interval=EXEC_RETRY_INTERVAL).wait() - - def comm_login(self): - rmi_user = self.conf.hitachi_horcm_user - rmi_pass = self.conf.hitachi_horcm_password - args = '-login %s %s' % (rmi_user, rmi_pass) - return self.exec_raidcom('raidcom', args, printflag=False) - - def comm_reset_status(self): - self.exec_raidcom('raidcom', 'reset command_status') - - def comm_get_status(self): - return self.exec_raidcom('raidcom', 'get command_status') - - def get_command_error(self, stdout): - lines = stdout.splitlines() - line = shlex.split(lines[1]) - return int(line[3]) - - def comm_get_ldev(self, ldev): - opt = 'get ldev -ldev_id %s' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def add_used_hlun(self, port, gid, used_list): - opt = 'get lun -port %s-%d' % (port, gid) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - for line in lines[1:]: - lun = int(shlex.split(line)[3]) - if lun not in used_list: - used_list.append(lun) - - def get_unused_ldev(self, ldev_range): - start = ldev_range[0] - end = ldev_range[1] - - while start < end: - if end - start + 1 > GETSTORAGEARRAY_ONCE: - cnt = GETSTORAGEARRAY_ONCE - else: - cnt = end - start + 1 - opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - ldev_num = None - for line in lines: - if re.match("LDEV :", line): - ldev_num = int(shlex.split(line)[2]) - continue - if re.match("VOL_TYPE : NOT DEFINED", line): - return ldev_num - - start += GETSTORAGEARRAY_ONCE - else: - msg = basic_lib.output_err(648, resource='LDEV') - raise exception.HBSDError(message=msg) - - def get_hgname_gid(self, 
port, host_grp_name): - opt = 'get host_grp -port %s -key host_grp' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[2] == host_grp_name: - return int(line[1]) - return None - - def get_unused_gid(self, range, port): - _min = range[0] - _max = range[1] - opt = 'get host_grp -port %s -key host_grp' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - free_gid = None - for line in lines[_min + 1:]: - line = shlex.split(line) - if int(line[1]) > _max: - break - if line[2] == '-': - free_gid = int(line[1]) - break - if free_gid is None: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - return free_gid - - def comm_set_target_wwns(self, target_ports): - opt = 'get port' - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - target_wwns = {} - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - port = line[0][:5] - if target_ports and port not in target_ports: - continue - - target_wwns[port] = line[10] - LOG.debug('target wwns: %s', target_wwns) - return target_wwns - - def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected): - opt = 'get host_grp -port %s' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - found_wwns = 0 - for line in lines[1:]: - line = shlex.split(line) - if not re.match(basic_lib.NAME_PREFIX, line[2]): - continue - gid = line[1] - opt = 'get hba_wwn -port %s-%s' % (port, gid) - ret, stdout, stderr = self.exec_raidcom( - 'raidcom', opt, printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - hba_info = shlex.split(line) - - if hba_info[3] in wwns: - hostgroups.append({'port': six.text_type(port), - 'gid': int(hba_info[1]), - 'initiator_wwn': hba_info[3], - 'detected': is_detected}) - found_wwns += 1 - if len(wwns) == found_wwns: - break - - if len(wwns) == found_wwns: - break - - def comm_chk_login_wwn(self, wwns, port): - opt = 'get port -port %s' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - login_info = shlex.split(line) - if login_info[1] in wwns: - return True - else: - return False - - def comm_get_hostgroup_info(self, hgs, wwns, 
target_ports, login=True): - security_ports = [] - hostgroups = [] - - opt = 'get port' - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - - for line in lines[1:]: - line = shlex.split(line) - port = line[0][:5] - if target_ports and port not in target_ports: - continue - security = line[7] == 'Y' - - is_detected = None - if login: - is_detected = self.comm_chk_login_wwn(wwns, port) - - if security: - self.comm_get_hbawwn(hostgroups, wwns, port, is_detected) - security_ports.append(port) - - for hostgroup in hostgroups: - hgs.append(hostgroup) - - return security_ports - - def _get_lun(self, port, gid, ldev): - lun = None - - opt = 'get lun -port %s-%d' % (port, gid) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[5] == six.text_type(ldev): - lun = int(line[3]) - break - - return lun - - def _wait_for_delete_lun(self, hostgroup, ldev, start): - opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'], - hostgroup['gid'], ldev) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if not ret: - raise loopingcall.LoopingCallDone() - - if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and - not self.comm_get_snapshot(ldev) or - re.search('SSB=%s' % HOST_IO_SSB, stderr)): - LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr)) - - if time.time() - start >= LUN_DELETE_WAITTIME: - msg = basic_lib.output_err( - 637, method='_wait_for_delete_lun', - timeout=LUN_DELETE_WAITTIME) - raise exception.HBSDError(message=msg) - else: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_lun_core(self, hostgroup, ldev): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_delete_lun, hostgroup, ldev, time.time()) - - loop.start(interval=LUN_DELETE_INTERVAL).wait() - - def comm_delete_lun(self, hostgroups, ldev): - deleted_hostgroups = [] - no_ldev_cnt = 0 - for hostgroup in hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - is_deleted = False - for deleted in deleted_hostgroups: - if port == deleted['port'] and gid == deleted['gid']: - is_deleted = True - if is_deleted: - continue - try: - self.comm_delete_lun_core(hostgroup, ldev) - except exception.HBSDCmdError as ex: - no_ldev_cnt += 1 - if ex.ret == EX_ENOOBJ: - if no_ldev_cnt != len(hostgroups): - continue - raise exception.HBSDNotFound - else: - raise - deleted_hostgroups.append({'port': port, 'gid': gid}) - - def _check_ldev_status(self, ldev, status): - opt = ('get ldev -ldev_id %s -check_status %s -time %s' % - (ldev, status, LDEV_STATUS_WAITTIME)) - ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt) - return ret - - # Don't remove the storage_synchronized decorator. - # It is needed to prevent comm_add_ldev() and comm_delete_ldev() - # from running concurrently.
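The decorated method below is one of several, like _wait_for_delete_lun above, driven by oslo.service's FixedIntervalLoopingCall: the callback runs every interval seconds until it raises LoopingCallDone, whose payload becomes the return value of wait(). A self-contained sketch of the idiom; check_state() and the 'NML' status are invented stand-ins for a real raidcom query:

import time

from oslo_service import loopingcall


def check_state():
    # Invented stand-in: a real caller would parse `raidcom get ldev` output.
    return 'NML'


def _poll(start, timeout=30):
    if check_state() == 'NML':
        # Stops the loop; the value is handed back from wait().
        raise loopingcall.LoopingCallDone('ready')
    if time.time() - start >= timeout:
        raise RuntimeError('LDEV did not reach NML in %ds' % timeout)


loop = loopingcall.FixedIntervalLoopingCall(_poll, time.time())
result = loop.start(interval=3).wait()

Any other exception raised by the callback also ends the loop and is re-raised from wait(), which is how the timeout branches in these helpers surface errors.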
- @storage_synchronized - def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): - emulation = 'OPEN-V' - if is_vvol: - opt = ('add ldev -pool snap -ldev_id %d ' - '-capacity %dG -emulation %s' - % (ldev, capacity, emulation)) - else: - opt = ('add ldev -pool %d -ldev_id %d ' - '-capacity %dG -emulation %s' - % (pool_id, ldev, capacity, emulation)) - - self.comm_reset_status() - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr): - raise exception.HBSDNotFound - - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - if self._check_ldev_status(ldev, "NML"): - msg = basic_lib.output_err(653, ldev=ldev) - raise exception.HBSDError(message=msg) - - def comm_add_hostgrp(self, port, gid, host_grp_name): - opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid, - host_grp_name) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr): - raise exception.HBSDNotFound - - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_del_hostgrp(self, port, gid, host_grp_name): - opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_hbawwn(self, port, gid, wwn): - opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - @storage_synchronized - def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False): - tmp_hostgroups = hostgroups[:] - is_ok = False - used_list = [] - lun = None - old_lun = None - - for hostgroup in hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - self.add_used_hlun(port, gid, used_list) - lun = self._get_lun(port, gid, ldev) - - # When 'lun' or 'old_lun' is 0, it should be true. - # So, it cannot remove 'is not None'. - if lun is not None: - if old_lun is not None and old_lun != lun: - msg = basic_lib.output_err(648, resource='LUN (HLUN)') - raise exception.HBSDError(message=msg) - is_ok = True - hostgroup['lun'] = lun - tmp_hostgroups.remove(hostgroup) - old_lun = lun - - if is_once: - # When 'lun' is 0, it should be true. - # So, it cannot remove 'is not None'. 
- if lun is not None: - return - elif len(used_list) < MAX_HLUN + 1: - break - else: - tmp_hostgroups.remove(hostgroup) - if tmp_hostgroups: - used_list = [] - - if not used_list: - lun = 0 - elif lun is None: - for i in range(MAX_HLUN + 1): - if i not in used_list: - lun = i - break - else: - raise exception.HBSDNotFound - - opt = None - ret = 0 - stdout = None - stderr = None - invalid_hgs_str = None - - for hostgroup in tmp_hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - if not hostgroup['detected']: - if invalid_hgs_str: - invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, - port, gid) - else: - invalid_hgs_str = '%s:%d' % (port, gid) - continue - opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % ( - port, gid, ldev, lun) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if not ret: - is_ok = True - hostgroup['lun'] = lun - if is_once: - break - else: - LOG.warning(basic_lib.set_msg( - 314, ldev=ldev, lun=lun, port=port, id=gid)) - - if not is_ok: - if stderr: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - else: - msg = basic_lib.output_err(659, gid=invalid_hgs_str) - raise exception.HBSDError(message=msg) - - # Don't remove the storage_synchronized decorator. - # It is needed to prevent comm_add_ldev() and comm_delete_ldev() - # from running concurrently. - @storage_synchronized - def comm_delete_ldev(self, ldev, is_vvol): - ret = -1 - stdout = "" - stderr = "" - self.comm_reset_status() - opt = 'delete ldev -ldev_id %d' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - if re.search('SSB=%s' % INVALID_LUN_SSB, stderr): - raise exception.HBSDNotFound - - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - ret, stdout, stderr = self.comm_get_status() - if ret or self.get_command_error(stdout): - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_extend_ldev(self, ldev, old_size, new_size): - extend_size = new_size - old_size - opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_get_dp_pool(self, pool_id): - opt = 'get dp_pool' - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - if int(shlex.split(line)[0]) == pool_id: - free_gb = int(shlex.split(line)[3]) / 1024 - total_gb = int(shlex.split(line)[4]) / 1024 - return total_gb, free_gb - - msg = basic_lib.output_err(640, pool_id=pool_id) - raise exception.HBSDError(message=msg) - - def comm_modify_ldev(self, ldev): - args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr)) - - def is_detected(self, port, wwn): - return self.comm_chk_login_wwn([wwn], port) - - def
discard_zero_page(self, ldev): - try: - self.comm_modify_ldev(ldev) - except Exception as ex: - LOG.warning('Failed to discard zero page: %s', ex) - - def comm_add_snapshot(self, pvol, svol): - pool = self.conf.hitachi_thin_pool_id - copy_size = self.conf.hitachi_copy_speed - args = ('add snapshot -ldev_id %d %d -pool %d ' - '-snapshot_name %s -copy_size %d' - % (pvol, svol, pool, SNAP_NAME, copy_size)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_snapshot(self, ldev): - args = 'delete snapshot -ldev_id %d' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_modify_snapshot(self, ldev, op): - args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def _wait_for_snap_status(self, pvol, svol, status, timeout, start): - if (self.get_snap_pvol_status(pvol, svol) in status and - self.get_snap_svol_status(svol) in status): - raise loopingcall.LoopingCallDone() - - if time.time() - start >= timeout: - msg = basic_lib.output_err( - 637, method='_wait_for_snap_status', timeout=timeout) - raise exception.HBSDError(message=msg) - - def wait_snap(self, pvol, svol, status, timeout, interval): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_snap_status, pvol, - svol, status, timeout, time.time()) - - loop.start(interval=interval).wait() - - def comm_get_snapshot(self, ldev): - args = 'get snapshot -ldev_id %d' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def check_snap_count(self, ldev): - stdout = self.comm_get_snapshot(ldev) - if not stdout: - return - lines = stdout.splitlines() - if len(lines) >= MAX_SNAPSHOT_COUNT + 1: - msg = basic_lib.output_err( - 615, copy_method=basic_lib.THIN, pvol=ldev) - raise exception.HBSDBusy(message=msg) - - def get_snap_pvol_status(self, pvol, svol): - stdout = self.comm_get_snapshot(pvol) - if not stdout: - return basic_lib.SMPL - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if int(line[6]) == svol: - return STATUS_TABLE[line[2]] - else: - return basic_lib.SMPL - - def get_snap_svol_status(self, ldev): - stdout = self.comm_get_snapshot(ldev) - if not stdout: - return basic_lib.SMPL - lines = stdout.splitlines() - line = shlex.split(lines[1]) - return STATUS_TABLE[line[2]] - - @horcm_synchronized - def create_horcmconf(self, inst=None): - if inst is None: - inst = self.conf.hitachi_horcm_numbers[0] - - serial = self.conf.hitachi_serial_number - filename = '/etc/horcm%d.conf' % inst - - port = DEFAULT_PORT_BASE + inst - - found = False - - if not os.path.exists(filename): - file_str = """ -HORCM_MON -#ip_address service poll(10ms) timeout(10ms) -127.0.0.1 %16d 6000 3000 -HORCM_CMD -""" % port - else: - file_str =
utils.read_file_as_root(filename) - - lines = file_str.splitlines() - for line in lines: - if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line): - found = True - break - - if not found: - insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial - file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)', - r'\1\n%s\n' % insert_str, file_str) - - try: - utils.execute('tee', filename, process_input=file_str, - run_as_root=True) - except putils.ProcessExecutionError as ex: - msg = basic_lib.output_err( - 632, file=filename, ret=ex.exit_code, err=ex.stderr) - raise exception.HBSDError(message=msg) - - def comm_get_copy_grp(self): - ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp', - printflag=False) - if ret: - opt = 'raidcom get copy_grp' - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun): - args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d' - % (copy_group, pvol_group, svol_group, mun)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_copy_grp(self, copy_group): - args = 'delete copy_grp -copy_grp_name %s' % copy_group - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_get_device_grp(self, group_name): - args = 'get device_grp -device_grp_name %s' % group_name - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def comm_add_device_grp(self, group_name, ldev_name, ldev): - args = ('add device_grp -device_grp_name %s %s -ldev_id %d' - % (group_name, ldev_name, ldev)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_device_grp(self, group_name, ldev): - args = ('delete device_grp -device_grp_name %s -ldev_id %d' - % (group_name, ldev)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_paircreate(self, copy_group, ldev_name): - args = ('-g %s -d %s -split -fq quick -c %d -vl' - % (copy_group, ldev_name, self.conf.hitachi_copy_speed)) - ret, stdout, stderr = self.exec_raidcom('paircreate', args) - if ret: - opt = 'paircreate %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_pairsplit(self, copy_group, ldev_name): - args = '-g %s -d %s -S' % (copy_group, ldev_name) - ret, stdout, stderr = self.exec_raidcom('pairsplit', args) - if ret: - opt = 'pairsplit %s' % args - msg = 
basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_pairevtwait(self, copy_group, ldev_name, check_svol): - if not check_svol: - option = '-nowait' - else: - option = '-nowaits' - args = '-g %s -d %s %s' % (copy_group, ldev_name, option) - ret, stdout, stderr = self.exec_raidcom('pairevtwait', args, - printflag=False) - if ret > 127: - opt = 'pairevtwait %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return ret - - def comm_pairdisplay(self, copy_group, ldev_name=None): - if not ldev_name: - args = '-g %s -CLI' % copy_group - else: - args = '-g %s -d %s -CLI' % (copy_group, ldev_name) - ret, stdout, stderr = self.exec_raidcom('pairdisplay', args, - printflag=False) - if ret and ret not in NO_SUCH_DEVICE: - opt = 'pairdisplay %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return ret, stdout, stderr - - def check_copy_grp(self, copy_group): - stdout = self.comm_get_copy_grp() - lines = stdout.splitlines() - count = 0 - for line in lines[1:]: - line = shlex.split(line) - if line[0] == copy_group: - count += 1 - if count == 2: - break - return count - - def check_device_grp(self, group_name, ldev, ldev_name=None): - stdout = self.comm_get_device_grp(group_name) - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if int(line[2]) == ldev: - if not ldev_name: - return True - else: - return line[1] == ldev_name - else: - return False - - def is_smpl(self, copy_group, ldev_name): - ret, stdout, stderr = self.comm_pairdisplay(copy_group, - ldev_name=ldev_name) - if not stdout: - return True - - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[9] in [NOT_SET, 'SMPL']: - return True - else: - return False - - def get_copy_groups(self): - copy_groups = [] - stdout = self.comm_get_copy_grp() - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[0] in self.copy_groups and line[0] not in copy_groups: - copy_groups.append(line[0]) - return copy_groups - - def get_matched_copy_group(self, pvol, svol, ldev_name): - for copy_group in self.get_copy_groups(): - pvol_group = '%sP' % copy_group - if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name): - return copy_group - else: - return None - - def get_paired_info(self, ldev, only_flag=False): - paired_info = {'pvol': None, 'svol': []} - pvol = None - is_svol = False - - stdout = self.comm_get_snapshot(ldev) - if stdout: - lines = stdout.splitlines() - line = shlex.split(lines[1]) - status = STATUS_TABLE.get(line[2], basic_lib.UNKN) - - if line[1] == 'P-VOL': - pvol = ldev - svol = int(line[6]) - else: - is_svol = True - pvol = int(line[6]) - svol = ldev - - if status == basic_lib.PSUS: - status = self.get_snap_pvol_status(pvol, svol) - - svol_info = {'lun': svol, 'status': status, 'is_vvol': True} - paired_info['svol'].append(svol_info) - paired_info['pvol'] = pvol - - if only_flag or is_svol: - return paired_info - - for copy_group in self.get_copy_groups(): - ldev_name = None - pvol_status = basic_lib.UNKN - svol_status = basic_lib.UNKN - - ret, stdout, stderr = self.comm_pairdisplay(copy_group) - if not stdout: - continue - - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[9] not in 
['P-VOL', 'S-VOL']: - continue - - ldev0 = int(line[8]) - ldev1 = int(line[12]) - if ldev not in [ldev0, ldev1]: - continue - - ldev_name = line[1] - - if line[9] == 'P-VOL': - pvol = ldev0 - svol = ldev1 - pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN) - else: - svol = ldev0 - pvol = ldev1 - svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN) - - if svol == ldev: - is_svol = True - - if not ldev_name: - continue - - pvol_group = '%sP' % copy_group - pvol_ok = self.check_device_grp(pvol_group, pvol, - ldev_name=ldev_name) - - svol_group = '%sS' % copy_group - svol_ok = self.check_device_grp(svol_group, svol, - ldev_name=ldev_name) - - if pvol_ok and svol_ok: - if pvol_status == basic_lib.PSUS: - status = svol_status - else: - status = pvol_status - - svol_info = {'lun': svol, 'status': status, 'is_vvol': False} - paired_info['svol'].append(svol_info) - - if is_svol: - break - - # When 'pvol' is 0 it is still a valid LDEV ID, so the - # 'is not None' check cannot be removed. - if pvol is not None and paired_info['pvol'] is None: - paired_info['pvol'] = pvol - - return paired_info - - def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun): - pvol_group = '%sP' % copy_group - svol_group = '%sS' % copy_group - self.comm_add_device_grp(pvol_group, ldev_name, pvol) - self.comm_add_device_grp(svol_group, ldev_name, svol) - nr_copy_groups = self.check_copy_grp(copy_group) - if nr_copy_groups == 1: - self.comm_delete_copy_grp(copy_group) - if nr_copy_groups != 2: - self.comm_add_copy_grp(copy_group, pvol_group, svol_group, mun) - - def delete_pair_config(self, pvol, svol, copy_group, ldev_name): - pvol_group = '%sP' % copy_group - svol_group = '%sS' % copy_group - if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name): - self.comm_delete_device_grp(pvol_group, pvol) - if self.check_device_grp(svol_group, svol, ldev_name=ldev_name): - self.comm_delete_device_grp(svol_group, svol) - - def _wait_for_pair_status(self, copy_group, ldev_name, - status, timeout, check_svol, start): - if self.comm_pairevtwait(copy_group, ldev_name, - check_svol) in status: - raise loopingcall.LoopingCallDone() - - if time.time() - start >= timeout: - msg = basic_lib.output_err( - 637, method='_wait_for_pair_status', timeout=timeout) - raise exception.HBSDError(message=msg) - - def wait_pair(self, copy_group, ldev_name, status, timeout, - interval, check_svol=False): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_pair_status, copy_group, ldev_name, - status, timeout, check_svol, time.time()) - - loop.start(interval=interval).wait() - - def comm_create_pair(self, pvol, svol, is_vvol): - timeout = basic_lib.DEFAULT_PROCESS_WAITTIME - interval = self.conf.hitachi_copy_check_interval - if not is_vvol: - restart = False - create = False - ldev_name = LDEV_NAME % (pvol, svol) - mun = 0 - for mun in range(MAX_MUNS): - copy_group = self.copy_groups[mun] - pvol_group = '%sP' % copy_group - - if not self.check_device_grp(pvol_group, pvol): - break - else: - msg = basic_lib.output_err( - 615, copy_method=basic_lib.FULL, pvol=pvol) - raise exception.HBSDBusy(message=msg) - try: - self.add_pair_config(pvol, svol, copy_group, ldev_name, mun) - self.restart_pair_horcm() - restart = True - self.comm_paircreate(copy_group, ldev_name) - create = True - self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS], - timeout, interval) - self.wait_pair(copy_group, ldev_name, - [basic_lib.PSUS, basic_lib.COPY], - timeout, interval, check_svol=True) - except Exception: - with excutils.save_and_reraise_exception(): - if
create: - try: - self.wait_pair(copy_group, ldev_name, - [basic_lib.PSUS], timeout, - interval) - self.wait_pair(copy_group, ldev_name, - [basic_lib.PSUS], timeout, - interval, check_svol=True) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - try: - self.comm_pairsplit(copy_group, ldev_name) - self.wait_pair( - copy_group, ldev_name, - [basic_lib.SMPL], timeout, - self.conf.hitachi_async_copy_check_interval) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - if self.is_smpl(copy_group, ldev_name): - try: - self.delete_pair_config(pvol, svol, copy_group, - ldev_name) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - if restart: - try: - self.restart_pair_horcm() - except Exception as ex: - LOG.warning('Failed to restart horcm: %s', ex) - - else: - self.check_snap_count(pvol) - self.comm_add_snapshot(pvol, svol) - - try: - self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval) - self.comm_modify_snapshot(svol, 'create') - self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval) - except Exception: - with excutils.save_and_reraise_exception(): - try: - self.comm_delete_snapshot(svol) - self.wait_snap( - pvol, svol, [basic_lib.SMPL], timeout, - self.conf.hitachi_async_copy_check_interval) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - def delete_pair(self, pvol, svol, is_vvol): - timeout = basic_lib.DEFAULT_PROCESS_WAITTIME - interval = self.conf.hitachi_async_copy_check_interval - if not is_vvol: - ldev_name = LDEV_NAME % (pvol, svol) - copy_group = self.get_matched_copy_group(pvol, svol, ldev_name) - if not copy_group: - return - try: - self.comm_pairsplit(copy_group, ldev_name) - self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL], - timeout, interval) - finally: - if self.is_smpl(copy_group, ldev_name): - self.delete_pair_config(pvol, svol, copy_group, ldev_name) - else: - self.comm_delete_snapshot(svol) - self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval) - - def comm_raidqry(self): - ret, stdout, stderr = self.exec_command('raidqry', '-h') - if ret: - opt = 'raidqry -h' - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def get_comm_version(self): - stdout = self.comm_raidqry() - lines = stdout.splitlines() - return shlex.split(lines[1])[1] - - def output_param_to_log(self, conf): - for opt in volume_opts: - if not opt.secret: - value = getattr(conf, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - - def create_lock_file(self): - inst = self.conf.hitachi_horcm_numbers[0] - pair_inst = self.conf.hitachi_horcm_numbers[1] - serial = self.conf.hitachi_serial_number - raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) - raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst) - horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst) - resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial) - - basic_lib.create_empty_file(raidcom_lock_file) - basic_lib.create_empty_file(raidcom_pair_lock_file) - basic_lib.create_empty_file(horcmgr_lock_file) - basic_lib.create_empty_file(resource_lock_file) - - def connect_storage(self): - properties = utils.brick_get_connector_properties() - self.setup_horcmgr(properties['ip']) - - def get_max_hostgroups(self): - """return the maximum value of hostgroup id.""" - return MAX_HOSTGROUPS - - def get_hostgroup_luns(self, port, gid): - list = [] - 
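The method body resuming below delegates to add_used_hlun(), which, like most comm_* helpers in this module, scrapes columnar CLI output with shlex.split(), skipping the header row and indexing fixed columns. A minimal sketch of that parsing idiom; the sample output is invented, though column 3 matches the LUN column the real helper reads:

import shlex

# Invented sample in the shape of `raidcom get lun` output: a header row,
# then PORT GID GROUP_NAME LUN NUM LDEV columns.
stdout = """PORT  GID  GROUP_NAME  LUN  NUM  LDEV
CL1-A  0  HBSD-example  0  1  40
CL1-A  0  HBSD-example  1  1  41"""

used_luns = []
for line in stdout.splitlines()[1:]:   # skip the header row
    fields = shlex.split(line)
    if not fields:
        continue
    lun = int(fields[3])               # LUN column
    if lun not in used_luns:
        used_luns.append(lun)

print(used_luns)                       # [0, 1]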
self.add_used_hlun(port, gid, list) - - return list - - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - param = 'serial_number' - - if param not in existing_ref: - msg = basic_lib.output_err(700, param=param) - raise exception.HBSDError(data=msg) - - storage = existing_ref.get(param) - if storage != self.conf.hitachi_serial_number: - msg = basic_lib.output_err(648, resource=param) - raise exception.HBSDError(data=msg) - - stdout = self.comm_get_ldev(ldev) - if not stdout: - msg = basic_lib.output_err(648, resource='LDEV') - raise exception.HBSDError(data=msg) - - sts_line = vol_type = "" - vol_attrs = [] - size = num_port = 1 - - lines = stdout.splitlines() - for line in lines: - if line.startswith("STS :"): - sts_line = line - - elif line.startswith("VOL_TYPE :"): - vol_type = shlex.split(line)[2] - - elif line.startswith("VOL_ATTR :"): - vol_attrs = shlex.split(line)[2:] - - elif line.startswith("VOL_Capacity(BLK) :"): - size = int(shlex.split(line)[2]) - - elif line.startswith("NUM_PORT :"): - num_port = int(shlex.split(line)[2]) - - if 'NML' not in sts_line: - msg = basic_lib.output_err(648, resource='LDEV') - - raise exception.HBSDError(data=msg) - - if 'OPEN-V' not in vol_type: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - if 'HDP' not in vol_attrs: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - for vol_attr in vol_attrs: - if vol_attr == ':': - continue - - if vol_attr in PAIR_TYPE: - msg = basic_lib.output_err(705, ldev=ldev) - raise exception.HBSDError(data=msg) - - if vol_attr not in PERMITTED_TYPE: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. - # So, units.Gi is divided by 512. - if size % (units.Gi / 512): - msg = basic_lib.output_err(703, ldev=ldev) - raise exception.HBSDError(data=msg) - - if num_port: - msg = basic_lib.output_err(704, ldev=ldev) - raise exception.HBSDError(data=msg) - - return size / (units.Gi / 512) diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py deleted file mode 100644 index 425d4d6e0a8..00000000000 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ /dev/null @@ -1,432 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -iSCSI Cinder volume driver for Hitachi storage. 
- -""" - -import os -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -import cinder.volume.driver -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib -from cinder.volume.drivers.hitachi import hbsd_common as common - -LOG = logging.getLogger(__name__) - -CHAP_METHOD = ('None', 'CHAP None', 'CHAP') - -volume_opts = [ - cfg.BoolOpt('hitachi_add_chap_user', - default=False, - help='Add CHAP user'), - cfg.StrOpt('hitachi_auth_method', - help='iSCSI authentication method'), - cfg.StrOpt('hitachi_auth_user', - default='%sCHAP-user' % basic_lib.NAME_PREFIX, - help='iSCSI authentication username'), - cfg.StrOpt('hitachi_auth_password', - default='%sCHAP-password' % basic_lib.NAME_PREFIX, - help='iSCSI authentication password', secret=True), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"] - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - os.environ['LANG'] = 'C' - super(HBSDISCSIDriver, self).__init__(*args, **kwargs) - self.db = kwargs.get('db') - self.common = None - self.configuration.append_config_values(common.volume_opts) - self._stats = {} - self.context = None - self.do_setup_status = threading.Event() - - def _check_param(self): - self.configuration.append_config_values(volume_opts) - if (self.configuration.hitachi_auth_method and - self.configuration.hitachi_auth_method not in CHAP_METHOD): - raise exception.HBSDError( - message=basic_lib.output_err(601, param='hitachi_auth_method')) - if self.configuration.hitachi_auth_method == 'None': - self.configuration.hitachi_auth_method = None - for opt in volume_opts: - getattr(self.configuration, opt.name) - - def check_param(self): - try: - self.common.check_param() - self._check_param() - except exception.HBSDError: - raise - except Exception as ex: - raise exception.HBSDError( - message=basic_lib.output_err(601, param=six.text_type(ex))) - - def output_param_to_log(self): - lock = basic_lib.get_process_lock(self.common.system_lock_file) - - with lock: - self.common.output_param_to_log('iSCSI') - for opt in volume_opts: - if not opt.secret: - value = getattr(self.configuration, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - - def _delete_lun_iscsi(self, hostgroups, ldev): - try: - self.common.command.comm_delete_lun_iscsi(hostgroups, ldev) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(301, ldev=ldev)) - - def _add_target(self, hostgroups, ldev): - self.common.add_lun('autargetmap', hostgroups, ldev) - - def _add_initiator(self, hgs, port, gid, host_iqn): - self.common.command.comm_add_initiator(port, gid, host_iqn) - hgs.append({'port': port, 'gid': int(gid), 'detected': True}) - LOG.debug("Create iSCSI target for %s", hgs) - - def _get_unused_gid_iscsi(self, port): - group_range = self.configuration.hitachi_group_range - if not group_range: - group_range = basic_lib.DEFAULT_GROUP_RANGE - return self.common.command.get_unused_gid_iscsi(group_range, port) - - def _delete_iscsi_target(self, port, target_no, target_alias): - ret, _stdout, _stderr = 
self.common.command.delete_iscsi_target( - port, target_no, target_alias) - if ret: - LOG.warning(basic_lib.set_msg( - 307, port=port, tno=target_no, alias=target_alias)) - - def _delete_chap_user(self, port): - ret, _stdout, _stderr = self.common.command.delete_chap_user(port) - if ret: - LOG.warning(basic_lib.set_msg( - 303, user=self.configuration.hitachi_auth_user)) - - def _get_hostgroup_info_iscsi(self, hgs, host_iqn): - return self.common.command.comm_get_hostgroup_info_iscsi( - hgs, host_iqn, self.configuration.hitachi_target_ports) - - def _discovery_iscsi_target(self, hostgroups): - for hostgroup in hostgroups: - ip_addr, ip_port = self.common.command.comm_get_iscsi_ip( - hostgroup['port']) - target_iqn = self.common.command.comm_get_target_iqn( - hostgroup['port'], hostgroup['gid']) - hostgroup['ip_addr'] = ip_addr - hostgroup['ip_port'] = ip_port - hostgroup['target_iqn'] = target_iqn - LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s", - {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn}) - - def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn): - for port in ports: - added_hostgroup = False - added_user = False - LOG.debug('Create target (hgs: %(hgs)s port: %(port)s ' - 'target_iqn: %(tiqn)s target_alias: %(alias)s ' - 'add_iqn: %(aiqn)s)', - {'hgs': hgs, 'port': port, 'tiqn': target_iqn, - 'alias': target_alias, 'aiqn': add_iqn}) - gid = self.common.command.get_gid_from_targetiqn( - target_iqn, target_alias, port) - if gid is None: - for retry_cnt in basic_lib.DEFAULT_TRY_RANGE: - gid = None - try: - gid = self._get_unused_gid_iscsi(port) - self.common.command.comm_add_hostgrp_iscsi( - port, gid, target_alias, target_iqn) - added_hostgroup = True - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(312, resource='GID')) - continue - except Exception as ex: - LOG.warning(basic_lib.set_msg( - 309, port=port, alias=target_alias, - reason=ex)) - break - else: - LOG.debug('Completed to add target' - '(port: %(port)s gid: %(gid)d)', - {'port': port, 'gid': gid}) - break - if gid is None: - LOG.error('Failed to add target(port: %s)', port) - continue - try: - if added_hostgroup: - if self.configuration.hitachi_auth_method: - added_user = self.common.command.set_chap_authention( - port, gid) - self.common.command.comm_set_hostgrp_reportportal( - port, target_alias) - self._add_initiator(hgs, port, gid, add_iqn) - except Exception as ex: - LOG.warning(basic_lib.set_msg( - 316, port=port, reason=ex)) - if added_hostgroup: - if added_user: - self._delete_chap_user(port) - self._delete_iscsi_target(port, gid, target_alias) - - def add_hostgroup_core(self, hgs, ports, target_iqn, - target_alias, add_iqn): - if ports: - self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn) - - def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports): - target_ports = self.configuration.hitachi_target_ports - group_request = self.configuration.hitachi_group_request - target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip) - if target_ports and group_request: - target_iqn = '%s.target' % master_iqn - - diff_ports = [] - for port in security_ports: - for hostgroup in hgs: - if hostgroup['port'] == port: - break - else: - diff_ports.append(port) - - self.add_hostgroup_core(hgs, diff_ports, target_iqn, - target_alias, master_iqn) - if not hgs: - raise exception.HBSDError(message=basic_lib.output_err(649)) - - def add_hostgroup(self): - properties = utils.brick_get_connector_properties() - if 'initiator' not in properties: - raise 
exception.HBSDError( - message=basic_lib.output_err(650, resource='HBA')) - LOG.debug("initiator: %s", properties['initiator']) - hostgroups = [] - security_ports = self._get_hostgroup_info_iscsi( - hostgroups, properties['initiator']) - self.add_hostgroup_master(hostgroups, properties['initiator'], - properties['ip'], security_ports) - - def _get_properties(self, volume, hostgroups): - conf = self.configuration - properties = {} - self._discovery_iscsi_target(hostgroups) - hostgroup = hostgroups[0] - - properties['target_discovered'] = True - properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'], - hostgroup['ip_port']) - properties['target_iqn'] = hostgroup['target_iqn'] - properties['target_lun'] = hostgroup['lun'] - - if conf.hitachi_auth_method: - properties['auth_method'] = 'CHAP' - properties['auth_username'] = conf.hitachi_auth_user - properties['auth_password'] = conf.hitachi_auth_password - - return properties - - def do_setup(self, context): - self.context = context - self.common = common.HBSDCommon(self.configuration, self, - context, self.db) - msg = _("The HBSD iSCSI driver is deprecated and " - "will be removed in P release") - versionutils.report_deprecated_feature(LOG, msg) - - self.check_param() - - self.common.create_lock_file() - - self.common.command.connect_storage() - - lock = basic_lib.get_process_lock(self.common.service_lock_file) - with lock: - self.add_hostgroup() - - self.output_param_to_log() - self.do_setup_status.set() - - def check_for_setup_error(self): - pass - - def extend_volume(self, volume, new_size): - self.do_setup_status.wait() - self.common.extend_volume(volume, new_size) - - def get_volume_stats(self, refresh=False): - if refresh: - if self.do_setup_status.isSet(): - self.common.output_backend_available_once() - _stats = self.common.update_volume_stats("iSCSI") - if _stats: - self._stats = _stats - return self._stats - - def create_volume(self, volume): - self.do_setup_status.wait() - metadata = self.common.create_volume(volume) - return metadata - - def delete_volume(self, volume): - self.do_setup_status.wait() - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_snapshot(snapshot) - return metadata - - def delete_snapshot(self, snapshot): - self.do_setup_status.wait() - self.common.delete_snapshot(snapshot) - - def create_cloned_volume(self, volume, src_vref): - self.do_setup_status.wait() - metadata = self.common.create_cloned_volume(volume, src_vref) - return metadata - - def create_volume_from_snapshot(self, volume, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_volume_from_snapshot(volume, snapshot) - return metadata - - def _initialize_connection(self, ldev, connector, src_hgs=None): - LOG.debug("Call _initialize_connection " - "(config_group: %(group)s ldev: %(ldev)d)", - {'group': self.configuration.config_group, 'ldev': ldev}) - if src_hgs: - hostgroups = src_hgs[:] - else: - hostgroups = [] - security_ports = self._get_hostgroup_info_iscsi( - hostgroups, connector['initiator']) - self.add_hostgroup_master(hostgroups, connector['initiator'], - connector['ip'], security_ports) - - self._add_target(hostgroups, ldev) - - return hostgroups - - def initialize_connection(self, volume, connector): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - raise exception.HBSDError( - message=basic_lib.output_err(619, volume_id=volume['id'])) - self.common.add_volinfo(ldev, volume['id']) - with 
self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - hostgroups = self._initialize_connection(ldev, connector) - protocol = 'iscsi' - properties = self._get_properties(volume, hostgroups) - LOG.debug('Initialize volume_info: %s', - self.common.volume_info) - - LOG.debug('HFCDrv: properties=%s', properties) - return { - 'driver_volume_type': protocol, - 'data': properties - } - - def _terminate_connection(self, ldev, connector, src_hgs): - LOG.debug("Call _terminate_connection(config_group: %s)", - self.configuration.config_group) - hostgroups = src_hgs[:] - self._delete_lun_iscsi(hostgroups, ldev) - - LOG.debug("*** _terminate_ ***") - - def terminate_connection(self, volume, connector, **kwargs): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - LOG.warning(basic_lib.set_msg(302, volume_id=volume['id'])) - return - - if 'initiator' not in connector: - raise exception.HBSDError( - message=basic_lib.output_err(650, resource='HBA')) - - hostgroups = [] - self._get_hostgroup_info_iscsi(hostgroups, - connector['initiator']) - if not hostgroups: - raise exception.HBSDError(message=basic_lib.output_err(649)) - - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - self._terminate_connection(ldev, connector, hostgroups) - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def pair_initialize_connection(self, unused_ldev): - pass - - def pair_terminate_connection(self, unused_ldev): - pass - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - self.do_setup_status.wait() - if volume['volume_attachment']: - desc = 'volume %s' % volume['id'] - raise exception.HBSDError( - message=basic_lib.output_err(660, desc=desc)) - super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume, - image_service, - image_meta) - - def manage_existing(self, volume, existing_ref): - return self.common.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - self.do_setup_status.wait() - return self.common.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - self.do_setup_status.wait() - self.common.unmanage(volume) diff --git a/cinder/volume/drivers/hitachi/hbsd_snm2.py b/cinder/volume/drivers/hitachi/hbsd_snm2.py deleted file mode 100644 index 14a9905791c..00000000000 --- a/cinder/volume/drivers/hitachi/hbsd_snm2.py +++ /dev/null @@ -1,1154 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
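Before the SNM2 command layer that follows, it is worth noting the shape of the dictionary the iSCSI driver above hands back from initialize_connection(), as assembled by _get_properties(). A sketch with invented placeholder values; the CHAP keys are attached only when hitachi_auth_method is configured:

connection_info = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_discovered': True,
        'target_portal': '192.0.2.10:3260',   # ip_addr:ip_port of the array port
        'target_iqn': 'iqn.1994-04.jp.co.hitachi:example-target',
        'target_lun': 1,
        # Present only when CHAP authentication is enabled:
        'auth_method': 'CHAP',
        'auth_username': 'HBSD-CHAP-user',
        'auth_password': 'HBSD-CHAP-password',
    },
}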
- -import re -import shlex -import threading -import time - -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder import utils -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib - -LOG = logging.getLogger(__name__) - -SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm ' - 'LD_LIBRARY_PATH=/usr/stonavm/lib ' - 'STONAVM_RSP_PASS=on STONAVM_ACT=on') - -MAX_HOSTGROUPS = 127 -MAX_HOSTGROUPS_ISCSI = 254 -MAX_HLUN = 2047 -EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_' -EXEC_TIMEOUT = 10 -EXEC_INTERVAL = 1 - -CHAP_TIMEOUT = 5 -PAIRED = 12 -DUMMY_LU = -1 - - -class HBSDSNM2(basic_lib.HBSDBasicLib): - - def __init__(self, conf): - super(HBSDSNM2, self).__init__(conf=conf) - - self.unit_name = conf.hitachi_unit_name - self.hsnm_lock = threading.Lock() - self.hsnm_lock_file = ('%s%s' - % (EXEC_LOCK_PATH_BASE, self.unit_name)) - copy_speed = conf.hitachi_copy_speed - if copy_speed <= 2: - self.pace = 'slow' - elif copy_speed == 3: - self.pace = 'normal' - else: - self.pace = 'prior' - - def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start): - lock = basic_lib.get_process_lock(self.hsnm_lock_file) - with self.hsnm_lock, lock: - ret, stdout, stderr = self.exec_command('env', args=args, - printflag=printflag) - - if not ret or noretry: - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if time.time() - start >= timeout: - LOG.error("snm2 command timeout.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if (re.search('DMEC002047', stderr) - or re.search('DMEC002048', stderr) - or re.search('DMED09000A', stderr) - or re.search('DMED090026', stderr) - or re.search('DMED0E002B', stderr) - or re.search('DMER03006A', stderr) - or re.search('DMER030080', stderr) - or re.search('DMER0300B8', stderr) - or re.search('DMER0800CF', stderr) - or re.search('DMER0800D[0-6D]', stderr) - or re.search('DMES052602', stderr)): - LOG.error("Unexpected error occurs in snm2.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - def exec_hsnm(self, command, args, printflag=True, noretry=False, - timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL): - args = '%s %s %s' % (SNM2_ENV, command, args) - - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_exec_hsnm, args, printflag, - noretry, timeout, time.time()) - - return loop.start(interval=interval).wait() - - def _execute_with_exception(self, cmd, args, **kwargs): - ret, stdout, stderr = self.exec_hsnm(cmd, args, **kwargs) - if ret: - cmds = '%(cmd)s %(args)s' % {'cmd': cmd, 'args': args} - msg = basic_lib.output_err( - 600, cmd=cmds, ret=ret, out=stdout, err=stderr) - raise exception.HBSDError(data=msg) - - return ret, stdout, stderr - - def _execute_and_return_stdout(self, cmd, args, **kwargs): - result = self._execute_with_exception(cmd, args, **kwargs) - - return result[1] - - def get_comm_version(self): - ret, stdout, stderr = self.exec_hsnm('auman', '-help') - m = re.search(r'Version (\d+).(\d+)', stdout) - if not m: - msg = basic_lib.output_err( - 600, cmd='auman', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return '%s.%s' % (m.group(1), m.group(2)) - - def add_used_hlun(self, command, port, gid, used_list, ldev): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm(command, - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, 
err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - for line in lines[2:]: - line = shlex.split(line) - if not line: - continue - if line[0] == port and int(line[1][0:3]) == gid: - if int(line[2]) not in used_list: - used_list.append(int(line[2])) - if int(line[3]) == ldev: - hlu = int(line[2]) - LOG.warning('ldev(%(ldev)d) is already mapped ' - '(hlun: %(hlu)d)', - {'ldev': ldev, 'hlu': hlu}) - return hlu - return None - - def _get_lu(self, lu=None): - # When 'lu' is 0, it should be true. So, it cannot remove 'is None'. - if lu is None: - args = '-unit %s' % self.unit_name - else: - args = '-unit %s -lu %s' % (self.unit_name, lu) - - return self._execute_and_return_stdout('auluref', args) - - def get_unused_ldev(self, ldev_range): - start = ldev_range[0] - end = ldev_range[1] - unit = self.unit_name - - ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auluref', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - free_ldev = start - lines = stdout.splitlines() - found = False - for line in lines[2:]: - line = shlex.split(line) - if not line: - continue - ldev_num = int(line[0]) - if free_ldev > ldev_num: - continue - if free_ldev == ldev_num: - free_ldev += 1 - else: - found = True - break - if free_ldev > end: - break - else: - found = True - - if not found: - msg = basic_lib.output_err(648, resource='LDEV') - raise exception.HBSDError(message=msg) - - return free_ldev - - def get_hgname_gid(self, port, host_grp_name): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('auhgdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - is_target_port = False - for line in lines: - line = shlex.split(line) - if not line: - continue - if line[0] == 'Port' and line[1] == port: - is_target_port = True - continue - if is_target_port: - if line[0] == 'Port': - break - if not line[0].isdigit(): - continue - gid = int(line[0]) - if line[1] == host_grp_name: - return gid - return None - - def get_unused_gid(self, group_range, port): - start = group_range[0] - end = group_range[1] - unit = self.unit_name - - ret, stdout, stderr = self.exec_hsnm('auhgdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - is_target_port = False - free_gid = start - found = False - for line in lines: - line = shlex.split(line) - if not line: - continue - if line[0] == 'Port' and line[1] == port: - is_target_port = True - continue - if is_target_port: - if line[0] == 'Port': - found = True - break - if not line[0].isdigit(): - continue - - gid = int(line[0]) - if free_gid > gid: - continue - if free_gid == gid: - free_gid += 1 - else: - found = True - break - if free_gid > end or free_gid > MAX_HOSTGROUPS: - break - else: - found = True - - if not found: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - - return free_gid - - def comm_set_target_wwns(self, target_ports): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('aufibre1', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='aufibre1', ret=ret, out=stdout, 
err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - target_wwns = {} - for line in lines[3:]: - if re.match('Transfer', line): - break - - line = shlex.split(line) - if len(line) < 4: - continue - - port = '%s%s' % (line[0], line[1]) - if target_ports: - if port in target_ports: - target_wwns[port] = line[3] - else: - target_wwns[port] = line[3] - - LOG.debug('target wwns: %s', target_wwns) - return target_wwns - - def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login): - for pt in wwns: - for line in buf[port]['assigned']: - hgname = shlex.split(line[38:])[1][4:] - if not re.match(basic_lib.NAME_PREFIX, hgname): - continue - if pt.search(line[38:54]): - wwn = line[38:54] - gid = int(shlex.split(line[38:])[1][0:3]) - is_detected = None - if login: - for line in buf[port]['detected']: - if pt.search(line[38:54]): - is_detected = True - break - else: - is_detected = False - hostgroups.append({'port': six.text_type(port), 'gid': gid, - 'initiator_wwn': wwn, - 'detected': is_detected}) - - def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('auhgwwn', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - security_ports = [] - patterns = [] - for wwn in wwns: - pt = re.compile(wwn, re.IGNORECASE) - patterns.append(pt) - - lines = stdout.splitlines() - buf = {} - _buffer = [] - port = None - security = None - for line in lines: - if re.match('Port', line): - port = shlex.split(line)[1] - if target_ports and port not in target_ports: - port = None - else: - security = True if shlex.split(line)[5] == 'ON' else False - buf[port] = {'detected': [], 'assigned': [], - 'assignable': []} - if security: - security_ports.append(port) - continue - if port and security: - if re.search('Detected WWN', line): - _buffer = buf[port]['detected'] - continue - elif re.search('Assigned WWN', line): - _buffer = buf[port]['assigned'] - continue - elif re.search('Assignable WWN', line): - _buffer = buf[port]['assignable'] - continue - _buffer.append(line) - - hostgroups = [] - for port in buf.keys(): - self.get_hostgroup_from_wwns( - hostgroups, port, patterns, buf, login) - - for hostgroup in hostgroups: - hgs.append(hostgroup) - - return security_ports - - def comm_delete_lun_core(self, command, hostgroups, lun): - unit = self.unit_name - - no_lun_cnt = 0 - deleted_hostgroups = [] - for hostgroup in hostgroups: - LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup) - port = hostgroup['port'] - gid = hostgroup['gid'] - ctl_no = port[0] - port_no = port[1] - - is_deleted = False - for deleted in deleted_hostgroups: - if port == deleted['port'] and gid == deleted['gid']: - is_deleted = True - if is_deleted: - continue - ret, stdout, stderr = self.exec_hsnm(command, - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[2:]: - line = shlex.split(line) - if not line: - continue - if (line[0] == port and int(line[1][0:3]) == gid - and int(line[3]) == lun): - hlu = int(line[2]) - break - else: - no_lun_cnt += 1 - if no_lun_cnt == len(hostgroups): - raise exception.HBSDNotFound - else: - continue - - opt = '-unit %s -rm %s %s %d %d %d' % (unit, 
ctl_no, port_no, - gid, hlu, lun) - ret, stdout, stderr = self.exec_hsnm(command, opt) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - deleted_hostgroups.append({'port': port, 'gid': gid}) - LOG.debug('comm_delete_lun is over (%d)', lun) - - def comm_delete_lun(self, hostgroups, ldev): - self.comm_delete_lun_core('auhgmap', hostgroups, ldev) - - def comm_delete_lun_iscsi(self, hostgroups, ldev): - self.comm_delete_lun_core('autargetmap', hostgroups, ldev) - - def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): - unit = self.unit_name - - if is_vvol: - command = 'aureplicationvvol' - opt = ('-unit %s -add -lu %d -size %dg' - % (unit, ldev, capacity)) - else: - command = 'auluadd' - opt = ('-unit %s -lu %d -dppoolno %d -size %dg' - % (unit, ldev, pool_id, capacity)) - - ret, stdout, stderr = self.exec_hsnm(command, opt) - if ret: - if (re.search('DMEC002047', stderr) - or re.search('DMES052602', stderr) - or re.search('DMED09000A', stderr)): - raise exception.HBSDNotFound - else: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_hostgrp(self, port, gid, host_grp_name): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - - opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no, - port_no, gid, - host_grp_name) - ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) - if ret: - raise exception.HBSDNotFound - - def comm_del_hostgrp(self, port, gid, host_grp_name): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no, - host_grp_name) - ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_hbawwn(self, port, gid, wwn): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no, - port_no, wwn, gid) - ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) - if ret: - opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no, - port_no, wwn, - gid) - ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_lun(self, command, hostgroups, ldev, is_once=False): - unit = self.unit_name - tmp_hostgroups = hostgroups[:] - used_list = [] - is_ok = False - hlu = None - old_hlu = None - for hostgroup in hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - hlu = self.add_used_hlun(command, port, gid, used_list, ldev) - # When 'hlu' or 'old_hlu' is 0, it should be true. - # So, it cannot remove 'is not None'. 
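# [Editor's note, not part of the original driver code] HLUN 0 is a
# valid unit number, which is why the test below must be 'is not None'
# rather than a plain truthiness check. A minimal illustration:
#
#     hlu = 0
#     bool(hlu)        # False -- 'if hlu:' would wrongly skip HLUN 0
#     hlu is not None  # True  -- correctly treats 0 as an assigned HLUN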
- if hlu is not None: - if old_hlu is not None and old_hlu != hlu: - msg = basic_lib.output_err(648, resource='LUN (HLUN)') - raise exception.HBSDError(message=msg) - is_ok = True - hostgroup['lun'] = hlu - tmp_hostgroups.remove(hostgroup) - old_hlu = hlu - else: - hlu = old_hlu - - if not used_list: - hlu = 0 - elif hlu is None: - for i in range(MAX_HLUN + 1): - if i not in used_list: - hlu = i - break - else: - raise exception.HBSDNotFound - - ret = 0 - stdout = None - stderr = None - invalid_hgs_str = None - for hostgroup in tmp_hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - ctl_no = port[0] - port_no = port[1] - if not hostgroup['detected']: - if invalid_hgs_str: - invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, - port, gid) - else: - invalid_hgs_str = '%s:%d' % (port, gid) - continue - opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no, - gid, hlu, ldev) - ret, stdout, stderr = self.exec_hsnm(command, opt) - if ret == 0: - is_ok = True - hostgroup['lun'] = hlu - if is_once: - break - else: - LOG.warning(basic_lib.set_msg( - 314, ldev=ldev, lun=hlu, port=port, id=gid)) - - if not is_ok: - if stderr: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - else: - msg = basic_lib.output_err(659, gid=invalid_hgs_str) - raise exception.HBSDError(message=msg) - - def comm_delete_ldev(self, ldev, is_vvol): - unit = self.unit_name - - if is_vvol: - command = 'aureplicationvvol' - opt = '-unit %s -rm -lu %d' % (unit, ldev) - else: - command = 'auludel' - opt = '-unit %s -lu %d -f' % (unit, ldev) - - ret, stdout, stderr = self.exec_hsnm(command, opt, - timeout=30, interval=3) - if ret: - if (re.search('DMEC002048', stderr) - or re.search('DMED090026', stderr)): - raise exception.HBSDNotFound - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return ret - - def comm_extend_ldev(self, ldev, old_size, new_size): - unit = self.unit_name - command = 'auluchgsize' - options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size) - - ret, stdout, stderr = self.exec_hsnm(command, options) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def delete_chap_user(self, port): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - auth_username = self.conf.hitachi_auth_user - - opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no, - auth_username) - return self.exec_hsnm('auchapuser', opt) - - def _wait_for_add_chap_user(self, cmd, auth_username, - auth_password, start): - # Don't move 'import pexpect' to the beginning of the file so that - # a tempest can work. 
- import pexpect - - lock = basic_lib.get_process_lock(self.hsnm_lock_file) - with self.hsnm_lock, lock: - try: - child = pexpect.spawn(cmd) - child.expect('Secret: ', timeout=CHAP_TIMEOUT) - child.sendline(auth_password) - child.expect('Re-enter Secret: ', - timeout=CHAP_TIMEOUT) - child.sendline(auth_password) - child.expect('The CHAP user information has ' - 'been added successfully.', - timeout=CHAP_TIMEOUT) - except Exception: - if time.time() - start >= EXEC_TIMEOUT: - msg = basic_lib.output_err(642, user=auth_username) - raise exception.HBSDError(message=msg) - else: - raise loopingcall.LoopingCallDone(True) - - def set_chap_authention(self, port, gid): - ctl_no = port[0] - port_no = port[1] - unit = self.unit_name - auth_username = self.conf.hitachi_auth_user - auth_password = self.conf.hitachi_auth_password - add_chap_user = self.conf.hitachi_add_chap_user - assign_flag = True - added_flag = False - opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no, - auth_username) - ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True) - - if ret: - if not add_chap_user: - msg = basic_lib.output_err(643, user=auth_username) - raise exception.HBSDError(message=msg) - - root_helper = utils.get_root_helper() - cmd = ('%s env %s auchapuser -unit %s -add %s %s ' - '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no, - port_no, gid, auth_username)) - - LOG.debug('Add CHAP user') - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_add_chap_user, cmd, - auth_username, auth_password, time.time()) - - added_flag = loop.start(interval=EXEC_INTERVAL).wait() - - else: - lines = stdout.splitlines()[4:] - for line in lines: - if int(shlex.split(line)[0][0:3]) == gid: - assign_flag = False - break - - if assign_flag: - opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no, - port_no, gid, - auth_username) - ret, stdout, stderr = self.exec_hsnm('auchapuser', opt) - if ret: - if added_flag: - _ret, _stdout, _stderr = self.delete_chap_user(port) - if _ret: - LOG.warning(basic_lib.set_msg( - 303, user=auth_username)) - - msg = basic_lib.output_err( - 600, cmd='auchapuser', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - return added_flag - - def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn): - auth_method = self.conf.hitachi_auth_method - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - if auth_method: - auth_arg = '-authmethod %s -mutual disable' % auth_method - else: - auth_arg = '-authmethod None' - - opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid) - opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn, - auth_arg) - ret, stdout, stderr = self.exec_hsnm('autargetdef', opt) - - if ret: - raise exception.HBSDNotFound - - def delete_iscsi_target(self, port, _target_no, target_alias): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no, - target_alias) - return self.exec_hsnm('autargetdef', opt) - - def comm_set_hostgrp_reportportal(self, port, target_alias): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no, - target_alias) - opt = '%s -ReportFullPortalList enable' % opt - ret, stdout, stderr = self.exec_hsnm('autargetopt', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetopt', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, 
err=stderr) - - def comm_add_initiator(self, port, gid, host_iqn): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no, - port_no, gid, - host_iqn) - ret, stdout, stderr = self.exec_hsnm('autargetini', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('autargetini', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - security_ports = [] - lines = stdout.splitlines() - hostgroups = [] - security = True - for line in lines: - if not shlex.split(line): - continue - if re.match('Port', line): - line = shlex.split(line) - port = line[1] - security = True if line[4] == 'ON' else False - continue - - if target_ports and port not in target_ports: - continue - - if security: - if (host_iqn in shlex.split(line[72:]) and - re.match(basic_lib.NAME_PREFIX, - shlex.split(line)[0][4:])): - gid = int(shlex.split(line)[0][0:3]) - hostgroups.append( - {'port': port, 'gid': gid, 'detected': True}) - LOG.debug('Find port=%(port)s gid=%(gid)d', - {'port': port, 'gid': gid}) - if port not in security_ports: - security_ports.append(port) - - for hostgroup in hostgroups: - hgs.append(hostgroup) - - return security_ports - - def comm_get_iscsi_ip(self, port): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('auiscsi', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auiscsi', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - is_target_port = False - for line in lines: - line_array = shlex.split(line) - if not line_array: - continue - if line_array[0] == 'Port' and line_array[1] != 'Number': - if line_array[1] == port: - is_target_port = True - else: - is_target_port = False - continue - if is_target_port and re.search('IPv4 Address', line): - ip_addr = shlex.split(line)[3] - break - if is_target_port and re.search('Port Number', line): - ip_port = shlex.split(line)[3] - else: - msg = basic_lib.output_err(651) - raise exception.HBSDError(message=msg) - - return ip_addr, ip_port - - def comm_get_target_iqn(self, port, gid): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('autargetdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - is_target_host = False - tmp_port = None - lines = stdout.splitlines() - for line in lines: - line = shlex.split(line) - if not line: - continue - - if line[0] == "Port": - tmp_port = line[1] - continue - - if port != tmp_port: - continue - - gid_tmp = line[0][0:3] - if gid_tmp.isdigit() and int(gid_tmp) == gid: - is_target_host = True - continue - if is_target_host and line[0] == "iSCSI": - target_iqn = line[3] - break - else: - msg = basic_lib.output_err(650, resource='IQN') - raise exception.HBSDError(message=msg) - - return target_iqn - - def get_unused_gid_iscsi(self, group_range, port): - start = group_range[0] - end = min(group_range[1], MAX_HOSTGROUPS_ISCSI) - unit = self.unit_name - - ret, stdout, stderr = 
self.exec_hsnm('autargetdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - used_list = [] - tmp_port = None - lines = stdout.splitlines() - for line in lines: - line = shlex.split(line) - if not line: - continue - - if line[0] == "Port": - tmp_port = line[1] - continue - - if port != tmp_port: - continue - - if line[0][0:3].isdigit(): - gid = int(line[0][0:3]) - if start <= gid <= end: - used_list.append(gid) - if not used_list: - return start - - for gid in range(start, end + 1): - if gid not in used_list: - break - else: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - - return gid - - def get_gid_from_targetiqn(self, target_iqn, target_alias, port): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('autargetdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - gid = None - tmp_port = None - found_alias_full = False - found_alias_part = False - lines = stdout.splitlines() - for line in lines: - line = shlex.split(line) - if not line: - continue - - if line[0] == "Port": - tmp_port = line[1] - continue - - if port != tmp_port: - continue - - if line[0][0:3].isdigit(): - tmp_gid = int(line[0][0:3]) - if re.match(basic_lib.NAME_PREFIX, line[0][4:]): - found_alias_part = True - if line[0][4:] == target_alias: - found_alias_full = True - continue - - if line[0] == "iSCSI": - if line[3] == target_iqn: - gid = tmp_gid - break - else: - found_alias_part = False - - if found_alias_full and gid is None: - msg = basic_lib.output_err(641) - raise exception.HBSDError(message=msg) - - # When 'gid' is 0, it should be true. - # So, it cannot remove 'is not None'. 
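# [Editor's note, an interpretation of the paired guards] Together with
# the 'found_alias_full' check above, the test below rejects both
# inconsistent states: an exact alias match that never produced a
# matching IQN, and an IQN match found outside any HBSD-named target.
# Both cases raise error 641 rather than returning a questionable GID.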
- if not found_alias_part and gid is not None: - msg = basic_lib.output_err(641) - raise exception.HBSDError(message=msg) - - return gid - - def comm_get_dp_pool(self, pool_id): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('audppool', - '-unit %s -refer -g' % unit, - printflag=False) - if ret: - msg = basic_lib.output_err( - 600, cmd='audppool', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[2:]: - tc_cc = re.search(r'\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line) - pool_tmp = re.match(r'\s*\d+', line) - if (pool_tmp and tc_cc - and int(pool_tmp.group(0)) == pool_id): - total_gb = int(float(tc_cc.group(1))) - free_gb = total_gb - int(float(tc_cc.group(2))) - return total_gb, free_gb - - msg = basic_lib.output_err(640, pool_id=pool_id) - raise exception.HBSDError(message=msg) - - def is_detected(self, port, wwn): - hgs = [] - self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True) - return hgs[0]['detected'] - - def pairoperate(self, opr, pvol, svol, is_vvol, args=None): - unit = self.unit_name - method = '-ss' if is_vvol else '-si' - opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method, - pvol, svol) - if args: - opt = '%s %s' % (opt, args) - ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt) - if ret: - opt = '%s %s' % ('aureplicationlocal', opt) - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_create_pair(self, pvol, svol, is_vvol): - if not is_vvol: - args = '-compsplit -pace %s' % self.pace - method = basic_lib.FULL - else: - pool = self.conf.hitachi_thin_pool_id - args = ('-localrepdppoolno %d -localmngdppoolno %d ' - '-compsplit -pace %s' % (pool, pool, self.pace)) - method = basic_lib.THIN - try: - self.pairoperate('create', pvol, svol, is_vvol, args=args) - except exception.HBSDCmdError as ex: - if (re.search('DMER0300B8', ex.stderr) - or re.search('DMER0800CF', ex.stderr) - or re.search('DMER0800D[0-6D]', ex.stderr) - or re.search('DMER03006A', ex.stderr) - or re.search('DMER030080', ex.stderr)): - msg = basic_lib.output_err(615, copy_method=method, pvol=pvol) - raise exception.HBSDBusy(message=msg) - else: - raise - - def _comm_pairevtwait(self, pvol, svol, is_vvol): - unit = self.unit_name - if not is_vvol: - pairname = 'SI_LU%04d_LU%04d' % (pvol, svol) - method = '-si' - else: - pairname = 'SS_LU%04d_LU%04d' % (pvol, svol) - method = '-ss' - opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' % - (unit, method, pairname)) - ret, stdout, stderr = self.exec_hsnm('aureplicationmon', - opt, noretry=True) - - return ret - - def _wait_for_pair_status(self, pvol, svol, is_vvol, - status, timeout, start): - if self._comm_pairevtwait(pvol, svol, is_vvol) in status: - raise loopingcall.LoopingCallDone() - - if time.time() - start >= timeout: - msg = basic_lib.output_err( - 637, method='_wait_for_pair_status', timeout=timeout) - raise exception.HBSDError(message=msg) - - def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_pair_status, pvol, svol, is_vvol, - status, timeout, time.time()) - - loop.start(interval=interval).wait() - - def delete_pair(self, pvol, svol, is_vvol): - self.pairoperate('simplex', pvol, svol, is_vvol) - - def trans_status_hsnm2raid(self, str): - status = None - obj = re.search(r'Split\((.*)%\)', str) - if obj: - status 
= basic_lib.PSUS - obj = re.search(r'Paired\((.*)%\)', str) - if obj: - status = basic_lib.PAIR - return status - - def get_paired_info(self, ldev, only_flag=False): - opt_base = '-unit %s -refer' % self.unit_name - if only_flag: - opt_base = '%s -ss' % opt_base - - opt = '%s -pvol %d' % (opt_base, ldev) - ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', - opt, noretry=True) - if ret == 0: - lines = stdout.splitlines() - pair_info = {'pvol': ldev, 'svol': []} - for line in lines[1:]: - status = self.trans_status_hsnm2raid(line) - if re.search('SnapShot', line[100:]): - is_vvol = True - else: - is_vvol = False - line = shlex.split(line) - if not line: - break - svol = int(line[2]) - pair_info['svol'].append({'lun': svol, - 'status': status, - 'is_vvol': is_vvol}) - return pair_info - - opt = '%s -svol %d' % (opt_base, ldev) - ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', - opt, noretry=True) - if ret == 1: - return {'pvol': None, 'svol': []} - lines = stdout.splitlines() - status = self.trans_status_hsnm2raid(lines[1]) - if re.search('SnapShot', lines[1][100:]): - is_vvol = True - else: - is_vvol = False - line = shlex.split(lines[1]) - pvol = int(line[1]) - - return {'pvol': pvol, 'svol': [{'lun': ldev, - 'status': status, - 'is_vvol': is_vvol}]} - - def create_lock_file(self): - basic_lib.create_empty_file(self.hsnm_lock_file) - - def get_hostgroup_luns(self, port, gid): - list = [] - self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU) - - return list - - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - param = 'unit_name' - if param not in existing_ref: - msg = basic_lib.output_err(700, param=param) - raise exception.HBSDError(data=msg) - storage = existing_ref.get(param) - if storage != self.conf.hitachi_unit_name: - msg = basic_lib.output_err(648, resource=param) - raise exception.HBSDError(data=msg) - - try: - stdout = self._get_lu(ldev) - except exception.HBSDError: - with excutils.save_and_reraise_exception(): - basic_lib.output_err(648, resource='LDEV') - - lines = stdout.splitlines() - line = lines[2] - - splits = shlex.split(line) - - vol_type = splits[len(splits) - 1] - if basic_lib.NORMAL_VOLUME_TYPE != vol_type: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - dppool = splits[5] - if 'N/A' == dppool: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. - # So, units.Gi is divided by 512. - size = int(splits[1]) - if size % (units.Gi / 512): - msg = basic_lib.output_err(703, ldev=ldev) - raise exception.HBSDError(data=msg) - - num_port = int(splits[len(splits) - 2]) - if num_port: - msg = basic_lib.output_err(704, ldev=ldev) - raise exception.HBSDError(data=msg) - - return size / (units.Gi / 512) diff --git a/cinder/volume/drivers/hitachi/hnas_backend.py b/cinder/volume/drivers/hitachi/hnas_backend.py deleted file mode 100644 index 4497d8247ec..00000000000 --- a/cinder/volume/drivers/hitachi/hnas_backend.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Hitachi Unified Storage (HUS-HNAS) platform. Backend operations. -""" - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils - -LOG = logging.getLogger("cinder.volume.driver") -HNAS_SSC_RETRIES = 5 - - -class HNASSSHBackend(object): - def __init__(self, backend_opts): - - self.mgmt_ip0 = backend_opts.get('mgmt_ip0') - self.hnas_cmd = backend_opts.get('ssc_cmd', 'ssc') - self.cluster_admin_ip0 = backend_opts.get('cluster_admin_ip0') - self.ssh_port = backend_opts.get('ssh_port', '22') - self.ssh_username = backend_opts.get('username') - self.ssh_pwd = backend_opts.get('password') - self.ssh_private_key = backend_opts.get('ssh_private_key') - self.storage_version = None - self.sshpool = None - self.fslist = {} - self.tgt_list = {} - - @utils.retry(exceptions=exception.HNASConnError, retries=HNAS_SSC_RETRIES, - wait_random=True) - def _run_cmd(self, *args, **kwargs): - """Runs a command on SMU using SSH. - - :returns: stdout and stderr of the command - """ - if self.cluster_admin_ip0 is None: - # Connect to SMU through SSH and run ssc locally - args = (self.hnas_cmd, 'localhost') + args - else: - args = (self.hnas_cmd, '--smuauth', self.cluster_admin_ip0) + args - - utils.check_ssh_injection(args) - command = ' '.join(args) - command = command.replace('"', '\\"') - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(ip=self.mgmt_ip0, - port=int(self.ssh_port), - conn_timeout=None, - login=self.ssh_username, - password=self.ssh_pwd, - privatekey=self.ssh_private_key) - - with self.sshpool.item() as ssh: - try: - out, err = putils.ssh_execute(ssh, command, - check_exit_code=True) - LOG.debug("command %(cmd)s result: out = " - "%(out)s - err = %(err)s", - {'cmd': self.hnas_cmd, 'out': out, 'err': err}) - return out, err - except putils.ProcessExecutionError as e: - if 'Failed to establish SSC connection' in e.stderr: - msg = _("Failed to establish SSC connection!") - LOG.exception(msg) - raise exception.HNASConnError(msg) - elif 'Connection reset' in e.stderr: - msg = _("HNAS connection reset!") - LOG.exception(msg) - raise exception.HNASConnError(msg) - else: - raise - - def get_version(self): - """Gets version information from the storage unit. - - :returns: dictionary with HNAS information - - .. 
code:: python - - storage_version={ - 'mac': HNAS MAC ID, - 'model': HNAS model, - 'version': the software version, - 'hardware': the hardware version, - 'serial': HNAS serial number - } - - """ - if not self.storage_version: - version_info = {} - out, err = self._run_cmd("cluster-getmac") - mac = out.split(':')[1].strip() - version_info['mac'] = mac - - out, err = self._run_cmd("ver") - split_out = out.split('\n') - - model = split_out[1].split(':')[1].strip() - version = split_out[3].split()[1] - hardware = split_out[5].split(':')[1].strip() - serial = split_out[12].split()[2] - - version_info['model'] = model - version_info['version'] = version - version_info['hardware'] = hardware - version_info['serial'] = serial - - self.storage_version = version_info - - LOG.debug("version_info: %(info)s", {'info': self.storage_version}) - return self.storage_version - - def get_evs_info(self): - """Gets the IP addresses of all EVSs in HNAS. - - :returns: dictionary with EVS information - - .. code:: python - - evs_info={ - : {evs_number: number identifying the EVS1 on HNAS}, - : {evs_number: number identifying the EVS2 on HNAS}, - ... - } - - """ - evs_info = {} - out, err = self._run_cmd("evsipaddr", "-l") - - out = out.split('\n') - for line in out: - if 'evs' in line and 'admin' not in line: - ip = line.split()[3].strip() - evs_info[ip] = {} - evs_info[ip]['evs_number'] = line.split()[1].strip() - - return evs_info - - def get_fs_info(self, fs_label): - """Gets the information of a given FS. - - :param fs_label: Label of the filesystem - :returns: dictionary with FS information - - .. code:: python - - fs_info={ - 'id': a Logical Unit ID, - 'label': a Logical Unit name, - 'evs_id': the ID of the EVS in which the filesystem is created - (not present if there is a single EVS), - 'total_size': the total size of the FS (in GB), - 'used_size': the size that is already used (in GB), - 'available_size': the free space (in GB) - } - - """ - def _convert_size(param): - size = float(param) * units.Mi - return six.text_type(size) - - fs_info = {} - single_evs = True - id, lbl, evs, t_sz, u_sz, a_sz = 0, 1, 2, 3, 5, 12 - t_sz_unit, u_sz_unit, a_sz_unit = 4, 6, 13 - - out, err = self._run_cmd("df", "-af", fs_label) - - invalid_outs = ['Not mounted', 'Not determined', 'not found'] - - for problem in invalid_outs: - if problem in out: - return {} - - if 'EVS' in out: - single_evs = False - - fs_data = out.split('\n')[3].split() - - # Getting only the desired values from the output. If there is a single - # EVS, its ID is not shown in the output and we have to decrease the - # indexes to get the right values. - fs_info['id'] = fs_data[id] - fs_info['label'] = fs_data[lbl] - - if not single_evs: - fs_info['evs_id'] = fs_data[evs] - - fs_info['total_size'] = ( - (fs_data[t_sz]) if not single_evs else fs_data[t_sz - 1]) - fs_info['used_size'] = ( - fs_data[u_sz] if not single_evs else fs_data[u_sz - 1]) - fs_info['available_size'] = ( - fs_data[a_sz] if not single_evs else fs_data[a_sz - 1]) - - # Converting the sizes if necessary. 
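# [Editor's note] In single-EVS output the EVS ID column is absent, so
# the unit columns shift left by one along with the size columns; that
# is why the else branch below indexes 'fs_data[t_sz_unit - 1]' (and
# friends) instead of the multi-EVS positions used in the if branch.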
- if not single_evs: - if fs_data[t_sz_unit] == 'TB': - fs_info['total_size'] = _convert_size(fs_info['total_size']) - if fs_data[u_sz_unit] == 'TB': - fs_info['used_size'] = _convert_size(fs_info['used_size']) - if fs_data[a_sz_unit] == 'TB': - fs_info['available_size'] = _convert_size( - fs_info['available_size']) - else: - if fs_data[t_sz_unit - 1] == 'TB': - fs_info['total_size'] = _convert_size(fs_info['total_size']) - if fs_data[u_sz_unit - 1] == 'TB': - fs_info['used_size'] = _convert_size(fs_info['used_size']) - if fs_data[a_sz_unit - 1] == 'TB': - fs_info['available_size'] = _convert_size( - fs_info['available_size']) - - fs_info['provisioned_capacity'] = 0 - - LOG.debug("File system info of %(fs)s (sizes in GB): %(info)s.", - {'fs': fs_label, 'info': fs_info}) - - return fs_info - - def get_evs(self, fs_label): - """Gets the EVS ID for the named filesystem. - - :param fs_label: The filesystem label related to the EVS required - :returns: EVS ID of the filesystem - """ - if not self.fslist: - self._get_fs_list() - - # When the FS is found in the list of known FS, returns the EVS ID - for key in self.fslist: - if fs_label == self.fslist[key]['label']: - LOG.debug("EVS ID for fs %(fs)s: %(id)s.", - {'fs': fs_label, 'id': self.fslist[key]['evsid']}) - return self.fslist[key]['evsid'] - LOG.debug("Can't find EVS ID for fs %(fs)s.", {'fs': fs_label}) - - def file_clone(self, fs_label, src, name): - """Clones NFS files to a new one named 'name'. - - Clone primitive used to support all NFS snapshot/cloning functions. - - :param fs_label: file system label of the new file - :param src: source file - :param name: target path of the new created file - """ - fs_list = self._get_fs_list() - fs = fs_list.get(fs_label) - if not fs: - LOG.error("Can't find file %(file)s in FS %(label)s", - {'file': src, 'label': fs_label}) - msg = _('FS label: %(fs_label)s') % {'fs_label': fs_label} - raise exception.InvalidParameterValue(err=msg) - - self._run_cmd("console-context", "--evs", fs['evsid'], - 'file-clone-create', '-f', fs_label, src, name) - LOG.debug('file_clone: fs:%(fs_label)s %(src)s/src: -> %(name)s/dst', - {'fs_label': fs_label, 'src': src, 'name': name}) - - def _get_fs_list(self): - """Gets a list of file systems configured on the backend. - - :returns: a list with the Filesystems configured on HNAS - """ - if not self.fslist: - fslist_out, err = self._run_cmd('evsfs', 'list') - list_raw = fslist_out.split('\n')[3:-2] - - for fs_raw in list_raw: - fs = {} - - fs_raw = fs_raw.split() - fs['id'] = fs_raw[0] - fs['label'] = fs_raw[1] - fs['permid'] = fs_raw[2] - fs['evsid'] = fs_raw[3] - fs['evslabel'] = fs_raw[4] - self.fslist[fs['label']] = fs - - return self.fslist - - def _get_evs_list(self): - """Gets a list of EVS configured on the backend. - - :returns: a list of the EVS configured on HNAS - """ - evslist_out, err = self._run_cmd('evs', 'list') - - evslist = {} - idx = 0 - for evs_raw in evslist_out.split('\n'): - idx += 1 - if 'Service' in evs_raw and 'Online' in evs_raw: - evs = {} - evs_line = evs_raw.split() - evs['node'] = evs_line[0] - evs['id'] = evs_line[1] - evs['label'] = evs_line[3] - evs['ips'] = [] - evs['ips'].append(evs_line[6]) - # Each EVS can have a list of IPs that are displayed in the - # next lines of the evslist_out. We need to check if the next - # lines is a new EVS entry or and IP of this current EVS. 
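# [Editor's note] The loop below is a look-ahead scan: starting from the
# line after the current EVS row, it gathers continuation lines that
# carry additional IP addresses, and stops at the first blank line or at
# the next 'Service' row, which marks the start of a new EVS entry.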
- for evs_ip_raw in evslist_out.split('\n')[idx:]: - if 'Service' in evs_ip_raw or not evs_ip_raw.split(): - break - ip = evs_ip_raw.split()[0] - evs['ips'].append(ip) - - evslist[evs['label']] = evs - - return evslist - - def get_export_list(self): - """Gets information on each NFS export. - - :returns: a list of the exports configured on HNAS - """ - nfs_export_out, _ = self._run_cmd('for-each-evs', '-q', 'nfs-export', - 'list') - fs_list = self._get_fs_list() - evs_list = self._get_evs_list() - - export_list = [] - - for export_raw_data in nfs_export_out.split("Export name:")[1:]: - export_info = {} - export_data = export_raw_data.split('\n') - - export_info['name'] = export_data[0].strip() - export_info['path'] = export_data[1].split(':')[1].strip() - export_info['fs'] = export_data[2].split(':')[1].strip() - - if "*** not available ***" in export_raw_data: - export_info['size'] = -1 - export_info['free'] = -1 - else: - evslbl = fs_list[export_info['fs']]['evslabel'] - export_info['evs'] = evs_list[evslbl]['ips'] - - size = export_data[3].split(':')[1].strip().split()[0] - multiplier = export_data[3].split(':')[1].strip().split()[1] - if multiplier == 'TB': - export_info['size'] = float(size) * units.Ki - else: - export_info['size'] = float(size) - - free = export_data[4].split(':')[1].strip().split()[0] - fmultiplier = export_data[4].split(':')[1].strip().split()[1] - if fmultiplier == 'TB': - export_info['free'] = float(free) * units.Ki - else: - export_info['free'] = float(free) - - export_list.append(export_info) - - LOG.debug("get_export_list: %(exp_list)s", {'exp_list': export_list}) - return export_list - - def _get_file_handler(self, volume_path, _evs_id, fs_label, - raise_except): - - try: - out, err = self._run_cmd("console-context", "--evs", _evs_id, - 'file-clone-stat', '-f', fs_label, - volume_path) - except putils.ProcessExecutionError as e: - if 'File is not a clone' in e.stderr and raise_except: - msg = (_("%s is not a clone!") % volume_path) - raise exception.ManageExistingInvalidReference( - existing_ref=volume_path, reason=msg) - else: - return - - lines = out.split('\n') - filehandle_list = [] - - for line in lines: - if "SnapshotFile:" in line and "FileHandle" in line: - item = line.split(':') - handler = item[1][:-1].replace(' FileHandle[', "") - filehandle_list.append(handler) - LOG.debug("Volume handler found: %(fh)s. Adding to list...", - {'fh': handler}) - - return filehandle_list - - def get_cloned_file_relatives(self, file_path, fs_label, - raise_except=False): - """Gets the files related to a clone - - :param file_path: path of the cloned file - :param fs_label: filesystem of the cloned file - :param raise_except: If True exception will be raised for files that - aren't clones. If False, only an error message - is logged. 
- :returns: list with names of the related files - """ - relatives = [] - - _evs_id = self.get_evs(fs_label) - - file_handler_list = self._get_file_handler(file_path, _evs_id, - fs_label, raise_except) - - if file_handler_list: - for file_handler in file_handler_list: - out, err = self._run_cmd('console-context', '--evs', _evs_id, - 'file-clone-stat-snapshot-file', '-f', - fs_label, file_handler) - - results = out.split('\n') - - for value in results: - if 'Clone:' in value and file_path not in value: - relative = value.split(':')[1] - relatives.append(relative) - else: - LOG.debug("File %(path)s is not a clone.", { - 'path': file_path}) - - return relatives - - def check_snapshot_parent(self, volume_path, snap_name, fs_label): - """Check if a volume is the snapshot source - - :param volume_path: path of the volume - :param snap_name: name of the snapshot - :param fs_label: filesystem label - :return: True if the volume is the snapshot's source or False otherwise - """ - lines = self.get_cloned_file_relatives(volume_path, fs_label, True) - - for line in lines: - if snap_name in line: - LOG.debug("Snapshot %(snap)s found in children list from " - "%(vol)s!", {'snap': snap_name, - 'vol': volume_path}) - return True - - LOG.debug("Snapshot %(snap)s was not found in children list from " - "%(vol)s, probably it is not the parent!", - {'snap': snap_name, 'vol': volume_path}) - return False - - def get_export_path(self, export, fs_label): - """Gets the path of an export on HNAS - - :param export: the export's name - :param fs_label: the filesystem name - :returns: string of the export's path - """ - evs_id = self.get_evs(fs_label) - out, err = self._run_cmd("console-context", "--evs", evs_id, - 'nfs-export', 'list', export) - - lines = out.split('\n') - - for line in lines: - if 'Export path:' in line: - return line.split('Export path:')[1].strip() diff --git a/cinder/volume/drivers/hitachi/hnas_nfs.py b/cinder/volume/drivers/hitachi/hnas_nfs.py deleted file mode 100644 index 45029d92569..00000000000 --- a/cinder/volume/drivers/hitachi/hnas_nfs.py +++ /dev/null @@ -1,1014 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Volume driver for HNAS NFS storage. 
-""" - -import math -import os -import socket - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils as cutils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import hnas_backend -from cinder.volume.drivers.hitachi import hnas_utils -from cinder.volume.drivers import nfs -from cinder.volume import utils - - -HNAS_NFS_VERSION = '6.0.0' - -LOG = logging.getLogger(__name__) - -NFS_OPTS = [ - cfg.StrOpt('hds_hnas_nfs_config_file', - default='/opt/hds/hnas/cinder_nfs_conf.xml', - help='Legacy configuration file for HNAS NFS Cinder plugin. ' - 'This is not needed if you fill all configuration on ' - 'cinder.conf', - deprecated_for_removal=True) -] - -CONF = cfg.CONF -CONF.register_opts(NFS_OPTS, group=configuration.SHARED_CONF_GROUP) - -HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', 'ssh_port': '22'} - - -@interface.volumedriver -class HNASNFSDriver(nfs.NfsDriver): - """Base class for Hitachi NFS driver. - - Executes commands relating to Volumes. - - Version history: - - .. code-block:: none - - Version 1.0.0: Initial driver version - Version 2.2.0: Added support to SSH authentication - Version 3.0.0: Added pool aware scheduling - Version 4.0.0: Added manage/unmanage features - Version 4.1.0: Fixed XML parser checks on blank options - Version 5.0.0: Remove looping in driver initialization - Code cleaning up - New communication interface between the driver and HNAS - Removed the option to use local SSC (ssh_enabled=False) - Updated to use versioned objects - Changed the class name to HNASNFSDriver - Deprecated XML config file - Added support to manage/unmanage snapshots features - Fixed driver stats reporting - Version 6.0.0: Deprecated hnas_svcX_vol_type configuration - Added list-manageable volumes/snapshots support - Rename snapshots to link with its original volume - """ - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Hitachi_HNAS_CI" - VERSION = HNAS_NFS_VERSION - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - msg = _("The Hitachi NAS driver is deprecated and will be " - "removed in a future release.") - versionutils.report_deprecated_feature(LOG, msg) - self._execute = None - self.context = None - self.configuration = kwargs.get('configuration', None) - - service_parameters = ['volume_type', 'hdp'] - optional_parameters = ['ssc_cmd', 'cluster_admin_ip0'] - - if self.configuration: - self.configuration.append_config_values( - hnas_utils.drivers_common_opts) - self.configuration.append_config_values(NFS_OPTS) - self.config = {} - - # Trying to get HNAS configuration from cinder.conf - self.config = hnas_utils.read_cinder_conf( - self.configuration) - - # If HNAS configuration are not set on cinder.conf, tries to use - # the deprecated XML configuration file - if not self.config: - self.config = hnas_utils.read_xml_config( - self.configuration.hds_hnas_nfs_config_file, - service_parameters, - optional_parameters) - - super(HNASNFSDriver, self).__init__(*args, **kwargs) - self.backend = hnas_backend.HNASSSHBackend(self.config) - - def _get_service(self, volume): - """Get service parameters. - - Get the available service parameters for a given volume using - its type. 
- - :param volume: dictionary volume reference - :returns: Tuple containing the service parameters (label, - export path and export file system) or error if no configuration is - found. - :raises ParameterNotFound: - """ - LOG.debug("_get_service: volume: %(vol)s", {'vol': volume}) - label = utils.extract_host(volume.host, level='pool') - - if label in self.config['services'].keys(): - svc = self.config['services'][label] - LOG.debug("_get_service: %(lbl)s->%(svc)s", - {'lbl': label, 'svc': svc['export']['fs']}) - service = (svc['hdp'], svc['export']['path'], svc['export']['fs']) - else: - LOG.info("Available services: %(svc)s", - {'svc': self.config['services'].keys()}) - LOG.error("No configuration found for service: %(lbl)s", - {'lbl': label}) - raise exception.ParameterNotFound(param=label) - - return service - - def _get_snapshot_name(self, snapshot): - snap_file_name = ("%(vol_name)s.%(snap_id)s" % - {'vol_name': snapshot.volume.name, - 'snap_id': snapshot.id}) - return snap_file_name - - @cutils.trace - def extend_volume(self, volume, new_size): - """Extend an existing volume. - - :param volume: dictionary volume reference - :param new_size: int size in GB to extend - :raises InvalidResults: - """ - nfs_mount = volume.provider_location - path = self._get_file_path(nfs_mount, volume.name) - - # Resize the image file on share to new size. - LOG.info("Checking file for resize.") - - if not self._is_file_size_equal(path, new_size): - LOG.info("Resizing file to %(sz)sG", {'sz': new_size}) - image_utils.resize_image(path, new_size) - - if self._is_file_size_equal(path, new_size): - LOG.info("LUN %(id)s extended to %(size)s GB.", - {'id': volume.id, 'size': new_size}) - else: - msg = _("Resizing image file failed.") - LOG.error(msg) - raise exception.InvalidResults(msg) - - def _is_file_size_equal(self, path, size): - """Checks if file size at path is equal to size.""" - data = image_utils.qemu_img_info(path) - virt_size = data.virtual_size / units.Gi - - if virt_size == size: - return True - else: - return False - - @cutils.trace - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - :param volume: volume to be created - :param snapshot: source snapshot - :returns: the provider_location of the volume created - """ - nfs_mount = snapshot.volume.provider_location - snapshot_name = self._get_snapshot_name(snapshot) - - if self._file_not_present(nfs_mount, snapshot_name): - LOG.info("Creating volume %(vol)s from legacy " - "snapshot %(snap)s.", - {'vol': volume.name, 'snap': snapshot.name}) - snapshot_name = snapshot.name - - self._clone_volume(snapshot.volume, volume.name, snapshot_name) - - return {'provider_location': nfs_mount} - - @cutils.trace - def create_snapshot(self, snapshot): - """Create a snapshot. - - :param snapshot: dictionary snapshot reference - :returns: the provider_location of the snapshot created - """ - snapshot_name = self._get_snapshot_name(snapshot) - self._clone_volume(snapshot.volume, snapshot_name) - - share = snapshot.volume.provider_location - LOG.debug('Share: %(shr)s', {'shr': share}) - - # returns the mount point (not path) - return {'provider_location': share} - - @cutils.trace - def delete_snapshot(self, snapshot): - """Deletes a snapshot. - - :param snapshot: dictionary snapshot reference - """ - nfs_mount = snapshot.volume.provider_location - snapshot_name = self._get_snapshot_name(snapshot) - - if self._file_not_present(nfs_mount, snapshot_name): - # Snapshot with new name does not exist. 
The verification - # for a file with legacy name will be done. - snapshot_name = snapshot.name - - if self._file_not_present(nfs_mount, snapshot_name): - # The file does not exist. Nothing to do. - return - - self._execute('rm', self._get_file_path( - nfs_mount, snapshot_name), run_as_root=True) - - def _file_not_present(self, nfs_mount, volume_name): - """Check if file does not exist. - - :param nfs_mount: string path of the nfs share - :param volume_name: string volume name - :returns: boolean (true for file not present and false otherwise) - """ - try: - self._execute('ls', self._get_file_path(nfs_mount, volume_name)) - except processutils.ProcessExecutionError as e: - if "No such file or directory" in e.stderr: - # If the file isn't present - return True - else: - raise - - return False - - def _get_file_path(self, nfs_share, file_name): - """Get file path (local fs path) for given name on given nfs share. - - :param nfs_share string, example 172.18.194.100:/var/nfs - :param file_name string, - example volume-91ee65ec-c473-4391-8c09-162b00c68a8c - :returns: the local path according to the parameters - """ - return os.path.join(self._get_mount_point_for_share(nfs_share), - file_name) - - @cutils.trace - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. - - :param volume: reference to the volume being created - :param src_vref: reference to the source volume - :returns: the provider_location of the cloned volume - """ - - # HNAS always creates cloned volumes in the same pool as the source - # volumes. So, it is not allowed to use different volume types for - # clone operations. - if volume.volume_type_id != src_vref.volume_type_id: - msg = _("Source and cloned volumes should have the same " - "volume type.") - LOG.error(msg) - raise exception.InvalidVolumeType(msg) - - vol_size = volume.size - src_vol_size = src_vref.size - - self._clone_volume(src_vref, volume.name, src_vref.name) - - share = src_vref.provider_location - - if vol_size > src_vol_size: - volume.provider_location = share - self.extend_volume(volume, vol_size) - - return {'provider_location': share} - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - :param refresh: if it is True, update the stats first. - :returns: dictionary with the stats from HNAS - - .. 
code:: python - - _stats['pools'] = { - 'total_capacity_gb': total size of the pool, - 'free_capacity_gb': the available size, - 'QoS_support': bool to indicate if QoS is supported, - 'reserved_percentage': percentage of size reserved, - 'max_over_subscription_ratio': oversubscription rate, - 'thin_provisioning_support': thin support (True), - } - - """ - LOG.info("Getting volume stats") - - _stats = super(HNASNFSDriver, self).get_volume_stats(refresh) - _stats["vendor_name"] = 'Hitachi' - _stats["driver_version"] = HNAS_NFS_VERSION - _stats["storage_protocol"] = 'NFS' - - max_osr = self.max_over_subscription_ratio - - for pool in self.pools: - capacity, free, provisioned = self._get_capacity_info(pool['fs']) - pool['total_capacity_gb'] = capacity / float(units.Gi) - pool['free_capacity_gb'] = free / float(units.Gi) - pool['provisioned_capacity_gb'] = provisioned / float(units.Gi) - pool['QoS_support'] = 'False' - pool['reserved_percentage'] = self.reserved_percentage - pool['max_over_subscription_ratio'] = max_osr - pool['thin_provisioning_support'] = True - - _stats['pools'] = self.pools - - LOG.debug('Driver stats: %(stat)s', {'stat': _stats}) - - return _stats - - def do_setup(self, context): - """Perform internal driver setup.""" - version_info = self.backend.get_version() - LOG.info("HNAS NFS driver.") - LOG.info("HNAS model: %(mdl)s", {'mdl': version_info['model']}) - LOG.info("HNAS version: %(ver)s", - {'ver': version_info['version']}) - LOG.info("HNAS hardware: %(hw)s", - {'hw': version_info['hardware']}) - LOG.info("HNAS S/N: %(sn)s", {'sn': version_info['serial']}) - - self.context = context - self._load_shares_config( - getattr(self.configuration, self.driver_prefix + '_shares_config')) - LOG.info("Review shares: %(shr)s", {'shr': self.shares}) - - elist = self.backend.get_export_list() - - # Check for all configured exports - for svc_name, svc_info in self.config['services'].items(): - server_ip = svc_info['hdp'].split(':')[0] - mountpoint = svc_info['hdp'].split(':')[1] - - # Ensure export are configured in HNAS - export_configured = False - for export in elist: - if mountpoint == export['name'] and server_ip in export['evs']: - svc_info['export'] = export - export_configured = True - - # Ensure export are reachable - try: - out, err = self._execute('showmount', '-e', server_ip) - except processutils.ProcessExecutionError: - LOG.exception("NFS server %(srv)s not reachable!", - {'srv': server_ip}) - raise - - export_list = out.split('\n')[1:] - export_list.pop() - mountpoint_not_found = mountpoint not in map( - lambda x: x.split()[0], export_list) - if (len(export_list) < 1 or - mountpoint_not_found or - not export_configured): - LOG.error("Configured share %(share)s is not present" - "in %(srv)s.", - {'share': mountpoint, 'srv': server_ip}) - msg = _('Section: %(svc_name)s') % {'svc_name': svc_name} - raise exception.InvalidParameterValue(err=msg) - - LOG.debug("Loading services: %(svc)s", { - 'svc': self.config['services']}) - - service_list = self.config['services'].keys() - for svc in service_list: - svc = self.config['services'][svc] - pool = {} - pool['pool_name'] = svc['pool_name'] - pool['service_label'] = svc['pool_name'] - pool['fs'] = svc['hdp'] - - self.pools.append(pool) - - LOG.debug("Configured pools: %(pool)s", {'pool': self.pools}) - LOG.info("HNAS NFS Driver loaded successfully.") - - def _clone_volume(self, src_vol, clone_name, src_name=None): - """Clones mounted volume using the HNAS file_clone. 
- - :param src_vol: object source volume - :param clone_name: string clone name (or snapshot) - :param src_name: name of the source volume. - """ - - # when the source is a snapshot, we need to pass the source name and - # use the information of the volume that originated the snapshot to - # get the clone path. - if not src_name: - src_name = src_vol.name - - # volume-ID snapshot-ID, /cinder - LOG.info("Cloning with volume_name %(vname)s, clone_name %(cname)s" - " ,export_path %(epath)s", - {'vname': src_name, 'cname': clone_name, - 'epath': src_vol.provider_location}) - - (fs, path, fs_label) = self._get_service(src_vol) - - target_path = '%s/%s' % (path, clone_name) - source_path = '%s/%s' % (path, src_name) - - self.backend.file_clone(fs_label, source_path, target_path) - - @cutils.trace - def create_volume(self, volume): - """Creates a volume. - - :param volume: volume reference - :returns: the volume provider_location - """ - self._ensure_shares_mounted() - - (fs_id, path, fslabel) = self._get_service(volume) - - volume.provider_location = fs_id - - LOG.info("Volume service: %(label)s. Casted to: %(loc)s", - {'label': fslabel, 'loc': volume.provider_location}) - - self._do_create_volume(volume) - - return {'provider_location': fs_id} - - def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): - """Converts the share point name to an IP address. - - The volume reference may have a DNS name portion in the share name. - Convert that to an IP address and then restore the entire path. - - :param vol_ref: driver-specific information used to identify a volume - :returns: a volume reference where share is in IP format or raises - error - :raises e.strerror: - """ - - # First strip out share and convert to IP format. - share_split = vol_ref.split(':') - - try: - vol_ref_share_ip = cutils.resolve_hostname(share_split[0]) - except socket.gaierror as e: - LOG.exception('Invalid hostname %(host)s', - {'host': share_split[0]}) - LOG.debug('error: %(err)s', {'err': e.strerror}) - raise - - # Now place back into volume reference. - vol_ref_share = vol_ref_share_ip + ':' + share_split[1] - - return vol_ref_share - - def _get_share_mount_and_vol_from_vol_ref(self, vol_ref): - """Get the NFS share, the NFS mount, and the volume from reference. - - Determine the NFS share point, the NFS mount point, and the volume - (with possible path) from the given volume reference. Raise exception - if unsuccessful. - - :param vol_ref: driver-specific information used to identify a volume - :returns: NFS Share, NFS mount, volume path or raise error - :raises ManageExistingInvalidReference: - """ - # Check that the reference is valid. - if 'source-name' not in vol_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, reason=reason) - vol_ref_name = vol_ref['source-name'] - - self._ensure_shares_mounted() - - # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config - # file, but the admin tries to manage the file located at - # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below - # when searching self._mounted_shares to see if we have an existing - # mount that would work to access the volume-to-be-managed (a string - # comparison is done instead of IP comparison). 
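# [Editor's note] Both sides of the comparison are normalized to
# 'IP:/path' form first (the reference here, and each configured share
# inside the loop below), so a share declared by DNS name and a
# reference given by IP address, or vice versa, can still match while
# the comparison itself remains a plain string check.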
- vol_ref_share = self._convert_vol_ref_share_name_to_share_ip( - vol_ref_name) - for nfs_share in self._mounted_shares: - cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share) - (orig_share, work_share, - file_path) = vol_ref_share.partition(cfg_share) - if work_share == cfg_share: - file_path = file_path[1:] # strip off leading path divider - LOG.debug("Found possible share %(shr)s; checking mount.", - {'shr': work_share}) - nfs_mount = self._get_mount_point_for_share(nfs_share) - vol_full_path = os.path.join(nfs_mount, file_path) - if os.path.isfile(vol_full_path): - LOG.debug("Found share %(share)s and vol %(path)s on " - "mount %(mnt)s.", - {'share': nfs_share, 'path': file_path, - 'mnt': nfs_mount}) - return nfs_share, nfs_mount, file_path - else: - LOG.debug("vol_ref %(ref)s not on share %(share)s.", - {'ref': vol_ref_share, 'share': nfs_share}) - - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, - reason=_('Volume/Snapshot not found on configured storage ' - 'backend.')) - - @cutils.trace - def manage_existing(self, volume, existing_vol_ref): - """Manages an existing volume. - - The specified Cinder volume is to be taken into Cinder management. - The driver will verify its existence and then rename it to the - new Cinder volume name. It is expected that the existing volume - reference is an NFS share point and some [/path]/volume; - e.g., 10.10.32.1:/openstack/vol_to_manage - or 10.10.32.1:/openstack/some_directory/vol_to_manage - - :param volume: cinder volume to manage - :param existing_vol_ref: driver-specific information used to identify a - volume - :returns: the provider location - :raises VolumeBackendAPIException: - """ - - # Attempt to find NFS share, NFS mount, and volume path from vol_ref. - (nfs_share, nfs_mount, vol_name - ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - - LOG.info("Asked to manage NFS volume %(vol)s, " - "with vol ref %(ref)s.", - {'vol': volume.id, - 'ref': existing_vol_ref['source-name']}) - - vol_id = utils.extract_id_from_volume_name(vol_name) - if utils.check_already_managed_volume(vol_id): - raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name) - - self._check_pool_and_share(volume, nfs_share) - - if vol_name == volume.name: - LOG.debug("New Cinder volume %(vol)s name matches reference name: " - "no need to rename.", {'vol': volume.name}) - else: - src_vol = os.path.join(nfs_mount, vol_name) - dst_vol = os.path.join(nfs_mount, volume.name) - try: - self._try_execute("mv", src_vol, dst_vol, run_as_root=False, - check_exit_code=True) - LOG.debug("Setting newly managed Cinder volume name " - "to %(vol)s.", {'vol': volume.name}) - self._set_rw_permissions_for_all(dst_vol) - except (OSError, processutils.ProcessExecutionError) as err: - msg = (_("Failed to manage existing volume " - "%(name)s, because rename operation " - "failed: Error msg: %(msg)s.") % - {'name': existing_vol_ref['source-name'], - 'msg': six.text_type(err)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return {'provider_location': nfs_share} - - def _check_pool_and_share(self, volume, nfs_share): - """Validates the pool and the NFS share. - - Checks if the NFS share for the volume-type chosen matches the - one passed in the volume reference. Also, checks if the pool - for the volume type matches the pool for the host passed. 
- - :param volume: cinder volume reference - :param nfs_share: NFS share passed to manage - :raises ManageExistingVolumeTypeMismatch: - """ - pool_from_vol_type = hnas_utils.get_pool(self.config, volume) - - pool_from_host = utils.extract_host(volume.host, level='pool') - - if (pool_from_vol_type == 'default' and - 'default' not in self.config['services']): - msg = (_("Failed to manage existing volume %(volume)s because the " - "chosen volume type %(vol_type)s does not have a " - "service_label configured in its extra-specs and there " - "is no pool configured with hnas_svcX_volume_type as " - "'default' in cinder.conf.") % - {'volume': volume.id, - 'vol_type': getattr(volume.volume_type, 'id', None)}) - LOG.error(msg) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - pool = self.config['services'][pool_from_vol_type]['hdp'] - if pool != nfs_share: - msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen (%(pool)s) does not match the " - "NFS share passed in the volume reference (%(share)s).") - % {'share': nfs_share, 'pool': pool}) - LOG.error(msg) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if pool_from_host != pool_from_vol_type: - msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen (%(pool)s) does not match the " - "pool of the host %(pool_host)s") % - {'pool': pool_from_vol_type, - 'pool_host': pool_from_host}) - LOG.error(msg) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - @cutils.trace - def manage_existing_get_size(self, volume, existing_vol_ref): - """Returns the size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param volume: cinder volume to manage - :param existing_vol_ref: existing volume to take under management - :returns: the size of the volume or raise error - :raises VolumeBackendAPIException: - """ - return self._manage_existing_get_size(existing_vol_ref) - - @cutils.trace - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - It does not delete the underlying backend storage object. A log entry - will be made to notify the Admin that the volume is no longer being - managed. - - :param volume: cinder volume to unmanage - """ - vol_str = CONF.volume_name_template % volume.id - path = self._get_mount_point_for_share(volume.provider_location) - - new_str = "unmanage-" + vol_str - - vol_path = os.path.join(path, vol_str) - new_path = os.path.join(path, new_str) - - try: - self._try_execute("mv", vol_path, new_path, - run_as_root=False, check_exit_code=True) - - LOG.info("The volume with path %(old)s is no longer being " - "managed by Cinder. However, it was not deleted " - "and can be found in the new path %(cr)s.", - {'old': vol_path, 'cr': new_path}) - - except (OSError, ValueError): - LOG.exception("The NFS Volume %(cr)s does not exist.", - {'cr': new_path}) - - def _get_file_size(self, file_path): - file_size = float(cutils.get_file_size(file_path)) / units.Gi - # Round up to next Gb - return int(math.ceil(file_size)) - - def _manage_existing_get_size(self, existing_ref): - # Attempt to find NFS share, NFS mount, and path from vol_ref. 
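    # --- editorial aside, not part of this diff -----------------------------
    # _get_file_size() above rounds up to whole gigabytes: units.Gi is
    # 1024 ** 3, so a 1288490189-byte backing file (~1.2 GiB) gives
    # math.ceil(1.2) == 2 and is reported as 2 GB.
    # -------------------------------------------------------------------------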
- (nfs_share, nfs_mount, path - ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) - - try: - LOG.debug("Asked to get size of NFS ref %(ref)s.", - {'ref': existing_ref['source-name']}) - - file_path = os.path.join(nfs_mount, path) - size = self._get_file_size(file_path) - except (OSError, ValueError): - exception_message = (_("Failed to manage existing volume/snapshot " - "%(name)s, because of error in getting " - "its size."), - {'name': existing_ref['source-name']}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Reporting size of NFS ref %(ref)s as %(size)d GB.", - {'ref': existing_ref['source-name'], 'size': size}) - - return size - - def _check_snapshot_parent(self, volume, old_snap_name, share): - volume_name = 'volume-' + volume.id - (fs, path, fs_label) = self._get_service(volume) - # 172.24.49.34:/nfs_cinder - - export_path = self.backend.get_export_path(share.split(':')[1], - fs_label) - volume_path = os.path.join(export_path, volume_name) - - return self.backend.check_snapshot_parent(volume_path, old_snap_name, - fs_label) - - def _get_snapshot_origin_from_name(self, snap_name): - """Gets volume name from snapshot names""" - if 'unmanage' in snap_name: - return snap_name.split('.')[0][9:] - - return snap_name.split('.')[0] - - @cutils.trace - def manage_existing_snapshot(self, snapshot, existing_ref): - """Brings an existing backend storage object under Cinder management. - - :param snapshot: Cinder volume snapshot to manage - :param existing_ref: Driver-specific information used to identify a - volume snapshot - """ - - # Attempt to find NFS share, NFS mount, and volume path from ref. - (nfs_share, nfs_mount, src_snapshot_name - ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) - - LOG.info("Asked to manage NFS snapshot %(snap)s for volume " - "%(vol)s, with vol ref %(ref)s.", - {'snap': snapshot.id, - 'vol': snapshot.volume_id, - 'ref': existing_ref['source-name']}) - - volume = snapshot.volume - parent_name = self._get_snapshot_origin_from_name(src_snapshot_name) - - if parent_name != volume.name: - # Check if the snapshot belongs to the volume for the legacy case - if not self._check_snapshot_parent( - volume, src_snapshot_name, nfs_share): - msg = (_("This snapshot %(snap)s doesn't belong " - "to the volume parent %(vol)s.") % - {'snap': src_snapshot_name, 'vol': volume.id}) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - - snapshot_name = self._get_snapshot_name(snapshot) - - if src_snapshot_name == snapshot_name: - LOG.debug("New Cinder snapshot %(snap)s name matches reference " - "name. 
No need to rename.", {'snap': snapshot_name}) - else: - src_snap = os.path.join(nfs_mount, src_snapshot_name) - dst_snap = os.path.join(nfs_mount, snapshot_name) - try: - self._try_execute("mv", src_snap, dst_snap, run_as_root=False, - check_exit_code=True) - LOG.info("Setting newly managed Cinder snapshot name " - "to %(snap)s.", {'snap': snapshot_name}) - self._set_rw_permissions_for_all(dst_snap) - except (OSError, processutils.ProcessExecutionError) as err: - msg = (_("Failed to manage existing snapshot " - "%(name)s, because rename operation " - "failed: Error msg: %(msg)s.") % - {'name': existing_ref['source-name'], - 'msg': six.text_type(err)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return {'provider_location': nfs_share} - - @cutils.trace - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - return self._manage_existing_get_size(existing_ref) - - @cutils.trace - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management. - - Does not delete the underlying backend storage object. - - :param snapshot: Cinder volume snapshot to unmanage - """ - - path = self._get_mount_point_for_share(snapshot.provider_location) - snapshot_name = self._get_snapshot_name(snapshot) - - if self._file_not_present(snapshot.provider_location, snapshot_name): - LOG.info("Unmanaging legacy snapshot %(snap)s.", - {'snap': snapshot.name}) - snapshot_name = snapshot.name - - new_name = "unmanage-" + snapshot_name - - old_path = os.path.join(path, snapshot_name) - new_path = os.path.join(path, new_name) - - try: - self._execute("mv", old_path, new_path, - run_as_root=False, check_exit_code=True) - LOG.info("The snapshot with path %(old)s is no longer being " - "managed by Cinder. However, it was not deleted and " - "can be found in the new path %(cr)s.", - {'old': old_path, 'cr': new_path}) - - except (OSError, ValueError): - LOG.exception("The NFS snapshot %(old)s does not exist.", - {'old': old_path}) - - def _get_volumes_from_export(self, export_path): - mnt_point = self._get_mount_point_for_share(export_path) - - vols = self._execute("ls", mnt_point, run_as_root=False, - check_exit_code=True) - - vols = vols[0].split('\n') - if '' in vols: - vols.remove('') - - return list(vols) - - def _get_snapshot_origin(self, snap_path, fs_label): - relatives = self.backend.get_cloned_file_relatives(snap_path, fs_label) - - origin = [] - - if not relatives: - return - elif len(relatives) > 1: - for relative in relatives: - if 'snapshot' not in relative: - origin.append(relative) - else: - origin.append(relatives[0]) - - return origin - - def _get_manageable_resource_info(self, cinder_resources, resource_type, - marker, limit, offset, sort_keys, - sort_dirs): - """Gets the resources on the backend available for management by Cinder. 
- - Receives the parameters from "get_manageable_volumes" and - "get_manageable_snapshots" and gets the available resources - - :param cinder_resources: A list of resources in this host that Cinder - currently manages - :param resource_type: If it's a volume or a snapshot - :param marker: The last item of the previous page; we return the - next results after this value (after sorting) - :param limit: Maximum number of items to return - :param offset: Number of items to skip after marker - :param sort_keys: List of keys to sort results by (valid keys - are 'identifier' and 'size') - :param sort_dirs: List of directions to sort by, corresponding to - sort_keys (valid directions are 'asc' and 'desc') - - :returns: list of dictionaries, each specifying a volume or snapshot - (resource) in the host, with the following keys: - - reference (dictionary): The reference for a resource, - which can be passed to "manage_existing_snapshot". - - size (int): The size of the resource according to the storage - backend, rounded up to the nearest GB. - - safe_to_manage (boolean): Whether or not this resource is - safe to manage according to the storage backend. - - reason_not_safe (string): If safe_to_manage is False, - the reason why. - - cinder_id (string): If already managed, provide the Cinder ID. - - extra_info (string): Any extra information to return to the - user - - source_reference (string): Similar to "reference", but for the - snapshot's source volume. - """ - - entries = [] - exports = {} - bend_rsrc = {} - cinder_ids = [resource.id for resource in cinder_resources] - - for service in self.config['services']: - exp_path = self.config['services'][service]['hdp'] - exports[exp_path] = ( - self.config['services'][service]['export']['fs']) - - for exp in exports.keys(): - # bend_rsrc has all the resources in the specified exports - # volumes {u'172.24.54.39:/Export-Cinder': - # ['volume-325e7cdc-8f65-40a8-be9a-6172c12c9394', - # ' snapshot-1bfb6f0d-9497-4c12-a052-5426a76cacdc','']} - bend_rsrc[exp] = self._get_volumes_from_export(exp) - mnt_point = self._get_mount_point_for_share(exp) - - for resource in bend_rsrc[exp]: - # Ignoring resources of unwanted types - if ((resource_type == 'volume' and - ('.' in resource or 'snapshot' in resource)) or - (resource_type == 'snapshot' and '.' not in resource and - 'snapshot' not in resource)): - continue - - path = '%s/%s' % (exp, resource) - mnt_path = '%s/%s' % (mnt_point, resource) - size = self._get_file_size(mnt_path) - - rsrc_inf = {'reference': {'source-name': path}, - 'size': size, 'cinder_id': None, - 'extra_info': None} - - if resource_type == 'volume': - potential_id = utils.extract_id_from_volume_name(resource) - elif 'snapshot' in resource: - # This is for the snapshot legacy case - potential_id = utils.extract_id_from_snapshot_name( - resource) - else: - potential_id = resource.split('.')[1] - - # When a resource is already managed by cinder, it's not - # recommended to manage it again. So we set safe_to_manage = - # False. Otherwise, it is set safe_to_manage = True. - if potential_id in cinder_ids: - rsrc_inf['safe_to_manage'] = False - rsrc_inf['reason_not_safe'] = 'already managed' - rsrc_inf['cinder_id'] = potential_id - else: - rsrc_inf['safe_to_manage'] = True - rsrc_inf['reason_not_safe'] = None - - # If it's a snapshot, we try to get its source volume. However, - # this search is not reliable in some cases. 
So, if it's not - # possible to return a precise result, we return unknown as - # source-reference, throw a warning message and fill the - # extra-info. - if resource_type == 'snapshot': - if 'snapshot' not in resource: - origin = self._get_snapshot_origin_from_name(resource) - if 'unmanage' in origin: - origin = origin[16:] - else: - origin = origin[7:] - rsrc_inf['source_reference'] = {'id': origin} - else: - path = path.split(':')[1] - origin = self._get_snapshot_origin(path, exports[exp]) - - if not origin: - # if origin is empty, the file is not a clone - continue - elif len(origin) == 1: - origin = origin[0].split('/')[2] - origin = utils.extract_id_from_volume_name(origin) - rsrc_inf['source_reference'] = {'id': origin} - else: - LOG.warning("Could not determine the volume " - "that owns the snapshot %(snap)s", - {'snap': resource}) - rsrc_inf['source_reference'] = {'id': 'unknown'} - rsrc_inf['extra_info'] = ('Could not determine ' - 'the volume that owns ' - 'the snapshot') - - entries.append(rsrc_inf) - - return utils.paginate_entries_list(entries, marker, limit, offset, - sort_keys, sort_dirs) - - @cutils.trace - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - """List volumes on the backend available for management by Cinder.""" - - return self._get_manageable_resource_info(cinder_volumes, 'volume', - marker, limit, offset, - sort_keys, sort_dirs) - - @cutils.trace - def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, - sort_keys, sort_dirs): - """List snapshots on the backend available for management by Cinder.""" - - return self._get_manageable_resource_info(cinder_snapshots, 'snapshot', - marker, limit, offset, - sort_keys, sort_dirs) diff --git a/cinder/volume/drivers/hitachi/hnas_utils.py b/cinder/volume/drivers/hitachi/hnas_utils.py deleted file mode 100644 index edd8c41937c..00000000000 --- a/cinder/volume/drivers/hitachi/hnas_utils.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright (c) 2016 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Shared code for HNAS drivers -""" - -import os -import re - -from oslo_config import cfg -from oslo_log import log as logging -import six -from xml.etree import ElementTree as ETree - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', - 'chap_enabled': True, - 'ssh_port': 22} - -MAX_HNAS_ISCSI_TARGETS = 32 - -drivers_common_opts = [ - cfg.IPOpt('hnas_mgmt_ip0', - help='Management IP address of HNAS. 
This can ' - 'be any IP in the admin address on HNAS or ' - 'the SMU IP.'), - cfg.StrOpt('hnas_ssc_cmd', - default='ssc', - help='Command to communicate to HNAS.'), - cfg.StrOpt('hnas_username', - help='HNAS username.'), - cfg.StrOpt('hnas_password', - secret=True, - help='HNAS password.'), - cfg.PortOpt('hnas_ssh_port', - default=22, - help='Port to be used for SSH authentication.'), - cfg.StrOpt('hnas_ssh_private_key', - help='Path to the SSH private key used to ' - 'authenticate in HNAS SMU.'), - cfg.StrOpt('hnas_cluster_admin_ip0', - default=None, - help='The IP of the HNAS cluster admin. ' - 'Required only for HNAS multi-cluster setups.'), - cfg.StrOpt('hnas_svc0_pool_name', - help='Service 0 pool name', - deprecated_name='hnas_svc0_volume_type'), - cfg.StrOpt('hnas_svc0_hdp', - help='Service 0 HDP'), - cfg.StrOpt('hnas_svc1_pool_name', - help='Service 1 pool name', - deprecated_name='hnas_svc1_volume_type'), - cfg.StrOpt('hnas_svc1_hdp', - help='Service 1 HDP'), - cfg.StrOpt('hnas_svc2_pool_name', - help='Service 2 pool name', - deprecated_name='hnas_svc2_volume_type'), - cfg.StrOpt('hnas_svc2_hdp', - help='Service 2 HDP'), - cfg.StrOpt('hnas_svc3_pool_name', - help='Service 3 pool name:', - deprecated_name='hnas_svc3_volume_type'), - cfg.StrOpt('hnas_svc3_hdp', - help='Service 3 HDP') -] - -CONF = cfg.CONF -CONF.register_opts(drivers_common_opts, group=configuration.SHARED_CONF_GROUP) - - -def _check_conf_params(config, pool_name, idx): - """Validates if the configuration on cinder.conf is complete. - - :param config: Dictionary with the driver configurations - :param pool_name: The name of the current pool - :param dv_type: The type of the driver (NFS or iSCSI) - :param idx: Index of the current pool - """ - - # Validating the inputs on cinder.conf - if config['username'] is None: - msg = (_("The config parameter hnas_username " - "is not set in the cinder.conf.")) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if (config['password'] is None and - config['ssh_private_key'] is None): - msg = (_("Credentials configuration parameters " - "missing: you need to set hnas_password " - "or hnas_ssh_private_key " - "in the cinder.conf.")) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if config['mgmt_ip0'] is None: - msg = (_("The config parameter hnas_mgmt_ip0 " - "is not set in the cinder.conf.")) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if config['services'][pool_name]['hdp'] is None: - msg = (_("The config parameter hnas_svc%(idx)s_hdp is " - "not set in the cinder.conf. Note that you need to " - "have at least one pool configured.") % - {'idx': idx}) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if config['services'][pool_name]['pool_name'] is None: - msg = (_("The config parameter " - "hnas_svc%(idx)s_pool_name is not set " - "in the cinder.conf. Note that you need to " - "have at least one pool configured.") % - {'idx': idx}) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - -def _xml_read(root, element, check=None): - """Read an xml element. 
- - :param root: XML object - :param element: string desired tag - :param check: string if present, throw exception if element missing - """ - - val = root.findtext(element) - - # mandatory parameter not found - if val is None and check: - LOG.error("Mandatory parameter not found: %(p)s", {'p': element}) - raise exception.ParameterNotFound(param=element) - - # tag not found - if val is None: - return None - - svc_tag_pattern = re.compile("svc_[0-3]$") - # tag found but empty parameter. - if not val.strip(): - if svc_tag_pattern.search(element): - return "" - LOG.error("Parameter not found: %(param)s", {'param': element}) - raise exception.ParameterNotFound(param=element) - - LOG.debug("%(element)s: %(val)s", - {'element': element, - 'val': val if element != 'password' else '***'}) - - return val.strip() - - -def read_xml_config(xml_config_file, svc_params, optional_params): - """Read Hitachi driver specific xml config file. - - :param xml_config_file: string filename containing XML configuration - :param svc_params: parameters to configure the services - - .. code:: python - - ['volume_type', 'hdp'] - - :param optional_params: parameters to configure that are not mandatory - - .. code:: python - - ['ssc_cmd', 'cluster_admin_ip0', 'chap_enabled'] - - """ - - if not os.access(xml_config_file, os.R_OK): - msg = (_("Can't find HNAS configurations on cinder.conf neither " - "on the path %(xml)s.") % {'xml': xml_config_file}) - LOG.error(msg) - raise exception.ConfigNotFound(message=msg) - else: - LOG.warning("This XML configuration file %(xml)s is deprecated. " - "Please, move all the configurations to the " - "cinder.conf file. If you keep both configuration " - "files, the options set on cinder.conf will be " - "used.", {'xml': xml_config_file}) - - try: - root = ETree.parse(xml_config_file).getroot() - except ETree.ParseError: - msg = (_("Error parsing config file: %(xml_config_file)s") % - {'xml_config_file': xml_config_file}) - LOG.error(msg) - raise exception.ConfigNotFound(message=msg) - - # mandatory parameters for NFS - config = {} - arg_prereqs = ['mgmt_ip0', 'username'] - for req in arg_prereqs: - config[req] = _xml_read(root, req, 'check') - - # optional parameters for NFS - for req in optional_params: - config[req] = _xml_read(root, req) - if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None: - config[req] = HNAS_DEFAULT_CONFIG.get(req) - - config['ssh_private_key'] = _xml_read(root, 'ssh_private_key') - config['password'] = _xml_read(root, 'password') - - if config['ssh_private_key'] is None and config['password'] is None: - msg = _("Missing authentication option (passw or private key file).") - LOG.error(msg) - raise exception.ConfigNotFound(message=msg) - - if _xml_read(root, 'ssh_port') is not None: - config['ssh_port'] = int(_xml_read(root, 'ssh_port')) - else: - config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] - - config['fs'] = {} - config['services'] = {} - - # min one needed - for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: - if _xml_read(root, svc) is None: - continue - service = {'label': svc} - - # none optional - for arg in svc_params: - service[arg] = _xml_read(root, svc + '/' + arg, 'check') - - # Backward compatibility with volume_type - service.setdefault('pool_name', service.pop('volume_type', None)) - - config['services'][service['pool_name']] = service - config['fs'][service['hdp']] = service['hdp'] - - # at least one service required! 
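    # --- editorial aside, not part of this diff -----------------------------
    # Minimal shape of the legacy XML that read_xml_config() parses; the
    # tag names come from this module, the sample values are made up:
    #
    #     <config>
    #       <mgmt_ip0>172.24.44.15</mgmt_ip0>
    #       <username>supervisor</username>
    #       <password>supervisor</password>
    #       <svc_0>
    #         <volume_type>default</volume_type>
    #         <hdp>fs2</hdp>
    #       </svc_0>
    #     </config>
    # -------------------------------------------------------------------------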
- if not config['services'].keys(): - LOG.error("No service found in xml config file") - raise exception.ParameterNotFound(param="svc_0") - - return config - - -def get_pool(config, volume): - """Get the pool of a volume. - - :param config: dictionary containing the configuration parameters - :param volume: dictionary volume reference - :returns: the pool related to the volume - """ - - if volume.volume_type: - metadata = {} - type_id = volume.volume_type_id - if type_id is not None: - metadata = volume_types.get_volume_type_extra_specs(type_id) - if metadata.get('service_label'): - if metadata['service_label'] in config['services'].keys(): - return metadata['service_label'] - return 'default' - - -def read_cinder_conf(config_opts): - """Reads cinder.conf - - Gets the driver specific information set on cinder.conf configuration - file. - - :param config_opts: Configuration object that contains the information - needed by HNAS driver - :param dv_type: The type of the driver (NFS or iSCSI) - :returns: Dictionary with the driver configuration - """ - - config = {} - config['services'] = {} - config['fs'] = {} - mandatory_parameters = ['username', 'password', 'mgmt_ip0'] - optional_parameters = ['ssc_cmd', - 'ssh_port', 'cluster_admin_ip0', - 'ssh_private_key'] - - # Trying to get the mandatory parameters from cinder.conf - for opt in mandatory_parameters: - config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt}) - - # If there is at least one of the mandatory parameters in - # cinder.conf, we assume that we should use the configuration - # from this file. - # Otherwise, we use the configuration from the deprecated XML file. - for param in mandatory_parameters: - if config[param] is not None: - break - else: - return None - - # Getting the optional parameters from cinder.conf - for opt in optional_parameters: - config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt}) - - # It's possible to have up to 4 pools configured. - for i in range(0, 4): - idx = six.text_type(i) - svc_pool_name = (config_opts.safe_get( - 'hnas_svc%(idx)s_pool_name' % {'idx': idx})) - - svc_hdp = (config_opts.safe_get( - 'hnas_svc%(idx)s_hdp' % {'idx': idx})) - - # It's mandatory to have at least 1 pool configured (svc_0) - if (idx == '0' or svc_pool_name is not None or - svc_hdp is not None): - config['services'][svc_pool_name] = {} - config['fs'][svc_hdp] = svc_hdp - config['services'][svc_pool_name]['hdp'] = svc_hdp - config['services'][svc_pool_name]['pool_name'] = svc_pool_name - - config['services'][svc_pool_name]['label'] = ( - 'svc_%(idx)s' % {'idx': idx}) - # Checking to ensure that the pools configurations are complete - _check_conf_params(config, svc_pool_name, idx) - - return config diff --git a/cinder/volume/drivers/hitachi/vsp_common.py b/cinder/volume/drivers/hitachi/vsp_common.py deleted file mode 100644 index a6ae748b9fa..00000000000 --- a/cinder/volume/drivers/hitachi/vsp_common.py +++ /dev/null @@ -1,955 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Common module for Hitachi VSP Driver.""" - -import abc -import re - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import coordination -from cinder import exception -from cinder import utils as cinder_utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import vsp_utils as utils -from cinder.volume import utils as volume_utils - - -VERSION = '1.0.0' - -_COPY_METHOD = set(['FULL', 'THIN']) - -_INHERITED_VOLUME_OPTS = [ - 'volume_backend_name', - 'volume_driver', - 'reserved_percentage', - 'use_multipath_for_image_xfer', - 'enforce_multipath_for_image_xfer', - 'num_volume_device_scan_tries', -] - -common_opts = [ - cfg.StrOpt( - 'vsp_storage_id', - help='Product number of the storage system.'), - cfg.StrOpt( - 'vsp_pool', - help='Pool number or pool name of the DP pool.'), - cfg.StrOpt( - 'vsp_thin_pool', - help='Pool number or pool name of the Thin Image pool.'), - cfg.StrOpt( - 'vsp_ldev_range', - help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' - 'can be used by the driver. Values can be in decimal format ' - '(e.g. 1000) or in colon-separated hexadecimal format ' - '(e.g. 00:03:E8).'), - cfg.StrOpt( - 'vsp_default_copy_method', - default='FULL', - choices=['FULL', 'THIN'], - help='Method of volume copy. FULL indicates full data copy by ' - 'Shadow Image and THIN indicates differential data copy by Thin ' - 'Image.'), - cfg.IntOpt( - 'vsp_copy_speed', - min=1, - max=15, - default=3, - help='Speed at which data is copied by Shadow Image. 1 or 2 indicates ' - 'low speed, 3 indicates middle speed, and a value between 4 and ' - '15 indicates high speed.'), - cfg.IntOpt( - 'vsp_copy_check_interval', - min=1, - max=600, - default=3, - help='Interval in seconds at which volume pair synchronization status ' - 'is checked when volume pairs are created.'), - cfg.IntOpt( - 'vsp_async_copy_check_interval', - min=1, - max=600, - default=10, - help='Interval in seconds at which volume pair synchronization status ' - 'is checked when volume pairs are deleted.'), - cfg.ListOpt( - 'vsp_target_ports', - help='IDs of the storage ports used to attach volumes to the ' - 'controller node. To specify multiple ports, connect them by ' - 'commas (e.g. CL1-A,CL2-A).'), - cfg.ListOpt( - 'vsp_compute_target_ports', - help='IDs of the storage ports used to attach volumes to compute ' - 'nodes. To specify multiple ports, connect them by commas ' - '(e.g. 
CL1-A,CL2-A).'), - cfg.BoolOpt( - 'vsp_group_request', - default=False, - help='If True, the driver will create host groups or iSCSI targets on ' - 'storage ports as needed.'), -] - -_REQUIRED_COMMON_OPTS = [ - 'vsp_storage_id', - 'vsp_pool', -] - -CONF = cfg.CONF -CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -def _str2int(num): - """Convert a string into an integer.""" - if not num: - return None - if num.isdigit(): - return int(num) - if not re.match(r'\w\w:\w\w:\w\w', num): - return None - try: - return int(num.replace(':', ''), 16) - except ValueError: - return None - - -@six.add_metaclass(abc.ABCMeta) -class VSPCommon(object): - """Common class for Hitachi VSP Driver.""" - - def __init__(self, conf, driverinfo, db): - """Initialize instance variables.""" - self.conf = conf - self.db = db - self.ctxt = None - self.lock = {} - self.driver_info = driverinfo - self.storage_info = { - 'protocol': driverinfo['proto'], - 'pool_id': None, - 'ldev_range': [], - 'controller_ports': [], - 'compute_ports': [], - 'pair_ports': [], - 'wwns': {}, - 'portals': {}, - 'output_first': True, - } - - self._stats = {} - - def run_and_verify_storage_cli(self, *cmd, **kwargs): - """Run storage CLI and return the result or raise an exception.""" - do_raise = kwargs.pop('do_raise', True) - ignore_error = kwargs.get('ignore_error') - success_code = kwargs.get('success_code', set([0])) - (ret, stdout, stderr) = self.run_storage_cli(*cmd, **kwargs) - if (ret not in success_code and - not utils.check_ignore_error(ignore_error, stderr)): - msg = utils.output_log( - MSG.STORAGE_COMMAND_FAILED, cmd=utils.mask_password(cmd), - ret=ret, out=' '.join(stdout.splitlines()), - err=' '.join(stderr.splitlines())) - if do_raise: - raise exception.VSPError(msg) - return ret, stdout, stderr - - @abc.abstractmethod - def run_storage_cli(self, *cmd, **kwargs): - """Run storage CLI.""" - raise NotImplementedError() - - def get_copy_method(self, metadata): - """Return copy method(FULL or THIN).""" - method = metadata.get( - 'copy_method', self.conf.vsp_default_copy_method) - if method not in _COPY_METHOD: - msg = utils.output_log(MSG.INVALID_PARAMETER_VALUE, - meta='copy_method') - raise exception.VSPError(msg) - if method == 'THIN' and not self.conf.vsp_thin_pool: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_thin_pool') - raise exception.VSPError(msg) - return method - - def create_volume(self, volume): - """Create a volume and return its properties.""" - try: - ldev = self.create_ldev(volume['size']) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - utils.output_log(MSG.CREATE_LDEV_FAILED) - return { - 'provider_location': six.text_type(ldev), - } - - def create_ldev(self, size, is_vvol=False): - """Create an LDEV and return its LDEV number.""" - ldev = self.get_unused_ldev() - self.create_ldev_on_storage(ldev, size, is_vvol) - LOG.debug('Created logical device. (LDEV: %s)', ldev) - return ldev - - @abc.abstractmethod - def create_ldev_on_storage(self, ldev, size, is_vvol): - """Create an LDEV on the storage system.""" - raise NotImplementedError() - - @abc.abstractmethod - def get_unused_ldev(self): - """Find an unused LDEV and return its LDEV number.""" - raise NotImplementedError() - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot and return its properties.""" - ldev = utils.get_ldev(snapshot) - # When 'ldev' is 0, it should be true. 
- # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log( - MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='snapshot', - id=snapshot['id']) - raise exception.VSPError(msg) - size = volume['size'] - metadata = utils.get_volume_metadata(volume) - if size < snapshot['volume_size']: - msg = utils.output_log( - MSG.INVALID_VOLUME_SIZE_FOR_COPY, type='snapshot', - volume_id=volume['id']) - raise exception.VSPError(msg) - elif (size > snapshot['volume_size'] and not self.check_vvol(ldev) and - self.get_copy_method(metadata) == "THIN"): - msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI, - copy_method=utils.THIN, - type='snapshot', volume_id=volume['id']) - raise exception.VSPError(msg) - sync = size > snapshot['volume_size'] - new_ldev = self._copy_ldev( - ldev, snapshot['volume_size'], metadata, sync) - if sync: - self.delete_pair(new_ldev) - self.extend_ldev(new_ldev, snapshot['volume_size'], size) - return { - 'provider_location': six.text_type(new_ldev), - } - - def _copy_ldev(self, ldev, size, metadata, sync=False): - """Create a copy of the specified volume and return its properties.""" - try: - return self.copy_on_storage(ldev, size, metadata, sync) - except exception.VSPNotSupported: - return self._copy_on_host(ldev, size) - - def _copy_on_host(self, src_ldev, size): - """Create a copy of the specified LDEV via host.""" - dest_ldev = self.create_ldev(size) - try: - self._copy_with_dd(src_ldev, dest_ldev, size) - except Exception: - with excutils.save_and_reraise_exception(): - try: - self._delete_ldev(dest_ldev) - except exception.VSPError: - utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=dest_ldev) - return dest_ldev - - def _copy_with_dd(self, src_ldev, dest_ldev, size): - """Copy the content of a volume by dd command.""" - src_info = None - dest_info = None - properties = cinder_utils.brick_get_connector_properties( - multipath=self.conf.use_multipath_for_image_xfer, - enforce_multipath=self.conf.enforce_multipath_for_image_xfer) - try: - dest_info = self._attach_ldev(dest_ldev, properties) - src_info = self._attach_ldev(src_ldev, properties) - volume_utils.copy_volume( - src_info['device']['path'], dest_info['device']['path'], - size * units.Ki, self.conf.volume_dd_blocksize) - finally: - if src_info: - self._detach_ldev(src_info, src_ldev, properties) - if dest_info: - self._detach_ldev(dest_info, dest_ldev, properties) - self.discard_zero_page({'provider_location': six.text_type(dest_ldev)}) - - def _attach_ldev(self, ldev, properties): - """Attach the specified LDEV to the server.""" - volume = { - 'provider_location': six.text_type(ldev), - } - conn = self.initialize_connection(volume, properties) - try: - connector = cinder_utils.brick_get_connector( - conn['driver_volume_type'], - use_multipath=self.conf.use_multipath_for_image_xfer, - device_scan_attempts=self.conf.num_volume_device_scan_tries, - conn=conn) - device = connector.connect_volume(conn['data']) - except Exception as ex: - with excutils.save_and_reraise_exception(): - utils.output_log(MSG.CONNECT_VOLUME_FAILED, ldev=ldev, - reason=six.text_type(ex)) - self._terminate_connection(volume, properties) - return { - 'conn': conn, - 'device': device, - 'connector': connector, - } - - def _detach_ldev(self, attach_info, ldev, properties): - """Detach the specified LDEV from the server.""" - volume = { - 'provider_location': six.text_type(ldev), - } - connector = attach_info['connector'] - try: - connector.disconnect_volume( - attach_info['conn']['data'], attach_info['device']) - except Exception as 
ex: - utils.output_log(MSG.DISCONNECT_VOLUME_FAILED, ldev=ldev, - reason=six.text_type(ex)) - self._terminate_connection(volume, properties) - - def _terminate_connection(self, volume, connector): - """Disconnect the specified volume from the server.""" - try: - self.terminate_connection(volume, connector) - except exception.VSPError: - utils.output_log(MSG.UNMAP_LDEV_FAILED, - ldev=utils.get_ldev(volume)) - - def copy_on_storage(self, pvol, size, metadata, sync): - """Create a copy of the specified LDEV on the storage.""" - is_thin = self.get_copy_method(metadata) == "THIN" - svol = self.create_ldev(size, is_vvol=is_thin) - try: - self.create_pair_on_storage(pvol, svol, is_thin) - if sync: - self.wait_full_copy_completion(pvol, svol) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - try: - self._delete_ldev(svol) - except exception.VSPError: - utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol) - return svol - - @abc.abstractmethod - def create_pair_on_storage(self, pvol, svol, is_thin): - """Create a copy pair on the storage.""" - raise NotImplementedError() - - def _delete_ldev(self, ldev): - """Delete the specified LDEV.""" - self.delete_pair(ldev) - self.unmap_ldev_from_storage(ldev) - self.delete_ldev_from_storage(ldev) - - def unmap_ldev_from_storage(self, ldev): - """Delete the connection between the specified LDEV and servers.""" - targets = { - 'list': [], - } - self.find_all_mapped_targets_from_storage(targets, ldev) - self.unmap_ldev(targets, ldev) - - @abc.abstractmethod - def find_all_mapped_targets_from_storage(self, targets, ldev): - """Add all port-gids connected with the LDEV to the list.""" - raise NotImplementedError() - - def delete_pair(self, ldev, all_split=True): - """Disconnect all volume pairs to which the specified LDEV belongs.""" - pair_info = self.get_pair_info(ldev) - if not pair_info: - return - if pair_info['pvol'] == ldev: - self.delete_pair_based_on_pvol(pair_info, all_split) - else: - self.delete_pair_based_on_svol( - pair_info['pvol'], pair_info['svol_info'][0]) - - @abc.abstractmethod - def get_pair_info(self, ldev): - """Return volume pair info(LDEV number, pair status and pair type).""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_pair_based_on_pvol(self, pair_info, all_split): - """Disconnect all volume pairs to which the specified P-VOL belongs.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_pair_based_on_svol(self, pvol, svol_info): - """Disconnect all volume pairs to which the specified S-VOL belongs.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_pair_from_storage(self, pvol, svol, is_thin): - """Disconnect the volume pair that consists of the specified LDEVs.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_ldev_from_storage(self, ldev): - """Delete the specified LDEV from the storage.""" - raise NotImplementedError() - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume and return its properties.""" - ldev = utils.get_ldev(src_vref) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. 
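    # --- editorial aside, not part of this diff -----------------------------
    # LDEV numbers start at 0 and bool(0) is False, so a truthiness test
    # such as "if not ldev" would misclassify a valid LDEV 0 as missing;
    # hence the explicit "is None" checks repeated throughout this module.
    # -------------------------------------------------------------------------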
- if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY, - type='volume', id=src_vref['id']) - raise exception.VSPError(msg) - size = volume['size'] - metadata = utils.get_volume_metadata(volume) - if size < src_vref['size']: - msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_COPY, - type='volume', volume_id=volume['id']) - raise exception.VSPError(msg) - elif (size > src_vref['size'] and not self.check_vvol(ldev) and - self.get_copy_method(metadata) == "THIN"): - msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI, - copy_method=utils.THIN, type='volume', - volume_id=volume['id']) - raise exception.VSPError(msg) - sync = size > src_vref['size'] - new_ldev = self._copy_ldev(ldev, src_vref['size'], metadata, sync) - if sync: - self.delete_pair(new_ldev) - self.extend_ldev(new_ldev, src_vref['size'], size) - return { - 'provider_location': six.text_type(new_ldev), - } - - def delete_volume(self, volume): - """Delete the specified volume.""" - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. - if ldev is None: - utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, - method='delete_volume', id=volume['id']) - return - try: - self._delete_ldev(ldev) - except exception.VSPBusy: - raise exception.VolumeIsBusy(volume_name=volume['name']) - - def create_snapshot(self, snapshot): - """Create a snapshot from a volume and return its properties.""" - src_vref = snapshot.volume - ldev = utils.get_ldev(src_vref) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY, - type='volume', id=src_vref['id']) - raise exception.VSPError(msg) - size = snapshot['volume_size'] - metadata = utils.get_volume_metadata(src_vref) - new_ldev = self._copy_ldev(ldev, size, metadata) - return { - 'provider_location': six.text_type(new_ldev), - } - - def delete_snapshot(self, snapshot): - """Delete the specified snapshot.""" - ldev = utils.get_ldev(snapshot) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if ldev is None: - utils.output_log( - MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', - id=snapshot['id']) - return - try: - self._delete_ldev(ldev) - except exception.VSPBusy: - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - - def get_volume_stats(self, refresh=False): - """Return properties, capabilities and current states of the driver.""" - if refresh: - if self.storage_info['output_first']: - self.storage_info['output_first'] = False - utils.output_log(MSG.DRIVER_READY_FOR_USE, - config_group=self.conf.config_group) - self._update_volume_stats() - return self._stats - - def _update_volume_stats(self): - """Update properties, capabilities and current states of the driver.""" - data = {} - backend_name = self.conf.safe_get('volume_backend_name') - data['volume_backend_name'] = ( - backend_name or self.driver_info['volume_backend_name']) - data['vendor_name'] = 'Hitachi' - data['driver_version'] = VERSION - data['storage_protocol'] = self.storage_info['protocol'] - try: - total_gb, free_gb = self.get_pool_info() - except exception.VSPError: - utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, - pool=self.conf.vsp_pool) - return - data['total_capacity_gb'] = total_gb - data['free_capacity_gb'] = free_gb - data['reserved_percentage'] = self.conf.safe_get('reserved_percentage') - data['QoS_support'] = False - data['multiattach'] = False - LOG.debug("Updating volume status. (%s)", data) - self._stats = data - - @abc.abstractmethod - def get_pool_info(self): - """Return the total and free capacity of the storage pool.""" - raise NotImplementedError() - - @abc.abstractmethod - def discard_zero_page(self, volume): - """Return the volume's no-data pages to the storage pool.""" - raise NotImplementedError() - - def extend_volume(self, volume, new_size): - """Extend the specified volume to the specified size.""" - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION, - volume_id=volume['id']) - raise exception.VSPError(msg) - if self.check_vvol(ldev): - msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND, - volume_id=volume['id']) - raise exception.VSPError(msg) - self.delete_pair(ldev) - self.extend_ldev(ldev, volume['size'], new_size) - - @abc.abstractmethod - def check_vvol(self, ldev): - """Return True if the specified LDEV is V-VOL, False otherwise.""" - raise NotImplementedError() - - @abc.abstractmethod - def extend_ldev(self, ldev, old_size, new_size): - """Extend the specified LDEV to the specified new size.""" - raise NotImplementedError() - - def manage_existing(self, existing_ref): - """Return volume properties which Cinder needs to manage the volume.""" - ldev = _str2int(existing_ref.get('source-id')) - return { - 'provider_location': six.text_type(ldev), - } - - def manage_existing_get_size(self, existing_ref): - """Return the size[GB] of the specified volume.""" - ldev = _str2int(existing_ref.get('source-id')) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - return self.get_ldev_size_in_gigabyte(ldev, existing_ref) - - @abc.abstractmethod - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - """Return the size[GB] of the specified LDEV.""" - raise NotImplementedError() - - def unmanage(self, volume): - """Prepare the volume for removing it from Cinder management.""" - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage', - id=volume['id']) - return - if self.check_vvol(ldev): - utils.output_log( - MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'], - volume_type=utils.NORMAL_LDEV_TYPE) - raise exception.VolumeIsBusy(volume_name=volume['name']) - try: - self.delete_pair(ldev) - except exception.VSPBusy: - raise exception.VolumeIsBusy(volume_name=volume['name']) - - def do_setup(self, context): - """Prepare for the startup of the driver.""" - self.ctxt = context - - self.check_param() - self.config_lock() - self.connect_storage() - self.init_cinder_hosts() - self.output_param_to_log() - - def check_param(self): - """Check parameter values and consistency among them.""" - utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS) - utils.check_opts(self.conf, common_opts) - utils.check_opts(self.conf, self.driver_info['volume_opts']) - if (self.conf.vsp_default_copy_method == 'THIN' and - not self.conf.vsp_thin_pool): - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_thin_pool') - raise exception.VSPError(msg) - if self.conf.vsp_ldev_range: - self.storage_info['ldev_range'] = self._range2list( - 'vsp_ldev_range') - if (not self.conf.vsp_target_ports and - not self.conf.vsp_compute_target_ports): - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_target_ports or ' - 'vsp_compute_target_ports') - raise exception.VSPError(msg) - for opt in _REQUIRED_COMMON_OPTS: - if not self.conf.safe_get(opt): - msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt) - raise exception.VSPError(msg) - if self.storage_info['protocol'] == 'iSCSI': - self.check_param_iscsi() - - def check_param_iscsi(self): - """Check iSCSI-related parameter values and consistency among them.""" - if self.conf.vsp_use_chap_auth: - if not self.conf.vsp_auth_user: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_auth_user') - raise exception.VSPError(msg) - if not self.conf.vsp_auth_password: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_auth_password') - raise exception.VSPError(msg) - - def _range2list(self, param): - """Analyze a 'xxx-xxx' string and return a list of two integers.""" - values = [_str2int(value) for value in - self.conf.safe_get(param).split('-')] - if (len(values) != 2 or - values[0] is None or values[1] is None or - values[0] > values[1]): - msg = utils.output_log(MSG.INVALID_PARAMETER, param=param) - raise exception.VSPError(msg) - return values - - @abc.abstractmethod - def config_lock(self): - """Initialize lock resource names.""" - raise NotImplementedError() - - def connect_storage(self): - """Prepare for using the storage.""" - self.storage_info['pool_id'] = self.get_pool_id() - # When 'pool_id' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if self.storage_info['pool_id'] is None: - msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=self.conf.vsp_pool) - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID', - value=self.storage_info['pool_id']) - - def check_ports_info(self): - """Check if available storage ports exist.""" - if (self.conf.vsp_target_ports and - not self.storage_info['controller_ports']): - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Target ports") - raise exception.VSPError(msg) - if (self.conf.vsp_compute_target_ports and - not self.storage_info['compute_ports']): - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Compute target ports") - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list', - value=self.storage_info['controller_ports']) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='compute target port list', - value=self.storage_info['compute_ports']) - - def get_pool_id(self): - """Return the storage pool ID as integer.""" - pool = self.conf.vsp_pool - if pool.isdigit(): - return int(pool) - return None - - def init_cinder_hosts(self, **kwargs): - """Initialize server-storage connection.""" - targets = kwargs.pop('targets', {'info': {}, 'list': [], 'iqns': {}}) - connector = cinder_utils.brick_get_connector_properties( - multipath=self.conf.use_multipath_for_image_xfer, - enforce_multipath=self.conf.enforce_multipath_for_image_xfer) - target_ports = self.storage_info['controller_ports'] - - if target_ports: - if (self.find_targets_from_storage( - targets, connector, target_ports) and - self.conf.vsp_group_request): - self.create_mapping_targets(targets, connector) - - utils.require_target_existed(targets) - - @abc.abstractmethod - def find_targets_from_storage(self, targets, connector, target_ports): - """Find mapped ports, memorize them and return unmapped port count.""" - raise NotImplementedError() - - def create_mapping_targets(self, targets, connector): - """Create server-storage connection for all specified storage ports.""" - hba_ids = self.get_hba_ids_from_connector(connector) - for port in targets['info'].keys(): - if targets['info'][port]: - continue - - try: - self._create_target(targets, port, connector, hba_ids) - except exception.VSPError: - utils.output_log( - self.driver_info['msg_id']['target'], port=port) - - if not targets['list']: - self.find_targets_from_storage( - targets, connector, targets['info'].keys()) - - def get_hba_ids_from_connector(self, connector): - """Return the HBA ID stored in the connector.""" - if self.driver_info['hba_id'] in connector: - return connector[self.driver_info['hba_id']] - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource=self.driver_info['hba_id_type']) - raise exception.VSPError(msg) - - def _create_target(self, targets, port, connector, hba_ids): - """Create a host group or an iSCSI target on the storage port.""" - target_name, gid = self.create_target_to_storage(port, connector, - hba_ids) - utils.output_log(MSG.OBJECT_CREATED, object='a target', - details='port: %(port)s, gid: %(gid)s, target_name: ' - '%(target)s' % - {'port': port, 'gid': gid, 'target': target_name}) - try: - self.set_target_mode(port, gid) - self.set_hba_ids(port, gid, hba_ids) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - self.delete_target_from_storage(port, gid) - targets['info'][port] = True - targets['list'].append((port, gid)) - - @abc.abstractmethod - def create_target_to_storage(self, port, connector, hba_ids): - """Create a 
host group or an iSCSI target on the specified port.""" - raise NotImplementedError() - - @abc.abstractmethod - def set_target_mode(self, port, gid): - """Configure the target to meet the environment.""" - raise NotImplementedError() - - @abc.abstractmethod - def set_hba_ids(self, port, gid, hba_ids): - """Connect all specified HBAs with the specified port.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_target_from_storage(self, port, gid): - """Delete the host group or the iSCSI target from the port.""" - raise NotImplementedError() - - def output_param_to_log(self): - """Output configuration parameter values to the log file.""" - utils.output_log(MSG.OUTPUT_PARAMETER_VALUES, - config_group=self.conf.config_group) - name, version = self.get_storage_cli_info() - utils.output_storage_cli_info(name, version) - utils.output_opt_info(self.conf, _INHERITED_VOLUME_OPTS) - utils.output_opts(self.conf, common_opts) - utils.output_opts(self.conf, self.driver_info['volume_opts']) - - @abc.abstractmethod - def get_storage_cli_info(self): - """Return a tuple of the storage CLI name and its version.""" - raise NotImplementedError() - - @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' - '{connector[host]}') - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - targets = { - 'info': {}, - 'list': [], - 'lun': {}, - 'iqns': {}, - } - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION, - volume_id=volume['id']) - raise exception.VSPError(msg) - - target_ports = self.get_target_ports(connector) - if (self.find_targets_from_storage( - targets, connector, target_ports) and - self.conf.vsp_group_request): - self.create_mapping_targets(targets, connector) - - utils.require_target_existed(targets) - - targets['list'].sort() - for port in target_ports: - targets['lun'][port] = False - target_lun = int(self.map_ldev(targets, ldev)) - - return { - 'driver_volume_type': self.driver_info['volume_type'], - 'data': self.get_properties(targets, connector, target_lun), - } - - def get_target_ports(self, connector): - """Return a list of ports corresponding to the specified connector.""" - if 'ip' in connector and connector['ip'] == CONF.my_ip: - return self.storage_info['controller_ports'] - return (self.storage_info['compute_ports'] or - self.storage_info['controller_ports']) - - @abc.abstractmethod - def map_ldev(self, targets, ldev): - """Create the path between the server and the LDEV and return LUN.""" - raise NotImplementedError() - - def get_properties(self, targets, connector, target_lun=None): - """Return server-LDEV connection info.""" - multipath = connector.get('multipath', False) - if self.storage_info['protocol'] == 'FC': - data = self.get_properties_fc(targets) - elif self.storage_info['protocol'] == 'iSCSI': - data = self.get_properties_iscsi(targets, multipath) - if target_lun is not None: - data['target_discovered'] = False - if not multipath or self.storage_info['protocol'] == 'FC': - data['target_lun'] = target_lun - else: - target_luns = [] - for target in targets['list']: - if targets['lun'][target[0]]: - target_luns.append(target_lun) - data['target_luns'] = target_luns - return data - - def get_properties_fc(self, targets): - """Return FC-specific server-LDEV connection info.""" - data = {} - data['target_wwn'] = [ - self.storage_info['wwns'][target[0]] 
for target in targets['list'] - if targets['lun'][target[0]]] - return data - - def get_properties_iscsi(self, targets, multipath): - """Return iSCSI-specific server-LDEV connection info.""" - data = {} - primary_target = targets['list'][0] - if not multipath: - data['target_portal'] = self.storage_info[ - 'portals'][primary_target[0]] - data['target_iqn'] = targets['iqns'][primary_target] - else: - data['target_portals'] = [ - self.storage_info['portals'][target[0]] for target in - targets['list'] if targets['lun'][target[0]]] - data['target_iqns'] = [ - targets['iqns'][target] for target in targets['list'] - if targets['lun'][target[0]]] - if self.conf.vsp_use_chap_auth: - data['auth_method'] = 'CHAP' - data['auth_username'] = self.conf.vsp_auth_user - data['auth_password'] = self.conf.vsp_auth_password - return data - - @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' - '{connector[host]}') - def terminate_connection(self, volume, connector): - """Terminate connection between the server and the volume.""" - targets = { - 'info': {}, - 'list': [], - 'iqns': {}, - } - mapped_targets = { - 'list': [], - } - unmap_targets = {} - - ldev = utils.get_ldev(volume) - if ldev is None: - utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING, - volume_id=volume['id']) - return - target_ports = self.get_target_ports(connector) - self.find_targets_from_storage(targets, connector, target_ports) - if not targets['list']: - utils.output_log(MSG.NO_CONNECTED_TARGET) - self.find_mapped_targets_from_storage( - mapped_targets, ldev, target_ports) - - unmap_targets['list'] = self.get_unmap_targets_list( - targets['list'], mapped_targets['list']) - unmap_targets['list'].sort(reverse=True) - self.unmap_ldev(unmap_targets, ldev) - - if self.storage_info['protocol'] == 'FC': - target_wwn = [ - self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]] - for port_gid in unmap_targets['list']] - return {'driver_volume_type': self.driver_info['volume_type'], - 'data': {'target_wwn': target_wwn}} - - @abc.abstractmethod - def find_mapped_targets_from_storage(self, targets, ldev, target_ports): - """Find and store IDs of ports used for server-LDEV connection.""" - raise NotImplementedError() - - @abc.abstractmethod - def get_unmap_targets_list(self, target_list, mapped_list): - """Return a list of IDs of ports that need to be disconnected.""" - raise NotImplementedError() - - @abc.abstractmethod - def unmap_ldev(self, targets, ldev): - """Delete the LUN between the specified LDEV and port-gid.""" - raise NotImplementedError() - - @abc.abstractmethod - def wait_full_copy_completion(self, pvol, svol): - """Wait until FULL copy is completed.""" - raise NotImplementedError() diff --git a/cinder/volume/drivers/hitachi/vsp_fc.py b/cinder/volume/drivers/hitachi/vsp_fc.py deleted file mode 100644 index 3ab4b43f13a..00000000000 --- a/cinder/volume/drivers/hitachi/vsp_fc.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Fibre channel module for Hitachi VSP Driver.""" - -from oslo_config import cfg - -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_common as common -from cinder.volume.drivers.hitachi import vsp_utils as utils - -fc_opts = [ - cfg.BoolOpt( - 'vsp_zoning_request', - default=False, - help='If True, the driver will configure FC zoning between the server ' - 'and the storage system provided that FC zoning manager is ' - 'enabled.'), -] - -MSG = utils.VSPMsg - -_DRIVER_INFO = { - 'proto': 'FC', - 'hba_id': 'wwpns', - 'hba_id_type': 'World Wide Name', - 'msg_id': { - 'target': MSG.CREATE_HOST_GROUP_FAILED, - }, - 'volume_backend_name': utils.DRIVER_PREFIX + 'FC', - 'volume_opts': fc_opts, - 'volume_type': 'fibre_channel', -} - -CONF = cfg.CONF -CONF.register_opts(fc_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class VSPFCDriver(driver.FibreChannelDriver): - """Fibre channel class for Hitachi VSP Driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver. - - """ - - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Hitachi_VSP_CI" - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - """Initialize instance variables.""" - utils.output_log(MSG.DRIVER_INITIALIZATION_START, - driver=self.__class__.__name__, - version=self.get_version()) - super(VSPFCDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(common.common_opts) - self.configuration.append_config_values(fc_opts) - self.common = utils.import_object( - self.configuration, _DRIVER_INFO, kwargs.get('db')) - - def check_for_setup_error(self): - """Error are checked in do_setup() instead of this method.""" - pass - - @utils.output_start_end_log - def create_volume(self, volume): - """Create a volume and return its properties.""" - return self.common.create_volume(volume) - - @utils.output_start_end_log - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot and return its properties.""" - return self.common.create_volume_from_snapshot(volume, snapshot) - - @utils.output_start_end_log - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume and return its properties.""" - return self.common.create_cloned_volume(volume, src_vref) - - @utils.output_start_end_log - def delete_volume(self, volume): - """Delete the specified volume.""" - self.common.delete_volume(volume) - - @utils.output_start_end_log - def create_snapshot(self, snapshot): - """Create a snapshot from a volume and return its properties.""" - return self.common.create_snapshot(snapshot) - - @utils.output_start_end_log - def delete_snapshot(self, snapshot): - """Delete the specified snapshot.""" - self.common.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Return properties, capabilities and current states of the driver.""" - return self.common.get_volume_stats(refresh) - - @utils.output_start_end_log - def update_migrated_volume( - self, ctxt, volume, new_volume, original_volume_status): - """Do any remaining jobs after migration.""" - self.common.discard_zero_page(new_volume) - super(VSPFCDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) - - @utils.output_start_end_log - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" 
- super(VSPFCDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - self.common.discard_zero_page(volume) - - @utils.output_start_end_log - def extend_volume(self, volume, new_size): - """Extend the specified volume to the specified size.""" - self.common.extend_volume(volume, new_size) - - @utils.output_start_end_log - def manage_existing(self, volume, existing_ref): - """Return volume properties which Cinder needs to manage the volume.""" - return self.common.manage_existing(existing_ref) - - @utils.output_start_end_log - def manage_existing_get_size(self, volume, existing_ref): - """Return the size[GB] of the specified volume.""" - return self.common.manage_existing_get_size(existing_ref) - - @utils.output_start_end_log - def unmanage(self, volume): - """Prepare the volume for removing it from Cinder management.""" - self.common.unmanage(volume) - - @utils.output_start_end_log - def do_setup(self, context): - """Prepare for the startup of the driver.""" - self.common.do_setup(context) - - def ensure_export(self, context, volume): - """Synchronously recreate an export for a volume.""" - pass - - def create_export(self, context, volume, connector): - """Export the volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a volume.""" - pass - - @utils.output_start_end_log - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - return self.common.initialize_connection(volume, connector) - - @utils.output_start_end_log - def terminate_connection(self, volume, connector, **kwargs): - """Terminate connection between the server and the volume.""" - self.common.terminate_connection(volume, connector) diff --git a/cinder/volume/drivers/hitachi/vsp_horcm.py b/cinder/volume/drivers/hitachi/vsp_horcm.py deleted file mode 100644 index 1df062aebe6..00000000000 --- a/cinder/volume/drivers/hitachi/vsp_horcm.py +++ /dev/null @@ -1,1437 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
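The wrapper class above registers its options twice on purpose: once globally under the shared backend-defaults group (so they show up in cinder.opts and generated sample configs), and once on the per-backend configuration object at driver init. A condensed sketch of that pattern; the option name is illustrative:

    from oslo_config import cfg

    from cinder.volume import configuration

    example_opts = [
        cfg.BoolOpt('example_flag', default=False, help='Illustrative option.'),
    ]

    # Registered globally for config generation / shared defaults ...
    cfg.CONF.register_opts(example_opts, group=configuration.SHARED_CONF_GROUP)

    # ... and resolved against the backend's own config section in __init__:
    # self.configuration.append_config_values(example_opts)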
-# -"""HORCM interface module for Hitachi VSP Driver.""" - -import functools -import math -import os -import re - -from oslo_config import cfg -from oslo_config import types -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import timeutils -from oslo_utils import units -import six -from six.moves import range - -from cinder import coordination -from cinder import exception -from cinder import utils as cinder_utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import vsp_common as common -from cinder.volume.drivers.hitachi import vsp_utils as utils - -_GETSTORAGEARRAY_ONCE = 1000 -_LU_PATH_DEFINED = 'SSB=0xB958,0x015A' -_ANOTHER_LDEV_MAPPED = 'SSB=0xB958,0x0947' -_NOT_LOCKED = 'SSB=0x2E11,0x2205' -_LOCK_WAITTIME = 2 * 60 * 60 -NORMAL_STS = 'NML' -_LDEV_STATUS_WAITTIME = 120 -_LDEV_CHECK_INTERVAL = 1 -_LDEV_CREATED = ['-check_status', NORMAL_STS] -_LDEV_DELETED = ['-check_status', 'NOT', 'DEFINED'] -_LUN_MAX_WAITTIME = 50 -_LUN_RETRY_INTERVAL = 1 -FULL_ATTR = 'MRCF' -THIN_ATTR = 'QS' -VVOL_ATTR = 'VVOL' -_PERMITTED_TYPES = set(['CVS', 'HDP', 'HDT']) -_PAIR_ATTRS = set([FULL_ATTR, THIN_ATTR]) -_CHECK_KEYS = ('vol_type', 'vol_size', 'num_port', 'vol_attr', 'sts') -_HORCM_WAITTIME = 1 -_EXEC_MAX_WAITTIME = 30 -_EXTEND_WAITTIME = 10 * 60 -_EXEC_RETRY_INTERVAL = 5 -_HORCM_NO_RETRY_ERRORS = [ - 'SSB=0x2E10,0x9705', - 'SSB=0x2E10,0x9706', - 'SSB=0x2E10,0x9707', - 'SSB=0x2E11,0x8303', - 'SSB=0x2E30,0x0007', - 'SSB=0xB956,0x3173', - 'SSB=0xB956,0x31D7', - 'SSB=0xB956,0x31D9', - 'SSB=0xB957,0x4188', - _LU_PATH_DEFINED, - 'SSB=0xB958,0x015E', -] - -SMPL = 1 -PVOL = 2 -SVOL = 3 - -COPY = 2 -PAIR = 3 -PSUS = 4 -PSUE = 5 -UNKN = 0xff - -_STATUS_TABLE = { - 'SMPL': SMPL, - 'COPY': COPY, - 'RCPY': COPY, - 'PAIR': PAIR, - 'PFUL': PAIR, - 'PSUS': PSUS, - 'PFUS': PSUS, - 'SSUS': PSUS, - 'PSUE': PSUE, -} - -_NOT_SET = '-' - -_SMPL_STAUS = set([_NOT_SET, 'SMPL']) - -_HORCM_RUNNING = 1 -_COPY_GROUP = utils.DRIVER_PREFIX + '-%s%s%03X%d' -_SNAP_NAME = utils.DRIVER_PREFIX + '-SNAP' -_LDEV_NAME = utils.DRIVER_PREFIX + '-LDEV-%d-%d' -_PAIR_TARGET_NAME_BODY = 'pair00' -_PAIR_TARGET_NAME = utils.TARGET_PREFIX + _PAIR_TARGET_NAME_BODY -_MAX_MUNS = 3 - -_SNAP_HASH_SIZE = 8 - -ALL_EXIT_CODE = set(range(256)) -HORCM_EXIT_CODE = set(range(128)) -EX_ENAUTH = 202 -EX_ENOOBJ = 205 -EX_CMDRJE = 221 -EX_ENLDEV = 227 -EX_CMDIOE = 237 -EX_ENOGRP = 239 -EX_INVCMD = 240 -EX_INVMOD = 241 -EX_ENORMT = 242 -EX_ENODEV = 246 -EX_ENOENT = 247 -EX_OPTINV = 248 -EX_ATTDBG = 250 -EX_ATTHOR = 251 -EX_INVARG = 253 -EX_COMERR = 255 -_NO_SUCH_DEVICE = [EX_ENOGRP, EX_ENODEV, EX_ENOENT] -_INVALID_RANGE = [EX_ENLDEV, EX_INVARG] -_HORCM_ERROR = set([EX_ENORMT, EX_ATTDBG, EX_ATTHOR, EX_COMERR]) -_COMMAND_IO_TO_RAID = set( - [EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV]) - -_DEFAULT_PORT_BASE = 31000 - -_HORCMGR = 0 -_PAIR_HORCMGR = 1 -_INFINITE = "-" - -_HORCM_PATTERNS = { - 'gid': { - 'pattern': re.compile(r"ID +(?P\d+)\(0x\w+\)"), - 'type': six.text_type, - }, - 'ldev': { - 'pattern': re.compile(r"^LDEV +: +(?P\d+)", re.M), - 'type': int, - }, - 'lun': { - 'pattern': re.compile(r"LUN +(?P\d+)\(0x\w+\)"), - 'type': six.text_type, - }, - 'num_port': { - 'pattern': re.compile(r"^NUM_PORT +: +(?P\d+)", re.M), - 'type': int, - }, - 'pair_gid': { - 'pattern': re.compile( - r"^CL\w-\w+ +(?P\d+) +%s " % _PAIR_TARGET_NAME, re.M), - 'type': six.text_type, - }, - 'ports': { - 'pattern': re.compile(r"^PORTs +: +(?P.+)$", re.M), - 'type': list, - 
-    },
-    'vol_attr': {
-        'pattern': re.compile(r"^VOL_ATTR +: +(?P<vol_attr>.+)$", re.M),
-        'type': list,
-    },
-    'vol_size': {
-        'pattern': re.compile(
-            r"^VOL_Capacity\(BLK\) +: +(?P<vol_size>\d+)$", re.M),
-        'type': int,
-    },
-    'vol_type': {
-        'pattern': re.compile(r"^VOL_TYPE +: +(?P<vol_type>.+)$", re.M),
-        'type': six.text_type,
-    },
-    'sts': {
-        'pattern': re.compile(r"^STS +: +(?P<sts>.+)", re.M),
-        'type': six.text_type,
-    },
-    'undefined_ldev': {
-        'pattern': re.compile(
-            r"^ +\d+ +(?P<undefined_ldev>\d+) +- +- +NOT +DEFINED", re.M),
-        'type': int,
-    },
-}
-
-LDEV_SEP_PATTERN = re.compile(r'\ +:\ +')
-CMD_PATTERN = re.compile(r"((?:^|\n)HORCM_CMD\n)")
-
-horcm_opts = [
-    cfg.ListOpt(
-        'vsp_horcm_numbers',
-        item_type=types.Integer(min=0, max=2047),
-        default=[200, 201],
-        help='Command Control Interface instance numbers in the format of '
-             '\'xxx,yyy\'. The second one is for Shadow Image operation and '
-             'the first one is for other purposes.'),
-    cfg.StrOpt(
-        'vsp_horcm_user',
-        help='Name of the user on the storage system.'),
-    cfg.StrOpt(
-        'vsp_horcm_password',
-        secret=True,
-        help='Password corresponding to vsp_horcm_user.'),
-    cfg.BoolOpt(
-        'vsp_horcm_add_conf',
-        default=True,
-        help='If True, the driver will create or update the Command Control '
-             'Interface configuration file as needed.'),
-    cfg.ListOpt(
-        'vsp_horcm_pair_target_ports',
-        help='IDs of the storage ports used to copy volumes by Shadow Image '
-             'or Thin Image. To specify multiple ports, connect them by '
-             'commas (e.g. CL1-A,CL2-A).'),
-]
-
-_REQUIRED_HORCM_OPTS = [
-    'vsp_horcm_user',
-    'vsp_horcm_password',
-]
-
-CONF = cfg.CONF
-CONF.register_opts(horcm_opts, group=configuration.SHARED_CONF_GROUP)
-
-LOG = logging.getLogger(__name__)
-MSG = utils.VSPMsg
-
-
-def horcmgr_synchronized(func):
-    """Synchronize CCI operations per CCI instance."""
-    @functools.wraps(func)
-    def wrap(self, *args, **kwargs):
-        """Synchronize CCI operations per CCI instance."""
-        @coordination.synchronized(self.lock[args[0]])
-        def func_locked(*_args, **_kwargs):
-            """Execute the wrapped function in a synchronized section."""
-            return func(*_args, **_kwargs)
-        return func_locked(self, *args, **kwargs)
-    return wrap
-
-
-def _is_valid_target(target, target_name, target_ports, is_pair):
-    """Return True if the specified target is valid, False otherwise."""
-    if is_pair:
-        return (target[:utils.PORT_ID_LENGTH] in target_ports and
-                target_name == _PAIR_TARGET_NAME)
-    if (target[:utils.PORT_ID_LENGTH] not in target_ports or
-            not target_name.startswith(utils.TARGET_PREFIX) or
-            target_name == _PAIR_TARGET_NAME):
-        return False
-    return True
-
-
-def find_value(stdout, key):
-    """Return the first match from the given raidcom command output."""
-    match = _HORCM_PATTERNS[key]['pattern'].search(stdout)
-    if match:
-        if _HORCM_PATTERNS[key]['type'] is list:
-            return [
-                value.strip() for value in
-                LDEV_SEP_PATTERN.split(match.group(key))]
-        return _HORCM_PATTERNS[key]['type'](match.group(key))
-    return None
-
-
-def _run_horcmgr(inst):
-    """Return 1 if the CCI instance is running."""
-    result = utils.execute(
-        'env', 'HORCMINST=%s' % inst, 'horcmgr', '-check')
-    return result[0]
-
-
-def _run_horcmshutdown(inst):
-    """Stop the CCI instance and return 0 if successful."""
-    result = utils.execute('horcmshutdown.sh', inst)
-    return result[0]
-
-
-def _run_horcmstart(inst):
-    """Start the CCI instance and return 0 if successful."""
-    result = utils.execute('horcmstart.sh', inst)
-    return result[0]
-
-
-def _check_ldev(ldev_info, ldev, existing_ref):
-    """Check if the LDEV meets the
criteria for being managed by the driver.""" - if ldev_info['sts'] != NORMAL_STS: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - vol_attr = set(ldev_info['vol_attr']) - if (not ldev_info['vol_type'].startswith('OPEN-V') or - len(vol_attr) < 2 or not vol_attr.issubset(_PERMITTED_TYPES)): - msg = utils.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev, - ldevtype=utils.NVOL_LDEV_TYPE) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. - if ldev_info['vol_size'] % utils.GIGABYTE_PER_BLOCK_SIZE: - msg = utils.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - if ldev_info['num_port']: - msg = utils.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - - -class VSPHORCM(common.VSPCommon): - """HORCM interface class for Hitachi VSP Driver.""" - - def __init__(self, conf, storage_protocol, db): - """Initialize instance variables.""" - super(VSPHORCM, self).__init__(conf, storage_protocol, db) - self.conf.append_config_values(horcm_opts) - - self._copy_groups = [None] * _MAX_MUNS - self._pair_targets = [] - self._pattern = { - 'pool': None, - 'p_pool': None, - } - - def run_raidcom(self, *args, **kwargs): - """Run a raidcom command and return its output.""" - if 'success_code' not in kwargs: - kwargs['success_code'] = HORCM_EXIT_CODE - cmd = ['raidcom'] + list(args) + [ - '-s', self.conf.vsp_storage_id, - '-I%s' % self.conf.vsp_horcm_numbers[_HORCMGR]] - return self.run_and_verify_storage_cli(*cmd, **kwargs) - - def _run_pair_cmd(self, command, *args, **kwargs): - """Run a pair-related CCI command and return its output.""" - kwargs['horcmgr'] = _PAIR_HORCMGR - if 'success_code' not in kwargs: - kwargs['success_code'] = HORCM_EXIT_CODE - cmd = [command] + list(args) + [ - '-IM%s' % self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]] - return self.run_and_verify_storage_cli(*cmd, **kwargs) - - def run_storage_cli(self, *cmd, **kwargs): - """Run a CCI command and return its output.""" - interval = kwargs.pop('interval', _EXEC_RETRY_INTERVAL) - flag = {'ignore_enauth': True} - - def _wait_for_horcm_execution(start_time, flag, *cmd, **kwargs): - """Run a CCI command and raise its output.""" - ignore_error = kwargs.pop('ignore_error', []) - no_retry_error = ignore_error + _HORCM_NO_RETRY_ERRORS - success_code = kwargs.pop('success_code', HORCM_EXIT_CODE) - timeout = kwargs.pop('timeout', _EXEC_MAX_WAITTIME) - horcmgr = kwargs.pop('horcmgr', _HORCMGR) - do_login = kwargs.pop('do_login', False) - - result = utils.execute(*cmd, **kwargs) - if _NOT_LOCKED in result[2] and not utils.check_timeout( - start_time, _LOCK_WAITTIME): - LOG.debug( - "The resource group to which the operation object " - "belongs is being locked by other software.") - return - if (result[0] in success_code or - utils.check_timeout(start_time, timeout) or - utils.check_ignore_error(no_retry_error, result[2])): - raise loopingcall.LoopingCallDone(result) - if result[0] == EX_ENAUTH: - if not self._retry_login(flag['ignore_enauth'], do_login): - raise loopingcall.LoopingCallDone(result) - flag['ignore_enauth'] = False - elif result[0] in _HORCM_ERROR: - if not self._start_horcmgr(horcmgr): - raise loopingcall.LoopingCallDone(result) - elif 
result[0] not in _COMMAND_IO_TO_RAID: - raise loopingcall.LoopingCallDone(result) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_horcm_execution, timeutils.utcnow(), - flag, *cmd, **kwargs) - return loop.start(interval=interval).wait() - - def _retry_login(self, ignore_enauth, do_login): - """Return True if login to CCI succeeds, False otherwise.""" - if not ignore_enauth: - if not do_login: - result = self._run_raidcom_login(do_raise=False) - - if do_login or result[0]: - utils.output_log(MSG.HORCM_LOGIN_FAILED, - user=self.conf.vsp_horcm_user) - return False - - return True - - def _run_raidcom_login(self, do_raise=True): - """Log in to CCI and return its output.""" - return self.run_raidcom( - '-login', self.conf.vsp_horcm_user, - self.conf.vsp_horcm_password, - do_raise=do_raise, do_login=True) - - @horcmgr_synchronized - def _restart_horcmgr(self, horcmgr): - """Restart the CCI instance.""" - inst = self.conf.vsp_horcm_numbers[horcmgr] - - def _wait_for_horcm_shutdown(start_time, inst): - """Stop the CCI instance and raise True if it stops.""" - if _run_horcmgr(inst) != _HORCM_RUNNING: - raise loopingcall.LoopingCallDone() - if (_run_horcmshutdown(inst) and - _run_horcmgr(inst) == _HORCM_RUNNING or - utils.check_timeout( - start_time, utils.DEFAULT_PROCESS_WAITTIME)): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_horcm_shutdown, timeutils.utcnow(), inst) - if not loop.start(interval=_HORCM_WAITTIME).wait(): - msg = utils.output_log( - MSG.HORCM_SHUTDOWN_FAILED, - inst=self.conf.vsp_horcm_numbers[horcmgr]) - raise exception.VSPError(msg) - - ret = _run_horcmstart(inst) - if ret and ret != _HORCM_RUNNING: - msg = utils.output_log( - MSG.HORCM_RESTART_FAILED, - inst=self.conf.vsp_horcm_numbers[horcmgr]) - raise exception.VSPError(msg) - - @coordination.synchronized('{self.lock[create_ldev]}') - def create_ldev(self, size, is_vvol=False): - """Create an LDEV of the specified size and the specified type.""" - ldev = super(VSPHORCM, self).create_ldev(size, is_vvol=is_vvol) - self._check_ldev_status(ldev) - return ldev - - def _check_ldev_status(self, ldev, delete=False): - """Wait until the LDEV status changes to the specified status.""" - if not delete: - args = _LDEV_CREATED - msg_id = MSG.LDEV_CREATION_WAIT_TIMEOUT - else: - args = _LDEV_DELETED - msg_id = MSG.LDEV_DELETION_WAIT_TIMEOUT - - def _wait_for_ldev_status(start_time, ldev, *args): - """Raise True if the LDEV is in the specified status.""" - result = self.run_raidcom( - 'get', 'ldev', '-ldev_id', ldev, *args, do_raise=False) - if not result[0]: - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, _LDEV_STATUS_WAITTIME): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_ldev_status, timeutils.utcnow(), ldev, *args) - if not loop.start(interval=_LDEV_CHECK_INTERVAL).wait(): - msg = utils.output_log(msg_id, ldev=ldev) - raise exception.VSPError(msg) - - def create_ldev_on_storage(self, ldev, size, is_vvol): - """Create an LDEV on the storage system.""" - args = ['add', 'ldev', '-ldev_id', ldev, '-capacity', '%sG' % size, - '-emulation', 'OPEN-V', '-pool'] - if is_vvol: - args.append('snap') - else: - args.append(self.conf.vsp_pool) - self.run_raidcom(*args) - - def get_unused_ldev(self): - """Find an unused LDEV and return its LDEV number.""" - if not self.storage_info['ldev_range']: - ldev_info = self.get_ldev_info( - ['ldev'], '-ldev_list', 'undefined', '-cnt', '1') - ldev = 
ldev_info.get('ldev') - else: - ldev = self._find_unused_ldev_by_range() - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log(MSG.NO_AVAILABLE_RESOURCE, resource='LDEV') - raise exception.VSPError(msg) - return ldev - - def _find_unused_ldev_by_range(self): - """Return the LDEV number of an unused LDEV in the LDEV range.""" - success_code = HORCM_EXIT_CODE.union(_INVALID_RANGE) - start, end = self.storage_info['ldev_range'][:2] - - while start <= end: - if end - start + 1 > _GETSTORAGEARRAY_ONCE: - cnt = _GETSTORAGEARRAY_ONCE - else: - cnt = end - start + 1 - - ldev_info = self.get_ldev_info( - ['undefined_ldev'], '-ldev_id', start, '-cnt', cnt, - '-key', 'front_end', success_code=success_code) - ldev = ldev_info.get('undefined_ldev') - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. - if ldev is not None: - return ldev - - start += _GETSTORAGEARRAY_ONCE - - return None - - def get_ldev_info(self, keys, *args, **kwargs): - """Return a dictionary of LDEV-related items.""" - data = {} - result = self.run_raidcom('get', 'ldev', *args, **kwargs) - for key in keys: - data[key] = find_value(result[1], key) - return data - - def copy_on_storage(self, pvol, size, metadata, sync): - """Check if the LDEV can be copied on the storage.""" - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', pvol) - if ldev_info['sts'] != NORMAL_STS: - msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol) - raise exception.VSPError(msg) - - if VVOL_ATTR in ldev_info['vol_attr']: - raise exception.VSPNotSupported() - return super(VSPHORCM, self).copy_on_storage(pvol, size, metadata, - sync) - - @coordination.synchronized('{self.lock[create_pair]}') - def create_pair_on_storage(self, pvol, svol, is_thin): - """Create a copy pair on the storage.""" - path_list = [] - vol_type, pair_info = self._get_vol_type_and_pair_info(pvol) - if vol_type == SVOL: - self._delete_pair_based_on_svol( - pair_info['pvol'], pair_info['svol_info'], - no_restart=True) - if vol_type != PVOL: - self._initialize_pair_connection(pvol) - path_list.append(pvol) - try: - self._initialize_pair_connection(svol) - path_list.append(svol) - self._create_pair_on_storage_core(pvol, svol, is_thin, vol_type) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - for ldev in path_list: - try: - self._terminate_pair_connection(ldev) - except exception.VSPError: - utils.output_log(MSG.UNMAP_LDEV_FAILED, ldev=ldev) - - def _create_pair_on_storage_core(self, pvol, svol, is_thin, vol_type): - """Create a copy pair on the storage depending on the copy method.""" - if is_thin: - self._create_thin_copy_pair(pvol, svol) - - else: - self._create_full_copy_pair(pvol, svol, vol_type) - - def _create_thin_copy_pair(self, pvol, svol): - """Create a THIN copy pair on the storage.""" - snapshot_name = _SNAP_NAME + six.text_type(svol % _SNAP_HASH_SIZE) - self.run_raidcom( - 'add', 'snapshot', '-ldev_id', pvol, svol, '-pool', - self.conf.vsp_thin_pool, '-snapshot_name', - snapshot_name, '-copy_size', self.conf.vsp_copy_speed) - try: - self.wait_thin_copy(svol, PAIR) - self.run_raidcom( - 'modify', 'snapshot', '-ldev_id', svol, - '-snapshot_data', 'create') - self.wait_thin_copy(svol, PSUS) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - interval = self.conf.vsp_async_copy_check_interval - try: - self._delete_thin_copy_pair(pvol, svol, interval) - except exception.VSPError: - 
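                    # Rollback idiom used throughout this module:
                    # save_and_reraise_exception() preserves the original
                    # error while the pair delete below runs as a best
                    # effort; if that cleanup also fails it is only logged,
                    # so the caller always sees the first failure.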
utils.output_log(MSG.DELETE_TI_PAIR_FAILED, pvol=pvol, - svol=svol) - - def _create_full_copy_pair(self, pvol, svol, vol_type): - """Create a FULL copy pair on the storage.""" - mun = 0 - - if vol_type == PVOL: - mun = self._get_unused_mun(pvol) - - copy_group = self._copy_groups[mun] - ldev_name = _LDEV_NAME % (pvol, svol) - restart = False - create = False - - try: - self._add_pair_config(pvol, svol, copy_group, ldev_name, mun) - self._restart_horcmgr(_PAIR_HORCMGR) - restart = True - self._run_pair_cmd( - 'paircreate', '-g', copy_group, '-d', ldev_name, - '-c', self.conf.vsp_copy_speed, - '-vl', '-split', '-fq', 'quick') - create = True - - self._wait_full_copy(svol, set([PSUS, COPY])) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - if create: - try: - self._wait_full_copy(svol, set([PAIR, PSUS, PSUE])) - except exception.VSPError: - utils.output_log(MSG.WAIT_SI_PAIR_STATUS_FAILED, - pvol=pvol, svol=svol) - - interval = self.conf.vsp_async_copy_check_interval - - try: - self._delete_full_copy_pair(pvol, svol, interval) - except exception.VSPError: - utils.output_log(MSG.DELETE_SI_PAIR_FAILED, pvol=pvol, - svol=svol) - - try: - if self._is_smpl(svol): - self._delete_pair_config( - pvol, svol, copy_group, ldev_name) - except exception.VSPError: - utils.output_log(MSG.DELETE_DEVICE_GRP_FAILED, pvol=pvol, - svol=svol) - - if restart: - try: - self._restart_horcmgr(_PAIR_HORCMGR) - except exception.VSPError: - utils.output_log( - MSG.HORCM_RESTART_FOR_SI_FAILED, - inst=self.conf.vsp_horcm_numbers[1]) - - def _get_unused_mun(self, ldev): - """Return the number of an unused mirror unit.""" - pair_list = [] - - for mun in range(_MAX_MUNS): - pair_info = self._get_full_copy_pair_info(ldev, mun) - if not pair_info: - return mun - - pair_list.append((pair_info['svol_info'], mun)) - - for svol_info, mun in pair_list: - if svol_info['is_psus']: - self._delete_pair_based_on_svol( - ldev, svol_info, no_restart=True) - return mun - - utils.output_log(MSG.NO_AVAILABLE_MIRROR_UNIT, - copy_method=utils.FULL, pvol=ldev) - raise exception.VSPBusy() - - def _get_vol_type_and_pair_info(self, ldev): - """Return a tuple of the LDEV's Shadow Image pair status and info.""" - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) - if ldev_info['sts'] != NORMAL_STS: - return (SMPL, None) - - if THIN_ATTR in ldev_info['vol_attr']: - return (PVOL, None) - - if FULL_ATTR in ldev_info['vol_attr']: - pair_info = self._get_full_copy_pair_info(ldev, 0) - if not pair_info: - return (PVOL, None) - - if pair_info['pvol'] != ldev: - return (SVOL, pair_info) - - return (PVOL, None) - - return (SMPL, None) - - def _get_full_copy_info(self, ldev): - """Return a tuple of P-VOL and S-VOL's info of a Shadow Image pair.""" - vol_type, pair_info = self._get_vol_type_and_pair_info(ldev) - svol_info = [] - - if vol_type == SMPL: - return (None, None) - - elif vol_type == SVOL: - return (pair_info['pvol'], [pair_info['svol_info']]) - - for mun in range(_MAX_MUNS): - pair_info = self._get_full_copy_pair_info(ldev, mun) - if pair_info: - svol_info.append(pair_info['svol_info']) - - return (ldev, svol_info) - - @coordination.synchronized('{self.lock[create_pair]}') - def delete_pair(self, ldev, all_split=True): - """Delete the specified LDEV in a synchronized section.""" - super(VSPHORCM, self).delete_pair(ldev, all_split=all_split) - - def delete_pair_based_on_pvol(self, pair_info, all_split): - """Disconnect all volume pairs to which the specified P-VOL belongs.""" - svols = [] - restart = 
False - - try: - for svol_info in pair_info['svol_info']: - if svol_info['is_thin'] or not svol_info['is_psus']: - svols.append(six.text_type(svol_info['ldev'])) - continue - - self.delete_pair_from_storage( - pair_info['pvol'], svol_info['ldev'], False) - - restart = True - - self._terminate_pair_connection(svol_info['ldev']) - - if not svols: - self._terminate_pair_connection(pair_info['pvol']) - - finally: - if restart: - self._restart_horcmgr(_PAIR_HORCMGR) - - if all_split and svols: - utils.output_log( - MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'], - svol=', '.join(svols)) - raise exception.VSPBusy() - - def delete_pair_based_on_svol(self, pvol, svol_info): - """Disconnect all volume pairs to which the specified S-VOL belongs.""" - self._delete_pair_based_on_svol(pvol, svol_info) - - def _delete_pair_based_on_svol(self, pvol, svol_info, no_restart=False): - """Disconnect all volume pairs to which the specified S-VOL belongs.""" - do_restart = False - - if not svol_info['is_psus']: - utils.output_log(MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, - svol=svol_info['ldev']) - raise exception.VSPBusy() - - try: - self.delete_pair_from_storage( - pvol, svol_info['ldev'], svol_info['is_thin']) - do_restart = True - self._terminate_pair_connection(svol_info['ldev']) - self._terminate_pair_connection(pvol) - finally: - if not no_restart and do_restart: - self._restart_horcmgr(_PAIR_HORCMGR) - - def delete_pair_from_storage(self, pvol, svol, is_thin): - """Disconnect the volume pair that consists of the specified LDEVs.""" - interval = self.conf.vsp_async_copy_check_interval - if is_thin: - self._delete_thin_copy_pair(pvol, svol, interval) - else: - self._delete_full_copy_pair(pvol, svol, interval) - - def _delete_thin_copy_pair(self, pvol, svol, interval): - """Disconnect a THIN volume pair.""" - result = self.run_raidcom( - 'get', 'snapshot', '-ldev_id', svol) - if not result[1]: - return - mun = result[1].splitlines()[1].split()[5] - self.run_raidcom( - 'unmap', 'snapshot', '-ldev_id', svol, - success_code=ALL_EXIT_CODE) - self.run_raidcom( - 'delete', 'snapshot', '-ldev_id', pvol, '-mirror_id', mun) - self._wait_thin_copy_deleting(svol, interval=interval) - - def _wait_thin_copy_deleting(self, ldev, **kwargs): - """Wait until the LDEV is no longer in a THIN volume pair.""" - interval = kwargs.pop( - 'interval', self.conf.vsp_async_copy_check_interval) - - def _wait_for_thin_copy_smpl(start_time, ldev, **kwargs): - """Raise True if the LDEV is no longer in a THIN volume pair.""" - timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) - ldev_info = self.get_ldev_info( - ['sts', 'vol_attr'], '-ldev_id', ldev) - if (ldev_info['sts'] != NORMAL_STS or - THIN_ATTR not in ldev_info['vol_attr']): - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, timeout): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_thin_copy_smpl, timeutils.utcnow(), ldev, **kwargs) - if not loop.start(interval=interval).wait(): - msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) - raise exception.VSPError(msg) - - def _delete_full_copy_pair(self, pvol, svol, interval): - """Disconnect a FULL volume pair.""" - stdout = self._run_pairdisplay( - '-d', self.conf.vsp_storage_id, svol, 0) - if not stdout: - return - - copy_group = stdout.splitlines()[2].split()[0] - ldev_name = _LDEV_NAME % (pvol, svol) - - if stdout.splitlines()[1].split()[9] != 'P-VOL': - self._restart_horcmgr(_PAIR_HORCMGR) - try: - self._run_pair_cmd( - 
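                # 'pairsplit -S' dissolves the Shadow Image pair entirely
                # (back to SMPL) rather than merely suspending it; the code
                # then polls for SMPL and drops the device-group definitions.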
'pairsplit', '-g', copy_group, '-d', ldev_name, '-S')
-            self._wait_full_copy(svol, set([SMPL]), interval=interval)
-        finally:
-            if self._is_smpl(svol):
-                self._delete_pair_config(pvol, svol, copy_group, ldev_name)
-
-    def _initialize_pair_connection(self, ldev):
-        """Initialize server-volume connection for volume copy."""
-        port, gid = None, None
-
-        for port, gid in self._pair_targets:
-            try:
-                targets = {
-                    'list': [(port, gid)],
-                    'lun': {},
-                }
-                return self.map_ldev(targets, ldev)
-            except exception.VSPError:
-                utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port,
-                                 id=gid, lun=None)
-
-        msg = utils.output_log(MSG.NO_MAPPING_FOR_LDEV, ldev=ldev)
-        raise exception.VSPError(msg)
-
-    def _terminate_pair_connection(self, ldev):
-        """Terminate server-volume connection for volume copy."""
-        targets = {
-            'list': [],
-        }
-        ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
-        if (ldev_info['sts'] == NORMAL_STS and
-                FULL_ATTR in ldev_info['vol_attr'] or
-                self._get_thin_copy_svol_status(ldev) != SMPL):
-            LOG.debug(
-                'The specified LDEV has pair. Therefore, unmapping '
-                'operation was skipped. '
-                '(LDEV: %(ldev)s, vol_attr: %(info)s)',
-                {'ldev': ldev, 'info': ldev_info['vol_attr']})
-            return
-        self._find_mapped_targets_from_storage(
-            targets, ldev, self.storage_info['controller_ports'], is_pair=True)
-        self.unmap_ldev(targets, ldev)
-
-    def check_param(self):
-        """Check parameter values and consistency among them."""
-        super(VSPHORCM, self).check_param()
-        utils.check_opts(self.conf, horcm_opts)
-        insts = self.conf.vsp_horcm_numbers
-        if len(insts) != 2 or insts[_HORCMGR] == insts[_PAIR_HORCMGR]:
-            msg = utils.output_log(MSG.INVALID_PARAMETER,
-                                   param='vsp_horcm_numbers')
-            raise exception.VSPError(msg)
-        if (not self.conf.vsp_target_ports and
-                not self.conf.vsp_horcm_pair_target_ports):
-            msg = utils.output_log(MSG.INVALID_PARAMETER,
-                                   param='vsp_target_ports or '
-                                         'vsp_horcm_pair_target_ports')
-            raise exception.VSPError(msg)
-        utils.output_log(MSG.SET_CONFIG_VALUE, object='LDEV range',
-                         value=self.storage_info['ldev_range'])
-        for opt in _REQUIRED_HORCM_OPTS:
-            if not self.conf.safe_get(opt):
-                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
-                raise exception.VSPError(msg)
-
-    def _set_copy_groups(self, host_ip):
-        """Initialize an instance variable for Shadow Image copy groups."""
-        serial = self.conf.vsp_storage_id
-        inst = self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]
-
-        for mun in range(_MAX_MUNS):
-            copy_group = _COPY_GROUP % (host_ip, serial, inst, mun)
-            self._copy_groups[mun] = copy_group
-        utils.output_log(MSG.SET_CONFIG_VALUE, object='copy group list',
-                         value=self._copy_groups)
-
-    def connect_storage(self):
-        """Prepare for using the storage."""
-        self._set_copy_groups(CONF.my_ip)
-
-        if self.conf.vsp_horcm_add_conf:
-            self._create_horcm_conf()
-            self._create_horcm_conf(horcmgr=_PAIR_HORCMGR)
-        self._restart_horcmgr(_HORCMGR)
-        self._restart_horcmgr(_PAIR_HORCMGR)
-        self._run_raidcom_login()
-        super(VSPHORCM, self).connect_storage()
-
-        self._pattern['p_pool'] = re.compile(
-            (r"^%03d +\S+ +\d+ +\d+ +(?P<tp_cap>\d+) +\d+ +\d+ +\d+ +\w+ +"
-             r"\d+ +(?P<tl_cap>\d+)") % self.storage_info['pool_id'], re.M)
-        self._pattern['pool'] = re.compile(
-            r"^%03d +\S+ +\d+ +\S+ +\w+ +\d+ +\w+ +\d+ +(?P<vcap>\S+)" %
-            self.storage_info['pool_id'], re.M)
-
-    def _find_lun(self, ldev, port, gid):
-        """Return LUN determined by the given arguments."""
-        result = self.run_raidcom(
-            'get', 'lun', '-port', '-'.join([port, gid]))
-        match = re.search(
-            r'^%(port)s +%(gid)s +\S+ +(?P<lun>\d+) +1 +%(ldev)s ' % {
-                'port': port, 'gid': gid, 'ldev': ldev}, result[1], re.M)
-        if match:
-            return match.group('lun')
-        return None
-
-    def _find_mapped_targets_from_storage(self, targets, ldev,
-                                          target_ports, is_pair=False):
-        """Update port-gid list for the specified LDEV."""
-        ldev_info = self.get_ldev_info(['ports'], '-ldev_id', ldev)
-        if not ldev_info['ports']:
-            return
-        for ports_strings in ldev_info['ports']:
-            ports = ports_strings.split()
-            if _is_valid_target(ports[0], ports[2], target_ports, is_pair):
-                targets['list'].append(ports[0])
-
-    def find_mapped_targets_from_storage(self, targets, ldev, target_ports):
-        """Update port-gid list for the specified LDEV."""
-        self._find_mapped_targets_from_storage(targets, ldev, target_ports)
-
-    def get_unmap_targets_list(self, target_list, mapped_list):
-        """Return a list of IDs of ports that need to be disconnected."""
-        unmap_list = []
-        for mapping_info in mapped_list:
-            if (mapping_info[:utils.PORT_ID_LENGTH],
-                    mapping_info.split('-')[2]) in target_list:
-                unmap_list.append(mapping_info)
-        return unmap_list
-
-    def unmap_ldev(self, targets, ldev):
-        """Delete the LUN between the specified LDEV and port-gid."""
-        interval = _LUN_RETRY_INTERVAL
-        success_code = HORCM_EXIT_CODE.union([EX_ENOOBJ])
-        timeout = utils.DEFAULT_PROCESS_WAITTIME
-        for target in targets['list']:
-            self.run_raidcom(
-                'delete', 'lun', '-port', target, '-ldev_id', ldev,
-                interval=interval, success_code=success_code, timeout=timeout)
-            LOG.debug(
-                'Deleted logical unit path of the specified logical '
-                'device. (LDEV: %(ldev)s, target: %(target)s)',
-                {'ldev': ldev, 'target': target})
-
-    def find_all_mapped_targets_from_storage(self, targets, ldev):
-        """Add all port-gids connected with the LDEV to the list."""
-        ldev_info = self.get_ldev_info(['ports'], '-ldev_id', ldev)
-        if ldev_info['ports']:
-            for port in ldev_info['ports']:
-                targets['list'].append(port.split()[0])
-
-    def delete_target_from_storage(self, port, gid):
-        """Delete the host group or the iSCSI target from the port."""
-        result = self.run_raidcom(
-            'delete', 'host_grp', '-port',
-            '-'.join([port, gid]), do_raise=False)
-        if result[0]:
-            utils.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid)
-
-    def _run_add_lun(self, ldev, port, gid, lun=None):
-        """Create a LUN between the specified LDEV and port-gid."""
-        args = ['add', 'lun', '-port', '-'.join([port, gid]), '-ldev_id', ldev]
-        ignore_error = [_LU_PATH_DEFINED]
-        if lun:
-            args.extend(['-lun_id', lun])
-            ignore_error = [_ANOTHER_LDEV_MAPPED]
-        result = self.run_raidcom(
-            *args, ignore_error=ignore_error,
-            interval=_LUN_RETRY_INTERVAL, timeout=_LUN_MAX_WAITTIME)
-        if not lun:
-            if result[0] == EX_CMDRJE:
-                lun = self._find_lun(ldev, port, gid)
-                LOG.debug(
-                    'A logical unit path has already been defined in the '
-                    'specified logical device. (LDEV: %(ldev)s, '
-                    'port: %(port)s, gid: %(gid)s, lun: %(lun)s)',
-                    {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun})
-            else:
-                lun = find_value(result[1], 'lun')
-        elif _ANOTHER_LDEV_MAPPED in result[2]:
-            utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid,
-                             lun=lun)
-            return None
-        LOG.debug(
-            'Created logical unit path to the specified logical device. 
' - '(LDEV: %(ldev)s, port: %(port)s, ' - 'gid: %(gid)s, lun: %(lun)s)', - {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) - return lun - - def map_ldev(self, targets, ldev): - """Create the path between the server and the LDEV and return LUN.""" - port, gid = targets['list'][0] - lun = self._run_add_lun(ldev, port, gid) - targets['lun'][port] = True - for port, gid in targets['list'][1:]: - try: - lun2 = self._run_add_lun(ldev, port, gid, lun=lun) - if lun2 is not None: - targets['lun'][port] = True - except exception.VSPError: - utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, - id=gid, lun=lun) - return lun - - def extend_ldev(self, ldev, old_size, new_size): - """Extend the specified LDEV to the specified new size.""" - timeout = _EXTEND_WAITTIME - self.run_raidcom('extend', 'ldev', '-ldev_id', ldev, '-capacity', - '%sG' % (new_size - old_size), timeout=timeout) - - def get_pool_info(self): - """Return the total and free capacity of the storage pool.""" - result = self.run_raidcom('get', 'dp_pool') - p_pool_match = self._pattern['p_pool'].search(result[1]) - - result = self.run_raidcom('get', 'pool', '-key', 'opt') - pool_match = self._pattern['pool'].search(result[1]) - - if not p_pool_match or not pool_match: - msg = utils.output_log(MSG.POOL_NOT_FOUND, - pool=self.storage_info['pool_id']) - raise exception.VSPError(msg) - - tp_cap = float(p_pool_match.group('tp_cap')) / units.Ki - tl_cap = float(p_pool_match.group('tl_cap')) / units.Ki - vcap = 'infinite' if pool_match.group('vcap') == _INFINITE else ( - int(pool_match.group('vcap'))) - - if vcap == 'infinite': - return 'unknown', 'unknown' - else: - total_gb = int(math.floor(tp_cap * (vcap / 100.0))) - free_gb = int(math.floor(total_gb - tl_cap)) - return total_gb, free_gb - - def discard_zero_page(self, volume): - """Return the volume's no-data pages to the storage pool.""" - ldev = utils.get_ldev(volume) - try: - self.run_raidcom( - 'modify', 'ldev', '-ldev_id', ldev, - '-status', 'discard_zero_page') - except exception.VSPError: - utils.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev) - - def wait_thin_copy(self, ldev, status, **kwargs): - """Wait until the S-VOL status changes to the specified status.""" - interval = kwargs.pop( - 'interval', self.conf.vsp_copy_check_interval) - - def _wait_for_thin_copy_status(start_time, ldev, status, **kwargs): - """Raise True if the S-VOL is in the specified status.""" - timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) - if self._get_thin_copy_svol_status(ldev) == status: - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, timeout): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_thin_copy_status, timeutils.utcnow(), - ldev, status, **kwargs) - if not loop.start(interval=interval).wait(): - msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) - raise exception.VSPError(msg) - - def _get_thin_copy_svol_status(self, ldev): - """Return the status of the S-VOL in a THIN volume pair.""" - result = self.run_raidcom( - 'get', 'snapshot', '-ldev_id', ldev) - if not result[1]: - return SMPL - return _STATUS_TABLE.get(result[1].splitlines()[1].split()[2], UNKN) - - def _create_horcm_conf(self, horcmgr=_HORCMGR): - """Create a CCI configuration file.""" - inst = self.conf.vsp_horcm_numbers[horcmgr] - serial = self.conf.vsp_storage_id - filename = '/etc/horcm%s.conf' % inst - port = _DEFAULT_PORT_BASE + inst - found = False - if not os.path.exists(filename): - file_str = """ 
-HORCM_MON -#ip_address service poll(10ms) timeout(10ms) -127.0.0.1 %16d 6000 3000 -HORCM_CMD -""" % port - else: - file_str = cinder_utils.read_file_as_root(filename) - if re.search(r'^\\\\.\\CMD-%s:/dev/sd$' % serial, file_str, re.M): - found = True - if not found: - repl_str = r'\1\\\\.\\CMD-%s:/dev/sd\n' % serial - file_str = CMD_PATTERN.sub(repl_str, file_str) - result = utils.execute('tee', filename, process_input=file_str) - if result[0]: - msg = utils.output_log( - MSG.CREATE_HORCM_CONF_FILE_FAILED, file=filename, - ret=result[0], err=result[2]) - raise exception.VSPError(msg) - - def init_cinder_hosts(self, **kwargs): - """Initialize server-storage connection.""" - targets = { - 'info': {}, - 'list': [], - 'iqns': {}, - } - super(VSPHORCM, self).init_cinder_hosts(targets=targets) - if self.storage_info['pair_ports']: - targets['info'] = {} - ports = self._get_pair_ports() - for port in ports: - targets['info'][port] = True - self._init_pair_targets(targets['info']) - - def _init_pair_targets(self, targets_info): - """Initialize server-storage connection for volume copy.""" - for port in targets_info.keys(): - if not targets_info[port]: - continue - result = self.run_raidcom('get', 'host_grp', '-port', port) - gid = find_value(result[1], 'pair_gid') - if not gid: - try: - connector = { - 'ip': _PAIR_TARGET_NAME_BODY, - 'wwpns': [_PAIR_TARGET_NAME_BODY], - } - target_name, gid = self.create_target_to_storage( - port, connector, None) - utils.output_log(MSG.OBJECT_CREATED, - object='a target for pair operation', - details='port: %(port)s, gid: %(gid)s, ' - 'target_name: %(target)s' % - {'port': port, 'gid': gid, - 'target': target_name}) - except exception.VSPError: - utils.output_log(MSG.CREATE_HOST_GROUP_FAILED, port=port) - continue - self._pair_targets.append((port, gid)) - - if not self._pair_targets: - msg = utils.output_log(MSG.ADD_PAIR_TARGET_FAILED) - raise exception.VSPError(msg) - self._pair_targets.sort(reverse=True) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='port-gid list for pair operation', - value=self._pair_targets) - - @coordination.synchronized('{self.lock[create_ldev]}') - def delete_ldev_from_storage(self, ldev): - """Delete the specified LDEV from the storage.""" - self._delete_ldev_from_storage(ldev) - self._check_ldev_status(ldev, delete=True) - - def _delete_ldev_from_storage(self, ldev): - """Delete the specified LDEV from the storage.""" - result = self.run_raidcom( - 'get', 'ldev', '-ldev_id', ldev, *_LDEV_DELETED, do_raise=False) - if not result[0]: - utils.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev) - return - self.run_raidcom('delete', 'ldev', '-ldev_id', ldev) - - def _run_pairdisplay(self, *args): - """Execute Shadow Image pairdisplay command.""" - result = self._run_pair_cmd( - 'pairdisplay', '-CLI', *args, do_raise=False, - success_code=HORCM_EXIT_CODE.union(_NO_SUCH_DEVICE)) - return result[1] - - def _check_copy_grp(self, copy_group): - """Return the number of device groups in the specified copy group.""" - count = 0 - result = self.run_raidcom('get', 'copy_grp') - for line in result[1].splitlines()[1:]: - line = line.split() - if line[0] == copy_group: - count += 1 - if count == 2: - break - return count - - def _check_device_grp(self, group_name, ldev, ldev_name=None): - """Return True if the LDEV is in the device group, False otherwise.""" - result = self.run_raidcom( - 'get', 'device_grp', '-device_grp_name', group_name) - for line in result[1].splitlines()[1:]: - line = line.split() - if int(line[2]) == ldev: - if not ldev_name: - 
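                    # 'raidcom get device_grp' lines look roughly like
                    #     <group name> <device name> <LDEV#> ...
                    # so line[2] is the LDEV number matched above and line[1]
                    # is the device name compared below; with no ldev_name
                    # given, membership of the LDEV alone suffices.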
return True - else: - return line[1] == ldev_name - return False - - def _is_smpl(self, ldev): - """Return True if the status of the LDEV is SMPL, False otherwise.""" - stdout = self._run_pairdisplay( - '-d', self.conf.vsp_storage_id, ldev, 0) - if not stdout: - return True - return stdout.splitlines()[2].split()[9] in _SMPL_STAUS - - def _get_full_copy_pair_info(self, ldev, mun): - """Return info of the Shadow Image volume pair.""" - stdout = self._run_pairdisplay( - '-d', self.conf.vsp_storage_id, ldev, mun) - if not stdout: - return None - line = stdout.splitlines()[2].split() - if not line[8].isdigit() or not line[12].isdigit(): - return None - pvol, svol = int(line[12]), int(line[8]) - LOG.debug( - 'Full copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, ' - 'status: %(status)s)', - {'pvol': pvol, 'svol': svol, 'status': line[10]}) - return { - 'pvol': pvol, - 'svol_info': { - 'ldev': svol, - 'is_psus': line[10] == "SSUS", - 'is_thin': False, - }, - } - - def _get_thin_copy_info(self, ldev): - """Return info of the Thin Image volume pair.""" - result = self.run_raidcom( - 'get', 'snapshot', '-ldev_id', ldev) - if not result[1]: - return (None, None) - - line = result[1].splitlines()[1].split() - is_psus = _STATUS_TABLE.get(line[2]) == PSUS - if line[1] == "P-VOL": - pvol, svol = ldev, int(line[6]) - else: - pvol, svol = int(line[6]), ldev - LOG.debug( - 'Thin copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, ' - 'status: %(status)s)', - {'pvol': pvol, 'svol': svol, 'status': line[2]}) - return (pvol, [{'ldev': svol, 'is_thin': True, 'is_psus': is_psus}]) - - def get_pair_info(self, ldev): - """Return info of the volume pair.""" - pair_info = {} - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) - if ldev_info['sts'] != NORMAL_STS or _PAIR_ATTRS.isdisjoint( - ldev_info['vol_attr']): - return None - - if FULL_ATTR in ldev_info['vol_attr']: - pvol, svol_info = self._get_full_copy_info(ldev) - # When 'pvol' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. - if pvol is not None: - pair_info['pvol'] = pvol - pair_info.setdefault('svol_info', []) - pair_info['svol_info'].extend(svol_info) - - if THIN_ATTR in ldev_info['vol_attr']: - pvol, svol_info = self._get_thin_copy_info(ldev) - # When 'pvol' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. 
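            # (LDEV numbering starts at 0, so a bare truthiness test such as
            # `if pvol:` would misread a P-VOL at LDEV 0 as "no P-VOL";
            # hence the explicit `is not None` comparison below.)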
- if pvol is not None: - pair_info['pvol'] = pvol - pair_info.setdefault('svol_info', []) - pair_info['svol_info'].extend(svol_info) - - return pair_info - - def _get_pair_ports(self): - """Return a list of ports used for volume pair management.""" - return (self.storage_info['pair_ports'] or - self.storage_info['controller_ports']) - - def _add_pair_config(self, pvol, svol, copy_group, ldev_name, mun): - """Create device groups and a copy group for the SI volume pair.""" - pvol_group = copy_group + 'P' - svol_group = copy_group + 'S' - self.run_raidcom( - 'add', 'device_grp', '-device_grp_name', - pvol_group, ldev_name, '-ldev_id', pvol) - self.run_raidcom( - 'add', 'device_grp', '-device_grp_name', - svol_group, ldev_name, '-ldev_id', svol) - nr_copy_groups = self._check_copy_grp(copy_group) - if nr_copy_groups == 1: - self.run_raidcom( - 'delete', 'copy_grp', '-copy_grp_name', copy_group) - if nr_copy_groups != 2: - self.run_and_verify_storage_cli( - 'raidcom', 'add', 'copy_grp', '-copy_grp_name', - copy_group, pvol_group, svol_group, '-mirror_id', mun, - '-s', self.conf.vsp_storage_id, - '-IM%s' % self.conf.vsp_horcm_numbers[_HORCMGR], - success_code=HORCM_EXIT_CODE) - - def _delete_pair_config(self, pvol, svol, copy_group, ldev_name): - """Delete specified LDEVs from Shadow Image device groups.""" - pvol_group = copy_group + 'P' - svol_group = copy_group + 'S' - if self._check_device_grp(pvol_group, pvol, ldev_name=ldev_name): - self.run_raidcom( - 'delete', 'device_grp', '-device_grp_name', - pvol_group, '-ldev_id', pvol) - if self._check_device_grp(svol_group, svol, ldev_name=ldev_name): - self.run_raidcom( - 'delete', 'device_grp', '-device_grp_name', - svol_group, '-ldev_id', svol) - - def _wait_full_copy(self, ldev, status, **kwargs): - """Wait until the LDEV status changes to the specified status.""" - interval = kwargs.pop( - 'interval', self.conf.vsp_copy_check_interval) - - def _wait_for_full_copy_pair_status(start_time, ldev, - status, **kwargs): - """Raise True if the LDEV is in the specified status.""" - timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) - if self._run_pairevtwait(ldev) in status: - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, timeout): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_full_copy_pair_status, timeutils.utcnow(), - ldev, status, **kwargs) - if not loop.start(interval=interval).wait(): - msg = utils.output_log(MSG.SI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) - raise exception.VSPError(msg) - - def wait_full_copy_completion(self, pvol, svol): - """Wait until the Shadow Image volume copy has finished.""" - self._wait_full_copy(svol, set([PSUS, PSUE]), - timeout=utils.MAX_PROCESS_WAITTIME) - if self._run_pairevtwait(svol) == PSUE: - msg = utils.output_log(MSG.VOLUME_COPY_FAILED, - copy_method=utils.FULL, pvol=pvol, - svol=svol) - raise exception.VSPError(msg) - - def _run_pairevtwait(self, ldev): - """Execute Shadow Image pairevtwait command.""" - result = self._run_pair_cmd( - 'pairevtwait', '-d', self.conf.vsp_storage_id, - ldev, '-nowaits') - return result[0] - - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - """Return the size[GB] of the specified LDEV.""" - ldev_info = self.get_ldev_info( - _CHECK_KEYS, '-ldev_id', ldev, do_raise=False) - _check_ldev(ldev_info, ldev, existing_ref) - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. 
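        # 512-byte blocks means 1 GiB == 1024 ** 3 / 512 == 2097152 blocks,
        # which is presumably the value of utils.GIGABYTE_PER_BLOCK_SIZE;
        # _check_ldev() has already rejected LDEVs whose block count is not
        # an exact multiple, so the division below is exact.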
-        return ldev_info['vol_size'] / utils.GIGABYTE_PER_BLOCK_SIZE
-
-    def get_pool_id(self):
-        """Return the pool number of vsp_pool."""
-        pool_id = super(VSPHORCM, self).get_pool_id()
-        if pool_id is None:
-            pool = self.conf.vsp_pool
-            result = self.run_raidcom('get', 'pool', '-key', 'opt')
-            for line in result[1].splitlines()[1:]:
-                line = line.split()
-                if line[3] == pool:
-                    return int(line[0])
-        return pool_id
-
-    def config_lock(self):
-        """Initialize lock resource names."""
-        for key in ['create_ldev', 'create_pair']:
-            self.lock[key] = '_'.join([key, self.conf.vsp_storage_id])
-        self.lock[_HORCMGR] = (
-            'horcmgr_%s' % self.conf.vsp_horcm_numbers[_HORCMGR])
-        self.lock[_PAIR_HORCMGR] = (
-            'horcmgr_%s' % self.conf.vsp_horcm_numbers[_PAIR_HORCMGR])
-
-    @horcmgr_synchronized
-    def _start_horcmgr(self, horcmgr):
-        """Start the CCI instance and return True if successful."""
-        inst = self.conf.vsp_horcm_numbers[horcmgr]
-        ret = 0
-        if _run_horcmgr(inst) != _HORCM_RUNNING:
-            ret = _run_horcmstart(inst)
-        if ret and ret != _HORCM_RUNNING:
-            utils.output_log(MSG.HORCM_START_FAILED, inst=inst)
-            return False
-        return True
-
-    def output_param_to_log(self):
-        """Output configuration parameter values to the log file."""
-        super(VSPHORCM, self).output_param_to_log()
-        utils.output_opts(self.conf, horcm_opts)
-
-    def get_storage_cli_info(self):
-        """Return a tuple of the storage CLI name and its version."""
-        version = 'N/A'
-        result = utils.execute('raidqry', '-h')
-        match = re.search(r'^Ver&Rev: +(?P<version>\S+)', result[1], re.M)
-        if match:
-            version = match.group('version')
-        return ('Command Control Interface', version)
-
-    def check_vvol(self, ldev):
-        """Return True if the specified LDEV is V-VOL, False otherwise."""
-        ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
-        if ldev_info['sts'] != NORMAL_STS:
-            return False
-        return VVOL_ATTR in ldev_info['vol_attr']
diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py b/cinder/volume/drivers/hitachi/vsp_horcm_fc.py
deleted file mode 100644
index 917e63ef732..00000000000
--- a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright (C) 2016, Hitachi, Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
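Nearly every wait in the module above (_check_ldev_status, _wait_thin_copy_deleting, _wait_full_copy, run_storage_cli) is built on the same oslo.service polling idiom. A distilled, self-contained sketch; wait_until and its parameters are illustrative helpers, not part of the driver:

    from oslo_service import loopingcall
    from oslo_utils import timeutils


    def wait_until(predicate, interval=1, timeout=120):
        """Poll predicate() until it returns True; return False on timeout."""
        def _poll(start_time):
            if predicate():
                raise loopingcall.LoopingCallDone(True)
            if timeutils.is_older_than(start_time, timeout):
                raise loopingcall.LoopingCallDone(False)

        loop = loopingcall.FixedIntervalLoopingCall(_poll, timeutils.utcnow())
        return loop.start(interval=interval).wait()

Raising LoopingCallDone from inside the callback is what carries the result out of the loop; the driver's variants simply swap a raidcom or pairdisplay status check in for predicate().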
-# -"""HORCM interface fibre channel module for Hitachi VSP Driver.""" - -import re - -from oslo_log import log as logging - -from cinder import exception -from cinder.volume.drivers.hitachi import vsp_horcm as horcm -from cinder.volume.drivers.hitachi import vsp_utils as utils -from cinder.zonemanager import utils as fczm_utils - -_FC_LINUX_MODE_OPTS = ['-host_mode', 'LINUX'] -_HOST_GROUPS_PATTERN = re.compile( - r"^CL\w-\w+ +(?P\d+) +%s(?!pair00 )\S* +\d+ " % utils.TARGET_PREFIX, - re.M) -_FC_PORT_PATTERN = re.compile( - (r"^(CL\w-\w)\w* +(?:FIBRE|FCoE) +TAR +\w+ +\w+ +\w +\w+ +Y +" - r"\d+ +\d+ +(\w{16})"), re.M) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -class VSPHORCMFC(horcm.VSPHORCM): - """HORCM interface fibre channel class for Hitachi VSP Driver.""" - - def __init__(self, conf, storage_protocol, db): - """Initialize instance variables.""" - super(VSPHORCMFC, self).__init__(conf, storage_protocol, db) - self._lookup_service = fczm_utils.create_lookup_service() - - def connect_storage(self): - """Prepare for using the storage.""" - target_ports = self.conf.vsp_target_ports - compute_target_ports = self.conf.vsp_compute_target_ports - pair_target_ports = self.conf.vsp_horcm_pair_target_ports - - super(VSPHORCMFC, self).connect_storage() - result = self.run_raidcom('get', 'port') - for port, wwn in _FC_PORT_PATTERN.findall(result[1]): - if target_ports and port in target_ports: - self.storage_info['controller_ports'].append(port) - self.storage_info['wwns'][port] = wwn - if compute_target_ports and port in compute_target_ports: - self.storage_info['compute_ports'].append(port) - self.storage_info['wwns'][port] = wwn - if pair_target_ports and port in pair_target_ports: - self.storage_info['pair_ports'].append(port) - - self.check_ports_info() - if pair_target_ports and not self.storage_info['pair_ports']: - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Pair target ports") - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='pair target port list', - value=self.storage_info['pair_ports']) - utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list', - value=self.storage_info['wwns']) - - def create_target_to_storage(self, port, connector, hba_ids): - """Create a host group on the specified port.""" - wwpns = self.get_hba_ids_from_connector(connector) - target_name = utils.TARGET_PREFIX + min(wwpns) - try: - result = self.run_raidcom( - 'add', 'host_grp', '-port', port, '-host_grp_name', - target_name) - except exception.VSPError: - result = self.run_raidcom('get', 'host_grp', '-port', port) - hostgroup_pt = re.compile( - r"^CL\w-\w+ +(?P\d+) +%s +\d+ " % - target_name, re.M) - gid = hostgroup_pt.findall(result[1]) - if gid: - return target_name, gid[0] - else: - raise - return target_name, horcm.find_value(result[1], 'gid') - - def set_hba_ids(self, port, gid, hba_ids): - """Connect all specified HBAs with the specified port.""" - registered_wwns = [] - for wwn in hba_ids: - try: - self.run_raidcom( - 'add', 'hba_wwn', '-port', - '-'.join([port, gid]), '-hba_wwn', wwn) - registered_wwns.append(wwn) - except exception.VSPError: - utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid, - wwn=wwn) - if not registered_wwns: - msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port, - gid=gid) - raise exception.VSPError(msg) - - def set_target_mode(self, port, gid): - """Configure the host group to meet the environment.""" - self.run_raidcom( - 'modify', 'host_grp', '-port', - '-'.join([port, gid]), 
*_FC_LINUX_MODE_OPTS, - success_code=horcm.ALL_EXIT_CODE) - - def find_targets_from_storage(self, targets, connector, target_ports): - """Find mapped ports, memorize them and return unmapped port count.""" - nr_not_found = 0 - old_target_name = None - if 'ip' in connector: - old_target_name = utils.TARGET_PREFIX + connector['ip'] - success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ]) - wwpns = self.get_hba_ids_from_connector(connector) - wwpns_pattern = re.compile( - r'^CL\w-\w+ +\d+ +\S+ +(%s) ' % '|'.join(wwpns), re.M | re.I) - target_name = utils.TARGET_PREFIX + min(wwpns) - - for port in target_ports: - targets['info'][port] = False - - result = self.run_raidcom( - 'get', 'hba_wwn', '-port', port, target_name, - success_code=success_code) - wwpns = wwpns_pattern.findall(result[1]) - if not wwpns and old_target_name: - result = self.run_raidcom( - 'get', 'hba_wwn', '-port', port, old_target_name, - success_code=success_code) - wwpns = wwpns_pattern.findall(result[1]) - if wwpns: - gid = result[1].splitlines()[1].split()[1] - targets['info'][port] = True - targets['list'].append((port, gid)) - LOG.debug( - 'Found wwpns in host group immediately. ' - '(port: %(port)s, gid: %(gid)s, wwpns: %(wwpns)s)', - {'port': port, 'gid': gid, 'wwpns': wwpns}) - continue - - result = self.run_raidcom( - 'get', 'host_grp', '-port', port) - for gid in _HOST_GROUPS_PATTERN.findall(result[1]): - result = self.run_raidcom( - 'get', 'hba_wwn', '-port', '-'.join([port, gid])) - wwpns = wwpns_pattern.findall(result[1]) - if wwpns: - targets['info'][port] = True - targets['list'].append((port, gid)) - LOG.debug( - 'Found wwpns in host group. (port: %(port)s, ' - 'gid: %(gid)s, wwpns: %(wwpns)s)', - {'port': port, 'gid': gid, 'wwpns': wwpns}) - break - else: - nr_not_found += 1 - - return nr_not_found - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - conn_info = super(VSPHORCMFC, self).initialize_connection( - volume, connector) - if self.conf.vsp_zoning_request: - utils.update_conn_info(conn_info, connector, self._lookup_service) - return conn_info - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector): - """Terminate connection between the server and the volume.""" - conn_info = super(VSPHORCMFC, self).terminate_connection( - volume, connector) - if self.conf.vsp_zoning_request and ( - conn_info and conn_info['data']['target_wwn']): - utils.update_conn_info(conn_info, connector, self._lookup_service) - return conn_info diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py b/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py deleted file mode 100644 index 1b652fae542..00000000000 --- a/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
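The FC ``find_targets_from_storage`` method above narrows candidate host groups by matching the connector's WWPNs against :command:`raidcom get hba_wwn` output with a dynamically built, case-insensitive pattern. The stand-alone sketch below isolates that matching step; the sample output layout and all values are assumptions for illustration only.

.. code-block:: python

    import re

    # Invented sample of "raidcom get hba_wwn" output; the matching step
    # only relies on the port, gid, group-name and WWN columns.
    SAMPLE = """\
    PORT GID GROUP_NAME Serial# HWWN
    CL1-A 0 HBSD-target 0123456789abcdef -
    CL1-A 0 HBSD-target fedcba9876543210 -
    """

    wwpns = ['0123456789ABCDEF']  # WWPNs reported by the connector
    wwpns_pattern = re.compile(
        r'^CL\w-\w+ +\d+ +\S+ +(%s) ' % '|'.join(wwpns), re.M | re.I)

    # findall() returns the connector WWPNs already registered on the port,
    # so an empty result means no matching host group exists there yet.
    print(wwpns_pattern.findall(SAMPLE))  # -> ['0123456789abcdef']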
-# -"""HORCM interface iSCSI module for Hitachi VSP Driver.""" - -import re - -from oslo_log import log as logging - -from cinder import exception -from cinder.volume.drivers.hitachi import vsp_horcm as horcm -from cinder.volume.drivers.hitachi import vsp_utils as utils - -_ISCSI_LINUX_MODE_OPTS = ['-host_mode', 'LINUX'] -_ISCSI_HOST_MODE_OPT = '-host_mode_opt' -_ISCSI_HMO_REPORT_FULL_PORTAL = 83 -_ISCSI_TARGETS_PATTERN = re.compile( - (r"^CL\w-\w+ +(?P\d+) +%s(?!pair00 )\S* +(?P\S+) +" - r"\w+ +\w +\d+ ") % utils.TARGET_PREFIX, re.M) -_ISCSI_PORT_PATTERN = re.compile( - r"^(CL\w-\w)\w* +ISCSI +TAR +\w+ +\w+ +\w +\w+ +Y ", re.M) -_ISCSI_IPV4_ADDR_PATTERN = re.compile( - r"^IPV4_ADDR +: +(?P\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$", re.M) -_ISCSI_TCP_PORT_PATTERN = re.compile( - r'^TCP_PORT\ +:\ +(?P\d+)$', re.M) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -class VSPHORCMISCSI(horcm.VSPHORCM): - """HORCM interface iscsi class for Hitachi VSP Driver.""" - - def connect_storage(self): - """Prepare for using the storage.""" - target_ports = self.conf.vsp_target_ports - compute_target_ports = self.conf.vsp_compute_target_ports - pair_target_ports = self.conf.vsp_horcm_pair_target_ports - - super(VSPHORCMISCSI, self).connect_storage() - result = self.run_raidcom('get', 'port') - for port in _ISCSI_PORT_PATTERN.findall(result[1]): - if (target_ports and port in target_ports and - self._set_target_portal(port)): - self.storage_info['controller_ports'].append(port) - if (compute_target_ports and port in compute_target_ports and - (port in self.storage_info['portals'] or - self._set_target_portal(port))): - self.storage_info['compute_ports'].append(port) - if pair_target_ports and port in pair_target_ports: - self.storage_info['pair_ports'].append(port) - - self.check_ports_info() - if pair_target_ports and not self.storage_info['pair_ports']: - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Pair target ports") - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='pair target port list', - value=self.storage_info['pair_ports']) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='port- list', - value=self.storage_info['portals']) - - def _set_target_portal(self, port): - """Get port info and store it in an instance variable.""" - ipv4_addr = None - tcp_port = None - result = self.run_raidcom( - 'get', 'port', '-port', port, '-key', 'opt') - match = _ISCSI_IPV4_ADDR_PATTERN.search(result[1]) - if match: - ipv4_addr = match.group('ipv4_addr') - match = _ISCSI_TCP_PORT_PATTERN.search(result[1]) - if match: - tcp_port = match.group('tcp_port') - if not ipv4_addr or not tcp_port: - return False - self.storage_info['portals'][port] = ':'.join( - [ipv4_addr, tcp_port]) - return True - - def create_target_to_storage(self, port, connector, hba_ids): - """Create an iSCSI target on the specified port.""" - target_name = utils.TARGET_PREFIX + connector['ip'] - args = [ - 'add', 'host_grp', '-port', port, '-host_grp_name', target_name] - if hba_ids: - args.extend(['-iscsi_name', hba_ids + utils.TARGET_IQN_SUFFIX]) - try: - result = self.run_raidcom(*args) - except exception.VSPError: - result = self.run_raidcom('get', 'host_grp', '-port', port) - hostgroup_pt = re.compile( - r"^CL\w-\w+ +(?P\d+) +%s +\S+ " % - target_name.replace('.', r'\.'), re.M) - gid = hostgroup_pt.findall(result[1]) - if gid: - return target_name, gid[0] - else: - raise - return target_name, horcm.find_value(result[1], 'gid') - - def set_hba_ids(self, port, gid, hba_ids): - """Connect the 
specified HBA with the specified port.""" - self.run_raidcom( - 'add', 'hba_iscsi', '-port', '-'.join([port, gid]), - '-hba_iscsi_name', hba_ids) - - def set_target_mode(self, port, gid): - """Configure the iSCSI target to meet the environment.""" - hostmode_setting = [] - hostmode_setting[:] = _ISCSI_LINUX_MODE_OPTS - hostmode_setting.append(_ISCSI_HOST_MODE_OPT) - hostmode_setting.append(_ISCSI_HMO_REPORT_FULL_PORTAL) - self.run_raidcom( - 'modify', 'host_grp', '-port', - '-'.join([port, gid]), *hostmode_setting) - - def find_targets_from_storage(self, targets, connector, target_ports): - """Find mapped ports, memorize them and return unmapped port count.""" - nr_not_found = 0 - target_name = utils.TARGET_PREFIX + connector['ip'] - success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ]) - iqn = self.get_hba_ids_from_connector(connector) - iqn_pattern = re.compile( - r'^CL\w-\w+ +\d+ +\S+ +%s ' % iqn, re.M) - - for port in target_ports: - targets['info'][port] = False - - result = self.run_raidcom( - 'get', 'hba_iscsi', '-port', port, target_name, - success_code=success_code) - if iqn_pattern.search(result[1]): - gid = result[1].splitlines()[1].split()[1] - targets['info'][port] = True - targets['list'].append((port, gid)) - continue - - result = self.run_raidcom( - 'get', 'host_grp', '-port', port) - for gid, iqn in _ISCSI_TARGETS_PATTERN.findall(result[1]): - result = self.run_raidcom( - 'get', 'hba_iscsi', '-port', '-'.join([port, gid])) - if iqn_pattern.search(result[1]): - targets['info'][port] = True - targets['list'].append((port, gid)) - targets['iqns'][(port, gid)] = iqn - break - else: - nr_not_found += 1 - - return nr_not_found - - def get_properties_iscsi(self, targets, multipath): - """Check if specified iSCSI targets exist and store their IQNs.""" - if not multipath: - target_list = targets['list'][:1] - else: - target_list = targets['list'][:] - - for target in target_list: - if target not in targets['iqns']: - port, gid = target - result = self.run_raidcom('get', 'host_grp', '-port', port) - match = re.search( - r"^CL\w-\w+ +%s +\S+ +(?P<iqn>\S+) +\w+ +\w +\d+ " % gid, - result[1], re.M) - if not match: - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource='Target IQN') - raise exception.VSPError(msg) - targets['iqns'][target] = match.group('iqn') - LOG.debug('Found iqn of the iSCSI target. (port: %(port)s, ' - 'gid: %(gid)s, target iqn: %(iqn)s)', - {'port': port, 'gid': gid, - 'iqn': match.group('iqn')}) - return super(VSPHORCMISCSI, self).get_properties_iscsi( - targets, multipath) diff --git a/cinder/volume/drivers/hitachi/vsp_iscsi.py b/cinder/volume/drivers/hitachi/vsp_iscsi.py deleted file mode 100644 index 521413a3183..00000000000 --- a/cinder/volume/drivers/hitachi/vsp_iscsi.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
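``_set_target_portal`` in the iSCSI class above extracts the portal address from :command:`raidcom get port -port <port> -key opt` output via the two named-group patterns ``_ISCSI_IPV4_ADDR_PATTERN`` and ``_ISCSI_TCP_PORT_PATTERN``. A self-contained sketch of that extraction, using invented sample output, might look like this:

.. code-block:: python

    import re

    # Invented sample of "raidcom get port -port CL1-A -key opt" output;
    # only the IPV4_ADDR and TCP_PORT lines matter for portal discovery.
    SAMPLE = """\
    PORT : CL1-A
    IPV4_ADDR : 192.168.10.15
    TCP_PORT : 3260
    """

    _IPV4 = re.compile(
        r"^IPV4_ADDR +: +(?P<ipv4_addr>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$",
        re.M)
    _TCP = re.compile(r"^TCP_PORT +: +(?P<tcp_port>\d+)$", re.M)

    def parse_portal(output):
        """Return 'ip:port' if both fields are present, else None."""
        addr = _IPV4.search(output)
        port = _TCP.search(output)
        if not addr or not port:
            return None
        return ':'.join([addr.group('ipv4_addr'), port.group('tcp_port')])

    print(parse_portal(SAMPLE))  # -> 192.168.10.15:3260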
-# -"""iSCSI module for Hitachi VSP Driver.""" - -from oslo_config import cfg - -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_common as common -from cinder.volume.drivers.hitachi import vsp_utils as utils - -iscsi_opts = [ - cfg.BoolOpt( - 'vsp_use_chap_auth', - default=False, - help='If True, CHAP authentication will be applied to communication ' - 'between hosts and any of the iSCSI targets on the storage ports.'), - cfg.StrOpt( - 'vsp_auth_user', - help='Name of the user used for CHAP authentication performed in ' - 'communication between hosts and iSCSI targets on the storage ports.'), - cfg.StrOpt( - 'vsp_auth_password', - secret=True, - help='Password corresponding to vsp_auth_user.'), -] - -MSG = utils.VSPMsg - -_DRIVER_INFO = { - 'proto': 'iSCSI', - 'hba_id': 'initiator', - 'hba_id_type': 'iSCSI initiator IQN', - 'msg_id': { - 'target': MSG.CREATE_ISCSI_TARGET_FAILED, - }, - 'volume_backend_name': utils.DRIVER_PREFIX + 'iSCSI', - 'volume_opts': iscsi_opts, - 'volume_type': 'iscsi', -} - -CONF = cfg.CONF -CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class VSPISCSIDriver(driver.ISCSIDriver): - """iSCSI class for Hitachi VSP Driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver. - - """ - - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Hitachi_VSP_CI" - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - """Initialize instance variables.""" - utils.output_log(MSG.DRIVER_INITIALIZATION_START, - driver=self.__class__.__name__, - version=self.get_version()) - super(VSPISCSIDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(common.common_opts) - self.configuration.append_config_values(iscsi_opts) - self.common = utils.import_object( - self.configuration, _DRIVER_INFO, kwargs.get('db')) - - def check_for_setup_error(self): - """Error are checked in do_setup() instead of this method.""" - pass - - @utils.output_start_end_log - def create_volume(self, volume): - """Create a volume and return its properties.""" - return self.common.create_volume(volume) - - @utils.output_start_end_log - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot and return its properties.""" - return self.common.create_volume_from_snapshot(volume, snapshot) - - @utils.output_start_end_log - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume and return its properties.""" - return self.common.create_cloned_volume(volume, src_vref) - - @utils.output_start_end_log - def delete_volume(self, volume): - """Delete the specified volume.""" - self.common.delete_volume(volume) - - @utils.output_start_end_log - def create_snapshot(self, snapshot): - """Create a snapshot from a volume and return its properties.""" - return self.common.create_snapshot(snapshot) - - @utils.output_start_end_log - def delete_snapshot(self, snapshot): - """Delete the specified snapshot.""" - self.common.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Return properties, capabilities and current states of the driver.""" - return self.common.get_volume_stats(refresh) - - @utils.output_start_end_log - def update_migrated_volume( - self, ctxt, volume, new_volume, original_volume_status): - """Do any remaining jobs after migration.""" - self.common.discard_zero_page(new_volume) - 
super(VSPISCSIDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) - - @utils.output_start_end_log - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - super(VSPISCSIDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - self.common.discard_zero_page(volume) - - @utils.output_start_end_log - def extend_volume(self, volume, new_size): - """Extend the specified volume to the specified size.""" - self.common.extend_volume(volume, new_size) - - @utils.output_start_end_log - def manage_existing(self, volume, existing_ref): - """Return volume properties which Cinder needs to manage the volume.""" - return self.common.manage_existing(existing_ref) - - @utils.output_start_end_log - def manage_existing_get_size(self, volume, existing_ref): - """Return the size[GB] of the specified volume.""" - return self.common.manage_existing_get_size(existing_ref) - - @utils.output_start_end_log - def unmanage(self, volume): - """Prepare the volume for removing it from Cinder management.""" - self.common.unmanage(volume) - - @utils.output_start_end_log - def do_setup(self, context): - """Prepare for the startup of the driver.""" - self.common.do_setup(context) - - def ensure_export(self, context, volume): - """Synchronously recreate an export for a volume.""" - pass - - def create_export(self, context, volume, connector): - """Export the volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a volume.""" - pass - - @utils.output_start_end_log - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - return self.common.initialize_connection(volume, connector) - - @utils.output_start_end_log - def terminate_connection(self, volume, connector, **kwargs): - """Terminate connection between the server and the volume.""" - self.common.terminate_connection(volume, connector) diff --git a/cinder/volume/drivers/hitachi/vsp_utils.py b/cinder/volume/drivers/hitachi/vsp_utils.py deleted file mode 100644 index 93c887a858a..00000000000 --- a/cinder/volume/drivers/hitachi/vsp_utils.py +++ /dev/null @@ -1,667 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
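Like its FC counterpart, ``VSPISCSIDriver`` above is deliberately thin: every Cinder API call is forwarded to the shared ``self.common`` object, so the protocol classes differ only in transport details. The toy sketch below illustrates that facade pattern in isolation; the class and method names are invented for the example and are not driver code.

.. code-block:: python

    # Toy illustration of the delegation pattern used by the VSP driver
    # classes: a thin facade forwards volume operations to a shared,
    # protocol-agnostic helper object.

    class FakeCommon(object):
        """Stand-in for the shared backend helper."""

        def create_volume(self, volume):
            # A real helper would create an LDEV and return its location.
            return {'provider_location': '42'}

        def extend_volume(self, volume, new_size):
            print('extending %s to %d GB' % (volume['id'], new_size))

    class ThinDriver(object):
        """Facade that forwards volume operations to the helper."""

        def __init__(self, common):
            self.common = common

        def create_volume(self, volume):
            return self.common.create_volume(volume)

        def extend_volume(self, volume, new_size):
            self.common.extend_volume(volume, new_size)

    driver = ThinDriver(FakeCommon())
    print(driver.create_volume({'id': 'vol-1'}))
    driver.extend_volume({'id': 'vol-1'}, 2)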
-# -"""Utility module for Hitachi VSP Driver.""" - -import functools -import inspect -import logging as base_logging -import os -import re - -import enum -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import units -import six - -from cinder import exception -from cinder import utils as cinder_utils - - -_DRIVER_DIR = 'cinder.volume.drivers.hitachi' - -_DRIVERS = { - 'HORCM': { - 'FC': 'vsp_horcm_fc.VSPHORCMFC', - 'iSCSI': 'vsp_horcm_iscsi.VSPHORCMISCSI', - }, -} - -DRIVER_PREFIX = 'VSP' -TARGET_PREFIX = 'HBSD-' -TARGET_IQN_SUFFIX = '.hbsd-target' -GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512 - -MAX_PROCESS_WAITTIME = 24 * 60 * 60 -DEFAULT_PROCESS_WAITTIME = 15 * 60 - -NORMAL_LDEV_TYPE = 'Normal' -NVOL_LDEV_TYPE = 'DP-VOL' - -FULL = 'Full copy' -THIN = 'Thin copy' - -INFO_SUFFIX = 'I' -WARNING_SUFFIX = 'W' -ERROR_SUFFIX = 'E' - -PORT_ID_LENGTH = 5 - - -@enum.unique -class VSPMsg(enum.Enum): - """messages for Hitachi VSP Driver.""" - - METHOD_START = { - 'msg_id': 0, - 'loglevel': base_logging.INFO, - 'msg': '%(method)s starts. (config_group: %(config_group)s)', - 'suffix': INFO_SUFFIX - } - OUTPUT_PARAMETER_VALUES = { - 'msg_id': 1, - 'loglevel': base_logging.INFO, - 'msg': 'The parameter of the storage backend. (config_group: ' - '%(config_group)s)', - 'suffix': INFO_SUFFIX - } - METHOD_END = { - 'msg_id': 2, - 'loglevel': base_logging.INFO, - 'msg': '%(method)s ended. (config_group: %(config_group)s)', - 'suffix': INFO_SUFFIX - } - DRIVER_READY_FOR_USE = { - 'msg_id': 3, - 'loglevel': base_logging.INFO, - 'msg': 'The storage backend can be used. (config_group: ' - '%(config_group)s)', - 'suffix': INFO_SUFFIX - } - DRIVER_INITIALIZATION_START = { - 'msg_id': 4, - 'loglevel': base_logging.INFO, - 'msg': 'Initialization of %(driver)s %(version)s started.', - 'suffix': INFO_SUFFIX - } - SET_CONFIG_VALUE = { - 'msg_id': 5, - 'loglevel': base_logging.INFO, - 'msg': 'Set %(object)s to %(value)s.', - 'suffix': INFO_SUFFIX - } - OBJECT_CREATED = { - 'msg_id': 6, - 'loglevel': base_logging.INFO, - 'msg': 'Created %(object)s. (%(details)s)', - 'suffix': INFO_SUFFIX - } - INVALID_LDEV_FOR_UNMAPPING = { - 'msg_id': 302, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to specify a logical device for the volume ' - '%(volume_id)s to be unmapped.', - 'suffix': WARNING_SUFFIX - } - INVALID_LDEV_FOR_DELETION = { - 'msg_id': 304, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to specify a logical device to be deleted. ' - '(method: %(method)s, id: %(id)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_TARGET_FAILED = { - 'msg_id': 306, - 'loglevel': base_logging.WARNING, - 'msg': 'A host group or an iSCSI target could not be deleted. ' - '(port: %(port)s, gid: %(id)s)', - 'suffix': WARNING_SUFFIX - } - CREATE_HOST_GROUP_FAILED = { - 'msg_id': 308, - 'loglevel': base_logging.WARNING, - 'msg': 'A host group could not be added. (port: %(port)s)', - 'suffix': WARNING_SUFFIX - } - CREATE_ISCSI_TARGET_FAILED = { - 'msg_id': 309, - 'loglevel': base_logging.WARNING, - 'msg': 'An iSCSI target could not be added. (port: %(port)s)', - 'suffix': WARNING_SUFFIX - } - UNMAP_LDEV_FAILED = { - 'msg_id': 310, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to unmap a logical device. 
(LDEV: %(ldev)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_LDEV_FAILED = { - 'msg_id': 313, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', - 'suffix': WARNING_SUFFIX - } - MAP_LDEV_FAILED = { - 'msg_id': 314, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to map a logical device. (LDEV: %(ldev)s, port: ' - '%(port)s, id: %(id)s, lun: %(lun)s)', - 'suffix': WARNING_SUFFIX - } - DISCARD_ZERO_PAGE_FAILED = { - 'msg_id': 315, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to perform a zero-page reclamation. (LDEV: ' - '%(ldev)s)', - 'suffix': WARNING_SUFFIX - } - ADD_HBA_WWN_FAILED = { - 'msg_id': 317, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, ' - 'wwn: %(wwn)s)', - 'suffix': WARNING_SUFFIX - } - LDEV_NOT_EXIST = { - 'msg_id': 319, - 'loglevel': base_logging.WARNING, - 'msg': 'The logical device does not exist in the storage system. ' - '(LDEV: %(ldev)s)', - 'suffix': WARNING_SUFFIX - } - HORCM_START_FAILED = { - 'msg_id': 320, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to start HORCM. (inst: %(inst)s)', - 'suffix': WARNING_SUFFIX - } - HORCM_RESTART_FOR_SI_FAILED = { - 'msg_id': 322, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to reload the configuration of full copy pair. ' - '(inst: %(inst)s)', - 'suffix': WARNING_SUFFIX - } - HORCM_LOGIN_FAILED = { - 'msg_id': 323, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to perform user authentication of HORCM. ' - '(user: %(user)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_SI_PAIR_FAILED = { - 'msg_id': 324, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete full copy pair. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_TI_PAIR_FAILED = { - 'msg_id': 325, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete thin copy pair. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)', - 'suffix': WARNING_SUFFIX - } - WAIT_SI_PAIR_STATUS_FAILED = { - 'msg_id': 326, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to change the status of full copy pair. (P-VOL: ' - '%(pvol)s, S-VOL: %(svol)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_DEVICE_GRP_FAILED = { - 'msg_id': 327, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete the configuration of full copy pair. ' - '(P-VOL: %(pvol)s, S-VOL: %(svol)s)', - 'suffix': WARNING_SUFFIX - } - DISCONNECT_VOLUME_FAILED = { - 'msg_id': 329, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to detach the logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)', - 'suffix': WARNING_SUFFIX - } - STORAGE_COMMAND_FAILED = { - 'msg_id': 600, - 'loglevel': base_logging.ERROR, - 'msg': 'The command %(cmd)s failed. (ret: %(ret)s, stdout: ' - '%(out)s, stderr: %(err)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_PARAMETER = { - 'msg_id': 601, - 'loglevel': base_logging.ERROR, - 'msg': 'A parameter is invalid. (%(param)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_PARAMETER_VALUE = { - 'msg_id': 602, - 'loglevel': base_logging.ERROR, - 'msg': 'A parameter value is invalid. (%(meta)s)', - 'suffix': ERROR_SUFFIX - } - HORCM_SHUTDOWN_FAILED = { - 'msg_id': 608, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to shutdown HORCM. (inst: %(inst)s)', - 'suffix': ERROR_SUFFIX - } - HORCM_RESTART_FAILED = { - 'msg_id': 609, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to restart HORCM. 
(inst: %(inst)s)', - 'suffix': ERROR_SUFFIX - } - SI_PAIR_STATUS_WAIT_TIMEOUT = { - 'msg_id': 610, - 'loglevel': base_logging.ERROR, - 'msg': 'The status change of full copy pair could not be ' - 'completed. (S-VOL: %(svol)s)', - 'suffix': ERROR_SUFFIX - } - TI_PAIR_STATUS_WAIT_TIMEOUT = { - 'msg_id': 611, - 'loglevel': base_logging.ERROR, - 'msg': 'The status change of thin copy pair could not be ' - 'completed. (S-VOL: %(svol)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_STATUS_FOR_COPY = { - 'msg_id': 612, - 'loglevel': base_logging.ERROR, - 'msg': 'The source logical device to be replicated does not exist ' - 'in the storage system. (LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_EXTENSION = { - 'msg_id': 613, - 'loglevel': base_logging.ERROR, - 'msg': 'The volume %(volume_id)s to be extended was not found.', - 'suffix': ERROR_SUFFIX - } - NO_HBA_WWN_ADDED_TO_HOST_GRP = { - 'msg_id': 614, - 'loglevel': base_logging.ERROR, - 'msg': 'No WWN is assigned. (port: %(port)s, gid: %(gid)s)', - 'suffix': ERROR_SUFFIX - } - NO_AVAILABLE_MIRROR_UNIT = { - 'msg_id': 615, - 'loglevel': base_logging.ERROR, - 'msg': 'A pair could not be created. The maximum number of pairs ' - 'is exceeded. (copy method: %(copy_method)s, P-VOL: ' - '%(pvol)s)', - 'suffix': ERROR_SUFFIX - } - UNABLE_TO_DELETE_PAIR = { - 'msg_id': 616, - 'loglevel': base_logging.ERROR, - 'msg': 'A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_VOLUME_SIZE_FOR_COPY = { - 'msg_id': 617, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to create a volume from a %(type)s. The size of ' - 'the new volume must be equal to or greater than the size ' - 'of the original %(type)s. (new volume: %(volume_id)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_VOLUME_TYPE_FOR_EXTEND = { - 'msg_id': 618, - 'loglevel': base_logging.ERROR, - 'msg': 'The volume %(volume_id)s could not be extended. The ' - 'volume type must be Normal.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_CONNECTION = { - 'msg_id': 619, - 'loglevel': base_logging.ERROR, - 'msg': 'The volume %(volume_id)s to be mapped was not found.', - 'suffix': ERROR_SUFFIX - } - POOL_INFO_RETRIEVAL_FAILED = { - 'msg_id': 620, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to provide information about a pool. (pool: ' - '%(pool)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_VOLUME_SIZE_FOR_TI = { - 'msg_id': 621, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to create a volume from a %(type)s. The size of ' - 'the new volume must be equal to the size of the original ' - '%(type)s when the new volume is created by ' - '%(copy_method)s. (new volume: %(volume_id)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_VOLUME_COPY = { - 'msg_id': 624, - 'loglevel': base_logging.ERROR, - 'msg': 'The %(type)s %(id)s source to be replicated was not ' - 'found.', - 'suffix': ERROR_SUFFIX - } - CREATE_HORCM_CONF_FILE_FAILED = { - 'msg_id': 632, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to open a file. (file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)', - 'suffix': ERROR_SUFFIX - } - CONNECT_VOLUME_FAILED = { - 'msg_id': 634, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to attach the logical device.
(LDEV: %(ldev)s, ' - 'reason: %(reason)s)', - 'suffix': ERROR_SUFFIX - } - CREATE_LDEV_FAILED = { - 'msg_id': 636, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to add the logical device.', - 'suffix': ERROR_SUFFIX - } - ADD_PAIR_TARGET_FAILED = { - 'msg_id': 638, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to add the pair target.', - 'suffix': ERROR_SUFFIX - } - NO_MAPPING_FOR_LDEV = { - 'msg_id': 639, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to map a logical device to any pair targets. ' - '(LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - POOL_NOT_FOUND = { - 'msg_id': 640, - 'loglevel': base_logging.ERROR, - 'msg': 'A pool could not be found. (pool: %(pool)s)', - 'suffix': ERROR_SUFFIX - } - NO_AVAILABLE_RESOURCE = { - 'msg_id': 648, - 'loglevel': base_logging.ERROR, - 'msg': 'There are no resources available for use. (resource: ' - '%(resource)s)', - 'suffix': ERROR_SUFFIX - } - NO_CONNECTED_TARGET = { - 'msg_id': 649, - 'loglevel': base_logging.ERROR, - 'msg': 'The host group or iSCSI target was not found.', - 'suffix': ERROR_SUFFIX - } - RESOURCE_NOT_FOUND = { - 'msg_id': 650, - 'loglevel': base_logging.ERROR, - 'msg': 'The resource %(resource)s was not found.', - 'suffix': ERROR_SUFFIX - } - LDEV_DELETION_WAIT_TIMEOUT = { - 'msg_id': 652, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - LDEV_CREATION_WAIT_TIMEOUT = { - 'msg_id': 653, - 'loglevel': base_logging.ERROR, - 'msg': 'The creation of a logical device could not be completed. ' - '(LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_ATTR_FOR_MANAGE = { - 'msg_id': 702, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'must be an unpaired %(ldevtype)s.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_SIZE_FOR_MANAGE = { - 'msg_id': 703, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'size must be expressed in gigabytes.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_PORT_FOR_MANAGE = { - 'msg_id': 704, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'must not be mapped.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_TYPE_FOR_UNMANAGE = { - 'msg_id': 706, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to unmanage the volume %(volume_id)s. The volume ' - 'type must be %(volume_type)s.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_MANAGE = { - 'msg_id': 707, - 'loglevel': base_logging.ERROR, - 'msg': 'No valid value is specified for "source-id". A valid LDEV ' - 'number must be specified in "source-id" to manage the ' - 'volume.', - 'suffix': ERROR_SUFFIX - } - VOLUME_COPY_FAILED = { - 'msg_id': 722, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to copy a volume. 
(copy method: %(copy_method)s, ' - 'P-VOL: %(pvol)s, S-VOL: %(svol)s)', - 'suffix': ERROR_SUFFIX - } - - def __init__(self, error_info): - """Initialize Enum attributes.""" - self.msg_id = error_info['msg_id'] - self.level = error_info['loglevel'] - self.msg = error_info['msg'] - self.suffix = error_info['suffix'] - - def output_log(self, **kwargs): - """Output the message to the log file and return the message.""" - msg = self.msg % kwargs - LOG.log(self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s", - {'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg}) - return msg - - -def output_log(msg_enum, **kwargs): - """Output the specified message to the log file and return the message.""" - return msg_enum.output_log(**kwargs) - -LOG = logging.getLogger(__name__) -MSG = VSPMsg - - -def output_start_end_log(func): - """Output the log of the start and the end of the method.""" - @functools.wraps(func) - def wrap(self, *args, **kwargs): - """Wrap the method to add logging function.""" - def _output_start_end_log(*_args, **_kwargs): - """Output the log of the start and the end of the method.""" - output_log(MSG.METHOD_START, - method=func.__name__, - config_group=self.configuration.config_group) - ret = func(*_args, **_kwargs) - output_log(MSG.METHOD_END, - method=func.__name__, - config_group=self.configuration.config_group) - return ret - return _output_start_end_log(self, *args, **kwargs) - return wrap - - -def get_ldev(obj): - """Get the LDEV number from the given object and return it as integer.""" - if not obj: - return None - ldev = obj.get('provider_location') - if not ldev or not ldev.isdigit(): - return None - return int(ldev) - - -def check_timeout(start_time, timeout): - """Return True if the specified time has passed, False otherwise.""" - return timeutils.is_older_than(start_time, timeout) - - -def mask_password(cmd): - """Return a string in which the password is masked.""" - if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login': - tmp = list(cmd) - tmp[3] = strutils.mask_dict_password({'password': ''}).get('password') - else: - tmp = cmd - return ' '.join([six.text_type(c) for c in tmp]) - - -def execute(*cmd, **kwargs): - """Run the specified command and return its results.""" - process_input = kwargs.pop('process_input', None) - run_as_root = kwargs.pop('run_as_root', True) - ret = 0 - try: - if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login': - stdout, stderr = cinder_utils.execute( - *cmd, process_input=process_input, run_as_root=run_as_root, - loglevel=base_logging.NOTSET)[:2] - else: - stdout, stderr = cinder_utils.execute( - *cmd, process_input=process_input, run_as_root=run_as_root)[:2] - except putils.ProcessExecutionError as ex: - ret = ex.exit_code - stdout = ex.stdout - stderr = ex.stderr - LOG.debug('cmd: %s', mask_password(cmd)) - LOG.debug('from: %s', inspect.stack()[2]) - LOG.debug('ret: %s', ret) - LOG.debug('stdout: %s', ' '.join(stdout.splitlines())) - LOG.debug('stderr: %s', ' '.join(stderr.splitlines())) - return ret, stdout, stderr - - -def import_object(conf, driver_info, db): - """Import a class and return an instance of it.""" - os.environ['LANG'] = 'C' - cli = _DRIVERS.get('HORCM') - return importutils.import_object( - '.'.join([_DRIVER_DIR, cli[driver_info['proto']]]), - conf, driver_info, db) - - -def check_ignore_error(ignore_error, stderr): - """Return True if ignore_error is in stderr, False otherwise.""" - if not ignore_error or not stderr: - return False - if not isinstance(ignore_error, six.string_types): - 
ignore_error = '|'.join(ignore_error) - - if re.search(ignore_error, stderr): - return True - return False - - -def check_opts(conf, opts): - """Check if the specified configuration is valid.""" - names = [] - for opt in opts: - names.append(opt.name) - check_opt_value(conf, names) - - -def check_opt_value(conf, names): - """Check if the parameter names and values are valid.""" - for name in names: - try: - getattr(conf, name) - except (cfg.NoSuchOptError, cfg.ConfigFileValueError): - with excutils.save_and_reraise_exception(): - output_log(MSG.INVALID_PARAMETER, param=name) - - -def output_storage_cli_info(name, version): - """Output storage CLI info to the log file.""" - LOG.info('\t%(name)-35s%(version)s', - {'name': name + ' version: ', 'version': version}) - - -def output_opt_info(conf, names): - """Output parameter names and values to the log file.""" - for name in names: - LOG.info('\t%(name)-35s%(attr)s', - {'name': name + ': ', 'attr': getattr(conf, name)}) - - -def output_opts(conf, opts): - """Output parameter names and values to the log file.""" - names = [opt.name for opt in opts if not opt.secret] - output_opt_info(conf, names) - - -def require_target_existed(targets): - """Check if the target list includes one or more members.""" - if not targets['list']: - msg = output_log(MSG.NO_CONNECTED_TARGET) - raise exception.VSPError(msg) - - -def get_volume_metadata(volume): - """Return a dictionary of the metadata of the specified volume.""" - volume_metadata = volume.get('volume_metadata', {}) - return {item['key']: item['value'] for item in volume_metadata} - - -def update_conn_info(conn_info, connector, lookup_service): - """Set wwn mapping list to the connection info.""" - init_targ_map = build_initiator_target_map( - connector, conn_info['data']['target_wwn'], lookup_service) - if init_targ_map: - conn_info['data']['initiator_target_map'] = init_targ_map - - -def build_initiator_target_map(connector, target_wwns, lookup_service): - """Return a dictionary mapping server-wwns and lists of storage-wwns.""" - init_targ_map = {} - initiator_wwns = connector['wwpns'] - if lookup_service: - dev_map = lookup_service.get_device_mapping_from_network( - initiator_wwns, target_wwns) - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - for initiator in fabric['initiator_port_wwn_list']: - init_targ_map[initiator] = fabric['target_port_wwn_list'] - else: - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - return init_targ_map diff --git a/doc/source/configuration/block-storage/drivers/hds-hnas-driver.rst b/doc/source/configuration/block-storage/drivers/hds-hnas-driver.rst deleted file mode 100644 index fca6ff30a25..00000000000 --- a/doc/source/configuration/block-storage/drivers/hds-hnas-driver.rst +++ /dev/null @@ -1,548 +0,0 @@ -========================================== -Hitachi NAS Platform NFS driver -========================================== - -This OpenStack Block Storage volume driver provides NFS support -for `Hitachi NAS Platform (HNAS) `_ Models 3080, 3090, 4040, 4060, 4080, and 4100 -with NAS OS 12.2 or higher. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The NFS driver supports these operations: - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Create a volume from a snapshot. -* Copy an image to a volume. -* Copy a volume to an image. -* Clone a volume. -* Extend a volume. -* Get volume statistics. -* Manage and unmanage a volume. -* Manage and unmanage snapshots (`HNAS NFS only`).
-* List manageable volumes and snapshots (`HNAS NFS only`). - -HNAS storage requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before using NFS services, use the HNAS configuration and management -GUI (SMU) or SSC CLI to configure HNAS to work with the drivers. Additionally: - -1. General: - -* It is mandatory to have at least ``1 storage pool, 1 EVS and 1 file - system`` to be able to run any of the HNAS drivers. -* The HNAS drivers report the space allocated to the file systems to - cinder, so when creating a file system, make sure it has enough space to - fit your needs. -* The file system used should not be created as a ``replication target`` and - should be mounted. -* It is possible to configure HNAS drivers to use distinct EVSs and file - systems, but ``all compute nodes and controllers`` in the cloud must have - access to the EVSs. - -2. For NFS: - -* Create NFS exports, choose a path for them (it must be different from - ``/``) and set the :guilabel:`Show snapshots` option to ``hide and - disable access``. -* For each export used, set the option ``norootsquash`` in the share - ``Access configuration`` so Block Storage services can change the - permissions of its volumes. For example, ``"* (rw, norootsquash)"``. -* Make sure that all compute and controller nodes have R/W access to the - shares used by the cinder HNAS driver. -* In order to use the hardware accelerated features of HNAS NFS, we - recommend setting ``max-nfs-version`` to 3. Refer to the Hitachi NAS - Platform command line reference to see how to configure this option. - -Block Storage host requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The HNAS drivers are supported for Red Hat Enterprise Linux OpenStack -Platform, SUSE OpenStack Cloud, and Ubuntu OpenStack. -The following packages must be installed in all compute, controller and -storage (if any) nodes: - -* ``nfs-utils`` for Red Hat Enterprise Linux OpenStack Platform -* ``nfs-client`` for SUSE OpenStack Cloud -* ``nfs-common``, ``libc6-i386`` for Ubuntu OpenStack - -Package installation -------------------- - -If you are installing the driver from an RPM or DEB package, -follow the steps below: - -#. Install the dependencies: - - In Red Hat: - - .. code-block:: console - - # yum install nfs-utils nfs-utils-lib - - Or in Ubuntu: - - .. code-block:: console - - # apt-get install nfs-common - - Or in SUSE: - - .. code-block:: console - - # zypper install nfs-client - - If you are using Ubuntu 12.04, you also need to install ``libc6-i386``: - - .. code-block:: console - - # apt-get install libc6-i386 - -#. Configure the driver as described in the :ref:`hnas-driver-configuration` - section. - -#. Restart all Block Storage services (volume, scheduler, and backup). - -.. _hnas-driver-configuration: - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -HNAS supports a variety of storage options and file system capabilities, -which are selected through the definition of volume types combined with the -use of multiple back ends and multiple services. Each back end can configure -up to ``4 service pools``, which can be mapped to cinder volume types. - -The configuration for the driver is read from the back-end sections of the -``cinder.conf``. Each back-end section must have the appropriate configurations -to communicate with your HNAS back end, such as the IP address of the HNAS EVS -that is hosting your data, HNAS SSH access credentials, the configuration of -each of the services in that back end, and so on.
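As described above, each back end groups up to four ``hnas_svcX_*`` option pairs into service pools. The short sketch below shows that grouping in isolation; the option names follow the driver's convention, while the helper and the sample values are illustrative only.

.. code-block:: python

    # Illustrative grouping of hnas_svcX_* options (X in 0..3) into
    # {pool_name: export} pairs; the sample values are made up.
    BACKEND_SECTION = {
        'hnas_svc0_pool_name': 'nfs_gold',
        'hnas_svc0_hdp': '172.24.49.21:/gold_export',
        'hnas_svc1_pool_name': 'nfs_silver',
        'hnas_svc1_hdp': '172.24.49.22:/silver_export',
    }

    def service_pools(options):
        """Pair each hnas_svcX_pool_name with its hnas_svcX_hdp export."""
        pools = {}
        for index in range(4):
            name = options.get('hnas_svc%d_pool_name' % index)
            export = options.get('hnas_svc%d_hdp' % index)
            if name and export:
                pools[name] = export
        return pools

    print(service_pools(BACKEND_SECTION))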
You can find examples of such -configurations in the :ref:`configuration_example` section. - -.. note:: - The HNAS cinder drivers still support the XML configuration format used - in older versions, but we recommend configuring the HNAS cinder drivers - only through the ``cinder.conf`` file, since the XML configuration file - is deprecated as of the Newton release. - -.. note:: - We do not recommend the use of the same NFS export for different back ends. - If possible, configure each back end to - use a different NFS export/file system. - -The following is the definition of each configuration option that can be used -in a HNAS back-end section in the ``cinder.conf`` file: - -.. list-table:: **Configuration options in cinder.conf** - :header-rows: 1 - :widths: 25, 10, 15, 50 - - * - Option - - Type - - Default - - Description - * - ``volume_backend_name`` - - Optional - - N/A - - A name that identifies the back end and can be used as an extra-spec to - redirect the volumes to the referenced back end. - * - ``volume_driver`` - - Required - - N/A - - The Python module path to the HNAS volume driver Python class. When - installing through the rpm or deb packages, you should configure this - to `cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`. - * - ``nfs_shares_config`` - - Required (only for NFS) - - /etc/cinder/nfs_shares - - Path to the ``nfs_shares`` file. This is required by the base cinder - generic NFS driver and therefore also required by the HNAS NFS driver. - This file should list, one per line, every NFS share being used by the - back end. For example, all the values found in the configuration keys - hnas_svcX_hdp in the HNAS NFS back-end sections. - * - ``hnas_mgmt_ip0`` - - Required - - N/A - - HNAS management IP address. Should be the IP address of the `Admin` - EVS. It is also the IP through which you access the web SMU - administration frontend of HNAS. - * - ``hnas_username`` - - Required - - N/A - - HNAS SSH username - * - ``hds_hnas_nfs_config_file`` - - Optional (deprecated) - - /opt/hds/hnas/cinder_nfs_conf.xml - - Path to the deprecated XML configuration file (only required if using - the XML file) - * - ``hnas_cluster_admin_ip0`` - - Optional (required only for HNAS multi-farm setups) - - N/A - - The IP of the HNAS farm admin. If your SMU controls more than one - system or cluster, this option must be set with the IP of the desired - node. This is different for HNAS multi-cluster setups, which - do not require this option to be set. - * - ``hnas_ssh_private_key`` - - Optional - - N/A - - Path to the SSH private key used to authenticate to the HNAS SMU. Only - required if you do not want to set `hnas_password`. - * - ``hnas_ssh_port`` - - Optional - - 22 - - Port on which HNAS is listening for SSH connections - * - ``hnas_password`` - - Required (unless hnas_ssh_private_key is provided) - - N/A - - HNAS password - * - ``hnas_svcX_hdp`` [1]_ - - Required (at least 1) - - N/A - - HDP (export) where the volumes will be created. Use - export paths to configure this. - * - ``hnas_svcX_pool_name`` - - Required - - N/A - - A `unique string` that is used to refer to this pool within the - context of cinder. You can tell cinder to put volumes of a specific - volume type into this back end, within this pool. See the - ``Service labels`` and :ref:`configuration_example` sections - for more details. - -..
[1] - Replace X with a number from 0 to 3 (keep the sequence when configuring - the driver) - -Service labels -~~~~~~~~~~~~~~ - -The HNAS driver supports differentiated types of service using service labels. -It is possible to create up to 4 of them for each back end (for example: -gold, platinum, silver, ssd, and so on). - -After creating the services in the ``cinder.conf`` configuration file, you -need to configure one cinder ``volume_type`` per service. Each ``volume_type`` -must have the metadata service_label with the same name configured in the -``hnas_svcX_pool_name`` option of that service. See the -:ref:`configuration_example` section for more details. If the ``volume_type`` -is not set, cinder will use the service pool with the largest available free -space, or apply other criteria configured in the scheduler filters. - -.. code-block:: console - - $ openstack volume type create default - $ openstack volume type set --property service_label=default default - $ openstack volume type create platinum-tier - $ openstack volume type set --property service_label=platinum platinum-tier - -Multi-backend configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can deploy multiple OpenStack HNAS driver instances (back ends), each -controlling a separate HNAS or sharing a single HNAS. If you use multiple cinder -back ends, remember that each cinder back end can host up to 4 services. Each -back-end section must have the appropriate configurations to communicate with -your HNAS back end, such as the IP address of the HNAS EVS that is hosting -your data, HNAS SSH access credentials, the configuration of each of the -services in that back end, and so on. You can find examples of such -configurations in the :ref:`configuration_example` section. - -If you want the volumes from a volume_type to be cast into a specific -back end, you must configure an extra_spec in the ``volume_type`` with the -value of the ``volume_backend_name`` option from that back end. - -When configuring multiple NFS back ends, each back end should have a -separate ``nfs_shares_config`` option and a separate ``nfs_shares`` file -defined (for example, ``nfs_shares1``, ``nfs_shares2``) with the desired -shares listed on separate lines. - -SSH configuration -~~~~~~~~~~~~~~~~~ - -.. note:: - As of the Newton OpenStack release, the user can no longer run the - driver using a locally installed instance of the :command:`SSC` utility - package. Instead, all communications with the HNAS back end are handled - through :command:`SSH`. - -You can use your username and password to authenticate the Block Storage node -to the HNAS back end. In order to do that, simply configure ``hnas_username`` -and ``hnas_password`` in your back-end section within the ``cinder.conf`` -file. - -For example: - -.. code-block:: ini - - [hnas-backend] - # ... - hnas_username = supervisor - hnas_password = supervisor - -Alternatively, the HNAS cinder driver also supports SSH authentication -through public key. To configure that: - -#. If you do not have an SSH key pair already generated, create one on - the Block Storage node (leave the pass-phrase empty): - - .. code-block:: console - - $ mkdir -p /opt/hitachi/ssh - $ ssh-keygen -f /opt/hitachi/ssh/hnaskey - -#. Change the owner of the key to cinder (or the user the volume service will - be run as): - - .. code-block:: console - - # chown -R cinder.cinder /opt/hitachi/ssh - -#. Create the directory ``ssh_keys`` on the SMU server: - - ..
code-block:: console - - $ ssh [manager|supervisor]@<smu-ip> 'mkdir -p /var/opt/mercury-main/home/[manager|supervisor]/ssh_keys/' - -#. Copy the public key to the ``ssh_keys`` directory: - - .. code-block:: console - - $ scp /opt/hitachi/ssh/hnaskey.pub [manager|supervisor]@<smu-ip>:/var/opt/mercury-main/home/[manager|supervisor]/ssh_keys/ - -#. Access the SMU server: - - .. code-block:: console - - $ ssh [manager|supervisor]@<smu-ip> - -#. Run the command to register the SSH keys: - - .. code-block:: console - - $ ssh-register-public-key -u [manager|supervisor] -f ssh_keys/hnaskey.pub - -#. Check the communication with HNAS from the Block Storage node: - - For multi-farm HNAS: - - .. code-block:: console - - $ ssh -i /opt/hitachi/ssh/hnaskey [manager|supervisor]@<smu-ip> 'ssc df -a' - - Or, for Single-node/Multi-Cluster: - - .. code-block:: console - - $ ssh -i /opt/hitachi/ssh/hnaskey [manager|supervisor]@<smu-ip> 'ssc localhost df -a' - -#. Configure your back-end section in ``cinder.conf`` to use your public key: - - .. code-block:: ini - - [hnas-backend] - # ... - hnas_ssh_private_key = /opt/hitachi/ssh/hnaskey - -Managing volumes -~~~~~~~~~~~~~~~~ - -If there are some existing volumes on HNAS that you want to import to cinder, -it is possible to use the manage volume feature to do this. The manage action -on an existing volume is very similar to a volume creation. It creates a -volume entry in the cinder database, but instead of creating a new volume in the -back end, it only adds a link to an existing volume. - -.. note:: - This is an admin-only feature, and you must be logged in as a user - with admin rights to use it. - -#. Under the :menuselection:`System > Volumes` tab, - choose the option :guilabel:`Manage Volume`. - -#. Fill the fields :guilabel:`Identifier`, :guilabel:`Host`, - :guilabel:`Volume Name`, and :guilabel:`Volume Type` with volume - information to be managed: - - * :guilabel:`Identifier`: ip:/type/volume_name (*For example:* - 172.24.44.34:/silver/volume-test) - * :guilabel:`Host`: `host@backend-name#pool_name` (*For example:* - `ubuntu@hnas-nfs#test_silver`) - * :guilabel:`Volume Name`: volume_name (*For example:* volume-test) - * :guilabel:`Volume Type`: choose a type of volume (*For example:* silver) - -By CLI: - -.. code-block:: console - - $ cinder manage [--id-type <id-type>][--name <name>][--description <description>] - [--volume-type <volume-type>][--availability-zone <availability-zone>] - [--metadata [<key=value> [<key=value> ...]]][--bootable] <host> <identifier> - -Example: - -.. code-block:: console - - $ cinder manage --name volume-test --volume-type silver - ubuntu@hnas-nfs#test_silver 172.24.44.34:/silver/volume-test - -Managing snapshots -~~~~~~~~~~~~~~~~~~ - -The manage snapshots feature works very similarly to the manage volumes -feature, currently supported on HNAS cinder drivers. So, if you have a volume -already managed by cinder which has snapshots that are not managed by cinder, -it is possible to use manage snapshots to import these snapshots and link them -with their original volume. - -.. note:: - For the HNAS NFS cinder driver, the snapshots of volumes are clones of volumes - that were created using :command:`file-clone-create`, not the HNAS - :command:`snapshot-\*` feature. Check the HNAS user - documentation for details about these two features. - -Currently, the manage snapshots function does not support importing snapshots -(generally created by storage's :command:`file-clone` operation) -``without parent volumes`` or when the parent volume is ``in-use``. In this -case, the ``manage volumes`` feature should be used to import the snapshot as a normal -cinder volume.
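Both manage operations above identify the storage object with an ``ip:/path/name`` string. The helper below is a hypothetical illustration (not part of the driver) of how such an identifier decomposes into its EVS IP, export or pool path, and object name:

.. code-block:: python

    # Hypothetical helper showing how a manage identifier such as
    # "172.24.44.34:/silver/volume-test" splits into its parts.
    def split_identifier(identifier):
        """Split 'ip:/path/name' into (ip, path, name)."""
        ip, _, rest = identifier.partition(':/')
        path, _, name = rest.rpartition('/')
        if not (ip and path and name):
            raise ValueError('unexpected identifier: %s' % identifier)
        return ip, path, name

    print(split_identifier('172.24.44.34:/silver/volume-test'))
    # -> ('172.24.44.34', 'silver', 'volume-test')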
- -Also, this is an admin-only feature, and you must be logged in as a user with -admin rights to use it. - -.. note:: - Although there is a verification to prevent importing snapshots using - non-related volumes as parents, it is possible to manage a snapshot using - any related cloned volume. So, when managing a snapshot, it is extremely - important to make sure that you are using the correct parent volume. - -.. code-block:: console - - $ cinder snapshot-manage <volume> <identifier> - -* :guilabel:`Identifier`: evs_ip:/export_name/snapshot_name - (*For example:* 172.24.44.34:/export1/snapshot-test) - -* :guilabel:`Volume`: Parent volume ID (*For example:* - 061028c0-60cf-499f-99e2-2cd6afea081f) - -Example: - -.. code-block:: console - - $ cinder snapshot-manage 061028c0-60cf-499f-99e2-2cd6afea081f 172.24.44.34:/export1/snapshot-test - -.. note:: - This feature is currently available only for the HNAS NFS driver. - -.. _configuration_example: - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -Below are configuration examples for the NFS back end: - -#. HNAS NFS Driver - - #. For HNAS NFS driver, create this section in your ``cinder.conf`` file: - - .. code-block:: ini - - [hnas-nfs] - volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver - nfs_shares_config = /home/cinder/nfs_shares - volume_backend_name = hnas_nfs_backend - hnas_username = supervisor - hnas_password = supervisor - hnas_mgmt_ip0 = 172.24.44.15 - - hnas_svc0_pool_name = nfs_gold - hnas_svc0_hdp = 172.24.49.21:/gold_export - - hnas_svc1_pool_name = nfs_platinum - hnas_svc1_hdp = 172.24.49.21:/silver_platinum - - hnas_svc2_pool_name = nfs_silver - hnas_svc2_hdp = 172.24.49.22:/silver_export - - hnas_svc3_pool_name = nfs_bronze - hnas_svc3_hdp = 172.24.49.23:/bronze_export - - #. Add it to the ``enabled_backends`` list, under the ``DEFAULT`` section - of your ``cinder.conf`` file: - - .. code-block:: ini - - [DEFAULT] - enabled_backends = hnas-nfs - - #. Add the configured exports to the ``nfs_shares`` file: - - .. code-block:: vim - - 172.24.49.21:/gold_export - 172.24.49.21:/silver_platinum - 172.24.49.22:/silver_export - 172.24.49.23:/bronze_export - - #. Register a volume type with cinder and associate it with - this back end: - - .. code-block:: console - - $ openstack volume type create hnas_nfs_gold - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_gold hnas_nfs_gold - $ openstack volume type create hnas_nfs_platinum - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_platinum hnas_nfs_platinum - $ openstack volume type create hnas_nfs_silver - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_silver hnas_nfs_silver - $ openstack volume type create hnas_nfs_bronze - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_bronze hnas_nfs_bronze - -Additional notes and limitations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* The ``get_volume_stats()`` function always provides the available - capacity based on the combined sum of all the HDPs that are used in - these service labels. - -* After changing the configuration on the storage node, the Block Storage - driver must be restarted. - -* On Red Hat, if the system is configured to use SELinux, you need to - set ``virt_use_nfs = on`` for the NFS driver to work properly. - - ..
code-block:: console - - # setsebool -P virt_use_nfs on - -* It is not possible to manage a volume if there is a slash (``/``) or - a colon (``:``) in the volume name. - -* File system ``auto-expansion``: Although supported, we do not recommend using - file systems with the auto-expansion setting enabled because the scheduler uses - the file system capacity reported by the driver to determine if new volumes - can be created. For instance, in a setup with a file system that can expand - to 200GB but is at 100GB capacity, with 10GB free, the scheduler will not - allow a 15GB volume to be created. In this case, manual expansion would - have to be triggered by an administrator. We recommend always creating the - file system at the ``maximum capacity`` or periodically expanding the file - system manually. - -* The ``hnas_svcX_pool_name`` option must be unique for a given back end. It - is still possible to use the deprecated form ``hnas_svcX_volume_type``, but - this support will be removed in a future release. - -* SSC simultaneous connections limit: In very busy environments, if 2 or - more volume hosts are configured to use the same storage, some requests - (create, delete, and so on) may have attempts fail and be re-tried - (``5 attempts`` by default) due to an HNAS connection limitation - (``max of 5`` simultaneous connections). diff --git a/doc/source/configuration/block-storage/drivers/hitachi-storage-volume-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-storage-volume-driver.rst deleted file mode 100644 index a650cd128b7..00000000000 --- a/doc/source/configuration/block-storage/drivers/hitachi-storage-volume-driver.rst +++ /dev/null @@ -1,169 +0,0 @@ -============================= -Hitachi storage volume driver -============================= - -The Hitachi storage volume driver provides iSCSI and Fibre Channel -support for Hitachi storage systems. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -Supported storage systems: - -* Hitachi Virtual Storage Platform G1000 (VSP G1000) -* Hitachi Virtual Storage Platform (VSP) -* Hitachi Unified Storage VM (HUS VM) -* Hitachi Unified Storage 100 Family (HUS 100 Family) - -Required software: - -* RAID Manager Ver 01-32-03/01 or later for VSP G1000/VSP/HUS VM -* Hitachi Storage Navigator Modular 2 (HSNM2) Ver 27.50 or later - for HUS 100 Family - - .. note:: - - HSNM2 needs to be installed under ``/usr/stonavm``. - -Required licenses: - -* Hitachi In-System Replication Software for VSP G1000/VSP/HUS VM -* (Mandatory) ShadowImage in-system replication for HUS 100 Family -* (Optional) Copy-on-Write Snapshot for HUS 100 Family - -Additionally, the ``pexpect`` package is required. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Manage and unmanage volume snapshots. -* Create a volume from a snapshot. -* Copy a volume to an image. -* Copy an image to a volume. -* Clone a volume. -* Extend a volume. -* Get volume statistics. - -Configuration -~~~~~~~~~~~~~ - -Set up Hitachi storage ----------------------- - -You need to specify settings as described below. For details about each step, -see the user's guide of the storage device. Use storage administration -software such as ``Storage Navigator`` to set up the storage device so that -LDEVs and host groups can be created and deleted, and LDEVs can be connected -to the server and can be asynchronously copied. - -#. Create a Dynamic Provisioning pool. - -#.
-#. Connect the ports at the storage to the controller node and compute nodes.
-
-#. For VSP G1000/VSP/HUS VM, set ``port security`` to ``enable`` for the
-   ports at the storage.
-
-#. For HUS 100 Family, set ``Host Group security`` or
-   ``iSCSI target security`` to ``ON`` for the ports at the storage.
-
-#. For the ports at the storage, create host groups (iSCSI targets) whose
-   names begin with ``HBSD-`` for the controller node and each compute node.
-   Then register a WWN (initiator IQN) for each of the controller node and
-   compute nodes.
-
-#. For VSP G1000/VSP/HUS VM, perform the following:
-
-   * Create a storage device account belonging to the Administrator User
-     Group. (To use multiple storage devices, create the same account name
-     for all the target storage devices, and specify the same resource
-     group and permissions.)
-   * Create a command device (In-Band), and set user authentication to ``ON``.
-   * Register the created command device to the host group for the controller
-     node.
-   * To use the Thin Image function, create a pool for Thin Image.
-
-#. For HUS 100 Family, perform the following:
-
-   * Use the :command:`auunitaddauto` command to register the
-     unit name and controller of the storage device to HSNM2.
-   * When connecting via iSCSI, if you are using CHAP authentication, specify
-     the same user and password as those used for the storage port.
-
-Set up Hitachi Gigabit Fibre Channel adaptor
---------------------------------------------
-
-Change a parameter of the hfcldd driver and update the ``initramfs`` file
-if a Hitachi Gigabit Fibre Channel adaptor is used:
-
-.. code-block:: console
-
-   # /opt/hitachi/drivers/hba/hfcmgr -E hfc_rport_lu_scan 1
-   # dracut -f initramfs-KERNEL_VERSION.img KERNEL_VERSION
-   # reboot
-
-Set up Hitachi storage volume driver
-------------------------------------
-
-#. Create a directory:
-
-   .. code-block:: console
-
-      # mkdir /var/lock/hbsd
-      # chown cinder:cinder /var/lock/hbsd
-
-#. Create a ``volume type`` and a ``volume key``.
-
-   This example shows that HUS100_SAMPLE is created as the ``volume type``
-   and hus100_backend is registered as the ``volume key``:
-
-   .. code-block:: console
-
-      $ openstack volume type create HUS100_SAMPLE
-      $ openstack volume type set --property volume_backend_name=hus100_backend HUS100_SAMPLE
-
-#. You may specify any ``volume type`` name and ``volume key``, as long as
-   they are used consistently.
-
-   To confirm the created ``volume type``, run the following command:
-
-   .. code-block:: console
-
-      $ openstack volume type list --long
-
-#. Edit the ``/etc/cinder/cinder.conf`` file as follows.
-
-   If you use Fibre Channel:
-
-   .. code-block:: ini
-
-      volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
-
-   If you use iSCSI:
-
-   .. code-block:: ini
-
-      volume_driver = cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver
-
-   Also, set the ``volume_backend_name`` created by the
-   :command:`openstack volume type set` command:
-
-   .. code-block:: ini
-
-      volume_backend_name = hus100_backend
-
-   This table shows the configuration options for the Hitachi storage volume
-   driver.
-
-   .. include:: ../../tables/cinder-hitachi-hbsd.inc
-
-#. Restart the Block Storage service.
-
-   When startup completes, the message "MSGID0003-I: The storage backend can
-   be used." is written to ``/var/log/cinder/volume.log`` as follows:
-
-   .. code-block:: console
-
-      2014-09-01 10:34:14.169 28734 WARNING cinder.volume.drivers.hitachi.
-      hbsd_common [req-a0bb70b5-7c3f-422a-a29e-6a55d6508135 None None]
-      MSGID0003-I: The storage backend can be used.
-      (config_group: hus100_backend)
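Putting these steps together, the relevant pieces of
``/etc/cinder/cinder.conf`` might look like the following minimal sketch. A
multi-back-end layout is assumed, and the ``hitachi_unit_name`` and
``hitachi_pool_id`` values are illustrative; see the option table referenced
above for the full list of options:

.. code-block:: ini

   [DEFAULT]
   enabled_backends = hus100_backend

   [hus100_backend]
   volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
   volume_backend_name = hus100_backend
   # Array unit registered in HSNM2 (HUS 100 Family; illustrative value)
   hitachi_unit_name = HUS100_unit01
   # Dynamic Provisioning pool created earlier (illustrative value)
   hitachi_pool_id = 0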
diff --git a/doc/source/configuration/block-storage/volume-drivers.rst b/doc/source/configuration/block-storage/volume-drivers.rst
index df634c291ff..980d3df79bc 100644
--- a/doc/source/configuration/block-storage/volume-drivers.rst
+++ b/doc/source/configuration/block-storage/volume-drivers.rst
@@ -25,8 +25,6 @@ Volume drivers
    drivers/emc-vnx-driver.rst
    drivers/emc-xtremio-driver.rst
    drivers/fujitsu-eternus-dx-driver.rst
-   drivers/hds-hnas-driver.rst
-   drivers/hitachi-storage-volume-driver.rst
    drivers/hpe-3par-driver.rst
    drivers/hpe-lefthand-driver.rst
    drivers/hp-msa-driver.rst
diff --git a/doc/source/configuration/tables/cinder-hitachi-hbsd.inc b/doc/source/configuration/tables/cinder-hitachi-hbsd.inc
deleted file mode 100644
index 544f5184367..00000000000
--- a/doc/source/configuration/tables/cinder-hitachi-hbsd.inc
+++ /dev/null
@@ -1,64 +0,0 @@
-..
-   Warning: Do not edit this file. It is automatically generated from the
-   software project's code and your changes will be overwritten.
-
-   The tool to generate this file lives in openstack-doc-tools repository.
-
-   Please make any changes needed in the code, then run the
-   autogenerate-config-doc tool from the openstack-doc-tools repository, or
-   ask for help on the documentation mailing list, IRC channel or meeting.
-
-.. _cinder-hitachi-hbsd:
-
-.. list-table:: Description of Hitachi storage volume driver configuration options
-   :header-rows: 1
-   :class: config-ref-table
-
-   * - Configuration option = Default value
-     - Description
-   * - **[DEFAULT]**
-     -
-   * - ``hitachi_add_chap_user`` = ``False``
-     - (Boolean) Add CHAP user
-   * - ``hitachi_async_copy_check_interval`` = ``10``
-     - (Integer) Interval to check copy asynchronously
-   * - ``hitachi_auth_method`` = ``None``
-     - (String) iSCSI authentication method
-   * - ``hitachi_auth_password`` = ``HBSD-CHAP-password``
-     - (String) iSCSI authentication password
-   * - ``hitachi_auth_user`` = ``HBSD-CHAP-user``
-     - (String) iSCSI authentication username
-   * - ``hitachi_copy_check_interval`` = ``3``
-     - (Integer) Interval to check copy
-   * - ``hitachi_copy_speed`` = ``3``
-     - (Integer) Copy speed of storage system
-   * - ``hitachi_default_copy_method`` = ``FULL``
-     - (String) Default copy method of storage system
-   * - ``hitachi_group_range`` = ``None``
-     - (String) Range of group number
-   * - ``hitachi_group_request`` = ``False``
-     - (Boolean) Request for creating HostGroup or iSCSI Target
-   * - ``hitachi_horcm_add_conf`` = ``True``
-     - (Boolean) Add to HORCM configuration
-   * - ``hitachi_horcm_numbers`` = ``200,201``
-     - (String) Instance numbers for HORCM
-   * - ``hitachi_horcm_password`` = ``None``
-     - (String) Password of storage system for HORCM
-   * - ``hitachi_horcm_resource_lock_timeout`` = ``600``
-     - (Integer) Timeout until a resource lock is released, in seconds. The value must be between 0 and 7200.
-   * - ``hitachi_horcm_user`` = ``None``
-     - (String) Username of storage system for HORCM
-   * - ``hitachi_ldev_range`` = ``None``
-     - (String) Range of logical device of storage system
-   * - ``hitachi_pool_id`` = ``None``
-     - (Integer) Pool ID of storage system
-   * - ``hitachi_serial_number`` = ``None``
-     - (String) Serial number of storage system
-   * - ``hitachi_target_ports`` = ``None``
-     - (String) Control port names for HostGroup or iSCSI Target
-   * - ``hitachi_thin_pool_id`` = ``None``
-     - (Integer) Thin pool ID of storage system
-   * - ``hitachi_unit_name`` = ``None``
-     - (String) Name of an array unit
-   * - ``hitachi_zoning_request`` = ``False``
-     - (Boolean) Request for FC Zone creating HostGroup
diff --git a/doc/source/configuration/tables/cinder-hitachi-hnas.inc b/doc/source/configuration/tables/cinder-hitachi-hnas.inc
deleted file mode 100644
index 30d173e4044..00000000000
--- a/doc/source/configuration/tables/cinder-hitachi-hnas.inc
+++ /dev/null
@@ -1,64 +0,0 @@
-..
-   Warning: Do not edit this file. It is automatically generated from the
-   software project's code and your changes will be overwritten.
-
-   The tool to generate this file lives in openstack-doc-tools repository.
-
-   Please make any changes needed in the code, then run the
-   autogenerate-config-doc tool from the openstack-doc-tools repository, or
-   ask for help on the documentation mailing list, IRC channel or meeting.
-
-.. _cinder-hitachi-hnas:
-
-.. list-table:: Description of Hitachi HNAS iSCSI and NFS driver configuration options
-   :header-rows: 1
-   :class: config-ref-table
-
-   * - Configuration option = Default value
-     - Description
-   * - **[DEFAULT]**
-     -
-   * - ``hds_hnas_iscsi_config_file`` = ``/opt/hds/hnas/cinder_iscsi_conf.xml``
-     - (String) DEPRECATED: Legacy configuration file for HNAS iSCSI Cinder plugin. This is not needed if you fill all configuration on cinder.conf
-   * - ``hds_hnas_nfs_config_file`` = ``/opt/hds/hnas/cinder_nfs_conf.xml``
-     - (String) DEPRECATED: Legacy configuration file for HNAS NFS Cinder plugin. This is not needed if you fill all configuration on cinder.conf
-   * - ``hnas_chap_enabled`` = ``True``
-     - (Boolean) Whether the chap authentication is enabled in the iSCSI target or not.
-   * - ``hnas_cluster_admin_ip0`` = ``None``
-     - (String) The IP of the HNAS cluster admin. Required only for HNAS multi-cluster setups.
-   * - ``hnas_mgmt_ip0`` = ``None``
-     - (IP) Management IP address of HNAS. This can be any IP in the admin address on HNAS or the SMU IP.
-   * - ``hnas_password`` = ``None``
-     - (String) HNAS password.
-   * - ``hnas_ssc_cmd`` = ``ssc``
-     - (String) Command to communicate to HNAS.
-   * - ``hnas_ssh_port`` = ``22``
-     - (Port number) Port to be used for SSH authentication.
-   * - ``hnas_ssh_private_key`` = ``None``
-     - (String) Path to the SSH private key used to authenticate in HNAS SMU.
-   * - ``hnas_svc0_hdp`` = ``None``
-     - (String) Service 0 HDP
-   * - ``hnas_svc0_iscsi_ip`` = ``None``
-     - (IP) Service 0 iSCSI IP
-   * - ``hnas_svc0_pool_name`` = ``None``
-     - (String) Service 0 pool name
-   * - ``hnas_svc1_hdp`` = ``None``
-     - (String) Service 1 HDP
-   * - ``hnas_svc1_iscsi_ip`` = ``None``
-     - (IP) Service 1 iSCSI IP
-   * - ``hnas_svc1_pool_name`` = ``None``
-     - (String) Service 1 pool name
-   * - ``hnas_svc2_hdp`` = ``None``
-     - (String) Service 2 HDP
-   * - ``hnas_svc2_iscsi_ip`` = ``None``
-     - (IP) Service 2 iSCSI IP
-   * - ``hnas_svc2_pool_name`` = ``None``
-     - (String) Service 2 pool name
-   * - ``hnas_svc3_hdp`` = ``None``
-     - (String) Service 3 HDP
-   * - ``hnas_svc3_iscsi_ip`` = ``None``
-     - (IP) Service 3 iSCSI IP
-   * - ``hnas_svc3_pool_name`` = ``None``
-     - (String) Service 3 pool name
-   * - ``hnas_username`` = ``None``
-     - (String) HNAS username.
diff --git a/doc/source/configuration/tables/cinder-hitachi-vsp.inc b/doc/source/configuration/tables/cinder-hitachi-vsp.inc
deleted file mode 100644
index 648a79129c3..00000000000
--- a/doc/source/configuration/tables/cinder-hitachi-vsp.inc
+++ /dev/null
@@ -1,60 +0,0 @@
-..
-   Warning: Do not edit this file. It is automatically generated from the
-   software project's code and your changes will be overwritten.
-
-   The tool to generate this file lives in openstack-doc-tools repository.
-
-   Please make any changes needed in the code, then run the
-   autogenerate-config-doc tool from the openstack-doc-tools repository, or
-   ask for help on the documentation mailing list, IRC channel or meeting.
-
-.. _cinder-hitachi-vsp:
-
-.. list-table:: Description of HORCM interface module for Hitachi VSP driver configuration options
-   :header-rows: 1
-   :class: config-ref-table
-
-   * - Configuration option = Default value
-     - Description
-   * - **[DEFAULT]**
-     -
-   * - ``vsp_async_copy_check_interval`` = ``10``
-     - (Integer) Interval in seconds at which volume pair synchronization status is checked when volume pairs are deleted.
-   * - ``vsp_auth_password`` = ``None``
-     - (String) Password corresponding to vsp_auth_user.
-   * - ``vsp_auth_user`` = ``None``
-     - (String) Name of the user used for CHAP authentication performed in communication between hosts and iSCSI targets on the storage ports.
-   * - ``vsp_compute_target_ports`` = ``None``
-     - (List) IDs of the storage ports used to attach volumes to compute nodes. To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A).
-   * - ``vsp_copy_check_interval`` = ``3``
-     - (Integer) Interval in seconds at which volume pair synchronization status is checked when volume pairs are created.
-   * - ``vsp_copy_speed`` = ``3``
-     - (Integer) Speed at which data is copied by Shadow Image. 1 or 2 indicates low speed, 3 indicates middle speed, and a value between 4 and 15 indicates high speed.
-   * - ``vsp_default_copy_method`` = ``FULL``
-     - (String) Method of volume copy. FULL indicates full data copy by Shadow Image and THIN indicates differential data copy by Thin Image.
-   * - ``vsp_group_request`` = ``False``
-     - (Boolean) If True, the driver will create host groups or iSCSI targets on storage ports as needed.
-   * - ``vsp_horcm_add_conf`` = ``True``
-     - (Boolean) If True, the driver will create or update the Command Control Interface configuration file as needed.
-   * - ``vsp_horcm_numbers`` = ``200, 201``
-     - (List) Command Control Interface instance numbers in the format of 'xxx,yyy'. The second one is for Shadow Image operation and the first one is for other purposes.
-   * - ``vsp_horcm_pair_target_ports`` = ``None``
-     - (List) IDs of the storage ports used to copy volumes by Shadow Image or Thin Image. To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A).
-   * - ``vsp_horcm_password`` = ``None``
-     - (String) Password corresponding to vsp_horcm_user.
-   * - ``vsp_horcm_user`` = ``None``
-     - (String) Name of the user on the storage system.
-   * - ``vsp_ldev_range`` = ``None``
-     - (String) Range of the LDEV numbers in the format of 'xxxx-yyyy' that can be used by the driver. Values can be in decimal format (e.g. 1000) or in colon-separated hexadecimal format (e.g. 00:03:E8).
-   * - ``vsp_pool`` = ``None``
-     - (String) Pool number or pool name of the DP pool.
-   * - ``vsp_storage_id`` = ``None``
-     - (String) Product number of the storage system.
-   * - ``vsp_target_ports`` = ``None``
-     - (List) IDs of the storage ports used to attach volumes to the controller node. To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A).
-   * - ``vsp_thin_pool`` = ``None``
-     - (String) Pool number or pool name of the Thin Image pool.
-   * - ``vsp_use_chap_auth`` = ``False``
-     - (Boolean) If True, CHAP authentication will be applied to communication between hosts and any of the iSCSI targets on the storage ports.
-   * - ``vsp_zoning_request`` = ``False``
-     - (Boolean) If True, the driver will configure FC zoning between the server and the storage system provided that FC zoning manager is enabled.
diff --git a/releasenotes/notes/remove-hitachi-57d0b37cb9cc7e13.yaml b/releasenotes/notes/remove-hitachi-57d0b37cb9cc7e13.yaml
new file mode 100644
index 00000000000..2d6f46a3da2
--- /dev/null
+++ b/releasenotes/notes/remove-hitachi-57d0b37cb9cc7e13.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated
+    in the Pike release and have now been removed. Hitachi storage drivers are
+    now only available directly from Hitachi.
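Before upgrading across this removal, operators may want to confirm that no
enabled back end still references one of the removed driver classes; a
minimal check (the configuration path shown is the usual default):

.. code-block:: console

   $ grep -nE 'cinder\.volume\.drivers\.hitachi\.' /etc/cinder/cinder.conf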