Introduce Hitachi storage volume driver

This patch introduces the Hitachi storage volume driver.

Implements: blueprint hitachi-block-storage-driver

Certification test result for FC:
https://bugs.launchpad.net/cinder/+bug/1336661/+attachment/4189194/+files/FC%20tmp.pTAkWV3eWb
Certification test result for iSCSI:
https://bugs.launchpad.net/cinder/+bug/1336661/+attachment/4189195/+files/iSCSI%20tmp.1Q7C1rkzTY

Change-Id: Ie9b5df6d223b47d176c4e80fcf7e110543ce1d37
Signed-off-by: Seiji Aguchi <seiji.aguchi.tr@hitachi.com>

parent a1ff7b9705
commit c6af914de5
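
For context when reviewing, a minimal sketch of how the new FC driver might be enabled in cinder.conf. The volume_driver path and option names are taken from the modules and the test configuration in this patch; the values shown are illustrative assumptions only and are not part of the commit:

    [hitachi-fc]
    volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
    hitachi_serial_number = 210944
    hitachi_pool_id = 30
    hitachi_target_ports = CL1-A
    hitachi_horcm_user = user
    hitachi_horcm_password = password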
@@ -813,3 +813,25 @@ class CgSnapshotNotFound(NotFound):

class InvalidCgSnapshot(Invalid):
    message = _("Invalid CgSnapshot: %(reason)s")


# Hitachi Block Storage Driver
class HBSDError(CinderException):
    message = _("HBSD error occurs.")


class HBSDCmdError(HBSDError):

    def __init__(self, message=None, ret=None, err=None):
        self.ret = ret
        self.stderr = err

        super(HBSDCmdError, self).__init__(message=message)


class HBSDBusy(HBSDError):
    message = "Device or resource is busy."


class HBSDNotFound(NotFound):
    message = _("Storage resource could not be found.")

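A minimal usage sketch for the new exception (not part of the patch): HBSDCmdError carries a failed CLI call's exit code and stderr, matching the __init__ above. The return code and stderr text below are made up for illustration.

    import logging

    from cinder import exception

    LOG = logging.getLogger(__name__)

    ret, stderr = 1, "SSB=0x2E22,0x0001"
    try:
        if ret != 0:
            raise exception.HBSDCmdError(message="raidcom command failed.",
                                         ret=ret, err=stderr)
    except exception.HBSDCmdError as ex:
        # __init__ stores the exit code as ex.ret and the stderr text
        # as ex.stderr for callers to inspect.
        LOG.warning("raidcom failed: ret=%s, stderr=%s", ex.ret, ex.stderr)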
cinder/tests/test_hitachi_hbsd_horcm_fc.py (new file, 670 lines)
@@ -0,0 +1,670 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Self test for Hitachi Block Storage Driver
"""

import mock

from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
from cinder.volume.drivers.hitachi import hbsd_horcm


def _exec_raidcom(*args, **kargs):
    return HBSDHORCMFCDriverTest.horcm_vals.get(args)


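# Editorial note (comments only, added for clarity; the stub itself is
# unchanged): _exec_raidcom looks up the exact (command, arguments) tuple the
# driver would pass to exec_raidcom in HBSDHORCMFCDriverTest.horcm_vals and
# returns the recorded [return_code, stdout, stderr] triple; unknown command
# tuples fall through to None.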
class HBSDHORCMFCDriverTest(test.TestCase):
|
||||
"""Test HBSDHORCMFCDriver."""
|
||||
|
||||
raidqry_result = "DUMMY\n\
|
||||
Ver&Rev: 01-31-03/06"
|
||||
|
||||
raidcom_get_host_grp_result = "DUMMY\n\
|
||||
CL1-A 0 HBSD-127.0.0.1 None -\n\
|
||||
CL1-A 1 - None -"
|
||||
|
||||
raidcom_get_result = "LDEV : 0\n\
|
||||
VOL_TYPE : OPEN-V-CVS\n\
|
||||
LDEV : 1\n\
|
||||
VOL_TYPE : NOT DEFINED"
|
||||
|
||||
raidcom_get_result2 = "DUMMY\n\
|
||||
LDEV : 1\n\
|
||||
DUMMY\n\
|
||||
DUMMY\n\
|
||||
VOL_TYPE : OPEN-V-CVS"
|
||||
|
||||
raidcom_get_result3 = "Serial# : 210944\n\
|
||||
LDEV : 0\n\
|
||||
SL : 0\n\
|
||||
CL : 0\n\
|
||||
VOL_TYPE : NOT DEFINED\n\
|
||||
VOL_Capacity(BLK) : 2098560\n\
|
||||
NUM_LDEV : 1\n\
|
||||
LDEVs : 0\n\
|
||||
NUM_PORT : 3\n\
|
||||
PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\
|
||||
F_POOLID : NONE\n\
|
||||
VOL_ATTR : CVS\n\
|
||||
RAID_LEVEL : RAID5\n\
|
||||
RAID_TYPE : 3D+1P\n\
|
||||
NUM_GROUP : 1\n\
|
||||
RAID_GROUPs : 01-01\n\
|
||||
DRIVE_TYPE : DKR5C-J600SS\n\
|
||||
DRIVE_Capa : 1143358736\n\
|
||||
LDEV_NAMING : test\n\
|
||||
STS : NML\n\
|
||||
OPE_TYPE : NONE\n\
|
||||
OPE_RATE : 100\n\
|
||||
MP# : 0\n\
|
||||
SSID : 0004"
|
||||
|
||||
raidcom_get_command_status_result = "HANDLE SSB1 SSB2 ERR_CNT\
|
||||
Serial# Description\n\
|
||||
00d4 - - 0 210944 -"
|
||||
|
||||
raidcom_get_result4 = "Serial# : 210944\n\
|
||||
LDEV : 0\n\
|
||||
SL : 0\n\
|
||||
CL : 0\n\
|
||||
VOL_TYPE : DEFINED\n\
|
||||
VOL_Capacity(BLK) : 2098560\n\
|
||||
NUM_LDEV : 1\n\
|
||||
LDEVs : 0\n\
|
||||
NUM_PORT : 3\n\
|
||||
PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\
|
||||
F_POOLID : NONE\n\
|
||||
VOL_ATTR : CVS\n\
|
||||
RAID_LEVEL : RAID5\n\
|
||||
RAID_TYPE : 3D+1P\n\
|
||||
NUM_GROUP : 1\n\
|
||||
RAID_GROUPs : 01-01\n\
|
||||
DRIVE_TYPE : DKR5C-J600SS\n\
|
||||
DRIVE_Capa : 1143358736\n\
|
||||
LDEV_NAMING : test\n\
|
||||
STS : NML\n\
|
||||
OPE_TYPE : NONE\n\
|
||||
OPE_RATE : 100\n\
|
||||
MP# : 0\n\
|
||||
SSID : 0004"
|
||||
|
||||
raidcom_get_copy_grp_result = "DUMMY\n\
|
||||
HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31P - - None\n\
|
||||
HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31S - - None"
|
||||
|
||||
raidcom_get_device_grp_result1 = "DUMMY\n\
|
||||
HBSD-127.0.0.1None1A31P HBSD-ldev-0-2 0 None"
|
||||
|
||||
raidcom_get_device_grp_result2 = "DUMMY\n\
|
||||
HBSD-127.0.0.1None1A31S HBSD-ldev-0-2 2 None"
|
||||
|
||||
raidcom_get_snapshot_result = "DUMMY\n\
|
||||
HBSD-sanp P-VOL PSUS None 0 3 3 18 100 G--- 53ee291f\n\
|
||||
HBSD-sanp P-VOL PSUS None 0 4 4 18 100 G--- 53ee291f"
|
||||
|
||||
raidcom_dp_pool_result = "DUMMY \n\
|
||||
030 POLN 0 6006 6006 75 80 1 14860 32 167477"
|
||||
|
||||
raidcom_port_result = "DUMMY\n\
|
||||
CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 None 50060E801053C2E0 -"
|
||||
|
||||
raidcom_port_result2 = "DUMMY\n\
|
||||
CL1-A 12345678912345aa None -\n\
|
||||
CL1-A 12345678912345bb None -"
|
||||
|
||||
raidcom_host_grp_result = "DUMMY\n\
|
||||
CL1-A 0 HBSD-127.0.0.1 None LINUX/IRIX"
|
||||
|
||||
raidcom_hba_wwn_result = "DUMMY\n\
|
||||
CL1-A 0 HBSD-127.0.0.1 12345678912345aa None -"
|
||||
|
||||
raidcom_get_lun_result = "DUMMY\n\
|
||||
CL1-A 0 LINUX/IRIX 254 1 5 - None"
|
||||
|
||||
pairdisplay_result = "DUMMY\n\
|
||||
HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 L CL1-A-0 0 0 0 None 0 P-VOL PSUS None 2\
|
||||
-\n\
|
||||
HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 R CL1-A-0 0 0 0 None 2 S-VOL SSUS - 0 -"
|
||||
|
||||
pairdisplay_result2 = "DUMMY\n\
|
||||
HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 L CL1-A-1 0 0 0 None 1 P-VOL PAIR None 1\
|
||||
-\n\
|
||||
HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 R CL1-A-1 0 0 0 None 1 S-VOL PAIR - 1 -"
|
||||
|
||||
horcm_vals = {
|
||||
('raidqry', u'-h'):
|
||||
[0, "%s" % raidqry_result, ""],
|
||||
('raidcom', '-login user pasword'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'get host_grp -port CL1-A -key host_grp'):
|
||||
[0, "%s" % raidcom_get_host_grp_result, ""],
|
||||
('raidcom', u'add host_grp -port CL1-A-1 -host_grp_name HBSD-pair00'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
u'add host_grp -port CL1-A-1 -host_grp_name HBSD-127.0.0.2'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'delete host_grp -port CL1-A-1 HBSD-127.0.0.2'):
|
||||
[1, "", ""],
|
||||
('raidcom', 'get ldev -ldev_id 0 -cnt 2'):
|
||||
[0, "%s" % raidcom_get_result, ""],
|
||||
('raidcom', 'lock resource'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
'add ldev -pool 30 -ldev_id 1 -capacity 128G -emulation OPEN-V'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
'add ldev -pool 30 -ldev_id 1 -capacity 256G -emulation OPEN-V'):
|
||||
[1, "", "SSB=0x2E22,0x0001"],
|
||||
('raidcom', 'get command_status'):
|
||||
[0, "%s" % raidcom_get_command_status_result, ""],
|
||||
('raidcom', 'get ldev -ldev_id 1'):
|
||||
[0, "%s" % raidcom_get_result2, ""],
|
||||
('raidcom', 'get ldev -ldev_id 1 -check_status NML -time 120'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get snapshot -ldev_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get snapshot -ldev_id 1'):
|
||||
[0, "%s" % raidcom_get_snapshot_result, ""],
|
||||
('raidcom', 'get snapshot -ldev_id 2'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get snapshot -ldev_id 3'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get copy_grp'):
|
||||
[0, "%s" % raidcom_get_copy_grp_result, ""],
|
||||
('raidcom', 'delete ldev -ldev_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'delete ldev -ldev_id 1'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'delete ldev -ldev_id 2'):
|
||||
[1, "", "error"],
|
||||
('raidcom', 'delete ldev -ldev_id 3'):
|
||||
[1, "", "SSB=0x2E20,0x0000"],
|
||||
('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30P'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30S'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31P'):
|
||||
[0, "%s" % raidcom_get_device_grp_result1, ""],
|
||||
('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31S'):
|
||||
[0, "%s" % raidcom_get_device_grp_result2, ""],
|
||||
('pairdisplay', '-g HBSD-127.0.0.1None1A30 -CLI'):
|
||||
[0, "", ""],
|
||||
('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -CLI'):
|
||||
[0, "", ""],
|
||||
('pairdisplay', '-g HBSD-127.0.0.1None1A31 -CLI'):
|
||||
[0, "%s" % pairdisplay_result, ""],
|
||||
('pairdisplay', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -CLI'):
|
||||
[0, "%s" % pairdisplay_result, ""],
|
||||
('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -CLI'):
|
||||
[0, "%s" % pairdisplay_result2, ""],
|
||||
('raidcom',
|
||||
'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \
|
||||
HBSD-ldev-0-1 -ldev_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \
|
||||
HBSD-ldev-0-1 -ldev_id 1'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \
|
||||
HBSD-ldev-1-1 -ldev_id 1'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \
|
||||
HBSD-ldev-1-1 -ldev_id 1'):
|
||||
[0, "", ""],
|
||||
('raidcom',
|
||||
'add copy_grp -copy_grp_name HBSD-127.0.0.1None1A30 \
|
||||
HBSD-127.0.0.1None1A30P HBSD-127.0.0.1None1A30S -mirror_id 0'):
|
||||
[0, "", ""],
|
||||
('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 \
|
||||
-split -fq quick -c 3 -vl'):
|
||||
[0, "", ""],
|
||||
('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 \
|
||||
-split -fq quick -c 3 -vl'):
|
||||
[0, "", ""],
|
||||
('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowait'):
|
||||
[4, "", ""],
|
||||
('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowaits'):
|
||||
[4, "", ""],
|
||||
('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowait'):
|
||||
[1, "", ""],
|
||||
('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowaits'):
|
||||
[1, "", ""],
|
||||
('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowait'):
|
||||
[4, "", ""],
|
||||
('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowaits'):
|
||||
[200, "", ""],
|
||||
('pairsplit', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -S'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'extend ldev -ldev_id 0 -capacity 128G'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'get dp_pool'):
|
||||
[0, "%s" % raidcom_dp_pool_result, ""],
|
||||
('raidcom', 'get port'):
|
||||
[0, "%s" % raidcom_port_result, ""],
|
||||
('raidcom', 'get port -port CL1-A'):
|
||||
[0, "%s" % raidcom_port_result2, ""],
|
||||
('raidcom', 'get host_grp -port CL1-A'):
|
||||
[0, "%s" % raidcom_host_grp_result, ""],
|
||||
('raidcom', 'get hba_wwn -port CL1-A-0'):
|
||||
[0, "%s" % raidcom_hba_wwn_result, ""],
|
||||
('raidcom', 'get hba_wwn -port CL1-A-1'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'add hba_wwn -port CL1-A-0 -hba_wwn 12345678912345bb'):
|
||||
[0, "", ""],
|
||||
('raidcom', 'add hba_wwn -port CL1-A-1 -hba_wwn 12345678912345bb'):
|
||||
[1, "", ""],
|
||||
('raidcom', u'get lun -port CL1-A-0'):
|
||||
[0, "%s" % raidcom_get_lun_result, ""],
|
||||
('raidcom', u'get lun -port CL1-A-1'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'add lun -port CL1-A-0 -ldev_id 0 -lun_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'add lun -port CL1-A-0 -ldev_id 1 -lun_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'add lun -port CL1-A-1 -ldev_id 0 -lun_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'add lun -port CL1-A-1 -ldev_id 1 -lun_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'delete lun -port CL1-A-0 -ldev_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'delete lun -port CL1-A-0 -ldev_id 1'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'delete lun -port CL1-A-1 -ldev_id 0'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'delete lun -port CL1-A-1 -ldev_id 2'):
|
||||
[0, "", ""],
|
||||
('raidcom', u'delete lun -port CL1-A-1 -ldev_id 1'):
|
||||
[1, "", ""]}
|
||||
|
||||
# The following information is passed to the tests when creating a volume
|
||||
|
||||
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
|
||||
'provider_location': '0', 'name': 'test',
|
||||
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
|
||||
|
||||
test_volume = {'name': 'test_volume', 'size': 128,
|
||||
'id': 'test-volume',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error = {'name': 'test_volume', 'size': 256,
|
||||
'id': 'test-volume',
|
||||
'status': 'creating'}
|
||||
|
||||
test_volume_error2 = {'name': 'test_volume2', 'size': 128,
|
||||
'id': 'test-volume2',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
|
||||
'id': 'test-volume3',
|
||||
'volume_metadata': [{'key': 'type',
|
||||
'value': 'V-VOL'}],
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
|
||||
'id': 'test-volume2',
|
||||
'provider_location': '3', 'status': 'available'}
|
||||
|
||||
test_volume_error5 = {'name': 'test_volume', 'size': 256,
|
||||
'id': 'test-volume',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_snapshot = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
|
||||
'provider_location': '0', 'status': 'available'}
|
||||
|
||||
test_snapshot_error = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0',
|
||||
'volume': _VOLUME,
|
||||
'provider_location': '2', 'status': 'available'}
|
||||
|
||||
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0',
|
||||
'volume': _VOLUME,
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(HBSDHORCMFCDriverTest, self).__init__(*args, **kwargs)
|
||||
|
||||
@mock.patch.object(utils, 'brick_get_connector_properties',
|
||||
return_value={'ip': '127.0.0.1',
|
||||
'wwpns': ['12345678912345aa']})
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(utils, 'execute',
|
||||
return_value=['%s' % raidqry_result, ''])
|
||||
def setUp(self, arg1, arg2, arg3, arg4):
|
||||
super(HBSDHORCMFCDriverTest, self).setUp()
|
||||
self._setup_config()
|
||||
self._setup_driver()
|
||||
self.driver.check_param()
|
||||
self.driver.common.pair_flock = hbsd_basiclib.NopLock()
|
||||
self.driver.common.command.horcmgr_flock = hbsd_basiclib.NopLock()
|
||||
self.driver.common.create_lock_file()
|
||||
self.driver.common.command.connect_storage()
|
||||
self.driver.max_hostgroups = \
|
||||
self.driver.common.command.get_max_hostgroups()
|
||||
self.driver.add_hostgroup()
|
||||
self.driver.output_param_to_log()
|
||||
self.driver.do_setup_status.set()
|
||||
|
||||
def _setup_config(self):
|
||||
self.configuration = mock.Mock(conf.Configuration)
|
||||
self.configuration.hitachi_pool_id = 30
|
||||
self.configuration.hitachi_thin_pool_id = 31
|
||||
self.configuration.hitachi_target_ports = "CL1-A"
|
||||
self.configuration.hitachi_debug_level = 0
|
||||
self.configuration.hitachi_serial_number = "None"
|
||||
self.configuration.hitachi_unit_name = None
|
||||
self.configuration.hitachi_group_request = True
|
||||
self.configuration.hitachi_group_range = None
|
||||
self.configuration.hitachi_zoning_request = False
|
||||
self.configuration.config_group = "None"
|
||||
self.configuration.hitachi_ldev_range = "0-1"
|
||||
self.configuration.hitachi_default_copy_method = 'FULL'
|
||||
self.configuration.hitachi_copy_check_interval = 1
|
||||
self.configuration.hitachi_async_copy_check_interval = 1
|
||||
self.configuration.hitachi_copy_speed = 3
|
||||
self.configuration.hitachi_horcm_add_conf = True
|
||||
self.configuration.hitachi_horcm_numbers = "409,419"
|
||||
self.configuration.hitachi_horcm_user = "user"
|
||||
self.configuration.hitachi_horcm_password = "pasword"
|
||||
|
||||
def _setup_driver(self):
|
||||
self.driver = hbsd_fc.HBSDFCDriver(
|
||||
configuration=self.configuration)
|
||||
context = None
|
||||
db = None
|
||||
self.driver.common = hbsd_common.HBSDCommon(
|
||||
self.configuration, self.driver, context, db)
|
||||
|
||||
# API test cases
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_create_volume(self, arg1, arg2, arg3):
|
||||
"""test create_volume."""
|
||||
ret = self.driver.create_volume(self._VOLUME)
|
||||
vol = self._VOLUME.copy()
|
||||
vol['provider_location'] = ret['provider_location']
|
||||
self.assertEqual(vol['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_create_volume_error(self, arg1, arg2, arg3):
|
||||
"""test create_volume."""
|
||||
self.assertRaises(exception.HBSDError, self.driver.create_volume,
|
||||
self.test_volume_error)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_get_volume_stats(self, arg1, arg2):
|
||||
"""test get_volume_stats."""
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
self.assertEqual(stats['vendor_name'], 'Hitachi')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_get_volume_stats_error(self, arg1, arg2):
|
||||
"""test get_volume_stats."""
|
||||
self.configuration.hitachi_pool_id = 29
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
self.assertEqual(stats, {})
|
||||
self.configuration.hitachi_pool_id = 30
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_extend_volume(self, arg1, arg2, arg3, arg4):
|
||||
"""test extend_volume."""
|
||||
self.driver.extend_volume(self._VOLUME, 256)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_extend_volume_error(self, arg1, arg2, arg3, arg4):
|
||||
"""test extend_volume."""
|
||||
self.assertRaises(exception.HBSDError, self.driver.extend_volume,
|
||||
self.test_volume_error3, 256)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_delete_volume(self, arg1, arg2, arg3, arg4):
|
||||
"""test delete_volume."""
|
||||
self.driver.delete_volume(self._VOLUME)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_delete_volume_error(self, arg1, arg2, arg3, arg4):
|
||||
"""test delete_volume."""
|
||||
self.driver.delete_volume(self.test_volume_error4)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
|
||||
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
|
||||
"""test create_snapshot."""
|
||||
ret = self.driver.create_volume(self._VOLUME)
|
||||
ret = self.driver.create_snapshot(self.test_snapshot)
|
||||
self.assertEqual(ret['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
|
||||
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5, arg6,
|
||||
arg7):
|
||||
"""test create_snapshot."""
|
||||
ret = self.driver.create_volume(self.test_volume)
|
||||
ret = self.driver.create_snapshot(self.test_snapshot_error)
|
||||
self.assertEqual(ret['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_delete_snapshot(self, arg1, arg2, arg3, arg4):
|
||||
"""test delete_snapshot."""
|
||||
self.driver.delete_snapshot(self.test_snapshot)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_delete_snapshot_error(self, arg1, arg2, arg3, arg4):
|
||||
"""test delete_snapshot."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.delete_snapshot,
|
||||
self.test_snapshot_error)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_create_volume_from_snapshot(self, arg1, arg2, arg3, arg4, arg5):
|
||||
"""test create_volume_from_snapshot."""
|
||||
vol = self.driver.create_volume_from_snapshot(self.test_volume,
|
||||
self.test_snapshot)
|
||||
self.assertIsNotNone(vol)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3, arg4,
|
||||
arg5):
|
||||
"""test create_volume_from_snapshot."""
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.create_volume_from_snapshot,
|
||||
self.test_volume_error5, self.test_snapshot_error2)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4, arg5, arg6):
|
||||
"""test create_cloned_volume."""
|
||||
vol = self.driver.create_cloned_volume(self.test_volume,
|
||||
self._VOLUME)
|
||||
self.assertEqual(vol['provider_location'], '1')
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
|
||||
return_value=[0, "", ""])
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
|
||||
return_value=[0, "", ""])
|
||||
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4, arg5,
|
||||
arg6):
|
||||
"""test create_cloned_volume."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.create_cloned_volume,
|
||||
self.test_volume, self.test_volume_error2)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_initialize_connection(self, arg1, arg2):
|
||||
"""test initialize connection."""
|
||||
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
|
||||
'ip': '127.0.0.1'}
|
||||
rc = self.driver.initialize_connection(self._VOLUME, connector)
|
||||
self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
|
||||
self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
|
||||
self.assertEqual(rc['data']['target_lun'], 0)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_initialize_connection_error(self, arg1, arg2):
|
||||
"""test initialize connection."""
|
||||
connector = {'wwpns': ['12345678912345bb'], 'ip': '127.0.0.2'}
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.initialize_connection,
|
||||
self._VOLUME, connector)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_terminate_connection(self, arg1, arg2):
|
||||
"""test terminate connection."""
|
||||
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
|
||||
'ip': '127.0.0.1'}
|
||||
rc = self.driver.terminate_connection(self._VOLUME, connector)
|
||||
self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
|
||||
self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
|
||||
side_effect=_exec_raidcom)
|
||||
def test_terminate_connection_error(self, arg1, arg2):
|
||||
"""test terminate connection."""
|
||||
connector = {'ip': '127.0.0.1'}
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.terminate_connection,
|
||||
self._VOLUME, connector)
|
||||
return
|
cinder/tests/test_hitachi_hbsd_snm2_fc.py (new file, 379 lines)
@@ -0,0 +1,379 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Self test for Hitachi Block Storage Driver
"""

import mock

from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
from cinder.volume.drivers.hitachi import hbsd_snm2


def _exec_hsnm(*args, **kargs):
    return HBSDSNM2FCDriverTest.hsnm_vals.get(args)


class HBSDSNM2FCDriverTest(test.TestCase):
|
||||
"""Test HBSDSNM2FCDriver."""
|
||||
|
||||
audppool_result = " DP RAID \
|
||||
Current Utilization Current Over Replication\
|
||||
Available Current Replication Rotational \
|
||||
\
|
||||
Stripe \
|
||||
Needing Preparation\n\
|
||||
Pool Tier Mode Level Total Capacity Consumed Capacity \
|
||||
Percent Provisioning Percent Capacity \
|
||||
Utilization Percent Type Speed Encryption Status \
|
||||
\
|
||||
Reconstruction Progress Size Capacity\n\
|
||||
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
|
||||
1% 24835% 532.0 GB \
|
||||
1% SAS 10000rpm N/A Normal \
|
||||
N/A \
|
||||
256KB 0.0 GB"
|
||||
|
||||
aureplicationlocal_result = "Pair Name LUN Pair \
|
||||
LUN Status Copy Type Group \
|
||||
Point-in-Time MU Number\n\
|
||||
0 10 0 Split( 99%) \
|
||||
ShadowImage ---:Ungrouped N/A\
|
||||
"
|
||||
|
||||
auluref_result = " Stripe RAID DP Tier \
|
||||
RAID Rotational Number\n\
|
||||
LU Capacity Size Group Pool Mode Level Type\
|
||||
Speed of Paths Status\n\
|
||||
0 2097152 blocks 256KB 0 0 Enable 5( 3D+1P) SAS"
|
||||
|
||||
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
|
||||
Name Port Name Host Group\n\
|
||||
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
|
||||
Assigned WWN\n Name Port Name \
|
||||
Host Group\n abcdefg 10000000C97BCE7A \
|
||||
001:HBSD-01"
|
||||
|
||||
aufibre1_result = "Port Information\n\
|
||||
Port Address\n CTL Port\
|
||||
Node Name Port Name Setting Current\n 0 0 \
|
||||
50060E801053C2E0 50060E801053C2E0 0000EF 272700"
|
||||
|
||||
auhgmap_result = "Mapping Mode = ON\nPort Group \
|
||||
H-LUN LUN\n 00 001:HBSD-00 0 1000"
|
||||
|
||||
hsnm_vals = {
|
||||
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
|
||||
('aureplicationlocal',
|
||||
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
|
||||
[0, "", ""],
|
||||
('aureplicationlocal',
|
||||
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
|
||||
[1, "", ""],
|
||||
('aureplicationlocal', '-unit None -refer -pvol 1'):
|
||||
[0, "%s" % aureplicationlocal_result, ""],
|
||||
('aureplicationlocal', '-unit None -refer -pvol 3'):
|
||||
[1, "", "DMEC002015"],
|
||||
('aureplicationlocal', '-unit None -refer -svol 3'):
|
||||
[1, "", "DMEC002015"],
|
||||
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
|
||||
[0, "", ""],
|
||||
('auluchgsize', '-unit None -lu 1 -size 256g'):
|
||||
[0, "", ""],
|
||||
('auludel', '-unit None -lu 1 -f'): [0, 0, ""],
|
||||
('auludel', '-unit None -lu 3 -f'): [1, 0, ""],
|
||||
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""],
|
||||
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
|
||||
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
|
||||
('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""],
|
||||
('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""],
|
||||
('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""],
|
||||
('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]}
|
||||
|
||||
# The following information is passed to the tests when creating a volume
|
||||
|
||||
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
|
||||
'provider_location': '1', 'name': 'test',
|
||||
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
|
||||
|
||||
test_volume = {'name': 'test_volume', 'size': 128,
|
||||
'id': 'test-volume-0',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error = {'name': 'test_volume_error', 'size': 256,
|
||||
'id': 'test-volume-error',
|
||||
'provider_location': '3', 'status': 'available'}
|
||||
|
||||
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
|
||||
'id': 'test-volume-error',
|
||||
'provider_location': None, 'status': 'available'}
|
||||
|
||||
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
|
||||
'id': 'test-volume-error',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
|
||||
'id': 'test-volume3',
|
||||
'volume_metadata': [{'key': 'type',
|
||||
'value': 'V-VOL'}],
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
|
||||
'id': 'test-volume2',
|
||||
'provider_location': '3', 'status': 'available'}
|
||||
|
||||
test_snapshot = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0',
|
||||
'volume': test_volume_error,
|
||||
'provider_location': None, 'status': 'available'}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs)
|
||||
|
||||
def setUp(self):
|
||||
super(HBSDSNM2FCDriverTest, self).setUp()
|
||||
self._setup_config()
|
||||
self._setup_driver()
|
||||
|
||||
def _setup_config(self):
|
||||
self.configuration = mock.Mock(conf.Configuration)
|
||||
self.configuration.hitachi_pool_id = 30
|
||||
self.configuration.hitachi_target_ports = "00"
|
||||
self.configuration.hitachi_debug_level = 0
|
||||
self.configuration.hitachi_serial_number = "None"
|
||||
self.configuration.hitachi_unit_name = "None"
|
||||
self.configuration.hitachi_group_request = False
|
||||
self.configuration.hitachi_zoning_request = False
|
||||
self.configuration.config_group = "None"
|
||||
self.configuration.hitachi_ldev_range = [0, 100]
|
||||
self.configuration.hitachi_default_copy_method = 'SI'
|
||||
self.configuration.hitachi_copy_check_interval = 1
|
||||
self.configuration.hitachi_copy_speed = 3
|
||||
|
||||
def _setup_driver(self):
|
||||
self.driver = hbsd_fc.HBSDFCDriver(
|
||||
configuration=self.configuration)
|
||||
context = None
|
||||
db = None
|
||||
self.driver.common = hbsd_common.HBSDCommon(
|
||||
self.configuration, self.driver, context, db)
|
||||
self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
|
||||
self.driver.common.pair_flock = \
|
||||
self.driver.common.command.set_pair_flock()
|
||||
self.driver.do_setup_status.set()
|
||||
|
||||
# API test cases
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume(self, arg1, arg2, arg3):
|
||||
"""test create_volume."""
|
||||
ret = self.driver.create_volume(self._VOLUME)
|
||||
vol = self._VOLUME.copy()
|
||||
vol['provider_location'] = ret['provider_location']
|
||||
self.assertEqual(vol['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume_error(self, arg1, arg2, arg3):
|
||||
"""test create_volume."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.create_volume,
|
||||
self.test_volume_error)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_get_volume_stats(self, arg1, arg2):
|
||||
"""test get_volume_stats."""
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
self.assertEqual(stats['vendor_name'], 'Hitachi')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_get_volume_stats_error(self, arg1, arg2):
|
||||
"""test get_volume_stats."""
|
||||
self.configuration.hitachi_pool_id = 29
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
self.assertEqual(stats, {})
|
||||
self.configuration.hitachi_pool_id = 30
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_extend_volume(self, arg1, arg2):
|
||||
"""test extend_volume."""
|
||||
self.driver.extend_volume(self._VOLUME, 256)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_extend_volume_error(self, arg1, arg2):
|
||||
"""test extend_volume."""
|
||||
self.assertRaises(exception.HBSDError, self.driver.extend_volume,
|
||||
self.test_volume_error3, 256)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_volume(self, arg1, arg2):
|
||||
"""test delete_volume."""
|
||||
self.driver.delete_volume(self._VOLUME)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_volume_error(self, arg1, arg2):
|
||||
"""test delete_volume."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.delete_volume,
|
||||
self.test_volume_error4)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
|
||||
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
|
||||
"""test create_snapshot."""
|
||||
ret = self.driver.create_volume(self._VOLUME)
|
||||
ret = self.driver.create_snapshot(self.test_snapshot)
|
||||
self.assertEqual(ret['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
|
||||
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=test_volume_error)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
|
||||
"""test create_snapshot."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.create_snapshot,
|
||||
self.test_snapshot_error2)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_snapshot(self, arg1, arg2):
|
||||
"""test delete_snapshot."""
|
||||
self.driver.delete_snapshot(self.test_snapshot)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_snapshot_error(self, arg1, arg2):
|
||||
"""test delete_snapshot."""
|
||||
self.driver.delete_snapshot(self.test_snapshot_error2)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
|
||||
"""test create_volume_from_snapshot."""
|
||||
vol = self.driver.create_volume_from_snapshot(self._VOLUME,
|
||||
self.test_snapshot)
|
||||
self.assertIsNotNone(vol)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
|
||||
"""test create_volume_from_snapshot."""
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.create_volume_from_snapshot,
|
||||
self.test_volume_error2, self.test_snapshot)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
|
||||
"""test create_cloned_volume."""
|
||||
vol = self.driver.create_cloned_volume(self._VOLUME,
|
||||
self.test_volume)
|
||||
self.assertIsNotNone(vol)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=test_volume_error1)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
|
||||
"""test create_cloned_volume."""
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.create_cloned_volume,
|
||||
self._VOLUME, self.test_volume_error1)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_initialize_connection(self, arg1, arg2):
|
||||
"""test initialize connection."""
|
||||
connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
|
||||
rc = self.driver.initialize_connection(self._VOLUME, connector)
|
||||
self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
|
||||
self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
|
||||
self.assertEqual(rc['data']['target_lun'], 1)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_initialize_connection_error(self, arg1, arg2):
|
||||
"""test initialize connection."""
|
||||
connector = {'wwpns': 'x', 'ip': '0xc0a80100'}
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.initialize_connection,
|
||||
self._VOLUME, connector)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_terminate_connection(self, arg1):
|
||||
"""test terminate connection."""
|
||||
connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
|
||||
rc = self.driver.terminate_connection(self._VOLUME, connector)
|
||||
self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
|
||||
self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_terminate_connection_error(self, arg1):
|
||||
"""test terminate connection."""
|
||||
connector = {'ip': '0xc0a80100'}
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.terminate_connection,
|
||||
self._VOLUME, connector)
|
||||
return
|
cinder/tests/test_hitachi_hbsd_snm2_iscsi.py (new file, 494 lines)
@@ -0,0 +1,494 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Self test for Hitachi Block Storage Driver
"""

import mock

from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
from cinder.volume.drivers.hitachi import hbsd_snm2


def _exec_hsnm(*args, **kargs):
    return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args)


def _exec_hsnm_init(*args, **kargs):
    return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args)


class HBSDSNM2ISCSIDriverTest(test.TestCase):
|
||||
"""Test HBSDSNM2ISCSIDriver."""
|
||||
|
||||
audppool_result = " DP RAID \
|
||||
Current Utilization Current Over Replication\
|
||||
Available Current Replication Rotational \
|
||||
\
|
||||
Stripe \
|
||||
Needing Preparation\n\
|
||||
Pool Tier Mode Level Total Capacity Consumed Capacity \
|
||||
Percent Provisioning Percent Capacity \
|
||||
Utilization Percent Type Speed Encryption Status \
|
||||
\
|
||||
Reconstruction Progress Size Capacity\n\
|
||||
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
|
||||
1% 24835% 532.0 GB \
|
||||
1% SAS 10000rpm N/A Normal \
|
||||
N/A \
|
||||
256KB 0.0 GB"
|
||||
|
||||
aureplicationlocal_result = "Pair Name LUN Pair \
|
||||
LUN Status Copy Type Group \
|
||||
Point-in-Time MU Number\n\
|
||||
0 10 0 Split( 99%) \
|
||||
ShadowImage ---:Ungrouped N/A\
|
||||
"
|
||||
|
||||
auluref_result = " Stripe RAID DP Tier \
|
||||
RAID Rotational Number\n\
|
||||
LU Capacity Size Group Pool Mode Level Type\
|
||||
Speed of Paths Status\n\
|
||||
0 2097152 blocks 256KB 0 0 Enable 5( 3D+1P) SAS"
|
||||
|
||||
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
|
||||
Name Port Name Host Group\n\
|
||||
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
|
||||
Assigned WWN\n Name Port Name \
|
||||
Host Group\n abcdefg 10000000C97BCE7A \
|
||||
001:HBSD-01"
|
||||
|
||||
autargetini_result = "Port 00 Target Security ON\n\
|
||||
Target Name \
|
||||
iSCSI Name\n\
|
||||
001:HBSD-01 \
|
||||
iqn"
|
||||
|
||||
autargetini_result2 = "Port 00 Target Security ON\n\
|
||||
Target Name \
|
||||
iSCSI Name"
|
||||
|
||||
autargetmap_result = "Mapping Mode = ON\n\
|
||||
Port Target H-LUN LUN\n\
|
||||
00 001:HBSD-01 0 1000"
|
||||
|
||||
auiscsi_result = "Port 00\n\
|
||||
Port Number : 3260\n\
|
||||
Keep Alive Timer[sec.] : 60\n\
|
||||
MTU : 1500\n\
|
||||
Transfer Rate : 1Gbps\n\
|
||||
Link Status : Link Up\n\
|
||||
Ether Address : 00:00:87:33:D1:3E\n\
|
||||
IPv4\n\
|
||||
IPv4 Address : 192.168.0.1\n\
|
||||
IPv4 Subnet Mask : 255.255.252.0\n\
|
||||
IPv4 Default Gateway : 0.0.0.0\n\
|
||||
IPv6 Status : Disable\n\
|
||||
Connecting Hosts : 0\n\
|
||||
Result : Normal\n\
|
||||
VLAN Status : Disable\n\
|
||||
VLAN ID : N/A\n\
|
||||
Header Digest : Enable\n\
|
||||
Data Digest : Enable\n\
|
||||
Window Scale : Disable"
|
||||
|
||||
autargetdef_result = "Port 00\n\
|
||||
Authentication Mutual\n\
|
||||
Target Method CHAP Algorithm \
|
||||
Authentication\n\
|
||||
001:T000 None --- ---\n\
|
||||
User Name : ---\n\
|
||||
iSCSI Name : iqn-target"
|
||||
|
||||
hsnm_vals = {
|
||||
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
|
||||
('aureplicationlocal',
|
||||
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
|
||||
[0, "", ""],
|
||||
('aureplicationlocal',
|
||||
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
|
||||
[1, "", ""],
|
||||
('aureplicationlocal', '-unit None -refer -pvol 1'):
|
||||
[0, "%s" % aureplicationlocal_result, ""],
|
||||
('aureplicationlocal', '-unit None -refer -pvol 3'):
|
||||
[1, "", "DMEC002015"],
|
||||
('aureplicationlocal', '-unit None -refer -svol 3'):
|
||||
[1, "", "DMEC002015"],
|
||||
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
|
||||
[0, "", ""],
|
||||
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'):
|
||||
[1, "", ""],
|
||||
('auluchgsize', '-unit None -lu 1 -size 256g'):
|
||||
[0, "", ""],
|
||||
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
|
||||
('auludel', '-unit None -lu 3 -f'): [1, "", ""],
|
||||
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
|
||||
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
|
||||
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
|
||||
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
|
||||
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
|
||||
('autargetini', '-unit None -refer'):
|
||||
[0, "%s" % autargetini_result, ""],
|
||||
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
|
||||
[0, "", ""],
|
||||
('autargetmap', '-unit None -refer'):
|
||||
[0, "%s" % autargetmap_result, ""],
|
||||
('autargetdef',
|
||||
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
|
||||
-authmethod None'):
|
||||
[0, "", ""],
|
||||
('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \
|
||||
-iname iqnX.target -authmethod None'):
|
||||
[1, "", ""],
|
||||
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
|
||||
-ReportFullPortalList enable'):
|
||||
[0, "", ""],
|
||||
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
|
||||
('autargetdef', '-unit None -refer'):
|
||||
[0, "%s" % autargetdef_result, ""]}
|
||||
|
||||
hsnm_vals_init = {
|
||||
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
|
||||
('aureplicationlocal',
|
||||
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
|
||||
[0, 0, ""],
|
||||
('aureplicationlocal', '-unit None -refer -pvol 1'):
|
||||
[0, "%s" % aureplicationlocal_result, ""],
|
||||
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
|
||||
[0, 0, ""],
|
||||
('auluchgsize', '-unit None -lu 1 -size 256g'):
|
||||
[0, 0, ""],
|
||||
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
|
||||
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
|
||||
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
|
||||
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
|
||||
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
|
||||
('autargetini', '-unit None -refer'):
|
||||
[0, "%s" % autargetini_result2, ""],
|
||||
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
|
||||
[0, "", ""],
|
||||
('autargetmap', '-unit None -refer'):
|
||||
[0, "%s" % autargetmap_result, ""],
|
||||
('autargetdef',
|
||||
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
|
||||
-authmethod None'):
|
||||
[0, "", ""],
|
||||
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
|
||||
-ReportFullPortalList enable'):
|
||||
[0, "", ""],
|
||||
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
|
||||
('autargetdef', '-unit None -refer'):
|
||||
[0, "%s" % autargetdef_result, ""],
|
||||
('auman', '-help'):
|
||||
[0, "Version 27.50", ""]}
|
||||
|
||||
# The following information is passed to the tests when creating a volume
|
||||
|
||||
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
|
||||
'provider_location': '1', 'name': 'test',
|
||||
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
|
||||
|
||||
test_volume = {'name': 'test_volume', 'size': 128,
|
||||
'id': 'test-volume-0',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error = {'name': 'test_volume_error', 'size': 256,
|
||||
'id': 'test-volume-error',
|
||||
'provider_location': '3', 'status': 'available'}
|
||||
|
||||
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
|
||||
'id': 'test-volume-error',
|
||||
'provider_location': None, 'status': 'available'}
|
||||
|
||||
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
|
||||
'id': 'test-volume-error',
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
|
||||
'id': 'test-volume3',
|
||||
'volume_metadata': [{'key': 'type',
|
||||
'value': 'V-VOL'}],
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
|
||||
'id': 'test-volume2',
|
||||
'provider_location': '3', 'status': 'available'}
|
||||
|
||||
test_snapshot = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
|
||||
'provider_location': '1', 'status': 'available'}
|
||||
|
||||
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
|
||||
'volume_size': 128, 'name': 'test-snap',
|
||||
'volume_id': 0, 'id': 'test-snap-0',
|
||||
'volume': test_volume_error,
|
||||
'provider_location': None, 'status': 'available'}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs)
|
||||
|
||||
@mock.patch.object(utils, 'brick_get_connector_properties',
|
||||
return_value={'ip': '0.0.0.0',
|
||||
'initiator': 'iqn'})
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
|
||||
side_effect=_exec_hsnm_init)
|
||||
@mock.patch.object(utils, 'execute',
|
||||
return_value=['', ''])
|
||||
def setUp(self, args1, arg2, arg3, arg4):
|
||||
super(HBSDSNM2ISCSIDriverTest, self).setUp()
|
||||
self._setup_config()
|
||||
self._setup_driver()
|
||||
self.driver.check_param()
|
||||
self.driver.common.create_lock_file()
|
||||
self.driver.common.command.connect_storage()
|
||||
self.driver.max_hostgroups = \
|
||||
self.driver.common.command.get_max_hostgroups()
|
||||
self.driver.add_hostgroup()
|
||||
self.driver.output_param_to_log()
|
||||
self.driver.do_setup_status.set()
|
||||
|
||||
def _setup_config(self):
|
||||
self.configuration = mock.Mock(conf.Configuration)
|
||||
self.configuration.hitachi_pool_id = 30
|
||||
self.configuration.hitachi_thin_pool_id = 31
|
||||
self.configuration.hitachi_target_ports = "00"
|
||||
self.configuration.hitachi_debug_level = 0
|
||||
self.configuration.hitachi_serial_number = None
|
||||
self.configuration.hitachi_unit_name = "None"
|
||||
self.configuration.hitachi_group_request = True
|
||||
self.configuration.hitachi_group_range = "0-1"
|
||||
self.configuration.config_group = "None"
|
||||
self.configuration.hitachi_ldev_range = "0-100"
|
||||
self.configuration.hitachi_default_copy_method = 'FULL'
|
||||
self.configuration.hitachi_copy_check_interval = 1
|
||||
self.configuration.hitachi_async_copy_check_interval = 1
|
||||
self.configuration.hitachi_copy_speed = 3
|
||||
self.configuration.hitachi_auth_method = None
|
||||
self.configuration.hitachi_auth_user = "HBSD-CHAP-user"
|
||||
self.configuration.hitachi_auth_password = "HBSD-CHAP-password"
|
||||
self.configuration.hitachi_add_chap_user = "False"
|
||||
|
||||
def _setup_driver(self):
|
||||
self.driver = hbsd_iscsi.HBSDISCSIDriver(
|
||||
configuration=self.configuration)
|
||||
context = None
|
||||
db = None
|
||||
self.driver.common = hbsd_common.HBSDCommon(
|
||||
self.configuration, self.driver, context, db)
|
||||
|
||||
# API test cases
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume(self, arg1, arg2, arg3):
|
||||
"""test create_volume."""
|
||||
ret = self.driver.create_volume(self._VOLUME)
|
||||
vol = self._VOLUME.copy()
|
||||
vol['provider_location'] = ret['provider_location']
|
||||
self.assertEqual(vol['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume_error(self, arg1, arg2, arg3):
|
||||
"""test create_volume."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.create_volume,
|
||||
self.test_volume_error)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_get_volume_stats(self, arg1, arg2):
|
||||
"""test get_volume_stats."""
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
self.assertEqual(stats['vendor_name'], 'Hitachi')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_get_volume_stats_error(self, arg1, arg2):
|
||||
"""test get_volume_stats."""
|
||||
self.configuration.hitachi_pool_id = 29
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
self.assertEqual(stats, {})
|
||||
self.configuration.hitachi_pool_id = 30
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_extend_volume(self, arg1, arg2):
|
||||
"""test extend_volume."""
|
||||
self.driver.extend_volume(self._VOLUME, 256)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_extend_volume_error(self, arg1, arg2):
|
||||
"""test extend_volume."""
|
||||
self.assertRaises(exception.HBSDError, self.driver.extend_volume,
|
||||
self.test_volume_error3, 256)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_volume(self, arg1, arg2):
|
||||
"""test delete_volume."""
|
||||
self.driver.delete_volume(self._VOLUME)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_volume_error(self, arg1, arg2):
|
||||
"""test delete_volume."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.delete_volume,
|
||||
self.test_volume_error4)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
|
||||
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
|
||||
"""test create_snapshot."""
|
||||
ret = self.driver.create_volume(self._VOLUME)
|
||||
ret = self.driver.create_snapshot(self.test_snapshot)
|
||||
self.assertEqual(ret['provider_location'], '1')
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
|
||||
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=test_volume_error)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
|
||||
"""test create_snapshot."""
|
||||
self.assertRaises(exception.HBSDCmdError,
|
||||
self.driver.create_snapshot,
|
||||
self.test_snapshot_error2)
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_snapshot(self, arg1, arg2):
|
||||
"""test delete_snapshot."""
|
||||
self.driver.delete_snapshot(self.test_snapshot)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_delete_snapshot_error(self, arg1, arg2):
|
||||
"""test delete_snapshot."""
|
||||
self.driver.delete_snapshot(self.test_snapshot_error2)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
|
||||
"""test create_volume_from_snapshot."""
|
||||
vol = self.driver.create_volume_from_snapshot(self._VOLUME,
|
||||
self.test_snapshot)
|
||||
self.assertIsNotNone(vol)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
|
||||
"""test create_volume_from_snapshot."""
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.create_volume_from_snapshot,
|
||||
self.test_volume_error2, self.test_snapshot)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=_VOLUME)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
|
||||
"""test create_cloned_volume."""
|
||||
vol = self.driver.create_cloned_volume(self._VOLUME,
|
||||
self.test_snapshot)
|
||||
self.assertIsNotNone(vol)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
|
||||
return_value={'dummy_volume_meta': 'meta'})
|
||||
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
|
||||
return_value=test_volume_error1)
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
|
||||
"""test create_cloned_volume."""
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.create_cloned_volume,
|
||||
self._VOLUME, self.test_volume_error1)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_initialize_connection(self, arg1, arg2):
|
||||
"""test initialize connection."""
|
||||
connector = {
|
||||
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
|
||||
'iqn'}
|
||||
rc = self.driver.initialize_connection(self._VOLUME, connector)
|
||||
self.assertEqual(rc['driver_volume_type'], 'iscsi')
|
||||
self.assertEqual(rc['data']['target_iqn'], 'iqn-target')
|
||||
self.assertEqual(rc['data']['target_lun'], 1)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_initialize_connection_error(self, arg1, arg2):
|
||||
"""test initialize connection."""
|
||||
connector = {
|
||||
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
|
||||
'iqnX'}
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.initialize_connection,
|
||||
self._VOLUME, connector)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_terminate_connection(self, arg1):
|
||||
"""test terminate connection."""
|
||||
connector = {
|
||||
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
|
||||
'iqn'}
|
||||
self.driver.terminate_connection(self._VOLUME, connector)
|
||||
return
|
||||
|
||||
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
|
||||
def test_terminate_connection_error(self, arg1):
|
||||
"""test terminate connection."""
|
||||
connector = {'ip': '0.0.0.0'}
|
||||
self.assertRaises(exception.HBSDError,
|
||||
self.driver.terminate_connection,
|
||||
self._VOLUME, connector)
|
||||
return
|
0
cinder/volume/drivers/hitachi/__init__.py
Normal file
265
cinder/volume/drivers/hitachi/hbsd_basiclib.py
Normal file
@ -0,0 +1,265 @@
|
||||
# Copyright (C) 2014, Hitachi, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import inspect
|
||||
import os
|
||||
import shlex
|
||||
|
||||
import six
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder.openstack.common import excutils
|
||||
from cinder.openstack.common import lockutils
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder.openstack.common import processutils as putils
|
||||
from cinder import utils
|
||||
|
||||
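# Copy-pair status codes used throughout the drivers: SMPL (no pair),
# COPY (copy in progress), PAIR (synchronized), PSUS (split),
# PSUE (suspended by error), UNKN (unknown).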
SMPL = 1
|
||||
COPY = 2
|
||||
PAIR = 3
|
||||
PSUS = 4
|
||||
PSUE = 5
|
||||
UNKN = 0xff
|
||||
|
||||
FULL = 'Full copy'
|
||||
THIN = 'Thin copy'
|
||||
|
||||
DEFAULT_TRY_RANGE = range(3)
|
||||
MAX_PROCESS_WAITTIME = 86400
|
||||
DEFAULT_PROCESS_WAITTIME = 900
|
||||
|
||||
GETSTORAGEARRAY_ONCE = 100
|
||||
|
||||
WARNING_ID = 300
|
||||
|
||||
DEFAULT_GROUP_RANGE = [0, 65535]
|
||||
|
||||
NAME_PREFIX = 'HBSD-'
|
||||
|
||||
LOCK_DIR = '/var/lock/hbsd/'
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
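# Driver messages are looked up by ID: IDs below WARNING_ID (300) are
# informational, IDs from 300 upward are warnings, and the 600-series
# messages are errors reported through output_err() below.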
HBSD_INFO_MSG = {
|
||||
1: _('The parameter of the storage backend. '
|
||||
'(config_group: %(config_group)s)'),
|
||||
3: _('The storage backend can be used. (config_group: %(config_group)s)'),
|
||||
}
|
||||
|
||||
HBSD_WARN_MSG = {
|
||||
301: _('A LUN (HLUN) was not found. (LDEV: %(ldev)s)'),
|
||||
302: _('Failed to specify a logical device for the volume '
|
||||
'%(volume_id)s to be unmapped.'),
|
||||
303: _('An iSCSI CHAP user could not be deleted. (username: %(user)s)'),
|
||||
304: _('Failed to specify a logical device to be deleted. '
|
||||
'(method: %(method)s, id: %(id)s)'),
|
||||
305: _('The logical device for specified %(type)s %(id)s '
|
||||
'was already deleted.'),
|
||||
306: _('A host group could not be deleted. (port: %(port)s, '
|
||||
'gid: %(gid)s, name: %(name)s)'),
|
||||
307: _('An iSCSI target could not be deleted. (port: %(port)s, '
|
||||
'tno: %(tno)s, alias: %(alias)s)'),
|
||||
308: _('A host group could not be added. (port: %(port)s, '
|
||||
'name: %(name)s)'),
|
||||
309: _('An iSCSI target could not be added. '
|
||||
'(port: %(port)s, alias: %(alias)s, reason: %(reason)s)'),
|
||||
310: _('Failed to unmap a logical device. (LDEV: %(ldev)s, '
|
||||
'reason: %(reason)s)'),
|
||||
311: _('A free LUN (HLUN) was not found. Add a different host'
|
||||
' group. (LDEV: %(ldev)s)'),
|
||||
312: _('Failed to get a storage resource. The system will attempt '
|
||||
'to get the storage resource again. (resource: %(resource)s)'),
|
||||
313: _('Failed to delete a logical device. (LDEV: %(ldev)s, '
|
||||
'reason: %(reason)s)'),
|
||||
314: _('Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, '
|
||||
'port: %(port)s, id: %(id)s)'),
|
||||
315: _('Failed to perform a zero-page reclamation. '
|
||||
'(LDEV: %(ldev)s, reason: %(reason)s)'),
|
||||
316: _('Failed to assign the iSCSI initiator IQN. (port: %(port)s, '
|
||||
'reason: %(reason)s)'),
|
||||
}
|
||||
|
||||
HBSD_ERR_MSG = {
|
||||
600: _('The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, '
|
||||
'stderr: %(err)s)'),
|
||||
601: _('A parameter is invalid. (%(param)s)'),
|
||||
602: _('A parameter value is invalid. (%(meta)s)'),
|
||||
603: _('Failed to acquire a resource lock. (serial: %(serial)s, '
|
||||
'inst: %(inst)s, ret: %(ret)s, stderr: %(err)s)'),
|
||||
604: _('Cannot set both hitachi_serial_number and hitachi_unit_name.'),
|
||||
605: _('Either hitachi_serial_number or hitachi_unit_name is required.'),
|
||||
615: _('A pair could not be created. The maximum number of pair is '
|
||||
'exceeded. (copy method: %(copy_method)s, P-VOL: %(pvol)s)'),
|
||||
616: _('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)'),
|
||||
617: _('The specified operation is not supported. The volume size '
|
||||
'must be the same as the source %(type)s. (volume: %(volume_id)s)'),
|
||||
618: _('The volume %(volume_id)s could not be extended. '
|
||||
'The volume type must be Normal.'),
|
||||
619: _('The volume %(volume_id)s to be mapped was not found.'),
|
||||
624: _('The %(type)s %(id)s source to be replicated was not found.'),
|
||||
631: _('Failed to create a file. (file: %(file)s, ret: %(ret)s, '
|
||||
'stderr: %(err)s)'),
|
||||
632: _('Failed to open a file. (file: %(file)s, ret: %(ret)s, '
|
||||
'stderr: %(err)s)'),
|
||||
633: _('%(file)s: Permission denied.'),
|
||||
636: _('Failed to add the logical device.'),
|
||||
637: _('The method %(method)s timed out. (timeout value: %(timeout)s)'),
|
||||
640: _('A pool could not be found. (pool id: %(pool_id)s)'),
|
||||
641: _('The host group or iSCSI target could not be added.'),
|
||||
642: _('An iSCSI CHAP user could not be added. (username: %(user)s)'),
|
||||
643: _('The iSCSI CHAP user %(user)s does not exist.'),
|
||||
648: _('There are no resources available for use. '
|
||||
'(resource: %(resource)s)'),
|
||||
649: _('The host group or iSCSI target was not found.'),
|
||||
650: _('The resource %(resource)s was not found.'),
|
||||
651: _('The IP Address was not found.'),
|
||||
653: _('The creation of a logical device could not be '
|
||||
'completed. (LDEV: %(ldev)s)'),
|
||||
654: _('A volume status is invalid. (status: %(status)s)'),
|
||||
655: _('A snapshot status is invalid. (status: %(status)s)'),
|
||||
659: _('A host group is invalid. (host group: %(gid)s)'),
|
||||
660: _('The specified %(desc)s is busy.'),
|
||||
}
|
||||
|
||||
|
||||
def set_msg(msg_id, **kwargs):
|
||||
if msg_id < WARNING_ID:
|
||||
msg_header = 'MSGID%04d-I:' % msg_id
|
||||
msg_body = HBSD_INFO_MSG.get(msg_id)
|
||||
else:
|
||||
msg_header = 'MSGID%04d-W:' % msg_id
|
||||
msg_body = HBSD_WARN_MSG.get(msg_id)
|
||||
|
||||
return '%(header)s %(body)s' % {'header': msg_header,
|
||||
'body': msg_body % kwargs}
|
||||
|
||||
|
||||
def output_err(msg_id, **kwargs):
|
||||
msg = HBSD_ERR_MSG.get(msg_id) % kwargs
|
||||
|
||||
LOG.error("MSGID%04d-E: %s", msg_id, msg)
|
||||
|
||||
return msg
|
||||
|
||||
|
||||
def get_process_lock(file):
|
||||
if not os.access(file, os.W_OK):
|
||||
msg = output_err(633, file=file)
|
||||
raise exception.HBSDError(message=msg)
|
||||
return lockutils.InterProcessLock(file)
|
||||
|
||||
|
||||
def create_empty_file(filename):
|
||||
if not os.path.exists(filename):
|
||||
try:
|
||||
utils.execute('touch', filename)
|
||||
except putils.ProcessExecutionError as ex:
|
||||
msg = output_err(
|
||||
631, file=filename, ret=ex.exit_code, err=ex.stderr)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
|
||||
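# File-based inter-process lock that also acquires the given in-process
# lock_object, so both concurrent threads and separate processes are
# serialized while the lock is held.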
class FileLock(lockutils.InterProcessLock):
|
||||
|
||||
def __init__(self, name, lock_object):
|
||||
self.lock_object = lock_object
|
||||
|
||||
super(FileLock, self).__init__(name)
|
||||
|
||||
def __enter__(self):
|
||||
if not os.access(self.fname, os.W_OK):
|
||||
msg = output_err(633, file=self.fname)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
self.lock_object.acquire()
|
||||
|
||||
try:
|
||||
ret = super(FileLock, self).__enter__()
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.lock_object.release()
|
||||
|
||||
return ret
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
try:
|
||||
super(FileLock, self).__exit__(exc_type, exc_val, exc_tb)
|
||||
finally:
|
||||
self.lock_object.release()
|
||||
|
||||
|
||||
class NopLock(object):
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
pass
|
||||
|
||||
|
||||
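# Base class for the array command wrappers (HORCM and SNM2 backends);
# exec_command() runs array CLI commands as root and returns
# (ret, stdout, stderr).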
class HBSDBasicLib(object):
|
||||
|
||||
def __init__(self, conf=None):
|
||||
self.conf = conf
|
||||
|
||||
def exec_command(self, cmd, args=None, printflag=True):
|
||||
if printflag:
|
||||
if args:
|
||||
LOG.debug('cmd: %(cmd)s, args: %(args)s' %
|
||||
{'cmd': cmd, 'args': args})
|
||||
else:
|
||||
LOG.debug('cmd: %s' % cmd)
|
||||
|
||||
cmd = [cmd]
|
||||
|
||||
if args:
|
||||
if isinstance(args, six.text_type):
|
||||
cmd += shlex.split(args.encode())
|
||||
else:
|
||||
cmd += shlex.split(args)
|
||||
|
||||
try:
|
||||
stdout, stderr = utils.execute(*cmd, run_as_root=True)
|
||||
ret = 0
|
||||
except putils.ProcessExecutionError as e:
|
||||
ret = e.exit_code
|
||||
stdout = e.stdout
|
||||
stderr = e.stderr
|
||||
|
||||
LOG.debug('cmd: %s' % six.text_type(cmd))
|
||||
LOG.debug('from: %s' % six.text_type(inspect.stack()[2]))
|
||||
LOG.debug('ret: %d' % ret)
|
||||
LOG.debug('stdout: %s' % stdout.replace(os.linesep, ' '))
|
||||
LOG.debug('stderr: %s' % stderr.replace(os.linesep, ' '))
|
||||
|
||||
return ret, stdout, stderr
|
||||
|
||||
def set_pair_flock(self):
|
||||
return NopLock()
|
||||
|
||||
def discard_zero_page(self, ldev):
|
||||
pass
|
||||
|
||||
def output_param_to_log(self, conf):
|
||||
pass
|
||||
|
||||
def connect_storage(self):
|
||||
pass
|
||||
|
||||
def get_max_hostgroups(self):
|
||||
pass
|
||||
|
||||
def restart_pair_horcm(self):
|
||||
pass
|
736
cinder/volume/drivers/hitachi/hbsd_common.py
Normal file
@ -0,0 +1,736 @@
|
||||
# Copyright (C) 2014, Hitachi, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Common class for Hitachi storage drivers.
|
||||
|
||||
"""
|
||||
|
||||
from contextlib import nested
|
||||
import re
|
||||
import threading
|
||||
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
|
||||
from cinder.db.sqlalchemy import api
|
||||
from cinder.db.sqlalchemy import models
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder.openstack.common import excutils
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder import utils
|
||||
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
||||
from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
|
||||
from cinder.volume.drivers.hitachi import hbsd_snm2 as snm2
|
||||
from cinder.volume import utils as volume_utils
|
||||
|
||||
|
||||
VERSION = '1.0.0'
|
||||
|
||||
PARAM_RANGE = {
|
||||
'hitachi_copy_check_interval': {'min': 1, 'max': 600},
|
||||
'hitachi_async_copy_check_interval': {'min': 1, 'max': 600},
|
||||
'hitachi_copy_speed': {'min': 1, 'max': 15},
|
||||
}
|
||||
|
||||
DEFAULT_LDEV_RANGE = [0, 65535]
|
||||
|
||||
COPY_METHOD = ('FULL', 'THIN')
|
||||
VALID_DP_VOLUME_STATUS = ['available', 'in-use']
|
||||
VALID_V_VOLUME_STATUS = ['available']
|
||||
SYSTEM_LOCK_FILE = basic_lib.LOCK_DIR + 'system'
|
||||
SERVICE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'service_'
|
||||
STORAGE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'storage_'
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
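# Common driver options. Exactly one of hitachi_serial_number or
# hitachi_unit_name must be set; check_param() below enforces this and
# selects the HORCM or SNM2 command class accordingly.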
volume_opts = [
|
||||
cfg.StrOpt('hitachi_serial_number',
|
||||
default=None,
|
||||
help='Serial number of storage system'),
|
||||
cfg.StrOpt('hitachi_unit_name',
|
||||
default=None,
|
||||
help='Name of an array unit'),
|
||||
cfg.IntOpt('hitachi_pool_id',
|
||||
default=None,
|
||||
help='Pool ID of storage system'),
|
||||
cfg.IntOpt('hitachi_thin_pool_id',
|
||||
default=None,
|
||||
help='Thin pool ID of storage system'),
|
||||
cfg.StrOpt('hitachi_ldev_range',
|
||||
default=None,
|
||||
help='Range of logical device of storage system'),
|
||||
cfg.StrOpt('hitachi_default_copy_method',
|
||||
default='FULL',
|
||||
help='Default copy method of storage system'),
|
||||
cfg.IntOpt('hitachi_copy_speed',
|
||||
default=3,
|
||||
help='Copy speed of storage system'),
|
||||
cfg.IntOpt('hitachi_copy_check_interval',
|
||||
default=3,
|
||||
help='Interval to check copy'),
|
||||
cfg.IntOpt('hitachi_async_copy_check_interval',
|
||||
default=10,
|
||||
help='Interval to check copy asynchronously'),
|
||||
cfg.StrOpt('hitachi_target_ports',
|
||||
default=None,
|
||||
help='Control port names for HostGroup or iSCSI Target'),
|
||||
cfg.StrOpt('hitachi_group_range',
|
||||
default=None,
|
||||
help='Range of group number'),
|
||||
cfg.BoolOpt('hitachi_group_request',
|
||||
default=False,
|
||||
secret=True,
|
||||
help='Request for creating HostGroup or iSCSI Target'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(volume_opts)
|
||||
|
||||
|
||||
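# Non-blocking lock: entering it while another operation holds the lock
# raises HBSDError (message 660, "The specified ... is busy.") instead of
# waiting.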
class TryLock(object):
|
||||
|
||||
def __init__(self):
|
||||
self.lock = threading.RLock()
|
||||
self.desc = None
|
||||
|
||||
def set_desc(self, description):
|
||||
self.desc = description
|
||||
|
||||
def __enter__(self):
|
||||
if not self.lock.acquire(False):
|
||||
msg = basic_lib.output_err(660, desc=self.desc)
|
||||
raise exception.HBSDError(message=msg)
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.lock.release()
|
||||
|
||||
|
||||
class HBSDCommon(object):
|
||||
|
||||
def __init__(self, conf, parent, context, db):
|
||||
self.configuration = conf
|
||||
self.generated_from = parent
|
||||
self.context = context
|
||||
self.db = db
|
||||
|
||||
self.system_lock_file = SYSTEM_LOCK_FILE
|
||||
self.service_lock_file = '%s%s' % (SERVICE_LOCK_PATH_BASE,
|
||||
conf.config_group)
|
||||
if conf.hitachi_serial_number:
|
||||
self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE,
|
||||
six.text_type(
|
||||
conf.hitachi_serial_number))
|
||||
elif conf.hitachi_unit_name:
|
||||
self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE,
|
||||
six.text_type(
|
||||
conf.hitachi_unit_name))
|
||||
|
||||
self.storage_obj_lock = threading.Lock()
|
||||
self.volinfo_lock = threading.Lock()
|
||||
self.volume_info = {}
|
||||
self.output_first = True
|
||||
|
||||
def get_volume(self, volume_id):
|
||||
return self.db.volume_get(self.context, volume_id)
|
||||
|
||||
def get_volume_metadata(self, volume_id):
|
||||
return self.db.volume_metadata_get(self.context, volume_id)
|
||||
|
||||
def get_snapshot_metadata(self, snapshot_id):
|
||||
return self.db.snapshot_metadata_get(self.context, snapshot_id)
|
||||
|
||||
def get_ldev(self, obj):
|
||||
if not obj:
|
||||
return None
|
||||
|
||||
ldev = obj.get('provider_location')
|
||||
if not ldev or not ldev.isdigit():
|
||||
return None
|
||||
else:
|
||||
return int(ldev)
|
||||
|
||||
def get_value(self, obj, name, key):
|
||||
if not obj:
|
||||
return None
|
||||
|
||||
if obj.get(name):
|
||||
for i in obj[name]:
|
||||
if i['key'] == key:
|
||||
return i['value']
|
||||
return None
|
||||
|
||||
def get_is_vvol(self, obj, name):
|
||||
return self.get_value(obj, name, 'type') == 'V-VOL'
|
||||
|
||||
def get_volume_is_vvol(self, volume):
|
||||
return self.get_is_vvol(volume, 'volume_metadata')
|
||||
|
||||
def get_snapshot_is_vvol(self, snapshot):
|
||||
return self.get_is_vvol(snapshot, 'snapshot_metadata')
|
||||
|
||||
def get_copy_method(self, volume):
|
||||
method = self.get_value(volume, 'volume_metadata', 'copy_method')
|
||||
if method:
|
||||
if method not in COPY_METHOD:
|
||||
msg = basic_lib.output_err(602, meta='copy_method')
|
||||
raise exception.HBSDError(message=msg)
|
||||
elif (method == 'THIN'
|
||||
and self.configuration.hitachi_thin_pool_id is None):
|
||||
msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
|
||||
raise exception.HBSDError(message=msg)
|
||||
else:
|
||||
method = self.configuration.hitachi_default_copy_method
|
||||
return method
|
||||
|
||||
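# Parses a range option such as '0-100' (decimal) or '00:00:00-00:00:ff'
# (hexadecimal with colons) into a [start, end] pair; mixing the two
# notations raises an error.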
def _range2list(self, conf, param):
|
||||
str = getattr(conf, param)
|
||||
lists = str.split('-')
|
||||
if len(lists) != 2:
|
||||
msg = basic_lib.output_err(601, param=param)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
first_type = None
|
||||
for i in range(len(lists)):
|
||||
if lists[i].isdigit():
|
||||
lists[i] = int(lists[i], 10)
|
||||
if first_type == 'hex':
|
||||
msg = basic_lib.output_err(601, param=param)
|
||||
raise exception.HBSDError(message=msg)
|
||||
first_type = 'dig'
|
||||
else:
|
||||
if (first_type == 'dig'
or not re.match(r'\w\w:\w\w:\w\w', lists[i])):
|
||||
msg = basic_lib.output_err(601, param=param)
|
||||
raise exception.HBSDError(message=msg)
|
||||
try:
|
||||
lists[i] = int(lists[i].replace(':', ''), 16)
|
||||
first_type = 'hex'
|
||||
except Exception:
|
||||
msg = basic_lib.output_err(601, param=param)
|
||||
raise exception.HBSDError(message=msg)
|
||||
if lists[0] > lists[1]:
|
||||
msg = basic_lib.output_err(601, param=param)
|
||||
raise exception.HBSDError(message=msg)
|
||||
return lists
|
||||
|
||||
def output_param_to_log(self, storage_protocol):
|
||||
essential_inherited_param = ['volume_backend_name', 'volume_driver']
|
||||
conf = self.configuration
|
||||
|
||||
msg = basic_lib.set_msg(1, config_group=conf.config_group)
|
||||
LOG.info(msg)
|
||||
version = self.command.get_comm_version()
|
||||
if conf.hitachi_unit_name:
|
||||
prefix = 'HSNM2 version'
|
||||
else:
|
||||
prefix = 'RAID Manager version'
|
||||
LOG.info('\t%-35s%s' % (prefix + ': ', six.text_type(version)))
|
||||
for param in essential_inherited_param:
|
||||
value = conf.safe_get(param)
|
||||
LOG.info('\t%-35s%s' % (param + ': ', six.text_type(value)))
|
||||
for opt in volume_opts:
|
||||
if not opt.secret:
|
||||
value = getattr(conf, opt.name)
|
||||
LOG.info('\t%-35s%s' % (opt.name + ': ',
|
||||
six.text_type(value)))
|
||||
|
||||
if storage_protocol == 'iSCSI':
|
||||
value = getattr(conf, 'hitachi_group_request')
|
||||
LOG.info('\t%-35s%s' % ('hitachi_group_request: ',
|
||||
six.text_type(value)))
|
||||
|
||||
def check_param(self):
|
||||
conf = self.configuration
|
||||
|
||||
if conf.hitachi_unit_name and conf.hitachi_serial_number:
|
||||
msg = basic_lib.output_err(604)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
if not conf.hitachi_unit_name and not conf.hitachi_serial_number:
|
||||
msg = basic_lib.output_err(605)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
if conf.hitachi_pool_id is None:
|
||||
msg = basic_lib.output_err(601, param='hitachi_pool_id')
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
for param in PARAM_RANGE.keys():
|
||||
_value = getattr(conf, param)
|
||||
if (_value and
|
||||
(not PARAM_RANGE[param]['min'] <= _value <=
|
||||
PARAM_RANGE[param]['max'])):
|
||||
msg = basic_lib.output_err(601, param=param)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
if conf.hitachi_default_copy_method not in COPY_METHOD:
|
||||
msg = basic_lib.output_err(601,
|
||||
param='hitachi_default_copy_method')
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
if (conf.hitachi_default_copy_method == 'THIN'
|
||||
and conf.hitachi_thin_pool_id is None):
|
||||
msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
for param in ('hitachi_ldev_range', 'hitachi_group_range'):
|
||||
if not getattr(conf, param):
|
||||
continue
|
||||
else:
|
||||
_value = self._range2list(conf, param)
|
||||
setattr(conf, param, _value)
|
||||
|
||||
if conf.hitachi_target_ports:
|
||||
conf.hitachi_target_ports = conf.hitachi_target_ports.split(',')
|
||||
|
||||
for opt in volume_opts:
|
||||
getattr(conf, opt.name)
|
||||
|
||||
if conf.hitachi_unit_name:
|
||||
self.command = snm2.HBSDSNM2(conf)
|
||||
else:
|
||||
conf.append_config_values(horcm.volume_opts)
|
||||
self.command = horcm.HBSDHORCM(conf)
|
||||
self.command.check_param()
|
||||
self.pair_flock = self.command.set_pair_flock()
|
||||
|
||||
def create_lock_file(self):
|
||||
basic_lib.create_empty_file(self.system_lock_file)
|
||||
basic_lib.create_empty_file(self.service_lock_file)
|
||||
basic_lib.create_empty_file(self.storage_lock_file)
|
||||
self.command.create_lock_file()
|
||||
|
||||
def _add_ldev(self, volume_num, capacity, pool_id, is_vvol):
|
||||
self.command.comm_add_ldev(pool_id, volume_num, capacity, is_vvol)
|
||||
|
||||
def _get_unused_volume_num(self, ldev_range):
|
||||
return self.command.get_unused_ldev(ldev_range)
|
||||
|
||||
def add_volinfo(self, ldev, id=None, type='volume'):
|
||||
with self.volinfo_lock:
|
||||
if ldev not in self.volume_info:
|
||||
self.init_volinfo(self.volume_info, ldev)
|
||||
if id:
|
||||
desc = '%s %s' % (type, id)
|
||||
self.volume_info[ldev]['in_use'].set_desc(desc)
|
||||
|
||||
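# Splits and deletes copy pairs involving the given LDEV. Only S-VOLs in
# PSUS (split) state can be removed; pairs that are still copying raise
# HBSDBusy so the caller reports the volume as busy.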
def delete_pair(self, ldev, all_split=True, is_vvol=None):
|
||||
paired_info = self.command.get_paired_info(ldev)
|
||||
LOG.debug('paired_info: %s' % six.text_type(paired_info))
|
||||
pvol = paired_info['pvol']
|
||||
svols = paired_info['svol']
|
||||
driver = self.generated_from
|
||||
restart = False
|
||||
svol_list = []
|
||||
try:
|
||||
if pvol is None:
|
||||
return
|
||||
elif pvol == ldev:
|
||||
for svol in svols[:]:
|
||||
if svol['is_vvol'] or svol['status'] != basic_lib.PSUS:
|
||||
continue
|
||||
|
||||
self.command.delete_pair(pvol, svol['lun'], False)
|
||||
restart = True
|
||||
driver.pair_terminate_connection(svol['lun'])
|
||||
svols.remove(svol)
|
||||
|
||||
if all_split and svols:
|
||||
svol_list.append(six.text_type(svols[0]['lun']))
|
||||
for svol in svols[1:]:
|
||||
svol_list.append(', %d' % svol['lun'])
|
||||
|
||||
msg = basic_lib.output_err(616, pvol=pvol,
|
||||
svol=''.join(svol_list))
|
||||
raise exception.HBSDBusy(message=msg)
|
||||
|
||||
if not svols:
|
||||
driver.pair_terminate_connection(pvol)
|
||||
|
||||
else:
|
||||
self.add_volinfo(pvol)
|
||||
if not self.volume_info[pvol]['in_use'].lock.acquire(False):
|
||||
desc = self.volume_info[pvol]['in_use'].desc
|
||||
msg = basic_lib.output_err(660, desc=desc)
|
||||
raise exception.HBSDBusy(message=msg)
|
||||
try:
|
||||
paired_info = self.command.get_paired_info(ldev)
|
||||
if paired_info['pvol'] is None:
|
||||
return
|
||||
svol = paired_info['svol'][0]
|
||||
if svol['status'] != basic_lib.PSUS:
|
||||
msg = basic_lib.output_err(616, pvol=pvol, svol=ldev)
|
||||
raise exception.HBSDBusy(message=msg)
|
||||
|
||||
self.command.delete_pair(pvol, ldev, svol['is_vvol'])
|
||||
if not svol['is_vvol']:
|
||||
restart = True
|
||||
driver.pair_terminate_connection(ldev)
|
||||
paired_info = self.command.get_paired_info(pvol)
|
||||
if paired_info['pvol'] is None:
|
||||
driver.pair_terminate_connection(pvol)
|
||||
finally:
|
||||
self.volume_info[pvol]['in_use'].lock.release()
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
if restart:
|
||||
try:
|
||||
self.command.restart_pair_horcm()
|
||||
except Exception as e:
|
||||
LOG.warning(_('Failed to restart horcm: %s') %
|
||||
six.text_type(e))
|
||||
else:
|
||||
if (all_split or is_vvol) and restart:
|
||||
try:
|
||||
self.command.restart_pair_horcm()
|
||||
except Exception as e:
|
||||
LOG.warning(_('Failed to restart horcm: %s') %
|
||||
six.text_type(e))
|
||||
|
||||
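# Storage-assisted copy: maps both LDEVs for pair operations and creates
# a copy pair on the array (thin copy when is_vvol is True, full copy
# otherwise).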
def copy_async_data(self, pvol, svol, is_vvol):
|
||||
path_list = []
|
||||
driver = self.generated_from
|
||||
try:
|
||||
with self.pair_flock:
|
||||
self.delete_pair(pvol, all_split=False, is_vvol=is_vvol)
|
||||
paired_info = self.command.get_paired_info(pvol)
|
||||
if paired_info['pvol'] is None:
|
||||
driver.pair_initialize_connection(pvol)
|
||||
path_list.append(pvol)
|
||||
driver.pair_initialize_connection(svol)
|
||||
path_list.append(svol)
|
||||
self.command.comm_create_pair(pvol, svol, is_vvol)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
for ldev in path_list:
|
||||
try:
|
||||
driver.pair_terminate_connection(ldev)
|
||||
except Exception as ex:
|
||||
msg = basic_lib.set_msg(
|
||||
310, ldev=ldev, reason=six.text_type(ex))
|
||||
LOG.warning(msg)
|
||||
|
||||
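# Host-assisted copy: attaches the source and destination LDEVs to this
# node, copies the data with volume_utils.copy_volume(), then reclaims
# zero pages on the destination.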
def copy_sync_data(self, src_ldev, dest_ldev, size):
|
||||
src_vol = {'provider_location': six.text_type(src_ldev),
|
||||
'id': 'src_vol'}
|
||||
dest_vol = {'provider_location': six.text_type(dest_ldev),
|
||||
'id': 'dest_vol'}
|
||||
properties = utils.brick_get_connector_properties()
|
||||
driver = self.generated_from
|
||||
src_info = None
|
||||
dest_info = None
|
||||
try:
|
||||
dest_info = driver._attach_volume(self.context, dest_vol,
|
||||
properties)
|
||||
src_info = driver._attach_volume(self.context, src_vol,
|
||||
properties)
|
||||
volume_utils.copy_volume(src_info['device']['path'],
|
||||
dest_info['device']['path'], size * 1024,
|
||||
self.configuration.volume_dd_blocksize)
|
||||
finally:
|
||||
if dest_info:
|
||||
driver._detach_volume(self.context, dest_info,
|
||||
dest_vol, properties)
|
||||
if src_info:
|
||||
driver._detach_volume(self.context, src_info,
|
||||
src_vol, properties)
|
||||
self.command.discard_zero_page(dest_ldev)
|
||||
|
||||
def copy_data(self, pvol, size, p_is_vvol, method):
|
||||
type = 'Normal'
|
||||
is_vvol = method == 'THIN'
|
||||
svol = self._create_volume(size, is_vvol=is_vvol)
|
||||
try:
|
||||
if p_is_vvol:
|
||||
self.copy_sync_data(pvol, svol, size)
|
||||
else:
|
||||
if is_vvol:
|
||||
type = 'V-VOL'
|
||||
self.copy_async_data(pvol, svol, is_vvol)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
self.delete_ldev(svol, is_vvol)
|
||||
except Exception as ex:
|
||||
msg = basic_lib.set_msg(
|
||||
313, ldev=svol, reason=six.text_type(ex))
|
||||
LOG.warning(msg)
|
||||
|
||||
return six.text_type(svol), type
|
||||
|
||||
def add_lun(self, command, hostgroups, ldev, is_once=False):
|
||||
lock = basic_lib.get_process_lock(self.storage_lock_file)
|
||||
with lock:
|
||||
self.command.comm_add_lun(command, hostgroups, ldev, is_once)
|
||||
|
||||
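# LDEV creation is retried a few times because another backend instance
# may take the chosen free LDEV number between the lookup and the add.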
def create_ldev(self, size, ldev_range, pool_id, is_vvol):
|
||||
LOG.debug('create start (normal)')
|
||||
for i in basic_lib.DEFAULT_TRY_RANGE:
|
||||
LOG.debug('Try number: %(tries)s / %(max_tries)s' %
|
||||
{'tries': i + 1,
|
||||
'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
|
||||
new_ldev = self._get_unused_volume_num(ldev_range)
|
||||
try:
|
||||
self._add_ldev(new_ldev, size, pool_id, is_vvol)
|
||||
except exception.HBSDNotFound:
|
||||
msg = basic_lib.set_msg(312, resource='LDEV')
|
||||
LOG.warning(msg)
|
||||
continue
|
||||
else:
|
||||
break
|
||||
else:
|
||||
msg = basic_lib.output_err(636)
|
||||
raise exception.HBSDError(message=msg)
|
||||
LOG.debug('create end (normal: %s)' % six.text_type(new_ldev))
|
||||
self.init_volinfo(self.volume_info, new_ldev)
|
||||
return new_ldev
|
||||
|
||||
def _create_volume(self, size, is_vvol=False):
|
||||
ldev_range = self.configuration.hitachi_ldev_range
|
||||
if not ldev_range:
|
||||
ldev_range = DEFAULT_LDEV_RANGE
|
||||
pool_id = self.configuration.hitachi_pool_id
|
||||
|
||||
lock = basic_lib.get_process_lock(self.storage_lock_file)
|
||||
with nested(self.storage_obj_lock, lock):
|
||||
ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol)
|
||||
return ldev
|
||||
|
||||
def create_volume(self, volume):
|
||||
volume_metadata = self.get_volume_metadata(volume['id'])
|
||||
volume_metadata['type'] = 'Normal'
|
||||
|
||||
size = volume['size']
|
||||
ldev = self._create_volume(size)
|
||||
volume_metadata['ldev'] = six.text_type(ldev)
|
||||
|
||||
return {'provider_location': six.text_type(ldev),
|
||||
'metadata': volume_metadata}
|
||||
|
||||
def delete_ldev(self, ldev, is_vvol):
|
||||
LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)'
|
||||
% {'ldev': ldev, 'vvol': is_vvol})
|
||||
with self.pair_flock:
|
||||
self.delete_pair(ldev)
|
||||
self.command.comm_delete_ldev(ldev, is_vvol)
|
||||
with self.volinfo_lock:
|
||||
if ldev in self.volume_info:
|
||||
self.volume_info.pop(ldev)
|
||||
LOG.debug('delete_ldev is finished '
|
||||
'(LDEV: %(ldev)d, is_vvol: %(vvol)s)'
|
||||
% {'ldev': ldev, 'vvol': is_vvol})
|
||||
|
||||
def delete_volume(self, volume):
|
||||
ldev = self.get_ldev(volume)
|
||||
if ldev is None:
|
||||
msg = basic_lib.set_msg(
|
||||
304, method='delete_volume', id=volume['id'])
|
||||
LOG.warning(msg)
|
||||
return
|
||||
self.add_volinfo(ldev, volume['id'])
|
||||
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
|
||||
desc = self.volume_info[ldev]['in_use'].desc
|
||||
basic_lib.output_err(660, desc=desc)
|
||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||
try:
|
||||
is_vvol = self.get_volume_is_vvol(volume)
|
||||
try:
|
||||
self.delete_ldev(ldev, is_vvol)
|
||||
except exception.HBSDNotFound:
|
||||
with self.volinfo_lock:
|
||||
if ldev in self.volume_info:
|
||||
self.volume_info.pop(ldev)
|
||||
msg = basic_lib.set_msg(
|
||||
305, type='volume', id=volume['id'])
|
||||
LOG.warning(msg)
|
||||
except exception.HBSDBusy:
|
||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||
finally:
|
||||
if ldev in self.volume_info:
|
||||
self.volume_info[ldev]['in_use'].lock.release()
|
||||
|
||||
def check_volume_status(self, volume, is_vvol):
|
||||
if not is_vvol:
|
||||
status = VALID_DP_VOLUME_STATUS
|
||||
else:
|
||||
status = VALID_V_VOLUME_STATUS
|
||||
if volume['status'] not in status:
|
||||
msg = basic_lib.output_err(654, status=volume['status'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
src_ref = self.get_volume(snapshot['volume_id'])
|
||||
pvol = self.get_ldev(src_ref)
|
||||
if pvol is None:
|
||||
msg = basic_lib.output_err(624, type='volume', id=src_ref['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
self.add_volinfo(pvol, src_ref['id'])
|
||||
with self.volume_info[pvol]['in_use']:
|
||||
is_vvol = self.get_volume_is_vvol(src_ref)
|
||||
self.check_volume_status(src_ref, is_vvol)
|
||||
size = snapshot['volume_size']
|
||||
snap_metadata = self.get_snapshot_metadata(snapshot['id'])
|
||||
method = None if is_vvol else self.get_copy_method(src_ref)
|
||||
|
||||
svol, type = self.copy_data(pvol, size, is_vvol, method)
|
||||
|
||||
if type == 'V-VOL':
|
||||
snap_metadata['type'] = type
|
||||
snap_metadata['ldev'] = svol
|
||||
|
||||
snapshot_metadata = api._metadata_refs(snap_metadata,
|
||||
models.SnapshotMetadata)
|
||||
return {'provider_location': svol,
|
||||
'snapshot_metadata': snapshot_metadata}
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
ldev = self.get_ldev(snapshot)
|
||||
if ldev is None:
|
||||
msg = basic_lib.set_msg(
|
||||
304, method='delete_snapshot', id=snapshot['id'])
|
||||
LOG.warning(msg)
|
||||
return
|
||||
self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
|
||||
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
|
||||
desc = self.volume_info[ldev]['in_use'].desc
|
||||
basic_lib.output_err(660, desc=desc)
|
||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
||||
try:
|
||||
is_vvol = self.get_snapshot_is_vvol(snapshot)
|
||||
try:
|
||||
self.delete_ldev(ldev, is_vvol)
|
||||
except exception.HBSDNotFound:
|
||||
with self.volinfo_lock:
|
||||
if ldev in self.volume_info:
|
||||
self.volume_info.pop(ldev)
|
||||
msg = basic_lib.set_msg(
|
||||
305, type='snapshot', id=snapshot['id'])
|
||||
LOG.warning(msg)
|
||||
except exception.HBSDBusy:
|
||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
||||
finally:
|
||||
if ldev in self.volume_info:
|
||||
self.volume_info[ldev]['in_use'].lock.release()
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
pvol = self.get_ldev(src_vref)
|
||||
if pvol is None:
|
||||
msg = basic_lib.output_err(624, type='volume', id=src_vref['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
self.add_volinfo(pvol, src_vref['id'])
|
||||
with self.volume_info[pvol]['in_use']:
|
||||
is_vvol = self.get_volume_is_vvol(src_vref)
|
||||
self.check_volume_status(self.get_volume(src_vref['id']), is_vvol)
|
||||
size = volume['size']
|
||||
src_size = src_vref['size']
|
||||
if size != src_size:
|
||||
msg = basic_lib.output_err(617, type='volume',
|
||||
volume_id=volume['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
metadata = self.get_volume_metadata(volume['id'])
|
||||
method = None if is_vvol else self.get_copy_method(volume)
|
||||
|
||||
svol, type = self.copy_data(pvol, size, is_vvol, method)
|
||||
|
||||
metadata['type'] = type
|
||||
metadata['volume'] = src_vref['id']
|
||||
metadata['ldev'] = svol
|
||||
|
||||
return {'provider_location': svol, 'metadata': metadata}
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
pvol = self.get_ldev(snapshot)
|
||||
if pvol is None:
|
||||
msg = basic_lib.output_err(624, type='snapshot', id=snapshot['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
self.add_volinfo(pvol, id=snapshot['id'], type='snapshot')
|
||||
with self.volume_info[pvol]['in_use']:
|
||||
is_vvol = self.get_snapshot_is_vvol(snapshot)
|
||||
if snapshot['status'] != 'available':
|
||||
msg = basic_lib.output_err(655, status=snapshot['status'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
size = volume['size']
|
||||
src_size = snapshot['volume_size']
|
||||
if size != src_size:
|
||||
msg = basic_lib.output_err(617, type='snapshot',
|
||||
volume_id=volume['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
metadata = self.get_volume_metadata(volume['id'])
|
||||
method = None if is_vvol else self.get_copy_method(volume)
|
||||
svol, type = self.copy_data(pvol, size, is_vvol, method)
|
||||
|
||||
metadata['type'] = type
|
||||
metadata['snapshot'] = snapshot['id']
|
||||
metadata['ldev'] = svol
|
||||
|
||||
return {'provider_location': svol, 'metadata': metadata}
|
||||
|
||||
def _extend_volume(self, ldev, old_size, new_size):
|
||||
with self.pair_flock:
|
||||
self.delete_pair(ldev)
|
||||
self.command.comm_extend_ldev(ldev, old_size, new_size)
|
||||
|
||||
def extend_volume(self, volume, new_size):
|
||||
pvol = self.get_ldev(volume)
|
||||
self.add_volinfo(pvol, volume['id'])
|
||||
with self.volume_info[pvol]['in_use']:
|
||||
if self.get_volume_is_vvol(volume):
|
||||
msg = basic_lib.output_err(618, volume_id=volume['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
self._extend_volume(pvol, volume['size'], new_size)
|
||||
|
||||
def output_backend_available_once(self):
|
||||
if self.output_first:
|
||||
self.output_first = False
|
||||
msg = basic_lib.set_msg(
|
||||
3, config_group=self.configuration.config_group)
|
||||
LOG.warning(msg)
|
||||
|
||||
def update_volume_stats(self, storage_protocol):
|
||||
data = {}
|
||||
total_gb = None
|
||||
free_gb = None
|
||||
data['volume_backend_name'] = self.configuration.safe_get(
|
||||
'volume_backend_name') or 'HBSD%s' % storage_protocol
|
||||
data['vendor_name'] = 'Hitachi'
|
||||
data['driver_version'] = VERSION
|
||||
data['storage_protocol'] = storage_protocol
|
||||
|
||||
try:
|
||||
total_gb, free_gb = self.command.comm_get_dp_pool(
|
||||
self.configuration.hitachi_pool_id)
|
||||
except Exception as ex:
|
||||
LOG.error(_('Failed to update volume status: %s') %
|
||||
six.text_type(ex))
|
||||
return None
|
||||
|
||||
data['total_capacity_gb'] = total_gb
|
||||
data['free_capacity_gb'] = free_gb
|
||||
data['reserved_percentage'] = self.configuration.safe_get(
|
||||
'reserved_percentage')
|
||||
data['QoS_support'] = False
|
||||
|
||||
LOG.debug('Updating volume status (%s)' % data)
|
||||
|
||||
return data
|
||||
|
||||
def init_volinfo(self, vol_info, ldev):
|
||||
vol_info[ldev] = {'in_use': TryLock(), 'lock': threading.Lock()}
|
521
cinder/volume/drivers/hitachi/hbsd_fc.py
Normal file
@ -0,0 +1,521 @@
|
||||
# Copyright (C) 2014, Hitachi, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Fibre channel Cinder volume driver for Hitachi storage.
|
||||
|
||||
"""
|
||||
|
||||
from contextlib import nested
|
||||
import os
|
||||
import threading
|
||||
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder.openstack.common import excutils
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder import utils
|
||||
import cinder.volume.driver
|
||||
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
||||
from cinder.volume.drivers.hitachi import hbsd_common as common
|
||||
from cinder.zonemanager import utils as fczm_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
volume_opts = [
|
||||
cfg.BoolOpt('hitachi_zoning_request',
|
||||
default=False,
|
||||
help='Request for FC Zone creating HostGroup'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(volume_opts)
|
||||
|
||||
|
||||
class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||
VERSION = common.VERSION
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
os.environ['LANG'] = 'C'
|
||||
super(HBSDFCDriver, self).__init__(*args, **kwargs)
|
||||
self.db = kwargs.get('db')
|
||||
self.common = None
|
||||
self.configuration.append_config_values(common.volume_opts)
|
||||
self._stats = {}
|
||||
self.context = None
|
||||
self.max_hostgroups = None
|
||||
self.pair_hostgroups = []
|
||||
self.pair_hostnum = 0
|
||||
self.do_setup_status = threading.Event()
|
||||
|
||||
def _check_param(self):
|
||||
self.configuration.append_config_values(volume_opts)
|
||||
for opt in volume_opts:
|
||||
getattr(self.configuration, opt.name)
|
||||
|
||||
def check_param(self):
|
||||
try:
|
||||
self.common.check_param()
|
||||
self._check_param()
|
||||
except exception.HBSDError:
|
||||
raise
|
||||
except Exception as ex:
|
||||
msg = basic_lib.output_err(601, param=six.text_type(ex))
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
def output_param_to_log(self):
|
||||
lock = basic_lib.get_process_lock(self.common.system_lock_file)
|
||||
|
||||
with lock:
|
||||
self.common.output_param_to_log('FC')
|
||||
for opt in volume_opts:
|
||||
if not opt.secret:
|
||||
value = getattr(self.configuration, opt.name)
|
||||
LOG.info('\t%-35s%s' %
|
||||
(opt.name + ': ', six.text_type(value)))
|
||||
self.common.command.output_param_to_log(self.configuration)
|
||||
|
||||
def _add_wwn(self, hgs, port, gid, wwns):
|
||||
for wwn in wwns:
|
||||
wwn = six.text_type(wwn)
|
||||
self.common.command.comm_add_hbawwn(port, gid, wwn)
|
||||
detected = self.common.command.is_detected(port, wwn)
|
||||
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
|
||||
'detected': detected})
|
||||
LOG.debug('Create host group for %s' % hgs)
|
||||
|
||||
def _add_lun(self, hostgroups, ldev):
|
||||
if hostgroups is self.pair_hostgroups:
|
||||
is_once = True
|
||||
else:
|
||||
is_once = False
|
||||
self.common.add_lun('auhgmap', hostgroups, ldev, is_once)
|
||||
|
||||
def _delete_lun(self, hostgroups, ldev):
|
||||
try:
|
||||
self.common.command.comm_delete_lun(hostgroups, ldev)
|
||||
except exception.HBSDNotFound:
|
||||
msg = basic_lib.set_msg(301, ldev=ldev)
|
||||
LOG.warning(msg)
|
||||
|
||||
def _get_hgname_gid(self, port, host_grp_name):
|
||||
return self.common.command.get_hgname_gid(port, host_grp_name)
|
||||
|
||||
def _get_unused_gid(self, port):
|
||||
group_range = self.configuration.hitachi_group_range
|
||||
if not group_range:
|
||||
group_range = basic_lib.DEFAULT_GROUP_RANGE
|
||||
return self.common.command.get_unused_gid(group_range, port)
|
||||
|
||||
def _get_hostgroup_info(self, hgs, wwns, login=True):
|
||||
target_ports = self.configuration.hitachi_target_ports
|
||||
return self.common.command.comm_get_hostgroup_info(
|
||||
hgs, wwns, target_ports, login=login)
|
||||
|
||||
def _fill_group(self, hgs, port, host_grp_name, wwns):
|
||||
added_hostgroup = False
|
||||
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
|
||||
'name: %(name)s wwns: %(wwns)s)'
|
||||
% {'hgs': hgs, 'port': port,
|
||||
'name': host_grp_name, 'wwns': wwns})
|
||||
gid = self._get_hgname_gid(port, host_grp_name)
|
||||
if gid is None:
|
||||
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
|
||||
try:
|
||||
gid = self._get_unused_gid(port)
|
||||
self._add_hostgroup(port, gid, host_grp_name)
|
||||
added_hostgroup = True
|
||||
except exception.HBSDNotFound:
|
||||
gid = None
|
||||
msg = basic_lib.set_msg(312, resource='GID')
|
||||
LOG.warning(msg)
|
||||
continue
|
||||
else:
|
||||
LOG.debug('Completed adding host target '
'(port: %(port)s gid: %(gid)d)'
% {'port': port, 'gid': gid})
|
||||
break
|
||||
else:
|
||||
msg = basic_lib.output_err(641)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
try:
|
||||
if wwns:
|
||||
self._add_wwn(hgs, port, gid, wwns)
|
||||
else:
|
||||
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None,
|
||||
'detected': True})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
if added_hostgroup:
|
||||
self._delete_hostgroup(port, gid, host_grp_name)
|
||||
|
||||
def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports):
|
||||
target_ports = self.configuration.hitachi_target_ports
|
||||
group_request = self.configuration.hitachi_group_request
|
||||
wwns = []
|
||||
for wwn in master_wwns:
|
||||
wwns.append(wwn.lower())
|
||||
if target_ports and group_request:
|
||||
host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
|
||||
for port in security_ports:
|
||||
wwns_copy = wwns[:]
|
||||
for hostgroup in hgs:
|
||||
if (hostgroup['port'] == port and
|
||||
hostgroup['initiator_wwn'].lower() in wwns_copy):
|
||||
wwns_copy.remove(hostgroup['initiator_wwn'].lower())
|
||||
if wwns_copy:
|
||||
try:
|
||||
self._fill_group(hgs, port, host_grp_name, wwns_copy)
|
||||
except Exception as ex:
|
||||
LOG.warning(_('Failed to add host group: %s') %
|
||||
six.text_type(ex))
|
||||
msg = basic_lib.set_msg(
|
||||
308, port=port, name=host_grp_name)
|
||||
LOG.warning(msg)
|
||||
|
||||
if not hgs:
|
||||
msg = basic_lib.output_err(649)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
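# Host groups named 'HBSD-pairNN' are reserved for attaching volumes on
# the controller node during copy-pair operations; SNM2 backends
# (hitachi_unit_name set) do not need them and return early.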
def add_hostgroup_pair(self, pair_hostgroups):
|
||||
if self.configuration.hitachi_unit_name:
|
||||
return
|
||||
|
||||
properties = utils.brick_get_connector_properties()
|
||||
if 'wwpns' not in properties:
|
||||
msg = basic_lib.output_err(650, resource='HBA')
|
||||
raise exception.HBSDError(message=msg)
|
||||
hostgroups = []
|
||||
self._get_hostgroup_info(hostgroups, properties['wwpns'],
|
||||
login=False)
|
||||
host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX,
|
||||
self.pair_hostnum)
|
||||
for hostgroup in hostgroups:
|
||||
gid = self._get_hgname_gid(hostgroup['port'],
|
||||
host_grp_name)
|
||||
|
||||
# When 'gid' is 0, it should be true.
|
||||
# So, it cannot remove 'is not None'.
|
||||
if gid is not None:
|
||||
pair_hostgroups.append({'port': hostgroup['port'],
|
||||
'gid': gid, 'initiator_wwn': None,
|
||||
'detected': True})
|
||||
break
|
||||
|
||||
if not pair_hostgroups:
|
||||
for hostgroup in hostgroups:
|
||||
pair_port = hostgroup['port']
|
||||
try:
|
||||
self._fill_group(pair_hostgroups, pair_port,
|
||||
host_grp_name, None)
|
||||
except Exception:
|
||||
if hostgroup is hostgroups[-1]:
|
||||
raise
|
||||
else:
|
||||
break
|
||||
|
||||
def add_hostgroup(self):
|
||||
properties = utils.brick_get_connector_properties()
|
||||
if 'wwpns' not in properties:
|
||||
msg = basic_lib.output_err(650, resource='HBA')
|
||||
raise exception.HBSDError(message=msg)
|
||||
LOG.debug("wwpns: %s" % properties['wwpns'])
|
||||
|
||||
hostgroups = []
|
||||
security_ports = self._get_hostgroup_info(
|
||||
hostgroups, properties['wwpns'], login=False)
|
||||
self.add_hostgroup_master(hostgroups, properties['wwpns'],
|
||||
properties['ip'], security_ports)
|
||||
self.add_hostgroup_pair(self.pair_hostgroups)
|
||||
|
||||
def _get_target_wwn(self, port):
|
||||
target_wwns = self.common.command.comm_set_target_wwns(
|
||||
self.configuration.hitachi_target_ports)
|
||||
return target_wwns[port]
|
||||
|
||||
def _add_hostgroup(self, port, gid, host_grp_name):
|
||||
self.common.command.comm_add_hostgrp(port, gid, host_grp_name)
|
||||
|
||||
def _delete_hostgroup(self, port, gid, host_grp_name):
|
||||
try:
|
||||
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
msg = basic_lib.set_msg(
|
||||
306, port=port, gid=gid, name=host_grp_name)
|
||||
LOG.warning(msg)
|
||||
|
||||
def _check_volume_mapping(self, hostgroup):
|
||||
port = hostgroup['port']
|
||||
gid = hostgroup['gid']
|
||||
if self.common.command.get_hostgroup_luns(port, gid):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _build_initiator_target_map(self, hostgroups, terminate=False):
|
||||
target_wwns = []
|
||||
init_targ_map = {}
|
||||
|
||||
target_ports = self.configuration.hitachi_target_ports
|
||||
zoning_request = self.configuration.hitachi_zoning_request
|
||||
|
||||
for hostgroup in hostgroups:
|
||||
target_wwn = self._get_target_wwn(hostgroup['port'])
|
||||
|
||||
if target_wwn not in target_wwns:
|
||||
target_wwns.append(target_wwn)
|
||||
|
||||
if target_ports and zoning_request:
|
||||
if terminate and self._check_volume_mapping(hostgroup):
|
||||
continue
|
||||
|
||||
initiator_wwn = hostgroup['initiator_wwn']
|
||||
if initiator_wwn not in init_targ_map:
|
||||
init_targ_map[initiator_wwn] = []
|
||||
|
||||
init_targ_map[initiator_wwn].append(target_wwn)
|
||||
|
||||
return target_wwns, init_targ_map
|
||||
|
||||
def _get_properties(self, volume, hostgroups, terminate=False):
|
||||
properties = {}
|
||||
|
||||
target_wwns, init_targ_map = self._build_initiator_target_map(
|
||||
hostgroups, terminate)
|
||||
|
||||
properties['target_wwn'] = target_wwns
|
||||
|
||||
if init_targ_map:
|
||||
properties['initiator_target_map'] = init_targ_map
|
||||
|
||||
if not terminate:
|
||||
properties['target_lun'] = hostgroups[0]['lun']
|
||||
|
||||
return properties
|
||||
|
||||
def do_setup(self, context):
|
||||
self.context = context
|
||||
self.common = common.HBSDCommon(self.configuration, self,
|
||||
context, self.db)
|
||||
|
||||
self.check_param()
|
||||
|
||||
self.common.create_lock_file()
|
||||
|
||||
self.common.command.connect_storage()
|
||||
self.max_hostgroups = self.common.command.get_max_hostgroups()
|
||||
|
||||
lock = basic_lib.get_process_lock(self.common.service_lock_file)
|
||||
with lock:
|
||||
self.add_hostgroup()
|
||||
|
||||
self.output_param_to_log()
|
||||
self.do_setup_status.set()
|
||||
|
||||
def check_for_setup_error(self):
|
||||
pass
|
||||
|
||||
def extend_volume(self, volume, new_size):
|
||||
self.do_setup_status.wait()
|
||||
self.common.extend_volume(volume, new_size)
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
if refresh:
|
||||
if self.do_setup_status.isSet():
|
||||
self.common.output_backend_available_once()
|
||||
_stats = self.common.update_volume_stats("FC")
|
||||
if _stats:
|
||||
self._stats = _stats
|
||||
return self._stats
|
||||
|
||||
def create_volume(self, volume):
|
||||
self.do_setup_status.wait()
|
||||
metadata = self.common.create_volume(volume)
|
||||
return metadata
|
||||
|
||||
def delete_volume(self, volume):
|
||||
self.do_setup_status.wait()
|
||||
self.common.delete_volume(volume)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
self.do_setup_status.wait()
|
||||
metadata = self.common.create_snapshot(snapshot)
|
||||
return metadata
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
self.do_setup_status.wait()
|
||||
self.common.delete_snapshot(snapshot)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
self.do_setup_status.wait()
|
||||
metadata = self.common.create_cloned_volume(volume, src_vref)
|
||||
return metadata
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
self.do_setup_status.wait()
|
||||
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
|
||||
return metadata
|
||||
|
||||
def _initialize_connection(self, ldev, connector, src_hgs=None):
|
||||
LOG.debug("Call _initialize_connection "
|
||||
"(config_group: %(group)s ldev: %(ldev)d)"
|
||||
% {'group': self.configuration.config_group, 'ldev': ldev})
|
||||
if src_hgs is self.pair_hostgroups:
|
||||
hostgroups = src_hgs
|
||||
else:
|
||||
hostgroups = []
|
||||
security_ports = self._get_hostgroup_info(
|
||||
hostgroups, connector['wwpns'], login=True)
|
||||
self.add_hostgroup_master(hostgroups, connector['wwpns'],
|
||||
connector['ip'], security_ports)
|
||||
|
||||
if src_hgs is self.pair_hostgroups:
|
||||
try:
|
||||
self._add_lun(hostgroups, ldev)
|
||||
except exception.HBSDNotFound:
|
||||
msg = basic_lib.set_msg(311, ldev=ldev)
|
||||
LOG.warning(msg)
|
||||
for i in range(self.max_hostgroups + 1):
|
||||
self.pair_hostnum += 1
|
||||
pair_hostgroups = []
|
||||
try:
|
||||
self.add_hostgroup_pair(pair_hostgroups)
|
||||
self.pair_hostgroups.extend(pair_hostgroups)
|
||||
except exception.HBSDNotFound:
|
||||
if i >= self.max_hostgroups:
|
||||
msg = basic_lib.output_err(648, resource='GID')
|
||||
raise exception.HBSDError(message=msg)
|
||||
else:
|
||||
break
|
||||
self.pair_initialize_connection(ldev)
|
||||
else:
|
||||
self._add_lun(hostgroups, ldev)
|
||||
|
||||
return hostgroups
|
||||
|
||||
@fczm_utils.AddFCZone
|
||||
def initialize_connection(self, volume, connector):
|
||||
self.do_setup_status.wait()
|
||||
ldev = self.common.get_ldev(volume)
|
||||
if ldev is None:
|
||||
msg = basic_lib.output_err(619, volume_id=volume['id'])
|
||||
raise exception.HBSDError(message=msg)
|
||||
self.common.add_volinfo(ldev, volume['id'])
|
||||
with nested(self.common.volume_info[ldev]['lock'],
|
||||
self.common.volume_info[ldev]['in_use']):
|
||||
hostgroups = self._initialize_connection(ldev, connector)
|
||||
properties = self._get_properties(volume, hostgroups)
|
||||
LOG.debug('Initialize volume_info: %s'
|
||||
% self.common.volume_info)
|
||||
|
||||
LOG.debug('HFCDrv: properties=%s' % properties)
|
||||
return {
|
||||
'driver_volume_type': 'fibre_channel',
|
||||
'data': properties
|
||||
}
|
||||
|
||||
def _terminate_connection(self, ldev, connector, src_hgs):
|
||||
LOG.debug("Call _terminate_connection(config_group: %s)"
|
||||
% self.configuration.config_group)
|
||||
hostgroups = src_hgs[:]
|
||||
self._delete_lun(hostgroups, ldev)
|
||||
LOG.debug("*** _terminate_ ***")
|
||||
|
||||
@fczm_utils.RemoveFCZone
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
self.do_setup_status.wait()
|
||||
ldev = self.common.get_ldev(volume)
|
||||
if ldev is None:
|
||||
msg = basic_lib.set_msg(302, volume_id=volume['id'])
|
||||
LOG.warning(msg)
|
||||
return
|
||||
|
||||
if 'wwpns' not in connector:
|
||||
msg = basic_lib.output_err(650, resource='HBA')
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
hostgroups = []
|
||||
self._get_hostgroup_info(hostgroups,
|
||||
connector['wwpns'], login=False)
|
||||
if not hostgroups:
|
||||
msg = basic_lib.output_err(649)
|
||||
raise exception.HBSDError(message=msg)
|
||||
|
||||
self.common.add_volinfo(ldev, volume['id'])
|
||||
with nested(self.common.volume_info[ldev]['lock'],
|
||||
self.common.volume_info[ldev]['in_use']):
|
||||
self._terminate_connection(ldev, connector, hostgroups)
|
||||
properties = self._get_properties(volume, hostgroups,
|
||||
terminate=True)
|
||||
LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
|
||||
|
||||
return {
|
||||
'driver_volume_type': 'fibre_channel',
|
||||
'data': properties
|
||||
}
|
||||
|
||||
def pair_initialize_connection(self, ldev):
|
||||
if self.configuration.hitachi_unit_name:
|
||||
return
|
||||
self._initialize_connection(ldev, None, self.pair_hostgroups)
|
||||
|
||||
def pair_terminate_connection(self, ldev):
|
||||
if self.configuration.hitachi_unit_name:
|
||||
return
|
||||
self._terminate_connection(ldev, None, self.pair_hostgroups)
|
||||
|
||||
def discard_zero_page(self, volume):
|
||||
self.common.command.discard_zero_page(self.common.get_ldev(volume))
|
||||
|
||||
def create_export(self, context, volume):
|
||||
pass
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
pass
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
pass
|
||||
|
||||
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
|
||||
self.do_setup_status.wait()
|
||||
super(HBSDFCDriver, self).copy_volume_data(context, src_vol,
|
||||
dest_vol, remote)
|
||||
self.discard_zero_page(dest_vol)
|
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
self.do_setup_status.wait()
|
||||
super(HBSDFCDriver, self).copy_image_to_volume(context, volume,
|
||||
image_service,
|
||||
image_id)
|
||||
self.discard_zero_page(volume)
|
||||
|
||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||
self.do_setup_status.wait()
|
||||
if (volume['instance_uuid'] or volume['attached_host']):
|
||||
desc = 'volume %s' % volume['id']
|
||||
msg = basic_lib.output_err(660, desc=desc)
|
||||
raise exception.HBSDError(message=msg)
|
||||
super(HBSDFCDriver, self).copy_volume_to_image(context, volume,
|
||||
image_service,
|
||||
image_meta)
|
||||
|
||||
def restore_backup(self, context, backup, volume, backup_service):
|
||||
self.do_setup_status.wait()
|
||||
super(HBSDFCDriver, self).restore_backup(context, backup,
|
||||
volume, backup_service)
|
||||
self.discard_zero_page(volume)
|
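For reference, a minimal cinder.conf backend stanza that loads the FC driver above might look like the following sketch. Every value shown (backend name, serial number, pool ID, port name, HORCM credentials) is illustrative, not a default shipped by this patch:

    [hitachi-fc]
    volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
    volume_backend_name = hitachi-fc
    hitachi_serial_number = 210944
    hitachi_pool_id = 30
    hitachi_target_ports = CL1-A
    hitachi_group_request = True
    hitachi_zoning_request = True
    hitachi_horcm_numbers = 200,201
    hitachi_horcm_user = <storage account>
    hitachi_horcm_password = <storage password>
    hitachi_horcm_add_conf = True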
1509
cinder/volume/drivers/hitachi/hbsd_horcm.py
Normal file
File diff suppressed because it is too large
420
cinder/volume/drivers/hitachi/hbsd_iscsi.py
Normal file
@@ -0,0 +1,420 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Cinder volume driver for Hitachi storage.

"""

from contextlib import nested
import os
import threading

from oslo.config import cfg
import six

from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common

LOG = logging.getLogger(__name__)

CHAP_METHOD = ('None', 'CHAP None', 'CHAP')

volume_opts = [
    cfg.BoolOpt('hitachi_add_chap_user',
                default=False,
                help='Add CHAP user'),
    cfg.StrOpt('hitachi_auth_method',
               default=None,
               help='iSCSI authentication method'),
    cfg.StrOpt('hitachi_auth_user',
               default='%sCHAP-user' % basic_lib.NAME_PREFIX,
               help='iSCSI authentication username'),
    cfg.StrOpt('hitachi_auth_password',
               default='%sCHAP-password' % basic_lib.NAME_PREFIX,
               help='iSCSI authentication password'),
]

CONF = cfg.CONF
CONF.register_opts(volume_opts)


class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
    VERSION = common.VERSION

    def __init__(self, *args, **kwargs):
        os.environ['LANG'] = 'C'
        super(HBSDISCSIDriver, self).__init__(*args, **kwargs)
        self.db = kwargs.get('db')
        self.common = None
        self.configuration.append_config_values(common.volume_opts)
        self._stats = {}
        self.context = None
        self.do_setup_status = threading.Event()

    def _check_param(self):
        self.configuration.append_config_values(volume_opts)
        if (self.configuration.hitachi_auth_method and
                self.configuration.hitachi_auth_method not in CHAP_METHOD):
            msg = basic_lib.output_err(601, param='hitachi_auth_method')
            raise exception.HBSDError(message=msg)
        if self.configuration.hitachi_auth_method == 'None':
            self.configuration.hitachi_auth_method = None
        for opt in volume_opts:
            getattr(self.configuration, opt.name)

    def check_param(self):
        try:
            self.common.check_param()
            self._check_param()
        except exception.HBSDError:
            raise
        except Exception as ex:
            msg = basic_lib.output_err(601, param=six.text_type(ex))
            raise exception.HBSDError(message=msg)

    def output_param_to_log(self):
        lock = basic_lib.get_process_lock(self.common.system_lock_file)

        with lock:
            self.common.output_param_to_log('iSCSI')
            for opt in volume_opts:
                if not opt.secret:
                    value = getattr(self.configuration, opt.name)
                    LOG.info('\t%-35s%s' % (opt.name + ': ',
                             six.text_type(value)))

    def _delete_lun_iscsi(self, hostgroups, ldev):
        try:
            self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
        except exception.HBSDNotFound:
            msg = basic_lib.set_msg(301, ldev=ldev)
            LOG.warning(msg)

    def _add_target(self, hostgroups, ldev):
        self.common.add_lun('autargetmap', hostgroups, ldev)

    def _add_initiator(self, hgs, port, gid, host_iqn):
        self.common.command.comm_add_initiator(port, gid, host_iqn)
        hgs.append({'port': port, 'gid': int(gid), 'detected': True})
        LOG.debug("Create iSCSI target for %s" % hgs)

    def _get_unused_gid_iscsi(self, port):
        group_range = self.configuration.hitachi_group_range
        if not group_range:
            group_range = basic_lib.DEFAULT_GROUP_RANGE
        return self.common.command.get_unused_gid_iscsi(group_range, port)

    def _delete_iscsi_target(self, port, target_no, target_alias):
        ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
            port, target_no, target_alias)
        if ret:
            msg = basic_lib.set_msg(
                307, port=port, tno=target_no, alias=target_alias)
            LOG.warning(msg)

    def _delete_chap_user(self, port):
        ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
        if ret:
            msg = basic_lib.set_msg(
                303, user=self.configuration.hitachi_auth_user)
            LOG.warning(msg)

    def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
        return self.common.command.comm_get_hostgroup_info_iscsi(
            hgs, host_iqn, self.configuration.hitachi_target_ports)

    def _discovery_iscsi_target(self, hostgroups):
        for hostgroup in hostgroups:
            ip_addr, ip_port = self.common.command.comm_get_iscsi_ip(
                hostgroup['port'])
            target_iqn = self.common.command.comm_get_target_iqn(
                hostgroup['port'], hostgroup['gid'])
            hostgroup['ip_addr'] = ip_addr
            hostgroup['ip_port'] = ip_port
            hostgroup['target_iqn'] = target_iqn
            LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s"
                      % {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})

    def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
        for port in ports:
            added_hostgroup = False
            added_user = False
            LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
                      'target_iqn: %(tiqn)s target_alias: %(alias)s '
                      'add_iqn: %(aiqn)s)' %
                      {'hgs': hgs, 'port': port, 'tiqn': target_iqn,
                       'alias': target_alias, 'aiqn': add_iqn})
            gid = self.common.command.get_gid_from_targetiqn(
                target_iqn, target_alias, port)
            if gid is None:
                for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
                    gid = None
                    try:
                        gid = self._get_unused_gid_iscsi(port)
                        self.common.command.comm_add_hostgrp_iscsi(
                            port, gid, target_alias, target_iqn)
                        added_hostgroup = True
                    except exception.HBSDNotFound:
                        msg = basic_lib.set_msg(312, resource='GID')
                        LOG.warning(msg)
                        continue
                    except Exception as ex:
                        msg = basic_lib.set_msg(
                            309, port=port, alias=target_alias,
                            reason=six.text_type(ex))
                        LOG.warning(msg)
                        break
                    else:
                        LOG.debug('Completed to add target'
                                  '(port: %(port)s gid: %(gid)d)'
                                  % {'port': port, 'gid': gid})
                        break
            if gid is None:
                LOG.error(_('Failed to add target(port: %s)') % port)
                continue
            try:
                if added_hostgroup:
                    if self.configuration.hitachi_auth_method:
                        added_user = self.common.command.set_chap_authention(
                            port, gid)
                    self.common.command.comm_set_hostgrp_reportportal(
                        port, target_alias)
                self._add_initiator(hgs, port, gid, add_iqn)
            except Exception as ex:
                msg = basic_lib.set_msg(
                    316, port=port, reason=six.text_type(ex))
                LOG.warning(msg)
                if added_hostgroup:
                    if added_user:
                        self._delete_chap_user(port)
                    self._delete_iscsi_target(port, gid, target_alias)

    def add_hostgroup_core(self, hgs, ports, target_iqn,
                           target_alias, add_iqn):
        if ports:
            self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn)

    def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports):
        target_ports = self.configuration.hitachi_target_ports
        group_request = self.configuration.hitachi_group_request
        target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
        if target_ports and group_request:
            target_iqn = '%s.target' % master_iqn

            diff_ports = []
            for port in security_ports:
                for hostgroup in hgs:
                    if hostgroup['port'] == port:
                        break
                else:
                    diff_ports.append(port)

            self.add_hostgroup_core(hgs, diff_ports, target_iqn,
                                    target_alias, master_iqn)
        if not hgs:
            msg = basic_lib.output_err(649)
            raise exception.HBSDError(message=msg)

    def add_hostgroup(self):
        properties = utils.brick_get_connector_properties()
        if 'initiator' not in properties:
            msg = basic_lib.output_err(650, resource='HBA')
            raise exception.HBSDError(message=msg)
        LOG.debug("initiator: %s" % properties['initiator'])
        hostgroups = []
        security_ports = self._get_hostgroup_info_iscsi(
            hostgroups, properties['initiator'])
        self.add_hostgroup_master(hostgroups, properties['initiator'],
                                  properties['ip'], security_ports)

    def _get_properties(self, volume, hostgroups):
        conf = self.configuration
        properties = {}
        self._discovery_iscsi_target(hostgroups)
        hostgroup = hostgroups[0]

        properties['target_discovered'] = True
        properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'],
                                                 hostgroup['ip_port'])
        properties['target_iqn'] = hostgroup['target_iqn']
        properties['target_lun'] = hostgroup['lun']

        if conf.hitachi_auth_method:
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = conf.hitachi_auth_user
            properties['auth_password'] = conf.hitachi_auth_password

        return properties

    def do_setup(self, context):
        self.context = context
        self.common = common.HBSDCommon(self.configuration, self,
                                        context, self.db)

        self.check_param()

        self.common.create_lock_file()

        self.common.command.connect_storage()

        lock = basic_lib.get_process_lock(self.common.service_lock_file)
        with lock:
            self.add_hostgroup()

        self.output_param_to_log()
        self.do_setup_status.set()

    def check_for_setup_error(self):
        pass

    def extend_volume(self, volume, new_size):
        self.do_setup_status.wait()
        self.common.extend_volume(volume, new_size)

    def get_volume_stats(self, refresh=False):
        if refresh:
            if self.do_setup_status.isSet():
                self.common.output_backend_available_once()
                _stats = self.common.update_volume_stats("iSCSI")
                if _stats:
                    self._stats = _stats
        return self._stats

    def create_volume(self, volume):
        self.do_setup_status.wait()
        metadata = self.common.create_volume(volume)
        return metadata

    def delete_volume(self, volume):
        self.do_setup_status.wait()
        self.common.delete_volume(volume)

    def create_snapshot(self, snapshot):
        self.do_setup_status.wait()
        metadata = self.common.create_snapshot(snapshot)
        return metadata

    def delete_snapshot(self, snapshot):
        self.do_setup_status.wait()
        self.common.delete_snapshot(snapshot)

    def create_cloned_volume(self, volume, src_vref):
        self.do_setup_status.wait()
        metadata = self.common.create_cloned_volume(volume, src_vref)
        return metadata

    def create_volume_from_snapshot(self, volume, snapshot):
        self.do_setup_status.wait()
        metadata = self.common.create_volume_from_snapshot(volume, snapshot)
        return metadata

    def _initialize_connection(self, ldev, connector, src_hgs=None):
        LOG.debug("Call _initialize_connection "
                  "(config_group: %(group)s ldev: %(ldev)d)"
                  % {'group': self.configuration.config_group, 'ldev': ldev})
        if src_hgs:
            hostgroups = src_hgs[:]
        else:
            hostgroups = []
            security_ports = self._get_hostgroup_info_iscsi(
                hostgroups, connector['initiator'])
            self.add_hostgroup_master(hostgroups, connector['initiator'],
                                      connector['ip'], security_ports)

        self._add_target(hostgroups, ldev)

        return hostgroups

    def initialize_connection(self, volume, connector):
        self.do_setup_status.wait()
        ldev = self.common.get_ldev(volume)
        if ldev is None:
            msg = basic_lib.output_err(619, volume_id=volume['id'])
            raise exception.HBSDError(message=msg)
        self.common.add_volinfo(ldev, volume['id'])
        with nested(self.common.volume_info[ldev]['lock'],
                    self.common.volume_info[ldev]['in_use']):
            hostgroups = self._initialize_connection(ldev, connector)
            protocol = 'iscsi'
            properties = self._get_properties(volume, hostgroups)
            LOG.debug('Initialize volume_info: %s'
                      % self.common.volume_info)

        LOG.debug('HFCDrv: properties=%s' % properties)
        return {
            'driver_volume_type': protocol,
            'data': properties
        }

    def _terminate_connection(self, ldev, connector, src_hgs):
        LOG.debug("Call _terminate_connection(config_group: %s)"
                  % self.configuration.config_group)
        hostgroups = src_hgs[:]
        self._delete_lun_iscsi(hostgroups, ldev)

        LOG.debug("*** _terminate_ ***")

    def terminate_connection(self, volume, connector, **kwargs):
        self.do_setup_status.wait()
        ldev = self.common.get_ldev(volume)
        if ldev is None:
            msg = basic_lib.set_msg(302, volume_id=volume['id'])
            LOG.warning(msg)
            return

        if 'initiator' not in connector:
            msg = basic_lib.output_err(650, resource='HBA')
            raise exception.HBSDError(message=msg)

        hostgroups = []
        self._get_hostgroup_info_iscsi(hostgroups,
                                       connector['initiator'])
        if not hostgroups:
            msg = basic_lib.output_err(649)
            raise exception.HBSDError(message=msg)

        self.common.add_volinfo(ldev, volume['id'])
        with nested(self.common.volume_info[ldev]['lock'],
                    self.common.volume_info[ldev]['in_use']):
            self._terminate_connection(ldev, connector, hostgroups)

    def create_export(self, context, volume):
        pass

    def ensure_export(self, context, volume):
        pass

    def remove_export(self, context, volume):
        pass

    def pair_initialize_connection(self, unused_ldev):
        pass

    def pair_terminate_connection(self, unused_ldev):
        pass

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        self.do_setup_status.wait()
        if (volume['instance_uuid'] or volume['attached_host']):
            desc = 'volume %s' % volume['id']
            msg = basic_lib.output_err(660, desc=desc)
            raise exception.HBSDError(message=msg)
        super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
                                                          image_service,
                                                          image_meta)
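An equivalent sketch for the iSCSI driver above, with CHAP enabled through the options this file registers; all values are placeholders for illustration only:

    [hitachi-iscsi]
    volume_driver = cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver
    volume_backend_name = hitachi-iscsi
    hitachi_serial_number = 210944
    hitachi_pool_id = 30
    hitachi_target_ports = CL1-A
    hitachi_group_request = True
    hitachi_auth_method = CHAP
    hitachi_add_chap_user = True
    hitachi_auth_user = HBSD-CHAP-user
    hitachi_auth_password = HBSD-CHAP-password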
1086
cinder/volume/drivers/hitachi/hbsd_snm2.py
Normal file
File diff suppressed because it is too large
@@ -1235,6 +1235,91 @@
#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml


#
# Options defined in cinder.volume.drivers.hitachi.hbsd_common
#

# Serial number of storage system (string value)
#hitachi_serial_number=<None>

# Name of an array unit (string value)
#hitachi_unit_name=<None>

# Pool ID of storage system (integer value)
#hitachi_pool_id=<None>

# Thin pool ID of storage system (integer value)
#hitachi_thin_pool_id=<None>

# Range of logical device of storage system (string value)
#hitachi_ldev_range=<None>

# Default copy method of storage system (string value)
#hitachi_default_copy_method=FULL

# Copy speed of storage system (integer value)
#hitachi_copy_speed=3

# Interval to check copy (integer value)
#hitachi_copy_check_interval=3

# Interval to check copy asynchronously (integer value)
#hitachi_async_copy_check_interval=10

# Control port names for HostGroup or iSCSI Target (string
# value)
#hitachi_target_ports=<None>

# Range of group number (string value)
#hitachi_group_range=<None>

# Request for creating HostGroup or iSCSI Target (boolean
# value)
#hitachi_group_request=false


#
# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
#

# Request for FC Zone creating HostGroup (boolean value)
#hitachi_zoning_request=false


#
# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
#

# Instance numbers for HORCM (string value)
#hitachi_horcm_numbers=200,201

# Username of storage system for HORCM (string value)
#hitachi_horcm_user=<None>

# Password of storage system for HORCM (string value)
#hitachi_horcm_password=<None>

# Add to HORCM configuration (boolean value)
#hitachi_horcm_add_conf=true


#
# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi
#

# Add CHAP user (boolean value)
#hitachi_add_chap_user=false

# iSCSI authentication method (string value)
#hitachi_auth_method=<None>

# iSCSI authentication username (string value)
#hitachi_auth_user=HBSD-CHAP-user

# iSCSI authentication password (string value)
#hitachi_auth_password=HBSD-CHAP-password


#
# Options defined in cinder.volume.drivers.huawei
#
@@ -123,3 +123,35 @@ sg_scan: CommandFilter, sg_scan, root

#cinder/backup/services/tsm.py
dsmc:CommandFilter,/usr/bin/dsmc,root

# cinder/volume/drivers/hitachi/hbsd_horcm.py
raidqry: CommandFilter, raidqry, root
raidcom: CommandFilter, raidcom, root
pairsplit: CommandFilter, pairsplit, root
paircreate: CommandFilter, paircreate, root
pairdisplay: CommandFilter, pairdisplay, root
pairevtwait: CommandFilter, pairevtwait, root
horcmstart.sh: CommandFilter, horcmstart.sh, root
horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr

# cinder/volume/drivers/hitachi/hbsd_snm2.py
auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon
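The filters above only whitelist the storage CLIs for rootwrap; the driver still has to run them through Cinder's execute helper. A minimal sketch (hypothetical helper, not code from this patch) of a call that the raidqry CommandFilter entry would permit:

    # Hypothetical illustration: run 'raidqry -h' (HORCM version query) as root.
    # rootwrap accepts it because of the 'raidqry: CommandFilter' line added above.
    from cinder import utils

    def query_horcm_version():
        stdout, _err = utils.execute('raidqry', '-h', run_as_root=True)
        return stdout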