Remove Hitachi volume drivers

Hitachi has decided to provide their drivers out of tree [1]. The
drivers were marked as unsupported in-tree in the Pike release and
are now being removed.

[1] http://lists.openstack.org/pipermail/openstack/2017-March/018812.html

Change-Id: I23867aa98f68298beb5db4558c66c1ffd4e7d6f1
Closes-bug: #1652864
Closes-bug: #1671966
Closes-bug: #1677688
Closes-bug: #1677923
Closes-bug: #1645738
This commit is contained in:
Sean McGinnis
2017-09-26 18:43:21 -05:00
parent df8ec82a18
commit 55d726e5c3
35 changed files with 6 additions and 18996 deletions

View File

@@ -1128,45 +1128,6 @@ class InvalidGroupSnapshotStatus(Invalid):
message = _("Invalid GroupSnapshot Status: %(reason)s")
# Hitachi Block Storage Driver
class HBSDError(VolumeDriverException):
    """Base exception for the Hitachi Block Storage Driver (HBSD)."""
    message = _("HBSD error occurs.")
class HBSDCmdError(HBSDError):
    """Raised when an HBSD CLI command fails.

    Carries the command's exit status and stderr so callers can inspect
    the failure details.
    """

    def __init__(self, message=None, ret=None, err=None):
        # Keep the exit code and stderr of the failed command available
        # on the exception instance for callers that need them.
        self.ret = ret
        self.stderr = err
        super(HBSDCmdError, self).__init__(message=message)
class HBSDBusy(HBSDError):
    """Raised when the target device or resource is busy."""
    # Wrapped in _() for i18n, consistent with the sibling driver
    # exceptions in this module (e.g. HBSDError, VSPBusy).
    message = _("Device or resource is busy.")
class HBSDNotFound(NotFound):
    """Raised when a storage resource cannot be located on the array."""
    message = _("Storage resource could not be found.")
class HBSDVolumeIsBusy(VolumeIsBusy):
    """Raised when an operation targets a volume that is still in use."""
    message = _("Volume %(volume_name)s is busy.")
# Hitachi VSP Driver
class VSPError(VolumeDriverException):
    """Base exception for the Hitachi VSP volume driver."""
    message = _("VSP error occurred. %(message)s")
class VSPBusy(VSPError):
    """Raised when the VSP device or resource is busy."""
    message = _("Device or resource is busy.")
class VSPNotSupported(VSPError):
    """Raised when the requested function is unsupported by the storage."""
    message = _("The function on the storage is not supported.")
# Datera driver
class DateraAPIException(VolumeBackendAPIException):
    """Raised when the Datera backend API returns a bad response."""
    message = _("Bad response from Datera API")
@@ -1297,11 +1258,6 @@ class NotSupportedOperation(Invalid):
code = 405
# Hitachi HNAS drivers
class HNASConnError(VolumeDriverException):
    """Hitachi HNAS connection error.

    The message is a plain passthrough of the caller-supplied text.
    """
    message = "%(message)s"
# NexentaStor driver exception
class NexentaException(VolumeDriverException):
    """NexentaStor driver error.

    The message is a plain passthrough of the caller-supplied text.
    """
    message = "%(message)s"

View File

@@ -99,26 +99,6 @@ from cinder.volume.drivers.fujitsu import eternus_dx_common as \
from cinder.volume.drivers.fusionstorage import dsware as \
cinder_volume_drivers_fusionstorage_dsware
from cinder.volume.drivers import hgst as cinder_volume_drivers_hgst
from cinder.volume.drivers.hitachi import hbsd_common as \
cinder_volume_drivers_hitachi_hbsdcommon
from cinder.volume.drivers.hitachi import hbsd_fc as \
cinder_volume_drivers_hitachi_hbsdfc
from cinder.volume.drivers.hitachi import hbsd_horcm as \
cinder_volume_drivers_hitachi_hbsdhorcm
from cinder.volume.drivers.hitachi import hbsd_iscsi as \
cinder_volume_drivers_hitachi_hbsdiscsi
from cinder.volume.drivers.hitachi import hnas_nfs as \
cinder_volume_drivers_hitachi_hnasnfs
from cinder.volume.drivers.hitachi import hnas_utils as \
cinder_volume_drivers_hitachi_hnasutils
from cinder.volume.drivers.hitachi import vsp_common as \
cinder_volume_drivers_hitachi_vspcommon
from cinder.volume.drivers.hitachi import vsp_fc as \
cinder_volume_drivers_hitachi_vspfc
from cinder.volume.drivers.hitachi import vsp_horcm as \
cinder_volume_drivers_hitachi_vsphorcm
from cinder.volume.drivers.hitachi import vsp_iscsi as \
cinder_volume_drivers_hitachi_vspiscsi
from cinder.volume.drivers.hpe import hpe_3par_common as \
cinder_volume_drivers_hpe_hpe3parcommon
from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \
@@ -296,16 +276,6 @@ def list_opts():
FJ_ETERNUS_DX_OPT_opts,
cinder_volume_drivers_fusionstorage_dsware.volume_opts,
cinder_volume_drivers_hgst.hgst_opts,
cinder_volume_drivers_hitachi_hbsdcommon.volume_opts,
cinder_volume_drivers_hitachi_hbsdfc.volume_opts,
cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts,
cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts,
cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS,
cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts,
cinder_volume_drivers_hitachi_vspcommon.common_opts,
cinder_volume_drivers_hitachi_vspfc.fc_opts,
cinder_volume_drivers_hitachi_vsphorcm.horcm_opts,
cinder_volume_drivers_hitachi_vspiscsi.iscsi_opts,
cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts,
cinder_volume_drivers_huawei_huaweidriver.huawei_opts,

View File

@@ -1,618 +0,0 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Block Storage Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
from cinder.volume.drivers.hitachi import hbsd_snm2
def _exec_hsnm(*args, **kargs):
    """Return the canned SNM2 CLI response registered for this arg tuple."""
    responses = HBSDSNM2FCDriverTest.hsnm_vals
    return responses.get(args)
def _exec_hsnm_get_lu_ret_err(*args, **kargs):
    """Return the canned response simulating a non-zero auluref exit code."""
    responses = HBSDSNM2FCDriverTest.hsnm_get_lu_ret_err
    return responses.get(args)
def _exec_hsnm_get_lu_vol_type_err(*args, **kargs):
    """Return the canned response simulating an unexpected LU volume type."""
    responses = HBSDSNM2FCDriverTest.hsnm_get_lu_vol_type_err
    return responses.get(args)
def _exec_hsnm_get_lu_dppool_err(*args, **kargs):
    """Return the canned response simulating an invalid DP pool field."""
    responses = HBSDSNM2FCDriverTest.hsnm_get_lu_dppool_err
    return responses.get(args)
def _exec_hsnm_get_lu_size_err(*args, **kargs):
    """Return the canned response simulating a non-GiB-aligned LU size."""
    responses = HBSDSNM2FCDriverTest.hsnm_get_lu_size_err
    return responses.get(args)
def _exec_hsnm_get_lu_num_port_err(*args, **kargs):
    """Return the canned response simulating an LU already mapped to ports."""
    responses = HBSDSNM2FCDriverTest.hsnm_get_lu_num_port_err
    return responses.get(args)
class HBSDSNM2FCDriverTest(test.TestCase):
    """Test HBSDSNM2FCDriver.

    The SNM2 CLI is never executed: ``exec_hsnm`` is patched with the
    module-level ``_exec_hsnm*`` side effects, which look each command up
    in the canned ``[exit_code, stdout, stderr]`` tables defined below.
    NOTE(review): internal whitespace of the canned CLI strings is as
    captured; the driver's parsers are column-sensitive — confirm against
    real SNM2 output before editing.
    """

    # Canned `audppool` output: one DP pool (id 30) used for stats.
    audppool_result = " DP RAID \
Current Utilization Current Over Replication\
Available Current Replication Rotational \
\
Stripe \
Needing Preparation\n\
Pool Tier Mode Level Total Capacity Consumed Capacity \
Percent Provisioning Percent Capacity \
Utilization Percent Type Speed Encryption Status \
\
Reconstruction Progress Size Capacity\n\
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
1% 24835% 532.0 GB \
1% SAS 10000rpm N/A Normal \
N/A \
256KB 0.0 GB"

    # Canned ShadowImage pair listing (one split pair).
    aureplicationlocal_result = "Pair Name LUN Pair \
LUN Status Copy Type Group \
Point-in-Time MU Number\n\
0 10 0 Split( 99%) \
ShadowImage ---:Ungrouped N/A\
"

    # Canned `auluref` LU listings: a healthy LU and a bad-status variant.
    auluref_result = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
    auluref_result1 = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"

    # Canned host group / FC port / mapping listings.
    auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
Name Port Name Host Group\n\
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
Assigned WWN\n Name Port Name \
Host Group\n abcdefg 10000000C97BCE7A \
001:HBSD-01"
    aufibre1_result = "Port Information\n\
Port Address\n CTL Port\
Node Name Port Name Setting Current\n 0 0 \
50060E801053C2E0 50060E801053C2E0 0000EF 272700"
    auhgmap_result = "Mapping Mode = ON\nPort Group \
H-LUN LUN\n 00 001:HBSD-00 0 1000"

    # Maps (command, argument-string) tuples to the [exit_code, stdout,
    # stderr] triple the mocked exec_hsnm returns for that invocation.
    hsnm_vals = {
        ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
        ('aureplicationlocal',
         '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
        [0, "", ""],
        ('aureplicationlocal',
         '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
        [1, "", ""],
        ('aureplicationlocal', '-unit None -refer -pvol 1'):
        [0, "%s" % aureplicationlocal_result, ""],
        ('aureplicationlocal', '-unit None -refer -pvol 3'):
        [1, "", "DMEC002015"],
        ('aureplicationlocal', '-unit None -refer -svol 3'):
        [1, "", "DMEC002015"],
        ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
        [0, "", ""],
        ('auluchgsize', '-unit None -lu 1 -size 256g'):
        [0, "", ""],
        ('auludel', '-unit None -lu 1 -f'): [0, 0, ""],
        ('auludel', '-unit None -lu 3 -f'): [1, 0, ""],
        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""],
        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
        ('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
        ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""],
        ('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""],
        ('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""],
        ('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""],
        ('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]}

    # Error-path variants of the auluref output, each paired with the
    # response table used by the matching _exec_hsnm_get_lu_* side effect.
    auluref_ret_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
    hsnm_get_lu_ret_err = {
        ('auluref', '-unit None -lu 0'): [1, "%s" % auluref_ret_err, ""],
    }
    auluref_vol_type_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
    hsnm_get_lu_vol_type_err = {
        ('auluref', '-unit None -lu 0'):
        [0, "%s" % auluref_vol_type_err, ""],
    }
    auluref_dppool_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 N/A Enable 0 Normal"
    hsnm_get_lu_dppool_err = {
        ('auluref', '-unit None -lu 0'):
        [0, "%s" % auluref_dppool_err, ""],
    }
    auluref_size_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097151 blocks 256KB N/A 0 Enable 0 Normal"
    hsnm_get_lu_size_err = {
        ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_size_err, ""],
    }
    auluref_num_port_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 1 Normal"
    hsnm_get_lu_num_port_err = {
        ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_num_port_err, ""],
    }

    # The following information is passed on to tests, when creating a volume
    _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
               'provider_location': '1', 'name': 'test',
               'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
    test_volume = {'name': 'test_volume', 'size': 128,
                   'id': 'test-volume-0',
                   'provider_location': '1', 'status': 'available'}
    test_volume_larger = {'name': 'test_volume', 'size': 256,
                          'id': 'test-volume-0',
                          'provider_location': '1', 'status': 'available'}
    test_volume_error = {'name': 'test_volume_error', 'size': 256,
                         'id': 'test-volume-error',
                         'provider_location': '3', 'status': 'available'}
    test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
                          'id': 'test-volume-error',
                          'provider_location': None, 'status': 'available'}
    test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
                          'id': 'test-volume-error',
                          'provider_location': '1', 'status': 'available'}
    test_volume_error3 = {'name': 'test_volume3', 'size': 128,
                          'id': 'test-volume3',
                          'volume_metadata': [{'key': 'type',
                                               'value': 'V-VOL'}],
                          'provider_location': '1', 'status': 'available'}
    test_volume_error4 = {'name': 'test_volume4', 'size': 128,
                          'id': 'test-volume2',
                          'provider_location': '3', 'status': 'available'}
    test_snapshot = {'volume_name': 'test', 'size': 128,
                     'volume_size': 128, 'name': 'test-snap',
                     'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
                     'provider_location': '1', 'status': 'available'}
    test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
                            'volume_size': 128, 'name': 'test-snap',
                            'volume_id': 0, 'id': 'test-snap-0',
                            'volume': test_volume_error,
                            'provider_location': None, 'status': 'available'}

    # References used by the manage_existing* tests; UNIT_NAME matches the
    # array unit configured in _setup_config.
    UNIT_NAME = 'HUS110_91122819'
    test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME}
    test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME}
    test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME}
    test_existing_no_ldev_ref = {'unit_name': UNIT_NAME}
    test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None}
    test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'}
    test_existing_no_unit_ref = {'ldev': '0'}

    def __init__(self, *args, **kwargs):
        super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs)

    def setUp(self):
        super(HBSDSNM2FCDriverTest, self).setUp()
        self._setup_config()
        self._setup_driver()

    def _setup_config(self):
        # Build a mock driver configuration matching the canned CLI data
        # above (pool 30, port 00, unit "None").
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.hitachi_pool_id = 30
        self.configuration.hitachi_target_ports = "00"
        self.configuration.hitachi_debug_level = 0
        self.configuration.hitachi_serial_number = "None"
        self.configuration.hitachi_unit_name = "None"
        self.configuration.hitachi_group_request = False
        self.configuration.hitachi_zoning_request = False
        self.configuration.config_group = "None"
        self.configuration.hitachi_ldev_range = [0, 100]
        self.configuration.hitachi_default_copy_method = 'SI'
        self.configuration.hitachi_copy_check_interval = 1
        self.configuration.hitachi_copy_speed = 3

    def _setup_driver(self):
        # Wire the FC driver to an SNM2 command backend by hand, skipping
        # the normal do_setup path (no real array is available).
        self.driver = hbsd_fc.HBSDFCDriver(
            configuration=self.configuration)
        context = None
        db = None
        self.driver.common = hbsd_common.HBSDCommon(
            self.configuration, self.driver, context, db)
        self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
        self.driver.common.pair_flock = \
            self.driver.common.command.set_pair_flock()
        self.driver.common.horcmgr_flock = \
            self.driver.common.command.set_horcmgr_flock()
        self.driver.do_setup_status.set()

    # API test cases
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume(self, arg1, arg2, arg3):
        """test create_volume."""
        ret = self.driver.create_volume(self._VOLUME)
        vol = self._VOLUME.copy()
        vol['provider_location'] = ret['provider_location']
        self.assertEqual('1', vol['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume_error(self, arg1, arg2, arg3):
        """test create_volume error path (auluadd exits non-zero)."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.create_volume,
                          self.test_volume_error)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_get_volume_stats(self, arg1, arg2):
        """test get_volume_stats."""
        stats = self.driver.get_volume_stats(True)
        self.assertEqual('Hitachi', stats['vendor_name'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_get_volume_stats_error(self, arg1, arg2):
        """test get_volume_stats with a pool id absent from audppool."""
        self.configuration.hitachi_pool_id = 29
        stats = self.driver.get_volume_stats(True)
        self.assertEqual({}, stats)
        self.configuration.hitachi_pool_id = 30

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_extend_volume(self, arg1, arg2):
        """test extend_volume."""
        self.driver.extend_volume(self._VOLUME, 256)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_extend_volume_error(self, arg1, arg2):
        """test extend_volume with a V-VOL, which cannot be extended."""
        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
                          self.test_volume_error3, 256)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_delete_volume(self, arg1, arg2):
        """test delete_volume."""
        self.driver.delete_volume(self._VOLUME)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_delete_volume_error(self, arg1, arg2):
        """test delete_volume error path (auludel exits non-zero)."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.delete_volume,
                          self.test_volume_error4)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
        """test create_snapshot."""
        ret = self.driver.create_volume(self._VOLUME)
        ret = self.driver.create_snapshot(self.test_snapshot)
        self.assertEqual('1', ret['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=test_volume_error)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
        """test create_snapshot error path (pair creation fails)."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.create_snapshot,
                          self.test_snapshot_error2)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_delete_snapshot(self, arg1, arg2):
        """test delete_snapshot."""
        self.driver.delete_snapshot(self.test_snapshot)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_delete_snapshot_error(self, arg1, arg2):
        """test delete_snapshot with a snapshot lacking provider_location."""
        self.driver.delete_snapshot(self.test_snapshot_error2)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
        """test create_volume_from_snapshot."""
        vol = self.driver.create_volume_from_snapshot(self._VOLUME,
                                                      self.test_snapshot)
        self.assertIsNotNone(vol)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
        """test create_volume_from_snapshot error path."""
        self.assertRaises(exception.HBSDError,
                          self.driver.create_volume_from_snapshot,
                          self.test_volume_error2, self.test_snapshot)
        return

    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
        """test create_cloned_volume."""
        vol = self.driver.create_cloned_volume(self._VOLUME,
                                               self.test_volume)
        self.assertIsNotNone(vol)
        return

    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume_larger(self, arg1, arg2, arg3, arg4, arg5):
        """test create_cloned_volume to a larger target (clone + extend)."""
        vol = self.driver.create_cloned_volume(self.test_volume_larger,
                                               self._VOLUME)
        self.assertIsNotNone(vol)
        arg3.assert_called_once_with(self.test_volume_larger,
                                     self.test_volume_larger['size'])
        return

    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=test_volume_error1)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
        """test create_cloned_volume from a source without an LDEV."""
        self.assertRaises(exception.HBSDError,
                          self.driver.create_cloned_volume,
                          self._VOLUME, self.test_volume_error1)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_initialize_connection(self, arg1, arg2):
        """test initialize connection."""
        connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
        rc = self.driver.initialize_connection(self._VOLUME, connector)
        self.assertEqual('fibre_channel', rc['driver_volume_type'])
        self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
        self.assertEqual(1, rc['data']['target_lun'])
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_initialize_connection_error(self, arg1, arg2):
        """test initialize connection with an unknown initiator WWPN."""
        connector = {'wwpns': 'x', 'ip': '0xc0a80100'}
        self.assertRaises(exception.HBSDError,
                          self.driver.initialize_connection,
                          self._VOLUME, connector)
        return

    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_terminate_connection(self, arg1):
        """test terminate connection."""
        connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
        rc = self.driver.terminate_connection(self._VOLUME, connector)
        self.assertEqual('fibre_channel', rc['driver_volume_type'])
        self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
        return

    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_terminate_connection_error(self, arg1):
        """test terminate connection with a connector lacking wwpns."""
        connector = {'ip': '0xc0a80100'}
        self.assertRaises(exception.HBSDError,
                          self.driver.terminate_connection,
                          self._VOLUME, connector)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_manage_existing(self, arg1, arg2):
        rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref)
        self.assertEqual(0, rc['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        size = self.driver.manage_existing_get_size(self._VOLUME,
                                                    self.test_existing_ref)
        self.assertEqual(1, size)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_none_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_invalid_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_no_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_none_unit_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_invalid_unit_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_no_unit_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_ret_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_ret_err(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_vol_type_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_vol_type_err(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_dppool_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_dppool_err(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_size_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_size_err(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_num_port_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_num_port_err(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_unmanage(self, arg1, arg2):
        self.driver.unmanage(self._VOLUME)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_unmanage_busy(self, arg1, arg2):
        self.assertRaises(exception.HBSDVolumeIsBusy,
                          self.driver.unmanage, self.test_volume_error3)

View File

@@ -1,607 +0,0 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Block Storage Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
from cinder.volume.drivers.hitachi import hbsd_snm2
def _exec_hsnm(*args, **kargs):
    """Return the canned SNM2 CLI response registered for this arg tuple."""
    responses = HBSDSNM2ISCSIDriverTest.hsnm_vals
    return responses.get(args)
def _exec_hsnm_init(*args, **kargs):
    """Return the canned SNM2 response used during driver initialization."""
    responses = HBSDSNM2ISCSIDriverTest.hsnm_vals_init
    return responses.get(args)
class HBSDSNM2ISCSIDriverTest(test.TestCase):
"""Test HBSDSNM2ISCSIDriver."""
audppool_result = " DP RAID \
Current Utilization Current Over Replication\
Available Current Replication Rotational \
\
Stripe \
Needing Preparation\n\
Pool Tier Mode Level Total Capacity Consumed Capacity \
Percent Provisioning Percent Capacity \
Utilization Percent Type Speed Encryption Status \
\
Reconstruction Progress Size Capacity\n\
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
1% 24835% 532.0 GB \
1% SAS 10000rpm N/A Normal \
N/A \
256KB 0.0 GB"
aureplicationlocal_result = "Pair Name LUN Pair \
LUN Status Copy Type Group \
Point-in-Time MU Number\n\
0 10 0 Split( 99%) \
ShadowImage ---:Ungrouped N/A\
"
auluref_result = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
Name Port Name Host Group\n\
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
Assigned WWN\n Name Port Name \
Host Group\n abcdefg 10000000C97BCE7A \
001:HBSD-01"
autargetini_result = "Port 00 Target Security ON\n\
Target Name \
iSCSI Name\n\
001:HBSD-01 \
iqn"
autargetini_result2 = "Port 00 Target Security ON\n\
Target Name \
iSCSI Name"
autargetmap_result = "Mapping Mode = ON\n\
Port Target H-LUN LUN\n\
00 001:HBSD-01 0 1000"
auiscsi_result = "Port 00\n\
Port Number : 3260\n\
Keep Alive Timer[sec.] : 60\n\
MTU : 1500\n\
Transfer Rate : 1Gbps\n\
Link Status : Link Up\n\
Ether Address : 00:00:87:33:D1:3E\n\
IPv4\n\
IPv4 Address : 192.168.0.1\n\
IPv4 Subnet Mask : 255.255.252.0\n\
IPv4 Default Gateway : 0.0.0.0\n\
IPv6 Status : Disable\n\
Connecting Hosts : 0\n\
Result : Normal\n\
VLAN Status : Disable\n\
VLAN ID : N/A\n\
Header Digest : Enable\n\
Data Digest : Enable\n\
Window Scale : Disable"
autargetdef_result = "Port 00\n\
Authentication Mutual\n\
Target Method CHAP Algorithm \
Authentication\n\
001:T000 None --- ---\n\
User Name : ---\n\
iSCSI Name : iqn-target"
hsnm_vals = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, "", ""],
('aureplicationlocal',
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
[1, "", ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -refer -pvol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -refer -svol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, "", ""],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'):
[1, "", ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, "", ""],
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
('auludel', '-unit None -lu 3 -f'): [1, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""],
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
('autargetini', '-unit None -refer'):
[0, "%s" % autargetini_result, ""],
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
[0, "", ""],
('autargetmap', '-unit None -refer'):
[0, "%s" % autargetmap_result, ""],
('autargetdef',
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
-authmethod None'):
[0, "", ""],
('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \
-iname iqnX.target -authmethod None'):
[1, "", ""],
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
-ReportFullPortalList enable'):
[0, "", ""],
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
('autargetdef', '-unit None -refer'):
[0, "%s" % autargetdef_result, ""]}
hsnm_vals_init = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, 0, ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, 0, ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, 0, ""],
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
('autargetini', '-unit None -refer'):
[0, "%s" % autargetini_result2, ""],
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
[0, "", ""],
('autargetmap', '-unit None -refer'):
[0, "%s" % autargetmap_result, ""],
('autargetdef',
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
-authmethod None'):
[0, "", ""],
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
-ReportFullPortalList enable'):
[0, "", ""],
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
('autargetdef', '-unit None -refer'):
[0, "%s" % autargetdef_result, ""],
('auman', '-help'):
[0, "Version 27.50", ""]}
# The following information is passed on to tests, when creating a volume
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
'provider_location': '1', 'name': 'test',
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
test_volume = {'name': 'test_volume', 'size': 128,
'id': 'test-volume-0',
'provider_location': '1', 'status': 'available'}
test_volume_larger = {'name': 'test_volume', 'size': 256,
'id': 'test-volume-0',
'provider_location': '1', 'status': 'available'}
test_volume_error = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '3', 'status': 'available'}
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
'id': 'test-volume-error',
'provider_location': None, 'status': 'available'}
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '1', 'status': 'available'}
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
'id': 'test-volume3',
'volume_metadata': [{'key': 'type',
'value': 'V-VOL'}],
'provider_location': '1', 'status': 'available'}
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
'id': 'test-volume2',
'provider_location': '3', 'status': 'available'}
test_snapshot = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
'provider_location': '1', 'status': 'available'}
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0',
'volume': test_volume_error,
'provider_location': None, 'status': 'available'}
UNIT_NAME = 'HUS110_91122819'
test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME}
test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME}
test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME}
test_existing_no_ldev_ref = {'unit_name': UNIT_NAME}
test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None}
test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'}
test_existing_no_unit_ref = {'ldev': '0'}
def __init__(self, *args, **kwargs):
    # Redundant pass-through constructor: it only forwards to
    # test.TestCase.__init__ and could be removed without effect.
    super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs)
@mock.patch.object(utils, 'brick_get_connector_properties',
                   return_value={'ip': '0.0.0.0',
                                 'initiator': 'iqn'})
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                   side_effect=_exec_hsnm_init)
@mock.patch.object(utils, 'execute',
                   return_value=['', ''])
def setUp(self, args1, arg2, arg3, arg4):
    """Build and fully initialize the iSCSI driver under test.

    All SNM2 CLI calls are answered from ``hsnm_vals_init`` and
    process-level locking/execution is mocked out, so the whole
    driver bootstrap sequence below runs without touching a real
    array or the local system.
    """
    super(HBSDSNM2ISCSIDriverTest, self).setUp()
    self._setup_config()
    self._setup_driver()
    # Replays the real driver start-up path: parameter validation,
    # lock-file creation, storage connection and host-group setup.
    self.driver.check_param()
    self.driver.common.create_lock_file()
    self.driver.common.command.connect_storage()
    self.driver.max_hostgroups = \
        self.driver.common.command.get_max_hostgroups()
    self.driver.add_hostgroup()
    self.driver.output_param_to_log()
    # Mark setup complete so driver API calls don't block on it.
    self.driver.do_setup_status.set()
def _setup_config(self):
    """Create a mock Configuration carrying the fake HUS/SNM2 settings."""
    self.configuration = mock.Mock(conf.Configuration)
    # Pool / port layout on the fake array.
    self.configuration.hitachi_pool_id = 30
    self.configuration.hitachi_thin_pool_id = 31
    self.configuration.hitachi_target_ports = "00"
    self.configuration.hitachi_debug_level = 0
    self.configuration.hitachi_serial_number = None
    # unit_name "None" matches the '-unit None' strings keyed in hsnm_vals.
    self.configuration.hitachi_unit_name = "None"
    self.configuration.hitachi_group_request = True
    self.configuration.hitachi_group_range = "0-1"
    self.configuration.config_group = "None"
    self.configuration.hitachi_ldev_range = "0-100"
    # Copy-pair behaviour used by snapshot/clone tests.
    self.configuration.hitachi_default_copy_method = 'FULL'
    self.configuration.hitachi_copy_check_interval = 1
    self.configuration.hitachi_async_copy_check_interval = 1
    self.configuration.hitachi_copy_speed = 3
    # CHAP authentication is disabled by default in these tests.
    self.configuration.hitachi_auth_method = None
    self.configuration.hitachi_auth_user = "HBSD-CHAP-user"
    self.configuration.hitachi_auth_password = "HBSD-CHAP-password"
    self.configuration.hitachi_add_chap_user = "False"
def _setup_driver(self):
    """Instantiate the iSCSI driver and wire in SNM2 command objects."""
    self.driver = hbsd_iscsi.HBSDISCSIDriver(
        configuration=self.configuration)
    # No real context/db needed; HBSDCommon tolerates None here.
    context = None
    db = None
    self.driver.common = hbsd_common.HBSDCommon(
        self.configuration, self.driver, context, db)
    # Route every array operation through the (mocked) SNM2 CLI wrapper.
    self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
    self.driver.common.horcmgr_flock = \
        self.driver.common.command.set_horcmgr_flock()
# API test cases
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume(self, mock_exec_hsnm, mock_get_meta, mock_lock):
    """create_volume must record the new LDEV in provider_location."""
    result = self.driver.create_volume(self._VOLUME)
    updated = self._VOLUME.copy()
    updated['provider_location'] = result['provider_location']
    self.assertEqual('1', updated['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_error(self, arg1, arg2, arg3):
"""test create_volume."""
self.assertRaises(exception.HBSDCmdError,
self.driver.create_volume,
self.test_volume_error)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats(self, mock_exec_hsnm, mock_lock):
    """get_volume_stats must report Hitachi as the backend vendor."""
    reported = self.driver.get_volume_stats(True)
    vendor = reported['vendor_name']
    self.assertEqual('Hitachi', vendor)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats_error(self, arg1, arg2):
    """get_volume_stats must return {} for an unknown DP pool id.

    Pool 29 does not appear in the canned 'audppool -refer -g' output,
    so the driver cannot compute capacity figures.
    """
    self.configuration.hitachi_pool_id = 29
    # Restore the valid pool id via addCleanup so it happens even if the
    # assertion fails; the original inline restore was skipped on failure
    # and could pollute later tests in this class.
    self.addCleanup(setattr, self.configuration, 'hitachi_pool_id', 30)
    stats = self.driver.get_volume_stats(True)
    self.assertEqual({}, stats)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume(self, arg1, arg2):
"""test extend_volume."""
self.driver.extend_volume(self._VOLUME, 256)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume_error(self, arg1, arg2):
"""test extend_volume."""
self.assertRaises(exception.HBSDError, self.driver.extend_volume,
self.test_volume_error3, 256)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume(self, arg1, arg2):
"""test delete_volume."""
self.driver.delete_volume(self._VOLUME)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume_error(self, arg1, arg2):
"""test delete_volume."""
self.assertRaises(exception.HBSDCmdError,
self.driver.delete_volume,
self.test_volume_error4)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
"""test create_snapshot."""
ret = self.driver.create_volume(self._VOLUME)
ret = self.driver.create_snapshot(self.test_snapshot)
self.assertEqual('1', ret['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=test_volume_error)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
"""test create_snapshot."""
self.assertRaises(exception.HBSDCmdError,
self.driver.create_snapshot,
self.test_snapshot_error2)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot_error(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot_error2)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
"""test create_volume_from_snapshot."""
vol = self.driver.create_volume_from_snapshot(self._VOLUME,
self.test_snapshot)
self.assertIsNotNone(vol)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
"""test create_volume_from_snapshot."""
self.assertRaises(exception.HBSDError,
self.driver.create_volume_from_snapshot,
self.test_volume_error2, self.test_snapshot)
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
"""test create_cloned_volume."""
vol = self.driver.create_cloned_volume(self._VOLUME,
self.test_snapshot)
self.assertIsNotNone(vol)
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume_larger(self, arg1, arg2, arg3, arg4, arg5):
"""test create_cloned_volume."""
vol = self.driver.create_cloned_volume(self.test_volume_larger,
self._VOLUME)
self.assertIsNotNone(vol)
arg3.assert_called_once_with(self.test_volume_larger,
self.test_volume_larger['size'])
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=test_volume_error1)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
"""test create_cloned_volume."""
self.assertRaises(exception.HBSDError,
self.driver.create_cloned_volume,
self._VOLUME, self.test_volume_error1)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection(self, mock_exec_hsnm, mock_lock):
    """initialize_connection must return iSCSI target details."""
    connector = {'wwpns': '0x100000',
                 'ip': '0.0.0.0',
                 'initiator': 'iqn'}
    conn_info = self.driver.initialize_connection(self._VOLUME, connector)
    self.assertEqual('iscsi', conn_info['driver_volume_type'])
    self.assertEqual('iqn-target', conn_info['data']['target_iqn'])
    self.assertEqual(1, conn_info['data']['target_lun'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection_error(self, arg1, arg2):
"""test initialize connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqnX'}
self.assertRaises(exception.HBSDError,
self.driver.initialize_connection,
self._VOLUME, connector)
return
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection(self, mock_exec_hsnm):
    """terminate_connection must succeed for a complete connector."""
    connector = {'wwpns': '0x100000',
                 'ip': '0.0.0.0',
                 'initiator': 'iqn'}
    self.driver.terminate_connection(self._VOLUME, connector)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection_error(self, arg1):
"""test terminate connection."""
connector = {'ip': '0.0.0.0'}
self.assertRaises(exception.HBSDError,
self.driver.terminate_connection,
self._VOLUME, connector)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_manage_existing(self, arg1, arg2):
    """manage_existing must adopt ldev 0 from the existing-ref.

    Note the asserted provider_location is the int 0, not the string
    form used by create_volume.
    """
    rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref)
    self.assertEqual(0, rc['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
size = self.driver.manage_existing_get_size(self._VOLUME,
self.test_existing_ref)
self.assertEqual(1, size)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_none_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_invalid_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_no_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_none_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_invalid_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_no_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_unmanage(self, arg1, arg2):
self.driver.unmanage(self._VOLUME)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_unmanage_busy(self, arg1, arg2):
self.assertRaises(exception.HBSDVolumeIsBusy,
self.driver.unmanage, self.test_volume_error3)

View File

@@ -1,519 +0,0 @@
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import os
import paramiko
import time
from oslo_concurrency import processutils as putils
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume.drivers.hitachi import hnas_backend
evsfs_list = "\n\
FS ID FS Label FS Permanent ID EVS ID EVS Label\n\
----- ----------- ------------------ ------ ---------\n\
1026 gold 0xaadee0e035cfc0b7 1 EVS-Manila\n\
1029 test_hdp 0xaadee09634acfcac 1 EVS-Manila\n\
1030 fs-cinder 0xaadfcf742fba644e 2 EVS-Cinder\n\
1031 cinder2 0xaadfcf7e0769a6bc 3 EVS-Test\n\
1024 fs02-husvm 0xaac8715e2e9406cd 3 EVS-Test\n\
\n"
cluster_getmac = "cluster MAC: 83-68-96-AA-DA-5D"
version = "\n\
Model: HNAS 4040 \n\n\
Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\n\
Hardware: NAS Platform (M2SEKW1339109) \n\n\
board MMB1 \n\
mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\n\
board MFB1 \n\
mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \
RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \
WD v00E2 DI v001A FC v0002 \n\
Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\n\
board MCP \n\
Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\
\n"
evsipaddr = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\
admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\
evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\
\n"
df_f = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs-cinder 2 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"
df_f_tb = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs-cinder 2 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"
nfs_export = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: fs-cinder \n\
File system size: 250 GB \n\
File system free space: 228 GB \n\
File system state: \n\
formatted = Yes \n\
mounted = Yes \n\
failed = No \n\
thin provisioned = No \n\
Access snapshots: Yes \n\
Display snapshots: Yes \n\
Read Caching: Disabled \n\
Disaster recovery setting: \n\
Recovered = No \n\
Transfer setting = Use file system default \n\n\
Export configuration: \n\
127.0.0.1 \n\
\n"
df_f_single_evs = "\n\
ID Label Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs-cinder 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"
nfs_export_tb = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: fs-cinder \n\
File system size: 250 TB \n\
File system free space: 228 TB \n\
\n"
nfs_export_not_available = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: fs-cinder \n\
*** not available *** \n\
\n"
evs_list = "\n\
Node EVS ID Type Label Enabled Status IP Address Port \n\
---- ------ ------- --------------- ------- ------ ------------------- ---- \n\
1 Cluster hnas4040 Yes Online 192.0.2.200 eth1 \n\
1 0 Admin hnas4040 Yes Online 192.0.2.2 eth1 \n\
172.24.44.15 eth0 \n\
172.24.49.101 ag2 \n\
1 1 Service EVS-Manila Yes Online 172.24.49.32 ag2 \n\
172.24.48.32 ag4 \n\
1 2 Service EVS-Cinder Yes Online 172.24.49.21 ag2 \n\
1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\
\n"
lu_list = "Name : cinder-lu \n\
Comment: \n\
Path : /.cinder/cinder-lu \n\
Size : 2 GB \n\
File System : fs-cinder \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"
lu_list_tb = "Name : test-lu \n\
Comment: \n\
Path : /.cinder/test-lu \n\
Size : 2 TB \n\
File System : fs-cinder \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"
hnas_fs_list = "%(l1)s\n\n%(l2)s\n\n " % {'l1': lu_list,
'l2': lu_list_tb}
add_targetsecret = "Target created successfully."
backend_opts = {'mgmt_ip0': '0.0.0.0',
'cluster_admin_ip0': None,
'ssh_port': '22',
'username': 'supervisor',
'password': 'supervisor',
'ssh_private_key': 'test_key'}
target_chap_disable = "\n\
Alias : cinder-default \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\
Comment : \n\
Secret : \n\
Authentication : Disabled \n\
Logical units : No logical units. \n\
\n\
LUN Logical Unit \n\
---- -------------------------------- \n\
0 cinder-lu \n\
1 volume-99da7ae7-1e7f-4d57-8bf... \n\
\n\
Access configuration: \n\
"
file_clone_stat = "Clone: /nfs_cinder/cinder-lu \n\
SnapshotFile: FileHandle[00000000004010000d20116826ffffffffffffff] \n\
\n\
SnapshotFile: FileHandle[00000000004029000d81f26826ffffffffffffff] \n\
"
file_clone_stat_snap_file1 = "\
FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\
References: \n\
Clone: /nfs_cinder/cinder-lu \n\
Clone: /nfs_cinder/snapshot-lu-1 \n\
Clone: /nfs_cinder/snapshot-lu-2 \n\
"
file_clone_stat_snap_file2 = "\
FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\
References: \n\
Clone: /nfs_cinder/volume-not-used \n\
Clone: /nfs_cinder/snapshot-1 \n\
Clone: /nfs_cinder/snapshot-2 \n\
"
not_a_clone = "\
file-clone-stat: failed to get predecessor snapshot-files: File is not a clone"
file_relatives =\
[' /nfs_cinder/snapshot-lu-1 ',
' /nfs_cinder/snapshot-lu-2 ',
' /nfs_cinder/volume-not-used ',
' /nfs_cinder/snapshot-1 ',
' /nfs_cinder/snapshot-2 ']
class HDSHNASBackendTest(test.TestCase):
def __init__(self, *args, **kwargs):
    # Redundant pass-through constructor: it only forwards to
    # test.TestCase.__init__ and could be removed without effect.
    super(HDSHNASBackendTest, self).__init__(*args, **kwargs)
def setUp(self):
    """Create a fresh HNAS SSH backend wired to the fake backend_opts."""
    super(HDSHNASBackendTest, self).setUp()
    self.hnas_backend = hnas_backend.HNASSSHBackend(backend_opts)
def test_run_cmd(self):
    """_run_cmd must relay the remote command's stdout unchanged."""
    self.mock_object(os.path, 'isfile', return_value=True)
    # These collaborators only need to be silenced, not configured.
    for module, attr in ((utils, 'execute'),
                         (time, 'sleep'),
                         (paramiko, 'SSHClient'),
                         (paramiko.RSAKey, 'from_private_key_file')):
        self.mock_object(module, attr)
    self.mock_object(putils, 'ssh_execute',
                     return_value=(df_f, ''))
    stdout, _stderr = self.hnas_backend._run_cmd('ssh', '0.0.0.0',
                                                 'supervisor', 'supervisor',
                                                 'df', '-a')
    self.assertIn('fs-cinder', stdout)
    self.assertIn('WFS-2,128 DSBs', stdout)
def test_run_cmd_retry_exception(self):
self.hnas_backend.cluster_admin_ip0 = '172.24.44.11'
exceptions = [putils.ProcessExecutionError(stderr='Connection reset'),
putils.ProcessExecutionError(stderr='Failed to establish'
' SSC connection'),
putils.ProcessExecutionError(stderr='Connection reset'),
putils.ProcessExecutionError(stderr='Connection reset'),
putils.ProcessExecutionError(stderr='Connection reset')]
self.mock_object(os.path, 'isfile',
return_value=True)
self.mock_object(utils, 'execute')
self.mock_object(time, 'sleep')
self.mock_object(paramiko, 'SSHClient')
self.mock_object(paramiko.RSAKey, 'from_private_key_file')
self.mock_object(putils, 'ssh_execute',
side_effect=exceptions)
self.assertRaises(exception.HNASConnError, self.hnas_backend._run_cmd,
'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df',
'-a')
def test_run_cmd_exception_without_retry(self):
self.mock_object(os.path, 'isfile',
return_value=True)
self.mock_object(utils, 'execute')
self.mock_object(time, 'sleep')
self.mock_object(paramiko, 'SSHClient')
self.mock_object(paramiko.RSAKey, 'from_private_key_file')
self.mock_object(putils, 'ssh_execute',
side_effect=putils.ProcessExecutionError(
stderr='Error'))
self.assertRaises(putils.ProcessExecutionError,
self.hnas_backend._run_cmd, 'ssh', '0.0.0.0',
'supervisor', 'supervisor', 'df', '-a')
def test_get_version(self):
    """get_version must merge 'cluster-getmac' and 'ver' CLI output."""
    # First _run_cmd call answers cluster-getmac, second answers ver.
    self.mock_object(self.hnas_backend, '_run_cmd',
                     side_effect=[(cluster_getmac, ''), (version, '')])
    info = self.hnas_backend.get_version()
    expected = {'hardware': 'NAS Platform (M2SEKW1339109)',
                'mac': '83-68-96-AA-DA-5D',
                'version': '11.2.3319.14',
                'model': 'HNAS 4040',
                'serial': 'B1339745'}
    self.assertEqual(expected, info)
def test_get_evs(self):
    """get_evs must resolve the fs-cinder label to EVS id '2'."""
    self.mock_object(self.hnas_backend, '_run_cmd',
                     return_value=(evsfs_list, ''))
    evs_id = self.hnas_backend.get_evs('fs-cinder')
    self.assertEqual('2', evs_id)
def test_get_export_list(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(nfs_export, ''),
(evsfs_list, ''),
(evs_list, '')])
out = self.hnas_backend.get_export_list()
self.assertEqual('fs-cinder', out[0]['fs'])
self.assertEqual(250.0, out[0]['size'])
self.assertEqual(228.0, out[0]['free'])
self.assertEqual('/export01-husvm', out[0]['path'])
def test_get_export_list_data_not_available(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(nfs_export_not_available, ''),
(evsfs_list, ''),
(evs_list, '')])
out = self.hnas_backend.get_export_list()
self.assertEqual('fs-cinder', out[0]['fs'])
self.assertEqual('/export01-husvm', out[0]['path'])
self.assertEqual(-1, out[0]['size'])
self.assertEqual(-1, out[0]['free'])
    def test_get_export_list_tb(self):
        """TB-sized exports are converted to GB (value * 1024)."""
        size = float(250 * 1024)
        free = float(228 * 1024)
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(nfs_export_tb, ''),
                                      (evsfs_list, ''),
                                      (evs_list, '')])
        out = self.hnas_backend.get_export_list()
        self.assertEqual('fs-cinder', out[0]['fs'])
        self.assertEqual(size, out[0]['size'])
        self.assertEqual(free, out[0]['free'])
        self.assertEqual('/export01-husvm', out[0]['path'])
    def test_file_clone(self):
        """file_clone resolves the EVS then issues file-clone-create on it."""
        path1 = '/.cinder/path1'
        path2 = '/.cinder/path2'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         return_value=(evsfs_list, ''))
        self.hnas_backend.file_clone('fs-cinder', path1, path2)
        # any_order=False: the EVS lookup must precede the clone command.
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'file-clone-create',
                                                       '-f', 'fs-cinder',
                                                       path1, path2)]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_file_clone_wrong_fs(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.assertRaises(exception.InvalidParameterValue,
self.hnas_backend.file_clone, 'fs-fake', 'src',
'dst')
    def test_get_evs_info(self):
        """get_evs_info maps every listed IP address to its EVS number."""
        expected_out = {'evs_number': '1'}
        expected_out2 = {'evs_number': '2'}
        self.mock_object(self.hnas_backend, '_run_cmd',
                         return_value=(evsipaddr, ''))
        out = self.hnas_backend.get_evs_info()
        self.hnas_backend._run_cmd.assert_called_with('evsipaddr', '-l')
        self.assertEqual(expected_out, out['10.0.0.20'])
        self.assertEqual(expected_out, out['172.24.44.20'])
        self.assertEqual(expected_out2, out['172.24.44.21'])
    def test_get_fs_info(self):
        """get_fs_info combines df -f, evsfs list and filesystem-list data."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(df_f, ''), (evsfs_list, ''),
                                      (hnas_fs_list, '')])
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('2', out['evs_id'])
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual('228', out['available_size'])
        self.assertEqual('250', out['total_size'])
        self.assertEqual(0, out['provisioned_capacity'])
def test_get_fs_empty_return(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=('Not mounted', ''))
out = self.hnas_backend.get_fs_info('fs-cinder')
self.assertEqual({}, out)
    def test_get_fs_info_single_evs(self):
        """Single-EVS df output omits the EVS column; parsing still works."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(df_f_single_evs, ''), (evsfs_list, ''),
                                      (hnas_fs_list, '')])
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual('228', out['available_size'])
        self.assertEqual('250', out['total_size'])
        self.assertEqual(0, out['provisioned_capacity'])
    def test_get_fs_tb(self):
        """TB filesystem sizes are converted to MB strings (value * 1024**2)."""
        available_size = float(228 * 1024 ** 2)
        total_size = float(250 * 1024 ** 2)
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(df_f_tb, ''), (evsfs_list, ''),
                                      (hnas_fs_list, '')])
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual(str(available_size), out['available_size'])
        self.assertEqual(str(total_size), out['total_size'])
        self.assertEqual(0, out['provisioned_capacity'])
    def test_get_fs_single_evs_tb(self):
        """Same TB conversion check, intended for the single-EVS layout."""
        # NOTE(review): this reuses the df_f_tb fixture exactly like
        # test_get_fs_tb above; a single-EVS TB fixture (df_f_single_evs_tb?)
        # may have been intended here -- confirm against the fixtures.
        available_size = float(228 * 1024 ** 2)
        total_size = float(250 * 1024 ** 2)
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(df_f_tb, ''), (evsfs_list, ''),
                                      (hnas_fs_list, '')])
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual(str(available_size), out['available_size'])
        self.assertEqual(str(total_size), out['total_size'])
        self.assertEqual(0, out['provisioned_capacity'])
    def test_get_cloned_file_relatives(self):
        """Relatives of a cloned file are collected from snapshot-file stats."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(evsfs_list, ''), (file_clone_stat, ''),
                                      (file_clone_stat_snap_file1, ''),
                                      (file_clone_stat_snap_file2, '')])
        out = self.hnas_backend.get_cloned_file_relatives('cinder-lu',
                                                          'fs-cinder')
        self.assertEqual(file_relatives, out)
        # Object id comes from the file_clone_stat fixture; the trailing ']'
        # is presumably part of that fixture's raw output -- verify there.
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'file-clone-stat-'
                                                      'snapshot-file',
                                                      '-f', 'fs-cinder',
                                                      '00000000004029000d81'
                                                      'f26826ffffffffffffff]')
    def test_get_cloned_file_relatives_not_clone_except(self):
        """With raise_except=True, a non-clone file raises InvalidReference."""
        exc = putils.ProcessExecutionError(stderr='File is not a clone')
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(evsfs_list, ''), exc])
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.hnas_backend.get_cloned_file_relatives,
                          'cinder-lu', 'fs-cinder', True)
    def test_get_cloned_file_relatives_not_clone_no_except(self):
        """Without the raise flag, a non-clone file yields an empty list."""
        exc = putils.ProcessExecutionError(stderr='File is not a clone')
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(evsfs_list, ''), exc])
        out = self.hnas_backend.get_cloned_file_relatives('cinder-lu',
                                                          'fs-cinder')
        self.assertEqual([], out)
    def test_check_snapshot_parent_true(self):
        """Returns True when the snapshot appears among the file's relatives."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(evsfs_list, ''),
                                      (file_clone_stat, ''),
                                      (file_clone_stat_snap_file1, ''),
                                      (file_clone_stat_snap_file2, '')])
        out = self.hnas_backend.check_snapshot_parent('cinder-lu',
                                                      'snapshot-lu-1',
                                                      'fs-cinder')
        self.assertTrue(out)
    def test_check_snapshot_parent_false(self):
        """Returns False for a snapshot name absent from the relatives."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(evsfs_list, ''),
                                      (file_clone_stat, ''),
                                      (file_clone_stat_snap_file1, ''),
                                      (file_clone_stat_snap_file2, '')])
        out = self.hnas_backend.check_snapshot_parent('cinder-lu',
                                                      'snapshot-lu-3',
                                                      'fs-cinder')
        self.assertFalse(out)
    def test_get_export_path(self):
        """get_export_path queries nfs-export list on the fs's EVS."""
        export_out = '/export01-husvm'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         side_effect=[(evsfs_list, ''), (nfs_export, '')])
        out = self.hnas_backend.get_export_path(export_out, 'fs-cinder')
        self.assertEqual(export_out, out)
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'nfs-export', 'list',
                                                      export_out)

View File

@@ -1,834 +0,0 @@
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import os
from oslo_concurrency import processutils as putils
import socket
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hnas_backend as backend
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume.drivers import nfs as base_nfs
from cinder.volume import utils as vutils
# Minimal fake volume record shared by the HNAS NFS driver tests below.
_VOLUME = {'name': 'cinder-volume',
           'id': fake.VOLUME_ID,
           'size': 128,
           'host': 'host1@hnas-nfs-backend#default',
           'volume_type': 'default',
           'provider_location': 'hnas'}
# Fake snapshot record; 'volume' embeds _VOLUME so snapshot objects can be
# built with expected_attrs=['volume'] in instantiate_snapshot().
_SNAPSHOT = {
    'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
    'id': fake.SNAPSHOT_ID,
    'size': 128,
    'volume_type': None,
    'provider_location': 'hnas',
    'volume_size': 128,
    'volume': _VOLUME,
    'volume_name': _VOLUME['name'],
    'host': 'host1@hnas-iscsi-backend#silver',
    'volume_type_id': fake.VOLUME_TYPE_ID,
}
class HNASNFSDriverTest(test.TestCase):
"""Test HNAS NFS volume driver."""
    # No-op override: delegates straight to test.TestCase.__init__.
    def __init__(self, *args, **kwargs):
        super(HNASNFSDriverTest, self).__init__(*args, **kwargs)
    def instantiate_snapshot(self, snap):
        """Build a fake Snapshot object (with its volume) from a dict.

        The input dict is copied so the module-level _SNAPSHOT template is
        never mutated between tests.
        """
        snap = snap.copy()
        snap['volume'] = fake_volume.fake_volume_obj(
            None, **snap['volume'])
        snapshot = fake_snapshot.fake_snapshot_obj(
            None, expected_attrs=['volume'], **snap)
        return snapshot
def setUp(self):
super(HNASNFSDriverTest, self).setUp()
self.context = context.get_admin_context()
self.volume = fake_volume.fake_volume_obj(
self.context,
**_VOLUME)
self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
self.volume_type = fake_volume.fake_volume_type_obj(
None,
**{'name': 'silver'}
)
self.clone = fake_volume.fake_volume_obj(
None,
**{'id': fake.VOLUME2_ID,
'size': 128,
'host': 'host1@hnas-nfs-backend#default',
'volume_type': 'default',
'provider_location': 'hnas'})
# xml parsed from utils
self.parsed_xml = {
'username': 'supervisor',
'password': 'supervisor',
'hnas_cmd': 'ssc',
'ssh_port': '22',
'services': {
'default': {
'hdp': '172.24.49.21:/fs-cinder',
'pool_name': 'default',
'label': 'svc_0',
'ctl': '1',
'export': {
'fs': 'fs-cinder',
'path': '/export-cinder/volume'
}
},
},
'cluster_admin_ip0': None,
'ssh_private_key': None,
'chap_enabled': 'True',
'mgmt_ip0': '172.17.44.15',
'ssh_enabled': None
}
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.hds_hnas_nfs_config_file = 'fake.xml'
self.mock_object(hnas_utils, 'read_cinder_conf',
return_value=self.parsed_xml)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.max_over_subscription_ratio = 20.0
self.configuration.reserved_percentage = 0
self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml'
self.configuration.nfs_shares_config = 'fake_nfs_share.xml'
self.configuration.num_shell_tries = 2
self.configuration.nfs_mount_point_base = '%state_path/mnt'
self.configuration.nfs_mount_options = None
self.driver = nfs.HNASNFSDriver(configuration=self.configuration)
    def test_check_pool_and_share_no_default_configured(self):
        """Mismatch raised when the volume's pool is missing from services."""
        nfs_shares = '172.24.49.21:/fs-cinder'
        self.mock_object(hnas_utils, 'get_pool', return_value='default')
        # Replace services with one that lacks the 'default' pool entirely.
        self.driver.config['services'] = {
            'silver': {
                'hdp': 'fs3',
                'iscsi_ip': '172.17.39.133',
                'iscsi_port': '3260',
                'port': '22',
                'volume_type': 'silver',
                'label': 'svc_1',
                'evs': '2',
                'tgt': {
                    'alias': 'iscsi-test',
                    'secret': 'itEpgB5gPefGhW2'
                }
            }
        }
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_share, self.volume,
                          nfs_shares)
    def test_check_pool_and_share_mismatch_exception(self):
        """A share not present in the configuration raises a type mismatch."""
        # passing a share that does not exists in config should raise an
        # exception
        nfs_shares = '172.24.49.21:/nfs_share'
        self.mock_object(hnas_utils, 'get_pool', return_value='default')
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_share, self.volume,
                          nfs_shares)
    def test_check_pool_and_share_type_mismatch_exception(self):
        """Volume host pool differing from the share's pool raises mismatch."""
        nfs_shares = '172.24.49.21:/fs-cinder'
        self.volume.host = 'host1@hnas-nfs-backend#gold'

        # returning a pool different from 'default' should raise an exception
        self.mock_object(hnas_utils, 'get_pool', return_value='default')
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_share, self.volume,
                          nfs_shares)
    def test_do_setup(self):
        """do_setup validates each configured export via showmount -e."""
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }
        export_list = [
            {'fs': 'fs-cinder',
             'name': '/fs-cinder',
             'free': 228.0,
             'path': '/fs-cinder',
             'evs': ['172.24.49.21'],
             'size': 250.0}
        ]

        showmount = "Export list for 172.24.49.21:                  \n\
/fs-cinder                                     *                            \n\
/shares/9bcf0bcc-8cc8-437e38bcbda9 127.0.0.1,10.1.0.5,172.24.44.141         \n\
"

        self.mock_object(backend.HNASSSHBackend, 'get_version',
                         return_value=version_info)
        self.mock_object(self.driver, '_load_shares_config')
        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
                         return_value=export_list)
        self.mock_object(self.driver, '_execute', return_value=(showmount, ''))

        self.driver.do_setup(None)

        self.driver._execute.assert_called_with('showmount', '-e',
                                                '172.24.49.21')
        self.assertTrue(backend.HNASSSHBackend.get_export_list.called)
    def test_do_setup_execute_exception(self):
        """A showmount failure propagates out of do_setup unchanged."""
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }

        export_list = [
            {'fs': 'fs-cinder',
             'name': '/fs-cinder',
             'free': 228.0,
             'path': '/fs-cinder',
             'evs': ['172.24.49.21'],
             'size': 250.0}
        ]

        self.mock_object(backend.HNASSSHBackend, 'get_version',
                         return_value=version_info)
        self.mock_object(self.driver, '_load_shares_config')
        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
                         return_value=export_list)
        self.mock_object(self.driver, '_execute',
                         side_effect=putils.ProcessExecutionError)

        self.assertRaises(putils.ProcessExecutionError, self.driver.do_setup,
                          None)
    def test_do_setup_missing_export(self):
        """do_setup rejects configuration whose export is not on the backend."""
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }
        # 'name' deliberately points at a fs the showmount output lacks.
        export_list = [
            {'fs': 'fs-cinder',
             'name': '/wrong-fs',
             'free': 228.0,
             'path': '/fs-cinder',
             'evs': ['172.24.49.21'],
             'size': 250.0}
        ]

        showmount = "Export list for 172.24.49.21:                  \n\
/fs-cinder                                     *                            \n\
"

        self.mock_object(backend.HNASSSHBackend, 'get_version',
                         return_value=version_info)
        self.mock_object(self.driver, '_load_shares_config')
        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
                         return_value=export_list)
        self.mock_object(self.driver, '_execute', return_value=(showmount, ''))

        self.assertRaises(exception.InvalidParameterValue,
                          self.driver.do_setup, None)
    def test_create_volume(self):
        """create_volume reports the configured share as provider_location."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(self.driver, '_do_create_volume')

        out = self.driver.create_volume(self.volume)

        self.assertEqual('172.24.49.21:/fs-cinder', out['provider_location'])
        self.assertTrue(self.driver._ensure_shares_mounted.called)
    def test_create_volume_exception(self):
        """An unconfigured pool in the volume host raises ParameterNotFound."""
        # pool 'original' doesnt exists in services
        self.volume.host = 'host1@hnas-nfs-backend#original'

        self.mock_object(self.driver, '_ensure_shares_mounted')

        self.assertRaises(exception.ParameterNotFound,
                          self.driver.create_volume, self.volume)
    def test_create_cloned_volume(self):
        """Cloning to a larger volume (150 > 128) clones then extends."""
        self.volume.size = 150

        self.mock_object(self.driver, 'extend_volume')
        self.mock_object(backend.HNASSSHBackend, 'file_clone')

        out = self.driver.create_cloned_volume(self.volume, self.clone)

        self.assertEqual('hnas', out['provider_location'])
    def test_create_cloned_volume_invalid_volume_type(self):
        """Cloning across different volume types raises InvalidVolumeType."""
        self.volume.volume_type_id = fake.VOLUME_TYPE_ID
        self.clone.volume_type_id = fake.VOLUME_TYPE2_ID

        self.mock_object(self.driver, 'extend_volume')
        self.mock_object(backend.HNASSSHBackend, 'file_clone')

        self.assertRaises(exception.InvalidVolumeType,
                          self.driver.create_cloned_volume, self.volume,
                          self.clone)
    def test_get_volume_stats(self):
        """Stats report vendor 'Hitachi', protocol 'NFS' and driver version."""
        self.driver.pools = [{'pool_name': 'default',
                              'service_label': 'default',
                              'fs': '172.24.49.21:/easy-stack'},
                             {'pool_name': 'cinder_svc',
                              'service_label': 'cinder_svc',
                              'fs': '172.24.49.26:/MNT-CinderTest2'}]

        self.mock_object(self.driver, '_update_volume_stats')
        self.mock_object(self.driver, '_get_capacity_info',
                         return_value=(150, 50, 100))

        out = self.driver.get_volume_stats()

        self.assertEqual('6.0.0', out['driver_version'])
        self.assertEqual('Hitachi', out['vendor_name'])
        self.assertEqual('NFS', out['storage_protocol'])
    def test_create_volume_from_snapshot(self):
        """New-style snapshot file present: clone it into the new volume."""
        expected_out = {'provider_location': 'hnas'}

        self.mock_object(self.driver, '_file_not_present',
                         mock.Mock(return_value=False))
        self.mock_object(backend.HNASSSHBackend, 'file_clone')
        result = self.driver.create_volume_from_snapshot(self.volume,
                                                         self.snapshot)

        self.assertEqual(expected_out, result)
    def test_create_volume_from_snapshot_legacy(self):
        """New-style file absent: fall back to the legacy snapshot name."""
        expected_out = {'provider_location': 'hnas'}

        self.mock_object(self.driver, '_file_not_present',
                         mock.Mock(return_value=True))
        self.mock_object(backend.HNASSSHBackend, 'file_clone')
        result = self.driver.create_volume_from_snapshot(self.volume,
                                                         self.snapshot)

        self.assertEqual(expected_out, result)
def test_create_snapshot(self):
expected_out = {'provider_location': 'hnas'}
self.mock_object(backend.HNASSSHBackend, 'file_clone')
result = self.driver.create_snapshot(self.snapshot)
self.assertEqual(expected_out, result)
    def test_delete_snapshot(self):
        """Delete a new-style snapshot file with 'rm' as root."""
        nfs_mount = "/opt/stack/data/cinder/mnt/"
        path = nfs_mount + self.driver._get_snapshot_name(self.snapshot)

        self.mock_object(self.driver, '_file_not_present',
                         mock.Mock(return_value=False))

        self.mock_object(self.driver, '_get_file_path',
                         mock.Mock(return_value=path))
        self.mock_object(self.driver, '_execute')

        self.driver.delete_snapshot(self.snapshot)

        self.driver._execute.assert_called_with('rm', path, run_as_root=True)
def test_delete_snapshot_legacy(self):
nfs_mount = "/opt/stack/data/cinder/mnt/"
legacy_path = nfs_mount + self.snapshot.name
self.mock_object(self.driver, '_file_not_present',
mock.Mock(return_value=True))
self.mock_object(self.driver, '_file_not_present',
mock.Mock(return_value=False))
self.mock_object(self.driver, '_get_file_path',
mock.Mock(return_value=legacy_path))
self.mock_object(self.driver, '_execute')
self.driver.delete_snapshot(self.snapshot)
self.driver._execute.assert_called_with('rm', legacy_path,
run_as_root=True)
def test_extend_volume(self):
share_mount_point = '/fs-cinder'
data = image_utils.imageutils.QemuImgInfo
data.virtual_size = 200 * 1024 ** 3
self.mock_object(self.driver, '_get_mount_point_for_share',
return_value=share_mount_point)
self.mock_object(image_utils, 'qemu_img_info', return_value=data)
self.driver.extend_volume(self.volume, 200)
self.driver._get_mount_point_for_share.assert_called_with('hnas')
def test_extend_volume_resizing_exception(self):
share_mount_point = '/fs-cinder'
data = image_utils.imageutils.QemuImgInfo
data.virtual_size = 2048 ** 3
self.mock_object(self.driver, '_get_mount_point_for_share',
return_value=share_mount_point)
self.mock_object(image_utils, 'qemu_img_info', return_value=data)
self.mock_object(image_utils, 'resize_image')
self.assertRaises(exception.InvalidResults,
self.driver.extend_volume, self.volume, 200)
    def test_manage_existing(self):
        """manage_existing adopts a file on a mounted share and renames it."""
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(os.path, 'isfile', return_value=True)
        self.mock_object(self.driver, '_get_mount_point_for_share',
                         return_value='/fs-cinder/cinder-volume')
        self.mock_object(utils, 'resolve_hostname',
                         return_value='172.24.49.21')
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(self.driver, '_execute')

        out = self.driver.manage_existing(self.volume, existing_vol_ref)

        loc = {'provider_location': '172.24.49.21:/fs-cinder'}
        self.assertEqual(loc, out)
        os.path.isfile.assert_called_once_with('/fs-cinder/cinder-volume/')
        self.driver._get_mount_point_for_share.assert_called_once_with(
            '172.24.49.21:/fs-cinder')
        utils.resolve_hostname.assert_called_with('172.24.49.21')
        self.driver._ensure_shares_mounted.assert_called_once_with()
    def test_manage_existing_name_matches(self):
        """No rename is needed when the file already has the volume's name."""
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         return_value=('172.24.49.21:/fs-cinder',
                                       '/mnt/silver',
                                       self.volume.name))

        out = self.driver.manage_existing(self.volume, existing_vol_ref)

        loc = {'provider_location': '172.24.49.21:/fs-cinder'}
        self.assertEqual(loc, out)
    def test_manage_existing_exception(self):
        """A failing rename surfaces as VolumeBackendAPIException."""
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         return_value=('172.24.49.21:/fs-cinder',
                                       '/mnt/silver',
                                       'cinder-volume'))
        self.mock_object(self.driver, '_execute',
                         side_effect=putils.ProcessExecutionError)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)
    def test_manage_existing_missing_source_name(self):
        """An empty reference dict raises ManageExistingInvalidReference."""
        # empty source-name should raise an exception
        existing_vol_ref = {}

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)
    def test_manage_existing_already_managed(self):
        """A volume cinder already tracks cannot be managed again."""
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
        expected_size = 1

        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value='/mnt/silver')
        self.mock_object(os.path, 'isfile', return_value=True)
        self.mock_object(utils, 'get_file_size', return_value=expected_size)

        self.mock_object(vutils, 'check_already_managed_volume',
                         return_value=True)

        self.assertRaises(exception.ManageExistingAlreadyManaged,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)
    def test_manage_existing_missing_volume_in_backend(self):
        """A reference resolving to an unmounted host is rejected."""
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(self.driver, '_ensure_shares_mounted')
        # Two different resolutions -> the share never matches the ref.
        self.mock_object(utils, 'resolve_hostname',
                         side_effect=['172.24.49.21', '172.24.49.22'])

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)
    def test_manage_existing_get_size(self):
        """get_size returns the referenced file's size from the mount."""
        existing_vol_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
        }

        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        expected_size = 1

        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(utils, 'resolve_hostname',
                         return_value='172.24.49.21')
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value='/mnt/silver')
        self.mock_object(os.path, 'isfile', return_value=True)
        self.mock_object(utils, 'get_file_size', return_value=expected_size)

        out = self.driver.manage_existing_get_size(self.volume,
                                                   existing_vol_ref)

        self.assertEqual(1, out)
        utils.get_file_size.assert_called_once_with(
            '/mnt/silver/cinder-volume')
        utils.resolve_hostname.assert_called_with('172.24.49.21')
    def test_manage_existing_get_size_exception(self):
        """A size lookup failure surfaces as VolumeBackendAPIException."""
        existing_vol_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
        }

        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         return_value=('172.24.49.21:/fs-cinder',
                                       '/mnt/silver',
                                       'cinder-volume'))

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)
    def test_manage_existing_get_size_resolving_hostname_exception(self):
        """A DNS failure (socket.gaierror) propagates unchanged."""
        existing_vol_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
        }

        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']

        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(utils, 'resolve_hostname',
                         side_effect=socket.gaierror)

        self.assertRaises(socket.gaierror,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)
    def test_unmanage(self):
        """unmanage renames the backing file with an 'unmanage-' prefix."""
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        vol_str = 'volume-' + self.volume.id
        vol_path = os.path.join(path, vol_str)
        new_path = os.path.join(path, 'unmanage-' + vol_str)

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         return_value=path)
        self.mock_object(self.driver, '_execute')

        self.driver.unmanage(self.volume)

        self.driver._execute.assert_called_with('mv', vol_path, new_path,
                                                run_as_root=False,
                                                check_exit_code=True)
        self.driver._get_mount_point_for_share.assert_called_with(
            self.volume.provider_location)
    def test_unmanage_volume_exception(self):
        """unmanage swallows a rename failure; no exception reaches caller."""
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         return_value=path)
        self.mock_object(self.driver, '_execute', side_effect=ValueError)

        self.driver.unmanage(self.volume)
    def test_manage_existing_snapshot(self):
        """Adopt a snapshot file whose parentage check succeeds."""
        nfs_share = "172.24.49.21:/fs-cinder"
        nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID
        path = "unmanage-%s.%s" % (self.snapshot.volume.name, self.snapshot.id)
        loc = {'provider_location': '172.24.49.21:/fs-cinder'}
        existing_ref = {'source-name': '172.24.49.21:/fs-cinder/'
                                       + fake.SNAPSHOT_ID}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         return_value=(nfs_share, nfs_mount, path))
        self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent',
                         return_value=True)
        self.mock_object(self.driver, '_execute')
        self.mock_object(backend.HNASSSHBackend, 'get_export_path',
                         return_value='fs-cinder')

        out = self.driver.manage_existing_snapshot(self.snapshot,
                                                   existing_ref)

        self.assertEqual(loc, out)
    def test_manage_existing_snapshot_legacy(self):
        """Adopt a snapshot stored under the legacy 'snapshot-<id>' name."""
        nfs_share = "172.24.49.21:/fs-cinder"
        nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID
        path = "unmanage-snapshot-%s" % self.snapshot.id
        loc = {'provider_location': '172.24.49.21:/fs-cinder'}
        existing_ref = {
            'source-name': '172.24.49.21:/fs-cinder/' + fake.SNAPSHOT_ID}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         return_value=(nfs_share, nfs_mount, path))
        self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent',
                         return_value=True)
        self.mock_object(self.driver, '_execute')
        self.mock_object(backend.HNASSSHBackend, 'get_export_path',
                         return_value='fs-cinder')

        out = self.driver.manage_existing_snapshot(self.snapshot, existing_ref)

        self.assertEqual(loc, out)
    def test_manage_existing_snapshot_not_parent_exception(self):
        """Reject a snapshot whose claimed parent volume does not match."""
        nfs_share = "172.24.49.21:/fs-cinder"
        nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID
        path = "unmanage-%s.%s" % (fake.VOLUME_ID, self.snapshot.id)

        existing_ref = {'source-name': '172.24.49.21:/fs-cinder/'
                        + fake.SNAPSHOT_ID}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         return_value=(nfs_share, nfs_mount, path))
        self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent',
                         return_value=False)
        self.mock_object(backend.HNASSSHBackend, 'get_export_path',
                         return_value='fs-cinder')

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_snapshot, self.snapshot,
                          existing_ref)
    def test_manage_existing_snapshot_get_size(self):
        """Snapshot get_size returns the referenced file's size."""
        existing_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-snapshot',
        }
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        expected_size = 1

        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(utils, 'resolve_hostname',
                         return_value='172.24.49.21')
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value='/mnt/silver')
        self.mock_object(os.path, 'isfile', return_value=True)
        self.mock_object(utils, 'get_file_size', return_value=expected_size)

        out = self.driver.manage_existing_snapshot_get_size(
            self.snapshot, existing_ref)

        self.assertEqual(1, out)
        utils.get_file_size.assert_called_once_with(
            '/mnt/silver/cinder-snapshot')
        utils.resolve_hostname.assert_called_with('172.24.49.21')
    def test_unmanage_snapshot(self):
        """unmanage_snapshot renames the file with an 'unmanage-' prefix."""
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        snapshot_name = "%s.%s" % (self.snapshot.volume.name, self.snapshot.id)
        old_path = os.path.join(path, snapshot_name)
        new_path = os.path.join(path, 'unmanage-' + snapshot_name)

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         return_value=path)
        self.mock_object(self.driver, '_execute')

        self.driver.unmanage_snapshot(self.snapshot)

        self.driver._execute.assert_called_with('mv', old_path, new_path,
                                                run_as_root=False,
                                                check_exit_code=True)
        self.driver._get_mount_point_for_share.assert_called_with(
            self.snapshot.provider_location)
    def test_get_manageable_volumes_not_safe(self):
        """An already-managed volume is listed with safe_to_manage=False."""
        manageable_vol = [{'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
                           'extra_info': None,
                           'reason_not_safe': 'already managed',
                           'reference': {
                               'source-name':
                                   '172.24.49.21:/fs-cinder/volume-1e5177e7-'
                                   '95e5-4a0f-b170-e45f4b469f6a'},
                           'safe_to_manage': False,
                           'size': 128}]

        rsrc = [self.volume]
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value=path)
        vols_exp = [self.volume.name]
        self.mock_object(self.driver, '_get_volumes_from_export',
                         return_value=vols_exp)
        self.mock_object(self.driver, '_get_file_size',
                         return_value=self.volume.size)

        out = self.driver._get_manageable_resource_info(
            rsrc, "volume", None, 1000, 0, ['reference'], ['desc'])

        self.driver._get_volumes_from_export.assert_called_with(
            '172.24.49.21:/fs-cinder')
        self.driver._get_file_size.assert_called_with('%s/%s' % (
            path, self.volume.name))
        self.driver._get_mount_point_for_share(self.volume.provider_location)

        self.assertEqual(out, manageable_vol)
    def test_get_manageable_volumes(self):
        """Exported volume files are reported as manageable resources."""
        manageable_vol = [{
            'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
            'extra_info': None,
            'reason_not_safe': 'already managed',
            'reference': {
                'source-name': '172.24.49.21:/fs-cinder/'
                               'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'},
            'safe_to_manage': False,
            'size': 128}]

        rsrc = [self.volume]
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value=path)
        vols_exp = [fake.VOLUME_NAME]
        self.mock_object(self.driver, '_get_volumes_from_export',
                         return_value=vols_exp)
        self.mock_object(self.driver, '_get_file_size',
                         return_value=self.volume.size)

        out = self.driver._get_manageable_resource_info(rsrc, "volume", None,
                                                        1000, 0, ['reference'],
                                                        ['desc'])

        self.driver._get_volumes_from_export.assert_called_with(
            '172.24.49.21:/fs-cinder')
        self.driver._get_file_size.assert_called_with(
            '%s/%s' % (path, self.volume.name))
        self.driver._get_mount_point_for_share(self.volume.provider_location)

        self.assertEqual(out, manageable_vol)
    def test_get_manageable_snapshots(self):
        """A snapshot with one volume relative reports that volume as source."""
        manageable_snap = [{
            'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab',
            'extra_info': None,
            'reason_not_safe': 'already managed',
            'reference': {
                'source-name': '172.24.49.21:/fs-cinder/'
                               'snapshot-253b2878-ec60-4793-'
                               'ad19-e65496ec7aab'},
            'safe_to_manage': False,
            'size': 128,
            'source_reference': {'id': '1'}}]

        rsrc = [self.snapshot]
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value=path)
        vols_exp = [fake.SNAPSHOT_NAME]
        self.mock_object(self.driver, '_get_volumes_from_export',
                         return_value=vols_exp)
        self.mock_object(self.driver, '_get_file_size',
                         return_value=self.volume.size)
        # Exactly one 'volume-*' relative -> the owner can be identified.
        self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives',
                         return_value=[' /nfs_cinder/volume-1',
                                       '/nfs_cinder/snapshot2'])

        out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None,
                                                        1000, 0, ['reference'],
                                                        ['desc'])

        self.driver._get_volumes_from_export.assert_called_with(
            '172.24.49.21:/fs-cinder')
        self.driver._get_file_size.assert_called_with(
            '%s/%s' % (path, self.snapshot.name))
        self.driver._get_mount_point_for_share(self.snapshot.provider_location)

        self.assertEqual(out, manageable_snap)
    def test_get_manageable_snapshots_unknown_origin(self):
        """Multiple volume relatives make the owning volume ambiguous."""
        manageable_snap = [{
            'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab',
            'extra_info': 'Could not determine the volume that owns '
                          'the snapshot',
            'reason_not_safe': 'already managed',
            'reference': {
                'source-name': '172.24.49.21:/fs-cinder/'
                               'snapshot-253b2878-ec60-4793-'
                               'ad19-e65496ec7aab'},
            'safe_to_manage': False,
            'size': 128,
            'source_reference': {'id': 'unknown'}}]

        rsrc = [self.snapshot]
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         return_value=path)
        vols_exp = [fake.SNAPSHOT_NAME]
        self.mock_object(self.driver, '_get_volumes_from_export',
                         return_value=vols_exp)
        self.mock_object(self.driver, '_get_file_size',
                         return_value=self.volume.size)
        # Two 'volume-*' relatives -> source_reference id becomes 'unknown'.
        self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives',
                         return_value=[' /nfs_cinder/volume-1',
                                       ' /nfs_cinder/volume-2',
                                       '/nfs_cinder/snapshot2'])

        out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None,
                                                        1000, 0, ['reference'],
                                                        ['desc'])

        self.driver._get_volumes_from_export.assert_called_with(
            '172.24.49.21:/fs-cinder')
        self.driver._get_mount_point_for_share(self.snapshot.provider_location)
        self.driver._get_file_size.assert_called_with('%s/%s' % (
            path, self.snapshot.name))

        self.assertEqual(out, manageable_snap)

View File

@@ -1,305 +0,0 @@
# Copyright (c) 2016 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import ddt
import os
from xml.etree import ElementTree as ETree
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import volume_types
_VOLUME = {'name': 'cinder-volume',
'id': fake_constants.VOLUME_ID,
'size': 128,
'host': 'host1@hnas-nfs-backend#default',
'volume_type': 'default',
'provider_location': 'hnas'}
service_parameters = ['volume_type', 'hdp']
optional_parameters = ['ssc_cmd', 'cluster_admin_ip0']
config_from_cinder_conf = {
'username': 'supervisor',
'fs': {'easy-stack': 'easy-stack',
'silver': 'silver'},
'ssh_port': 22,
'cluster_admin_ip0': None,
'ssh_private_key': None,
'mgmt_ip0': '172.24.44.15',
'ssc_cmd': 'ssc',
'services': {
'default': {
'label': u'svc_0',
'pool_name': 'default',
'hdp': 'easy-stack'},
'FS-CinderDev1': {
'label': u'svc_1',
'pool_name': 'FS-CinderDev1',
'hdp': 'silver'}},
'password': 'supervisor'}
valid_XML_str = '''
<config>
<mgmt_ip0>172.24.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<ssh_enabled>False</ssh_enabled>
<ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
<svc_0>
<volume_type>default</volume_type>
<hdp>easy-stack</hdp>
</svc_0>
<svc_1>
<volume_type>silver</volume_type>
<hdp>FS-CinderDev1</hdp>
</svc_1>
</config>
'''
XML_no_authentication = '''
<config>
<mgmt_ip0>172.24.44.15</mgmt_ip0>
<username>supervisor</username>
<ssh_enabled>False</ssh_enabled>
</config>
'''
XML_empty_authentication_param = '''
<config>
<mgmt_ip0>172.24.44.15</mgmt_ip0>
<username>supervisor</username>
<password></password>
<ssh_enabled>False</ssh_enabled>
<ssh_private_key></ssh_private_key>
<svc_0>
<volume_type>default</volume_type>
<hdp>easy-stack</hdp>
</svc_0>
</config>
'''
# missing mgmt_ip0
XML_without_mandatory_params = '''
<config>
<username>supervisor</username>
<password>supervisor</password>
<ssh_enabled>False</ssh_enabled>
<svc_0>
<volume_type>default</volume_type>
<hdp>easy-stack</hdp>
</svc_0>
</config>
'''
XML_no_services_configured = '''
<config>
<mgmt_ip0>172.24.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<ssh_port>10</ssh_port>
<ssh_enabled>False</ssh_enabled>
<ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
</config>
'''
parsed_xml = {'username': 'supervisor', 'password': 'supervisor',
'ssc_cmd': 'ssc', 'ssh_port': 22,
'fs': {'easy-stack': 'easy-stack',
'FS-CinderDev1': 'FS-CinderDev1'},
'cluster_admin_ip0': None,
'ssh_private_key': '/home/ubuntu/.ssh/id_rsa',
'services': {
'default': {'hdp': 'easy-stack', 'pool_name': 'default',
'label': 'svc_0'},
'silver': {'hdp': 'FS-CinderDev1', 'pool_name': 'silver',
'label': 'svc_1'}},
'mgmt_ip0': '172.24.44.15'}
valid_XML_etree = ETree.XML(valid_XML_str)
invalid_XML_etree_no_authentication = ETree.XML(XML_no_authentication)
invalid_XML_etree_empty_parameter = ETree.XML(XML_empty_authentication_param)
invalid_XML_etree_no_mandatory_params = ETree.XML(XML_without_mandatory_params)
invalid_XML_etree_no_service = ETree.XML(XML_no_services_configured)
@ddt.ddt
class HNASUtilsTest(test.TestCase):
    """Unit tests for the HNAS driver configuration helpers.

    Covers both the legacy XML configuration parser
    (hnas_utils.read_xml_config) and the cinder.conf based configuration
    (hnas_utils.read_cinder_conf), plus pool selection.
    """

    # NOTE: the redundant __init__ that only delegated to super() was
    # removed; TestCase's constructor is inherited unchanged.

    def setUp(self):
        """Build a fake shared-group configuration and fake volume objects."""
        super(HNASUtilsTest, self).setUp()

        self.fake_conf = conf.Configuration(hnas_utils.drivers_common_opts,
                                            conf.SHARED_CONF_GROUP)

        self.override_config('hnas_username', 'supervisor',
                             conf.SHARED_CONF_GROUP)
        self.override_config('hnas_password', 'supervisor',
                             conf.SHARED_CONF_GROUP)
        self.override_config('hnas_mgmt_ip0', '172.24.44.15',
                             conf.SHARED_CONF_GROUP)
        self.override_config('hnas_svc0_pool_name', 'default',
                             conf.SHARED_CONF_GROUP)
        self.override_config('hnas_svc0_hdp', 'easy-stack',
                             conf.SHARED_CONF_GROUP)
        self.override_config('hnas_svc1_pool_name', 'FS-CinderDev1',
                             conf.SHARED_CONF_GROUP)
        self.override_config('hnas_svc1_hdp', 'silver',
                             conf.SHARED_CONF_GROUP)

        self.context = context.get_admin_context()
        self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
        self.volume_type = (fake_volume.fake_volume_type_obj(None, **{
            'id': fake_constants.VOLUME_TYPE_ID, 'name': 'silver'}))

    def test_read_xml_config(self):
        """A well-formed XML file parses into the expected dict."""
        self.mock_object(os, 'access', return_value=True)
        self.mock_object(ETree, 'parse', return_value=ETree.ElementTree)
        self.mock_object(ETree.ElementTree, 'getroot',
                         return_value=valid_XML_etree)

        xml_path = 'xml_file_found'
        out = hnas_utils.read_xml_config(xml_path,
                                         service_parameters,
                                         optional_parameters)

        self.assertEqual(parsed_xml, out)

    def test_read_xml_config_parser_error(self):
        """An unparseable XML file raises ConfigNotFound."""
        xml_file = 'hnas_nfs.xml'
        self.mock_object(os, 'access', return_value=True)
        self.mock_object(ETree, 'parse', side_effect=ETree.ParseError)

        self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config,
                          xml_file, service_parameters, optional_parameters)

    def test_read_xml_config_not_found(self):
        """An unreadable/missing XML file raises NotFound."""
        self.mock_object(os, 'access', return_value=False)

        xml_path = 'xml_file_not_found'
        self.assertRaises(exception.NotFound, hnas_utils.read_xml_config,
                          xml_path, service_parameters, optional_parameters)

    def test_read_xml_config_without_services_configured(self):
        """An XML file with no <svc_N> sections raises ParameterNotFound."""
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', return_value=True)
        self.mock_object(ETree, 'parse', return_value=ETree.ElementTree)
        self.mock_object(ETree.ElementTree, 'getroot',
                         return_value=invalid_XML_etree_no_service)

        self.assertRaises(exception.ParameterNotFound,
                          hnas_utils.read_xml_config, xml_file,
                          service_parameters, optional_parameters)

    def test_read_xml_config_empty_authentication_parameter(self):
        """Empty password/private-key tags raise ParameterNotFound."""
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', return_value=True)
        self.mock_object(ETree, 'parse', return_value=ETree.ElementTree)
        self.mock_object(ETree.ElementTree, 'getroot',
                         return_value=invalid_XML_etree_empty_parameter)

        self.assertRaises(exception.ParameterNotFound,
                          hnas_utils.read_xml_config, xml_file,
                          service_parameters, optional_parameters)

    def test_read_xml_config_mandatory_parameters_missing(self):
        """A missing mandatory tag (mgmt_ip0) raises ParameterNotFound."""
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', return_value=True)
        self.mock_object(ETree, 'parse', return_value=ETree.ElementTree)
        self.mock_object(ETree.ElementTree, 'getroot',
                         return_value=invalid_XML_etree_no_mandatory_params)

        self.assertRaises(exception.ParameterNotFound,
                          hnas_utils.read_xml_config, xml_file,
                          service_parameters, optional_parameters)

    def test_read_config_xml_without_authentication_parameter(self):
        """Absent password *and* private key raises ConfigNotFound."""
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', return_value=True)
        self.mock_object(ETree, 'parse', return_value=ETree.ElementTree)
        self.mock_object(ETree.ElementTree, 'getroot',
                         return_value=invalid_XML_etree_no_authentication)

        self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config,
                          xml_file, service_parameters, optional_parameters)

    def test_get_pool_with_vol_type(self):
        """A volume type with a known service_label selects that pool."""
        self.mock_object(volume_types, 'get_volume_type_extra_specs',
                         return_value={'service_label': 'silver'})

        self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID
        self.volume.volume_type = self.volume_type

        out = hnas_utils.get_pool(parsed_xml, self.volume)

        self.assertEqual('silver', out)

    def test_get_pool_with_vol_type_id_none(self):
        """No volume_type_id falls back to the default pool."""
        self.volume.volume_type_id = None
        self.volume.volume_type = self.volume_type

        out = hnas_utils.get_pool(parsed_xml, self.volume)

        self.assertEqual('default', out)

    def test_get_pool_with_missing_service_label(self):
        """An unknown service_label falls back to the default pool."""
        self.mock_object(volume_types, 'get_volume_type_extra_specs',
                         return_value={'service_label': 'gold'})

        self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID
        self.volume.volume_type = self.volume_type

        out = hnas_utils.get_pool(parsed_xml, self.volume)

        self.assertEqual('default', out)

    def test_get_pool_without_vol_type(self):
        """A volume without any volume type uses the default pool."""
        out = hnas_utils.get_pool(parsed_xml, self.volume)
        self.assertEqual('default', out)

    def test_read_cinder_conf_nfs(self):
        """cinder.conf options parse into the expected configuration dict."""
        out = hnas_utils.read_cinder_conf(self.fake_conf)

        self.assertEqual(config_from_cinder_conf, out)

    def test_read_cinder_conf_break(self):
        """With all credentials unset, read_cinder_conf returns None."""
        self.override_config('hnas_username', None, conf.SHARED_CONF_GROUP)
        self.override_config('hnas_password', None, conf.SHARED_CONF_GROUP)
        self.override_config('hnas_mgmt_ip0', None, conf.SHARED_CONF_GROUP)
        out = hnas_utils.read_cinder_conf(self.fake_conf)
        self.assertIsNone(out)

    @ddt.data('hnas_username', 'hnas_password',
              'hnas_mgmt_ip0', 'hnas_svc0_pool_name',
              'hnas_svc0_hdp', )
    def test_init_invalid_conf_parameters(self, attr_name):
        """Unsetting any single mandatory option raises InvalidParameterValue."""
        self.override_config(attr_name, None, conf.SHARED_CONF_GROUP)

        self.assertRaises(exception.InvalidParameterValue,
                          hnas_utils.read_cinder_conf, self.fake_conf)

View File

@@ -1,283 +0,0 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import os
import shlex
from oslo_concurrency import lockutils
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
# Copy-pair status codes reported by the storage array.
SMPL = 1
COPY = 2
PAIR = 3
PSUS = 4
PSUE = 5
UNKN = 0xff

# Human-readable copy-method names.
FULL = 'Full copy'
THIN = 'Thin copy'

# Retry budget for LDEV-creation races (see HBSDCommon.create_ldev).
DEFAULT_TRY_RANGE = range(3)
MAX_PROCESS_WAITTIME = 86400
DEFAULT_PROCESS_WAITTIME = 900
GETSTORAGEARRAY_ONCE = 100

# Message IDs below this value are informational; at or above, warnings.
WARNING_ID = 300

DEFAULT_GROUP_RANGE = [0, 65535]

NAME_PREFIX = 'HBSD-'
NORMAL_VOLUME_TYPE = 'Normal'

# Directory for the driver's inter-process lock files.
LOCK_DIR = '/var/lock/hbsd/'

LOG = logging.getLogger(__name__)

# Informational message catalog (IDs < WARNING_ID), used by set_msg().
HBSD_INFO_MSG = {
    1: _('The parameter of the storage backend. '
         '(config_group: %(config_group)s)'),
    3: _('The storage backend can be used. (config_group: %(config_group)s)'),
    4: _('The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)'),
    5: _('The volume %(volume_id)s is unmanaged successfully. '
         '(LDEV: %(ldev)s)'),
}

# Warning message catalog (IDs >= WARNING_ID), used by set_msg().
HBSD_WARN_MSG = {
    301: _('A LUN (HLUN) was not found. (LDEV: %(ldev)s)'),
    302: _('Failed to specify a logical device for the volume '
           '%(volume_id)s to be unmapped.'),
    303: _('An iSCSI CHAP user could not be deleted. (username: %(user)s)'),
    304: _('Failed to specify a logical device to be deleted. '
           '(method: %(method)s, id: %(id)s)'),
    305: _('The logical device for specified %(type)s %(id)s '
           'was already deleted.'),
    306: _('A host group could not be deleted. (port: %(port)s, '
           'gid: %(gid)s, name: %(name)s)'),
    307: _('An iSCSI target could not be deleted. (port: %(port)s, '
           'tno: %(tno)s, alias: %(alias)s)'),
    308: _('A host group could not be added. (port: %(port)s, '
           'name: %(name)s)'),
    309: _('An iSCSI target could not be added. '
           '(port: %(port)s, alias: %(alias)s, reason: %(reason)s)'),
    310: _('Failed to unmap a logical device. (LDEV: %(ldev)s, '
           'reason: %(reason)s)'),
    311: _('A free LUN (HLUN) was not found. Add a different host'
           ' group. (LDEV: %(ldev)s)'),
    312: _('Failed to get a storage resource. The system will attempt '
           'to get the storage resource again. (resource: %(resource)s)'),
    313: _('Failed to delete a logical device. (LDEV: %(ldev)s, '
           'reason: %(reason)s)'),
    314: _('Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, '
           'port: %(port)s, id: %(id)s)'),
    315: _('Failed to perform a zero-page reclamation. '
           '(LDEV: %(ldev)s, reason: %(reason)s)'),
    316: _('Failed to assign the iSCSI initiator IQN. (port: %(port)s, '
           'reason: %(reason)s)'),
}

# Error message catalog, used by output_err().
HBSD_ERR_MSG = {
    600: _('The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, '
           'stderr: %(err)s)'),
    601: _('A parameter is invalid. (%(param)s)'),
    602: _('A parameter value is invalid. (%(meta)s)'),
    603: _('Failed to acquire a resource lock. (serial: %(serial)s, '
           'inst: %(inst)s, ret: %(ret)s, stderr: %(err)s)'),
    604: _('Cannot set both hitachi_serial_number and hitachi_unit_name.'),
    605: _('Either hitachi_serial_number or hitachi_unit_name is required.'),
    615: _('A pair could not be created. The maximum number of pair is '
           'exceeded. (copy method: %(copy_method)s, P-VOL: %(pvol)s)'),
    616: _('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)'),
    617: _('The specified operation is not supported. The volume size '
           'must be the same as the source %(type)s. (volume: %(volume_id)s)'),
    618: _('The volume %(volume_id)s could not be extended. '
           'The volume type must be Normal.'),
    619: _('The volume %(volume_id)s to be mapped was not found.'),
    624: _('The %(type)s %(id)s source to be replicated was not found.'),
    631: _('Failed to create a file. (file: %(file)s, ret: %(ret)s, '
           'stderr: %(err)s)'),
    632: _('Failed to open a file. (file: %(file)s, ret: %(ret)s, '
           'stderr: %(err)s)'),
    633: _('%(file)s: Permission denied.'),
    636: _('Failed to add the logical device.'),
    637: _('The method %(method)s is timed out. (timeout value: %(timeout)s)'),
    640: _('A pool could not be found. (pool id: %(pool_id)s)'),
    641: _('The host group or iSCSI target could not be added.'),
    642: _('An iSCSI CHAP user could not be added. (username: %(user)s)'),
    643: _('The iSCSI CHAP user %(user)s does not exist.'),
    648: _('There are no resources available for use. '
           '(resource: %(resource)s)'),
    649: _('The host group or iSCSI target was not found.'),
    650: _('The resource %(resource)s was not found.'),
    651: _('The IP Address was not found.'),
    653: _('The creation of a logical device could not be '
           'completed. (LDEV: %(ldev)s)'),
    654: _('A volume status is invalid. (status: %(status)s)'),
    655: _('A snapshot status is invalid. (status: %(status)s)'),
    659: _('A host group is invalid. (host group: %(gid)s)'),
    660: _('The specified %(desc)s is busy.'),
    700: _('There is no designation of the %(param)s. '
           'The specified storage is essential to manage the volume.'),
    701: _('There is no designation of the ldev. '
           'The specified ldev is essential to manage the volume.'),
    702: _('The specified ldev %(ldev)s could not be managed. '
           'The volume type must be DP-VOL.'),
    703: _('The specified ldev %(ldev)s could not be managed. '
           'The ldev size must be in multiples of gigabyte.'),
    704: _('The specified ldev %(ldev)s could not be managed. '
           'The ldev must not be mapping.'),
    705: _('The specified ldev %(ldev)s could not be managed. '
           'The ldev must not be paired.'),
    706: _('The volume %(volume_id)s could not be unmanaged. '
           'The volume type must be %(volume_type)s.'),
}
def set_msg(msg_id, **kwargs):
    """Format an informational (< WARNING_ID) or warning message.

    The returned string carries a 'MSGID%04d-I:'/'MSGID%04d-W:' prefix
    whose suffix letter reflects the severity band of *msg_id*.
    """
    if msg_id < WARNING_ID:
        severity, catalog = 'I', HBSD_INFO_MSG
    else:
        severity, catalog = 'W', HBSD_WARN_MSG
    header = 'MSGID%04d-%s:' % (msg_id, severity)
    return '%s %s' % (header, catalog.get(msg_id) % kwargs)
def output_err(msg_id, **kwargs):
    """Render the error message for *msg_id*, log it, and return it."""
    body = HBSD_ERR_MSG.get(msg_id) % kwargs
    LOG.error("MSGID%(id)04d-E: %(msg)s", {'id': msg_id, 'msg': body})
    return body
def get_process_lock(file):
    """Return an inter-process lock backed by *file*.

    Raises HBSDError (permission denied) when the file is not writable.
    """
    if os.access(file, os.W_OK):
        return lockutils.InterProcessLock(file)
    msg = output_err(633, file=file)
    raise exception.HBSDError(message=msg)
def create_empty_file(filename):
    """Create *filename* via 'touch' unless it already exists.

    Raises HBSDError when the touch command fails.
    """
    if os.path.exists(filename):
        return
    try:
        utils.execute('touch', filename)
    except putils.ProcessExecutionError as ex:
        msg = output_err(
            631, file=filename, ret=ex.exit_code, err=ex.stderr)
        raise exception.HBSDError(message=msg)
class FileLock(lockutils.InterProcessLock):
    """Inter-process file lock paired with an in-process lock object.

    The in-process *lock_object* is always acquired before the file lock
    and released after it, so threads within one process serialize
    before competing with other processes for the file lock.
    """

    def __init__(self, name, lock_object):
        self.lock_object = lock_object

        super(FileLock, self).__init__(name)

    def __enter__(self):
        self.lock_object.acquire()

        try:
            ret = super(FileLock, self).__enter__()
        except Exception:
            # Do not leave the in-process lock held when acquiring the
            # file lock fails; re-raise the original exception.
            with excutils.save_and_reraise_exception():
                self.lock_object.release()

        return ret

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            super(FileLock, self).__exit__(exc_type, exc_val, exc_tb)
        finally:
            # Release order is the reverse of acquisition.
            self.lock_object.release()
class NopLock(object):
    """No-op context manager used where pair/horcm locking is not needed."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        return None
class HBSDBasicLib(object):
    """Base class for the backend command wrappers (SNM2 / HORCM).

    Provides root command execution plus no-op hooks that concrete
    subclasses override.
    """

    def __init__(self, conf=None):
        # Backend configuration; may be None in tests.
        self.conf = conf

    def exec_command(self, cmd, args=None, printflag=True):
        """Run *cmd* (with optional shell-style *args* string) as root.

        :returns: tuple (exit_code, stdout, stderr); failures are
                  reported through a non-zero exit code, not raised.
        """
        if printflag:
            if args:
                LOG.debug('cmd: %(cmd)s, args: %(args)s',
                          {'cmd': cmd, 'args': args})
            else:
                LOG.debug('cmd: %s', cmd)
        cmd = [cmd]
        if args:
            # shlex on Python 2 cannot split unicode; encode first.
            if six.PY2 and isinstance(args, six.text_type):
                cmd += shlex.split(args.encode())
            else:
                cmd += shlex.split(args)
        try:
            stdout, stderr = utils.execute(*cmd, run_as_root=True)
            ret = 0
        except putils.ProcessExecutionError as e:
            ret = e.exit_code
            stdout = e.stdout
            stderr = e.stderr
        # Trace the full invocation, its caller and collapsed output.
        LOG.debug('cmd: %s', cmd)
        LOG.debug('from: %s', inspect.stack()[2])
        LOG.debug('ret: %d', ret)
        LOG.debug('stdout: %s', stdout.replace(os.linesep, ' '))
        LOG.debug('stderr: %s', stderr.replace(os.linesep, ' '))
        return ret, stdout, stderr

    def set_pair_flock(self):
        # Subclasses that need real pair-operation locking override this.
        return NopLock()

    def set_horcmgr_flock(self):
        return NopLock()

    def discard_zero_page(self, ldev):
        # Zero-page reclamation is storage specific; no-op here.
        pass

    def output_param_to_log(self, conf):
        pass

    def connect_storage(self):
        pass

    def get_max_hostgroups(self):
        pass

    def restart_pair_horcm(self):
        pass

View File

@@ -1,835 +0,0 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common class for Hitachi storage drivers.
"""
import re
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
from cinder.volume.drivers.hitachi import hbsd_snm2 as snm2
from cinder.volume import utils as volume_utils
"""
Version history:
1.0.0 - Initial driver
1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods
"""
# Driver version reported to Cinder.
VERSION = '1.1.0'

# Accepted value ranges for interval/speed options (see check_param()).
PARAM_RANGE = {
    'hitachi_copy_check_interval': {'min': 1, 'max': 600},
    'hitachi_async_copy_check_interval': {'min': 1, 'max': 600},
    'hitachi_copy_speed': {'min': 1, 'max': 15},
}

# Fallback LDEV range when hitachi_ldev_range is not configured.
DEFAULT_LDEV_RANGE = [0, 65535]

# Valid values for the copy_method metadata / default option.
COPY_METHOD = ('FULL', 'THIN')

# Volume states in which a DP volume / virtual volume may be copied.
VALID_DP_VOLUME_STATUS = ['available', 'in-use']
VALID_V_VOLUME_STATUS = ['available']

# Lock-file paths shared by all backends of this driver.
SYSTEM_LOCK_FILE = basic_lib.LOCK_DIR + 'system'
SERVICE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'service_'
STORAGE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'storage_'

LOG = logging.getLogger(__name__)
# Backend options, registered under the shared configuration group below.
volume_opts = [
    cfg.StrOpt('hitachi_serial_number',
               help='Serial number of storage system'),
    cfg.StrOpt('hitachi_unit_name',
               help='Name of an array unit'),
    cfg.IntOpt('hitachi_pool_id',
               help='Pool ID of storage system'),
    cfg.IntOpt('hitachi_thin_pool_id',
               help='Thin pool ID of storage system'),
    cfg.StrOpt('hitachi_ldev_range',
               help='Range of logical device of storage system'),
    cfg.StrOpt('hitachi_default_copy_method',
               default='FULL',
               help='Default copy method of storage system'),
    cfg.IntOpt('hitachi_copy_speed',
               default=3,
               help='Copy speed of storage system'),
    cfg.IntOpt('hitachi_copy_check_interval',
               default=3,
               help='Interval to check copy'),
    cfg.IntOpt('hitachi_async_copy_check_interval',
               default=10,
               help='Interval to check copy asynchronously'),
    cfg.StrOpt('hitachi_target_ports',
               help='Control port names for HostGroup or iSCSI Target'),
    cfg.StrOpt('hitachi_group_range',
               help='Range of group number'),
    # NOTE(review): secret=True excludes this boolean from the generic
    # option logging loop in output_param_to_log(), which then logs it
    # explicitly for iSCSI only; it does not look like sensitive data -
    # confirm whether the secret flag is intentional.
    cfg.BoolOpt('hitachi_group_request',
                default=False,
                secret=True,
                help='Request for creating HostGroup or iSCSI Target'),
]

CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
class TryLock(object):
    """Non-blocking reentrant lock that raises HBSDError when busy.

    *desc* names the guarded resource and is interpolated into the
    'resource is busy' error raised on contention.
    """

    def __init__(self):
        self.lock = threading.RLock()
        self.desc = None

    def set_desc(self, description):
        """Record the human-readable description used in busy errors."""
        self.desc = description

    def __enter__(self):
        acquired = self.lock.acquire(False)
        if not acquired:
            msg = basic_lib.output_err(660, desc=self.desc)
            raise exception.HBSDError(message=msg)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.lock.release()
class HBSDCommon(object):
    def __init__(self, conf, parent, context, db):
        """Bind configuration and per-backend lock/volume bookkeeping.

        :param conf: backend configuration carrying the hitachi_* options
        :param parent: the driver instance this helper was created from
        :param context: request context used for DB calls
        :param db: cinder DB API handle
        """
        self.configuration = conf
        self.generated_from = parent
        self.context = context
        self.db = db

        self.system_lock_file = SYSTEM_LOCK_FILE
        self.service_lock_file = '%s%s' % (SERVICE_LOCK_PATH_BASE,
                                           conf.config_group)
        # The storage lock file is keyed on whichever identifier is set.
        # NOTE(review): if neither hitachi_serial_number nor
        # hitachi_unit_name is set, storage_lock_file stays unassigned;
        # check_param() appears to enforce exactly one of them - confirm.
        if conf.hitachi_serial_number:
            self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE,
                                               six.text_type(
                                                   conf.hitachi_serial_number))
        elif conf.hitachi_unit_name:
            self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE,
                                               six.text_type(
                                                   conf.hitachi_unit_name))

        self.storage_obj_lock = threading.Lock()
        self.volinfo_lock = threading.Lock()
        self.volume_info = {}
        self.output_first = True
    def get_volume(self, volume_id):
        """Fetch the volume DB record for *volume_id*."""
        return self.db.volume_get(self.context, volume_id)

    def get_volume_metadata(self, volume_id):
        """Fetch the metadata stored for volume *volume_id*."""
        return self.db.volume_metadata_get(self.context, volume_id)

    def get_snapshot_metadata(self, snapshot_id):
        """Fetch the metadata stored for snapshot *snapshot_id*."""
        return self.db.snapshot_metadata_get(self.context, snapshot_id)

    def _update_volume_metadata(self, volume_id, volume_metadata):
        # False: merge with the existing metadata instead of replacing it.
        self.db.volume_metadata_update(self.context, volume_id,
                                       volume_metadata, False)
def get_ldev(self, obj):
if not obj:
return None
ldev = obj.get('provider_location')
if not ldev or not ldev.isdigit():
return None
else:
return int(ldev)
def get_value(self, obj, name, key):
if not obj:
return None
if obj.get(name):
if isinstance(obj[name], dict):
return obj[name].get(key)
else:
for i in obj[name]:
if i['key'] == key:
return i['value']
return None
    def get_is_vvol(self, obj, name):
        # True when the metadata under *name* marks the object as a V-VOL.
        return self.get_value(obj, name, 'type') == 'V-VOL'

    def get_volume_is_vvol(self, volume):
        # Volumes carry their metadata under the 'volume_metadata' key.
        return self.get_is_vvol(volume, 'volume_metadata')

    def get_snapshot_is_vvol(self, snapshot):
        # Snapshots carry theirs under 'metadata'.
        return self.get_is_vvol(snapshot, 'metadata')
def get_copy_method(self, volume):
method = self.get_value(volume, 'volume_metadata', 'copy_method')
if method:
if method not in COPY_METHOD:
msg = basic_lib.output_err(602, meta='copy_method')
raise exception.HBSDError(message=msg)
elif (method == 'THIN'
and self.configuration.hitachi_thin_pool_id is None):
msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
raise exception.HBSDError(message=msg)
else:
method = self.configuration.hitachi_default_copy_method
return method
def _string2int(self, num):
if not num:
return None
if num.isdigit():
return int(num, 10)
if not re.match(r'\w\w:\w\w:\w\w', num):
return None
try:
num = int(num.replace(':', ''), 16)
except ValueError:
return None
return num
def _range2list(self, conf, param):
str = getattr(conf, param)
lists = str.split('-')
if len(lists) != 2:
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
first_type = None
for i in range(len(lists)):
if lists[i].isdigit():
lists[i] = int(lists[i], 10)
if first_type == 'hex':
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
first_type = 'dig'
else:
if (first_type == 'dig'
or not re.match(r'\w\w:\w\w:\w\w', lists[i])):
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
try:
lists[i] = int(lists[i].replace(':', ''), 16)
first_type = 'hex'
except Exception:
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
if lists[0] > lists[1]:
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
return lists
    def output_param_to_log(self, storage_protocol):
        """Log the effective backend configuration at startup."""
        essential_inherited_param = ['volume_backend_name', 'volume_driver']
        conf = self.configuration

        LOG.info(basic_lib.set_msg(1, config_group=conf.config_group))
        version = self.command.get_comm_version()
        # Unit name implies the SNM2 backend; otherwise RAID Manager.
        if conf.hitachi_unit_name:
            prefix = 'HSNM2 version'
        else:
            prefix = 'RAID Manager version'
        LOG.info('\t%(prefix)-35s : %(version)s',
                 {'prefix': prefix, 'version': version})

        for param in essential_inherited_param:
            value = conf.safe_get(param)
            LOG.info('\t%(param)-35s : %(value)s',
                     {'param': param, 'value': value})
        # Options flagged secret are skipped by this generic loop ...
        for opt in volume_opts:
            if not opt.secret:
                value = getattr(conf, opt.name)
                LOG.info('\t%(name)-35s : %(value)s',
                         {'name': opt.name, 'value': value})
        # ... except hitachi_group_request, which iSCSI backends log
        # explicitly despite its secret flag.
        if storage_protocol == 'iSCSI':
            value = getattr(conf, 'hitachi_group_request')
            LOG.info('\t%(request)-35s : %(value)s',
                     {'request': 'hitachi_group_request', 'value': value})
    def check_param(self):
        """Validate the hitachi_* options and build the command backend.

        Raises HBSDError for any missing, conflicting or out-of-range
        option. As a side effect, normalizes range/list options in place
        and instantiates ``self.command`` (SNM2 or HORCM) together with
        its pair/horcmgr locks.
        """
        conf = self.configuration

        # Exactly one of serial number / unit name must be configured.
        if conf.hitachi_unit_name and conf.hitachi_serial_number:
            msg = basic_lib.output_err(604)
            raise exception.HBSDError(message=msg)
        if not conf.hitachi_unit_name and not conf.hitachi_serial_number:
            msg = basic_lib.output_err(605)
            raise exception.HBSDError(message=msg)
        if conf.hitachi_pool_id is None:
            msg = basic_lib.output_err(601, param='hitachi_pool_id')
            raise exception.HBSDError(message=msg)
        # Interval/speed options must fall inside their documented ranges.
        for param in PARAM_RANGE.keys():
            _value = getattr(conf, param)
            if (_value and
                    (not PARAM_RANGE[param]['min'] <= _value <=
                     PARAM_RANGE[param]['max'])):
                msg = basic_lib.output_err(601, param=param)
                raise exception.HBSDError(message=msg)
        if conf.hitachi_default_copy_method not in COPY_METHOD:
            msg = basic_lib.output_err(601,
                                       param='hitachi_default_copy_method')
            raise exception.HBSDError(message=msg)
        # A THIN default requires a thin pool to copy into.
        if (conf.hitachi_default_copy_method == 'THIN'
                and conf.hitachi_thin_pool_id is None):
            msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
            raise exception.HBSDError(message=msg)
        # Normalize 'first-last' range strings to [first, last] lists.
        for param in ('hitachi_ldev_range', 'hitachi_group_range'):
            if not getattr(conf, param):
                continue
            else:
                _value = self._range2list(conf, param)
                setattr(conf, param, _value)
        if conf.hitachi_target_ports:
            conf.hitachi_target_ports = conf.hitachi_target_ports.split(',')
        # Touch every option so undefined ones fail fast here.
        for opt in volume_opts:
            getattr(conf, opt.name)
        # Unit name selects the SNM2 backend; serial number selects HORCM.
        if conf.hitachi_unit_name:
            self.command = snm2.HBSDSNM2(conf)
        else:
            conf.append_config_values(horcm.volume_opts)
            self.command = horcm.HBSDHORCM(conf)
            self.command.check_param()

        self.pair_flock = self.command.set_pair_flock()
        self.horcmgr_flock = self.command.set_horcmgr_flock()
    def create_lock_file(self):
        """Create every lock file this backend relies on (idempotent)."""
        basic_lib.create_empty_file(self.system_lock_file)
        basic_lib.create_empty_file(self.service_lock_file)
        basic_lib.create_empty_file(self.storage_lock_file)
        self.command.create_lock_file()

    def _add_ldev(self, volume_num, capacity, pool_id, is_vvol):
        # Thin wrapper over the backend command; capacity is in GB.
        self.command.comm_add_ldev(pool_id, volume_num, capacity, is_vvol)

    def _get_unused_volume_num(self, ldev_range):
        # Ask the backend for a free LDEV number inside *ldev_range*.
        return self.command.get_unused_ldev(ldev_range)

    def add_volinfo(self, ldev, id=None, type='volume'):
        """Ensure an in-memory TryLock entry exists for *ldev*.

        When *id* is given, the busy-error description is refreshed to
        '<type> <id>'.
        """
        with self.volinfo_lock:
            if ldev not in self.volume_info:
                self.init_volinfo(self.volume_info, ldev)
            if id:
                desc = '%s %s' % (type, id)
                self.volume_info[ldev]['in_use'].set_desc(desc)
    def delete_pair(self, ldev, all_split=True, is_vvol=None):
        """Delete copy pairs involving *ldev*.

        When *ldev* is a P-VOL, every split (PSUS) full-copy S-VOL pair
        is deleted; if *all_split* is set and busy S-VOLs remain,
        HBSDBusy is raised. When *ldev* is an S-VOL, its single pair is
        deleted under the P-VOL's in-use lock. HORCM is restarted after
        any full-copy pair deletion.
        """
        paired_info = self.command.get_paired_info(ldev)
        LOG.debug('paired_info: %s', paired_info)
        pvol = paired_info['pvol']
        svols = paired_info['svol']
        driver = self.generated_from
        restart = False
        svol_list = []
        try:
            if pvol is None:
                # Not paired at all; nothing to do.
                return
            elif pvol == ldev:
                # *ldev* is the P-VOL: delete every split full-copy pair.
                for svol in svols[:]:
                    if svol['is_vvol'] or svol['status'] != basic_lib.PSUS:
                        continue

                    self.command.delete_pair(pvol, svol['lun'], False)
                    restart = True
                    driver.pair_terminate_connection(svol['lun'])
                    svols.remove(svol)

                # Remaining S-VOLs could not be deleted; report them busy.
                if all_split and svols:
                    svol_list.append(six.text_type(svols[0]['lun']))
                    for svol in svols[1:]:
                        svol_list.append(', %d' % svol['lun'])
                    msg = basic_lib.output_err(616, pvol=pvol,
                                               svol=''.join(svol_list))
                    raise exception.HBSDBusy(message=msg)

                if not svols:
                    driver.pair_terminate_connection(pvol)
            else:
                # *ldev* is an S-VOL: serialize against other users of
                # its P-VOL via the in-memory TryLock.
                self.add_volinfo(pvol)
                if not self.volume_info[pvol]['in_use'].lock.acquire(False):
                    desc = self.volume_info[pvol]['in_use'].desc
                    msg = basic_lib.output_err(660, desc=desc)
                    raise exception.HBSDBusy(message=msg)
                try:
                    # Re-read: the pair may have vanished while waiting.
                    paired_info = self.command.get_paired_info(ldev)
                    if paired_info['pvol'] is None:
                        return
                    svol = paired_info['svol'][0]
                    if svol['status'] != basic_lib.PSUS:
                        msg = basic_lib.output_err(616, pvol=pvol, svol=ldev)
                        raise exception.HBSDBusy(message=msg)

                    self.command.delete_pair(pvol, ldev, svol['is_vvol'])
                    if not svol['is_vvol']:
                        restart = True
                    driver.pair_terminate_connection(ldev)

                    # Drop the P-VOL's path too once it has no pairs left.
                    paired_info = self.command.get_paired_info(pvol)
                    if paired_info['pvol'] is None:
                        driver.pair_terminate_connection(pvol)
                finally:
                    self.volume_info[pvol]['in_use'].lock.release()
        except Exception:
            with excutils.save_and_reraise_exception():
                # Restart HORCM even on failure so its pair view is fresh.
                if restart:
                    try:
                        self.command.restart_pair_horcm()
                    except Exception as e:
                        LOG.warning('Failed to restart horcm: %s', e)
        else:
            if (all_split or is_vvol) and restart:
                try:
                    self.command.restart_pair_horcm()
                except Exception as e:
                    LOG.warning('Failed to restart horcm: %s', e)
    def copy_async_data(self, pvol, svol, is_vvol):
        """Copy *pvol* to *svol* via a storage-side copy pair.

        Establishes paths for both LDEVs, then asks the backend to
        create the pair; on failure, any path set up so far is torn
        down again (best effort).
        """
        path_list = []
        driver = self.generated_from
        try:
            with self.pair_flock:
                # A full copy frees stale pairs first; thin copies keep
                # existing ones (all_split=False).
                self.delete_pair(pvol, all_split=False, is_vvol=is_vvol)
            paired_info = self.command.get_paired_info(pvol)
            if paired_info['pvol'] is None:
                driver.pair_initialize_connection(pvol)
                path_list.append(pvol)
            driver.pair_initialize_connection(svol)
            path_list.append(svol)
            self.command.comm_create_pair(pvol, svol, is_vvol)
        except Exception:
            with excutils.save_and_reraise_exception():
                for ldev in path_list:
                    try:
                        driver.pair_terminate_connection(ldev)
                    except Exception as ex:
                        LOG.warning(basic_lib.set_msg(310, ldev=ldev,
                                                      reason=ex))
    def copy_sync_data(self, src_ldev, dest_ldev, size):
        """Copy *size* GB host-side with dd from src_ldev to dest_ldev.

        Both LDEVs are temporarily attached to this host; attachments
        are always detached again, and zero pages are reclaimed on the
        destination afterwards.
        """
        src_vol = {'provider_location': six.text_type(src_ldev),
                   'id': 'src_vol'}
        dest_vol = {'provider_location': six.text_type(dest_ldev),
                    'id': 'dest_vol'}
        properties = utils.brick_get_connector_properties()
        driver = self.generated_from
        src_info = None
        dest_info = None
        try:
            dest_info = driver._attach_volume(self.context, dest_vol,
                                              properties)
            src_info = driver._attach_volume(self.context, src_vol,
                                             properties)
            # size is in GB; copy_volume expects MiB.
            volume_utils.copy_volume(src_info['device']['path'],
                                     dest_info['device']['path'], size * 1024,
                                     self.configuration.volume_dd_blocksize)
        finally:
            if dest_info:
                driver._detach_volume(self.context, dest_info,
                                      dest_vol, properties)
            if src_info:
                driver._detach_volume(self.context, src_info,
                                      src_vol, properties)
        self.command.discard_zero_page(dest_ldev)
    def copy_data(self, pvol, size, p_is_vvol, method):
        """Create a copy of *pvol* and return (svol_str, volume_type).

        A V-VOL source forces a host-side sync copy; otherwise a
        storage-side pair copy is used ('THIN' produces a V-VOL target).
        The new LDEV is deleted again if the copy fails.
        """
        type = 'Normal'
        is_vvol = method == 'THIN'
        svol = self._create_volume(size, is_vvol=is_vvol)
        try:
            if p_is_vvol:
                # V-VOL sources cannot be paired; copy through the host.
                self.copy_sync_data(pvol, svol, size)
            else:
                if is_vvol:
                    type = 'V-VOL'
                self.copy_async_data(pvol, svol, is_vvol)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.delete_ldev(svol, is_vvol)
                except Exception as ex:
                    LOG.warning(basic_lib.set_msg(313, ldev=svol,
                                                  reason=ex))
        return six.text_type(svol), type
    def add_lun(self, command, hostgroups, ldev, is_once=False):
        """Map *ldev* into *hostgroups*, serialized by the storage lock."""
        lock = basic_lib.get_process_lock(self.storage_lock_file)
        with lock:
            self.command.comm_add_lun(command, hostgroups, ldev, is_once)

    def create_ldev(self, size, ldev_range, pool_id, is_vvol):
        """Allocate a new LDEV of *size* GB inside *ldev_range*.

        Retries a few times because another process may grab the chosen
        free LDEV number first; raises HBSDError when every attempt
        loses the race.
        """
        LOG.debug('create start (normal)')
        for i in basic_lib.DEFAULT_TRY_RANGE:
            LOG.debug('Try number: %(tries)s / %(max_tries)s',
                      {'tries': i + 1,
                       'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
            new_ldev = self._get_unused_volume_num(ldev_range)
            try:
                self._add_ldev(new_ldev, size, pool_id, is_vvol)
            except exception.HBSDNotFound:
                # The candidate LDEV disappeared; pick another and retry.
                LOG.warning(basic_lib.set_msg(312, resource='LDEV'))
                continue
            else:
                break
        else:
            # for/else: every retry failed.
            msg = basic_lib.output_err(636)
            raise exception.HBSDError(message=msg)
        LOG.debug('create end (normal: %s)', new_ldev)
        self.init_volinfo(self.volume_info, new_ldev)
        return new_ldev

    def _create_volume(self, size, is_vvol=False):
        # Serialize LDEV creation against other threads and processes.
        ldev_range = self.configuration.hitachi_ldev_range
        if not ldev_range:
            ldev_range = DEFAULT_LDEV_RANGE
        pool_id = self.configuration.hitachi_pool_id

        lock = basic_lib.get_process_lock(self.storage_lock_file)
        with self.storage_obj_lock, lock:
            ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol)
        return ldev
def create_volume(self, volume):
    """Create a normal (non-V-VOL) volume and return its model update."""
    metadata = self.get_volume_metadata(volume['id'])
    metadata['type'] = 'Normal'
    ldev = self._create_volume(volume['size'])
    metadata['ldev'] = six.text_type(ldev)
    return {'provider_location': six.text_type(ldev),
            'metadata': metadata}
def delete_ldev(self, ldev, is_vvol):
    """Delete *ldev* from the array after breaking any copy pair."""
    LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)',
              {'ldev': ldev, 'vvol': is_vvol})
    # Pairs must be broken before the LDEV itself can be removed.
    with self.pair_flock:
        self.delete_pair(ldev)
    self.command.comm_delete_ldev(ldev, is_vvol)
    # Drop the in-memory bookkeeping entry, if any.
    with self.volinfo_lock:
        if ldev in self.volume_info:
            self.volume_info.pop(ldev)
    LOG.debug('delete_ldev is finished '
              '(LDEV: %(ldev)d, is_vvol: %(vvol)s)',
              {'ldev': ldev, 'vvol': is_vvol})
def delete_volume(self, volume):
    """Delete *volume*; raise VolumeIsBusy if an operation holds it."""
    ldev = self.get_ldev(volume)
    if ldev is None:
        # No backing LDEV recorded; nothing to delete on the array.
        LOG.warning(basic_lib.set_msg(304, method='delete_volume',
                                      id=volume['id']))
        return
    self.add_volinfo(ldev, volume['id'])
    # Non-blocking acquire: refuse to delete while another operation
    # (e.g. a copy) holds this LDEV.
    if not self.volume_info[ldev]['in_use'].lock.acquire(False):
        desc = self.volume_info[ldev]['in_use'].desc
        basic_lib.output_err(660, desc=desc)
        raise exception.VolumeIsBusy(volume_name=volume['name'])
    try:
        is_vvol = self.get_volume_is_vvol(volume)
        try:
            self.delete_ldev(ldev, is_vvol)
        except exception.HBSDNotFound:
            # Already gone on the array; just drop the bookkeeping.
            with self.volinfo_lock:
                if ldev in self.volume_info:
                    self.volume_info.pop(ldev)
            LOG.warning(basic_lib.set_msg(
                305, type='volume', id=volume['id']))
    except exception.HBSDBusy:
        raise exception.VolumeIsBusy(volume_name=volume['name'])
    finally:
        # delete_ldev may have removed the entry; release only if present.
        if ldev in self.volume_info:
            self.volume_info[ldev]['in_use'].lock.release()
def check_volume_status(self, volume, is_vvol):
    """Raise HBSDError unless the volume status permits a copy."""
    valid_states = (VALID_V_VOLUME_STATUS if is_vvol
                    else VALID_DP_VOLUME_STATUS)
    if volume['status'] not in valid_states:
        msg = basic_lib.output_err(654, status=volume['status'])
        raise exception.HBSDError(message=msg)
def create_snapshot(self, snapshot):
    """Create a snapshot by copying the source volume's LDEV.

    :raises HBSDError: if the source volume has no LDEV or is in an
        invalid state for copying.
    """
    src_ref = self.get_volume(snapshot['volume_id'])
    pvol = self.get_ldev(src_ref)
    if pvol is None:
        msg = basic_lib.output_err(624, type='volume', id=src_ref['id'])
        raise exception.HBSDError(message=msg)
    self.add_volinfo(pvol, src_ref['id'])
    with self.volume_info[pvol]['in_use']:
        is_vvol = self.get_volume_is_vvol(src_ref)
        self.check_volume_status(src_ref, is_vvol)
        size = snapshot['volume_size']
        snap_metadata = snapshot.get('metadata')
        method = None if is_vvol else self.get_copy_method(src_ref)

        # FIX: local was named ``type``, shadowing the builtin; renamed
        # to ``snap_type`` with identical behavior.
        svol, snap_type = self.copy_data(pvol, size, is_vvol, method)

        if snap_type == 'V-VOL':
            snap_metadata['type'] = snap_type
            snap_metadata['ldev'] = svol

        return {'provider_location': svol,
                'metadata': snap_metadata}
def delete_snapshot(self, snapshot):
    """Delete *snapshot*; raise SnapshotIsBusy if an operation holds it."""
    ldev = self.get_ldev(snapshot)
    if ldev is None:
        # No backing LDEV recorded; nothing to delete on the array.
        LOG.warning(basic_lib.set_msg(
            304, method='delete_snapshot', id=snapshot['id']))
        return
    self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
    # Non-blocking acquire: refuse while another operation holds the LDEV.
    if not self.volume_info[ldev]['in_use'].lock.acquire(False):
        desc = self.volume_info[ldev]['in_use'].desc
        basic_lib.output_err(660, desc=desc)
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
    try:
        is_vvol = self.get_snapshot_is_vvol(snapshot)
        try:
            self.delete_ldev(ldev, is_vvol)
        except exception.HBSDNotFound:
            # Already gone on the array; just drop the bookkeeping.
            with self.volinfo_lock:
                if ldev in self.volume_info:
                    self.volume_info.pop(ldev)
            LOG.warning(basic_lib.set_msg(
                305, type='snapshot', id=snapshot['id']))
    except exception.HBSDBusy:
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
    finally:
        # delete_ldev may have removed the entry; release only if present.
        if ldev in self.volume_info:
            self.volume_info[ldev]['in_use'].lock.release()
def create_cloned_volume(self, volume, src_vref):
    """Clone *src_vref* into *volume*, extending if the clone is larger.

    :raises HBSDError: if the source has no LDEV or the new volume is
        smaller than the source.
    """
    pvol = self.get_ldev(src_vref)
    if pvol is None:
        msg = basic_lib.output_err(624, type='volume', id=src_vref['id'])
        raise exception.HBSDError(message=msg)
    self.add_volinfo(pvol, src_vref['id'])
    with self.volume_info[pvol]['in_use']:
        is_vvol = self.get_volume_is_vvol(src_vref)
        self.check_volume_status(self.get_volume(src_vref['id']), is_vvol)
        size = volume['size']
        src_size = src_vref['size']
        if size < src_size:
            # Shrinking during clone is not supported.
            msg = basic_lib.output_err(617, type='volume',
                                       volume_id=volume['id'])
            raise exception.HBSDError(message=msg)

        metadata = self.get_volume_metadata(volume['id'])
        method = None if is_vvol else self.get_copy_method(volume)

        # FIX: local was named ``type``, shadowing the builtin; renamed
        # to ``vol_type`` with identical behavior.
        svol, vol_type = self.copy_data(pvol, src_size, is_vvol, method)

        if size > src_size:
            self.extend_volume(volume, size)

        metadata['type'] = vol_type
        metadata['volume'] = src_vref['id']
        metadata['ldev'] = svol

    return {'provider_location': svol, 'metadata': metadata}
def create_volume_from_snapshot(self, volume, snapshot):
    """Create *volume* as a copy of *snapshot*'s LDEV.

    :raises HBSDError: if the snapshot has no LDEV, is not available,
        or its size differs from the requested volume size.
    """
    pvol = self.get_ldev(snapshot)
    if pvol is None:
        msg = basic_lib.output_err(624, type='snapshot', id=snapshot['id'])
        raise exception.HBSDError(message=msg)
    self.add_volinfo(pvol, id=snapshot['id'], type='snapshot')
    with self.volume_info[pvol]['in_use']:
        is_vvol = self.get_snapshot_is_vvol(snapshot)
        if snapshot['status'] != 'available':
            msg = basic_lib.output_err(655, status=snapshot['status'])
            raise exception.HBSDError(message=msg)

        size = volume['size']
        src_size = snapshot['volume_size']
        if size != src_size:
            # Size must match the snapshot exactly.
            msg = basic_lib.output_err(617, type='snapshot',
                                       volume_id=volume['id'])
            raise exception.HBSDError(message=msg)

        metadata = self.get_volume_metadata(volume['id'])
        method = None if is_vvol else self.get_copy_method(volume)

        # FIX: local was named ``type``, shadowing the builtin; renamed
        # to ``vol_type`` with identical behavior.
        svol, vol_type = self.copy_data(pvol, size, is_vvol, method)

        metadata['type'] = vol_type
        metadata['snapshot'] = snapshot['id']
        metadata['ldev'] = svol

    return {'provider_location': svol, 'metadata': metadata}
def _extend_volume(self, ldev, old_size, new_size):
    """Break any copy pair on *ldev*, then grow it on the array."""
    # The pair must be deleted before the LDEV can be resized.
    with self.pair_flock:
        self.delete_pair(ldev)
    self.command.comm_extend_ldev(ldev, old_size, new_size)
def extend_volume(self, volume, new_size):
    """Extend *volume* to *new_size* GB; V-VOLs cannot be extended."""
    pvol = self.get_ldev(volume)
    self.add_volinfo(pvol, volume['id'])
    with self.volume_info[pvol]['in_use']:
        if self.get_volume_is_vvol(volume):
            raise exception.HBSDError(
                message=basic_lib.output_err(618, volume_id=volume['id']))
        self._extend_volume(pvol, volume['size'], new_size)
def output_backend_available_once(self):
    """Emit the backend-available message, but only on the first call."""
    if not self.output_first:
        return
    self.output_first = False
    LOG.warning(basic_lib.set_msg(
        3, config_group=self.configuration.config_group))
def update_volume_stats(self, storage_protocol):
    """Build the volume-stats dict; return None if the pool query fails."""
    backend_name = self.configuration.safe_get('volume_backend_name')
    stats = {
        'volume_backend_name': backend_name or 'HBSD%s' % storage_protocol,
        'vendor_name': 'Hitachi',
        'driver_version': VERSION,
        'storage_protocol': storage_protocol,
    }
    try:
        total_gb, free_gb = self.command.comm_get_dp_pool(
            self.configuration.hitachi_pool_id)
    except Exception as ex:
        LOG.error('Failed to update volume status: %s', ex)
        return None
    stats['total_capacity_gb'] = total_gb
    stats['free_capacity_gb'] = free_gb
    stats['reserved_percentage'] = self.configuration.safe_get(
        'reserved_percentage')
    stats['QoS_support'] = False
    LOG.debug('Updating volume status (%s)', stats)
    return stats
def init_volinfo(self, vol_info, ldev):
    # Register per-LDEV bookkeeping: an 'in_use' TryLock and a plain
    # threading.Lock, both consumed by the attach/copy/delete paths.
    vol_info[ldev] = {'in_use': TryLock(), 'lock': threading.Lock()}
def manage_existing(self, volume, existing_ref):
    """Manage an existing Hitachi storage volume.

    existing_ref is a dictionary of the form:

    For HUS 100 Family:

    .. code-block:: default

        {
            'ldev': <logical device number on storage>,
            'unit_name': <storage device name>
        }

    For VSP G1000/VSP/HUS VM:

    .. code-block:: default

        {
            'ldev': <logical device number on storage>,
            'serial_number': <product number of storage system>
        }

    """
    # Size/existence validation happens in manage_existing_get_size(),
    # which the manager calls first; here we only record the location.
    ldev = self._string2int(existing_ref.get('ldev'))
    LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev))
    return {'provider_location': ldev}
def _manage_existing_get_size(self, volume, existing_ref):
    """Return size of volume for manage_existing.

    :raises HBSDError: if the reference has no parseable 'ldev'.
    """
    ldev = self._string2int(existing_ref.get('ldev'))
    if ldev is None:
        msg = basic_lib.output_err(701)
        # FIX: was ``HBSDError(data=msg)`` — unlike every other raise in
        # this file, which uses ``message=``; ``data=`` left the generic
        # "HBSD error occurs." text and dropped the detailed message.
        raise exception.HBSDError(message=msg)
    size = self.command.get_ldev_size_in_gigabyte(ldev, existing_ref)
    metadata = {'type': basic_lib.NORMAL_VOLUME_TYPE, 'ldev': ldev}
    self._update_volume_metadata(volume['id'], metadata)
    return size
def manage_existing_get_size(self, volume, existing_ref):
    """Wrap _manage_existing_get_size, translating driver errors."""
    try:
        return self._manage_existing_get_size(volume, existing_ref)
    except exception.HBSDError as ex:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=six.text_type(ex))
def _unmanage(self, volume, ldev):
    """Break pairs for *ldev* and drop its in-memory bookkeeping."""
    with self.horcmgr_flock:
        self.delete_pair(ldev)
    with self.volinfo_lock:
        self.volume_info.pop(ldev, None)
def unmanage(self, volume):
    """Remove the specified volume from Cinder management.

    :raises HBSDVolumeIsBusy: if the volume is in use, is a V-VOL, or
        its pair cannot be broken.
    """
    ldev = self.get_ldev(volume)
    if ldev is None:
        return
    self.add_volinfo(ldev, volume['id'])
    # Non-blocking acquire: refuse while another operation holds the LDEV.
    if not self.volume_info[ldev]['in_use'].lock.acquire(False):
        desc = self.volume_info[ldev]['in_use'].desc
        basic_lib.output_err(660, desc=desc)
        raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
    try:
        # FIX: this check previously sat outside the try/finally, so the
        # V-VOL rejection raised while still holding the 'in_use' lock,
        # leaving the volume permanently reported as busy.
        is_vvol = self.get_volume_is_vvol(volume)
        if is_vvol:
            basic_lib.output_err(706, volume_id=volume['id'],
                                 volume_type=basic_lib.NORMAL_VOLUME_TYPE)
            raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
        try:
            self._unmanage(volume, ldev)
        except exception.HBSDBusy:
            raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
        else:
            LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev))
    finally:
        # _unmanage may have removed the entry; release only if present.
        if ldev in self.volume_info:
            self.volume_info[ldev]['in_use'].lock.release()

View File

@@ -1,539 +0,0 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fibre channel Cinder volume driver for Hitachi storage.
"""
import os
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import configuration
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
# FC-specific configuration options for the HBSD driver.
volume_opts = [
    cfg.BoolOpt('hitachi_zoning_request',
                default=False,
                help='Request for FC Zone creating HostGroup'),
]

CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
    """Fibre Channel volume driver for Hitachi HBSD storage."""

    VERSION = common.VERSION

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"]

    SUPPORTED = False

    def __init__(self, *args, **kwargs):
        # The storage CLI tools this driver shells out to expect the
        # C locale.
        os.environ['LANG'] = 'C'
        super(HBSDFCDriver, self).__init__(*args, **kwargs)
        self.db = kwargs.get('db')
        self.common = None
        self.configuration.append_config_values(common.volume_opts)
        self._stats = {}
        self.context = None
        self.max_hostgroups = None
        self.pair_hostgroups = []
        self.pair_hostnum = 0
        # Set once do_setup() finishes; API entry points wait on it.
        self.do_setup_status = threading.Event()

    def _check_param(self):
        """Validate FC-specific configuration options."""
        self.configuration.append_config_values(volume_opts)
        for opt in volume_opts:
            # Touch every option so missing/invalid values fail fast.
            getattr(self.configuration, opt.name)

    def check_param(self):
        """Validate common and FC options, wrapping unexpected errors."""
        try:
            self.common.check_param()
            self._check_param()
        except exception.HBSDError:
            raise
        except Exception as ex:
            msg = basic_lib.output_err(601, param=six.text_type(ex))
            raise exception.HBSDError(message=msg)

    def output_param_to_log(self):
        """Log every non-secret FC driver option under the system lock."""
        lock = basic_lib.get_process_lock(self.common.system_lock_file)
        with lock:
            self.common.output_param_to_log('FC')
            for opt in volume_opts:
                if not opt.secret:
                    value = getattr(self.configuration, opt.name)
                    LOG.info('\t%(name)-35s : %(value)s',
                             {'name': opt.name, 'value': value})
            self.common.command.output_param_to_log(self.configuration)

    def _add_wwn(self, hgs, port, gid, wwns):
        """Register *wwns* with the hostgroup and record each mapping."""
        for wwn in wwns:
            wwn = six.text_type(wwn)
            self.common.command.comm_add_hbawwn(port, gid, wwn)
            detected = self.common.command.is_detected(port, wwn)
            hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
                        'detected': detected})
        LOG.debug('Create host group for %s', hgs)

    def _add_lun(self, hostgroups, ldev):
        """Map *ldev* to *hostgroups*; pair hostgroups map only once."""
        if hostgroups is self.pair_hostgroups:
            is_once = True
        else:
            is_once = False
        self.common.add_lun('auhgmap', hostgroups, ldev, is_once)

    def _delete_lun(self, hostgroups, ldev):
        """Unmap *ldev*; a missing LUN is logged and ignored."""
        try:
            self.common.command.comm_delete_lun(hostgroups, ldev)
        except exception.HBSDNotFound:
            LOG.warning(basic_lib.set_msg(301, ldev=ldev))

    def _get_hgname_gid(self, port, host_grp_name):
        """Return the GID of the named hostgroup on *port*, if any."""
        return self.common.command.get_hgname_gid(port, host_grp_name)

    def _get_unused_gid(self, port):
        """Return a free hostgroup GID on *port* in the configured range."""
        group_range = self.configuration.hitachi_group_range
        if not group_range:
            group_range = basic_lib.DEFAULT_GROUP_RANGE
        return self.common.command.get_unused_gid(group_range, port)

    def _get_hostgroup_info(self, hgs, wwns, login=True):
        """Collect hostgroup info for *wwns* on the configured ports."""
        target_ports = self.configuration.hitachi_target_ports
        return self.common.command.comm_get_hostgroup_info(
            hgs, wwns, target_ports, login=login)

    def _fill_group(self, hgs, port, host_grp_name, wwns):
        """Find or create a hostgroup on *port* and attach *wwns* to it.

        Hostgroup creation is retried on GID races; a hostgroup this
        call created is rolled back if attaching the WWNs fails.
        """
        added_hostgroup = False
        LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
                  'name: %(name)s wwns: %(wwns)s)',
                  {'hgs': hgs, 'port': port,
                   'name': host_grp_name, 'wwns': wwns})
        gid = self._get_hgname_gid(port, host_grp_name)
        if gid is None:
            for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
                try:
                    gid = self._get_unused_gid(port)
                    self._add_hostgroup(port, gid, host_grp_name)
                    added_hostgroup = True
                except exception.HBSDNotFound:
                    # GID was taken by a concurrent creator; retry.
                    gid = None
                    LOG.warning(basic_lib.set_msg(312, resource='GID'))
                    continue
                else:
                    LOG.debug('Completed to add host target'
                              '(port: %(port)s gid: %(gid)d)',
                              {'port': port, 'gid': gid})
                    break
            else:
                # for/else: every retry failed.
                msg = basic_lib.output_err(641)
                raise exception.HBSDError(message=msg)
        try:
            if wwns:
                self._add_wwn(hgs, port, gid, wwns)
            else:
                # Pair hostgroups are created without initiator WWNs.
                hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None,
                            'detected': True})
        except Exception:
            with excutils.save_and_reraise_exception():
                if added_hostgroup:
                    self._delete_hostgroup(port, gid, host_grp_name)

    def add_hostgroup_master(self, hgs, master_wwns, host_ip,
                             security_ports):
        """Ensure hostgroups exist for all of this host's WWNs.

        :raises HBSDError: if no hostgroup could be found or created.
        """
        target_ports = self.configuration.hitachi_target_ports
        group_request = self.configuration.hitachi_group_request
        wwns = []
        for wwn in master_wwns:
            wwns.append(wwn.lower())
        if target_ports and group_request:
            host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
            for port in security_ports:
                wwns_copy = wwns[:]
                for hostgroup in hgs:
                    if (hostgroup['port'] == port and
                            hostgroup['initiator_wwn'].lower() in
                            wwns_copy):
                        wwns_copy.remove(hostgroup['initiator_wwn'].lower())
                if wwns_copy:
                    # Some WWNs are not yet registered on this port.
                    try:
                        self._fill_group(hgs, port, host_grp_name,
                                         wwns_copy)
                    except Exception as ex:
                        LOG.warning('Failed to add host group: %s', ex)
                        LOG.warning(basic_lib.set_msg(
                            308, port=port, name=host_grp_name))

        if not hgs:
            raise exception.HBSDError(message=basic_lib.output_err(649))

    def add_hostgroup_pair(self, pair_hostgroups):
        """Find or create the hostgroup used for pair (copy) operations."""
        if self.configuration.hitachi_unit_name:
            return

        properties = utils.brick_get_connector_properties()
        if 'wwpns' not in properties:
            msg = basic_lib.output_err(650, resource='HBA')
            raise exception.HBSDError(message=msg)
        hostgroups = []
        self._get_hostgroup_info(hostgroups, properties['wwpns'],
                                 login=False)
        host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX,
                                        self.pair_hostnum)
        for hostgroup in hostgroups:
            gid = self._get_hgname_gid(hostgroup['port'],
                                       host_grp_name)

            # When 'gid' is 0, it should be true.
            # So, it cannot remove 'is not None'.
            if gid is not None:
                pair_hostgroups.append({'port': hostgroup['port'],
                                        'gid': gid, 'initiator_wwn': None,
                                        'detected': True})
                break

        if not pair_hostgroups:
            for hostgroup in hostgroups:
                pair_port = hostgroup['port']
                try:
                    self._fill_group(pair_hostgroups, pair_port,
                                     host_grp_name, None)
                except Exception:
                    # Only give up once every candidate port failed.
                    if hostgroup is hostgroups[-1]:
                        raise
                else:
                    break

    def add_hostgroup(self):
        """Discover this host's WWPNs and register them on the array."""
        properties = utils.brick_get_connector_properties()
        if 'wwpns' not in properties:
            msg = basic_lib.output_err(650, resource='HBA')
            raise exception.HBSDError(message=msg)
        LOG.debug("wwpns: %s", properties['wwpns'])

        hostgroups = []
        security_ports = self._get_hostgroup_info(
            hostgroups, properties['wwpns'], login=False)
        self.add_hostgroup_master(hostgroups, properties['wwpns'],
                                  properties['ip'], security_ports)
        self.add_hostgroup_pair(self.pair_hostgroups)

    def _get_target_wwn(self, port):
        """Return the target WWN of *port*."""
        target_wwns = self.common.command.comm_set_target_wwns(
            self.configuration.hitachi_target_ports)
        return target_wwns[port]

    def _add_hostgroup(self, port, gid, host_grp_name):
        """Create a hostgroup on *port* with the given GID and name."""
        self.common.command.comm_add_hostgrp(port, gid, host_grp_name)

    def _delete_hostgroup(self, port, gid, host_grp_name):
        """Delete a hostgroup, logging before re-raising on failure."""
        try:
            self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.warning(basic_lib.set_msg(
                    306, port=port, gid=gid, name=host_grp_name))

    def _check_volume_mapping(self, hostgroup):
        """Return True if the hostgroup still has any LUN mapped."""
        port = hostgroup['port']
        gid = hostgroup['gid']
        if self.common.command.get_hostgroup_luns(port, gid):
            return True
        else:
            return False

    def _build_initiator_target_map(self, hostgroups, terminate=False):
        """Build (target_wwns, init_targ_map) for FC zone management.

        On terminate, hostgroups that still have LUNs mapped are left
        out of the map so their zones are preserved.
        """
        target_wwns = []
        init_targ_map = {}

        target_ports = self.configuration.hitachi_target_ports
        zoning_request = self.configuration.hitachi_zoning_request

        for hostgroup in hostgroups:
            target_wwn = self._get_target_wwn(hostgroup['port'])

            if target_wwn not in target_wwns:
                target_wwns.append(target_wwn)

            if target_ports and zoning_request:
                if terminate and self._check_volume_mapping(hostgroup):
                    continue

                initiator_wwn = hostgroup['initiator_wwn']
                if initiator_wwn not in init_targ_map:
                    init_targ_map[initiator_wwn] = []

                init_targ_map[initiator_wwn].append(target_wwn)

        return target_wwns, init_targ_map

    def _get_properties(self, volume, hostgroups, terminate=False):
        """Build the FC connection-properties dict."""
        properties = {}

        target_wwns, init_targ_map = self._build_initiator_target_map(
            hostgroups, terminate)

        properties['target_wwn'] = target_wwns

        if init_targ_map:
            properties['initiator_target_map'] = init_targ_map

        if not terminate:
            properties['target_lun'] = hostgroups[0]['lun']

        return properties

    def do_setup(self, context):
        """Connect to the array, create hostgroups, finish initialization."""
        self.context = context
        self.common = common.HBSDCommon(self.configuration, self,
                                        context, self.db)
        msg = _("The HBSD FC driver is deprecated and "
                "will be removed in P release.")
        versionutils.report_deprecated_feature(LOG, msg)

        self.check_param()

        self.common.create_lock_file()

        self.common.command.connect_storage()
        self.max_hostgroups = self.common.command.get_max_hostgroups()

        lock = basic_lib.get_process_lock(self.common.service_lock_file)
        with lock:
            self.add_hostgroup()

        self.output_param_to_log()
        # Unblock API entry points waiting on setup completion.
        self.do_setup_status.set()

    def check_for_setup_error(self):
        # Setup validation is done in do_setup(); nothing further here.
        pass

    def extend_volume(self, volume, new_size):
        self.do_setup_status.wait()
        self.common.extend_volume(volume, new_size)

    def get_volume_stats(self, refresh=False):
        """Return cached stats, refreshing when requested and set up."""
        if refresh:
            # FIX: Event.isSet() is a deprecated camelCase alias (removed
            # in Python 3.12); use is_set() instead.
            if self.do_setup_status.is_set():
                self.common.output_backend_available_once()
                _stats = self.common.update_volume_stats("FC")
                if _stats:
                    self._stats = _stats
        return self._stats

    def create_volume(self, volume):
        self.do_setup_status.wait()
        metadata = self.common.create_volume(volume)
        return metadata

    def delete_volume(self, volume):
        self.do_setup_status.wait()
        self.common.delete_volume(volume)

    def create_snapshot(self, snapshot):
        self.do_setup_status.wait()
        metadata = self.common.create_snapshot(snapshot)
        return metadata

    def delete_snapshot(self, snapshot):
        self.do_setup_status.wait()
        self.common.delete_snapshot(snapshot)

    def create_cloned_volume(self, volume, src_vref):
        self.do_setup_status.wait()
        metadata = self.common.create_cloned_volume(volume, src_vref)
        return metadata

    def create_volume_from_snapshot(self, volume, snapshot):
        self.do_setup_status.wait()
        metadata = self.common.create_volume_from_snapshot(volume, snapshot)
        return metadata

    def _initialize_connection(self, ldev, connector, src_hgs=None):
        """Map *ldev* to the connector's hostgroups (or pair hostgroups).

        When mapping to the pair hostgroups fails because none exists
        yet, new pair hostgroups are allocated until a GID is available.
        """
        LOG.debug("Call _initialize_connection "
                  "(config_group: %(group)s ldev: %(ldev)d)",
                  {'group': self.configuration.config_group, 'ldev': ldev})
        if src_hgs is self.pair_hostgroups:
            hostgroups = src_hgs
        else:
            hostgroups = []
            security_ports = self._get_hostgroup_info(
                hostgroups, connector['wwpns'], login=True)
            self.add_hostgroup_master(hostgroups, connector['wwpns'],
                                      connector['ip'], security_ports)

        if src_hgs is self.pair_hostgroups:
            try:
                self._add_lun(hostgroups, ldev)
            except exception.HBSDNotFound:
                LOG.warning(basic_lib.set_msg(311, ldev=ldev))
                for i in range(self.max_hostgroups + 1):
                    self.pair_hostnum += 1
                    pair_hostgroups = []
                    try:
                        self.add_hostgroup_pair(pair_hostgroups)
                        self.pair_hostgroups.extend(pair_hostgroups)
                    except exception.HBSDNotFound:
                        if i >= self.max_hostgroups:
                            msg = basic_lib.output_err(648, resource='GID')
                            raise exception.HBSDError(message=msg)
                    else:
                        break
                self.pair_initialize_connection(ldev)
        else:
            self._add_lun(hostgroups, ldev)

        return hostgroups

    @fczm_utils.add_fc_zone
    def initialize_connection(self, volume, connector):
        """Attach *volume*: map its LDEV and return FC connection info."""
        self.do_setup_status.wait()
        ldev = self.common.get_ldev(volume)
        if ldev is None:
            msg = basic_lib.output_err(619, volume_id=volume['id'])
            raise exception.HBSDError(message=msg)
        self.common.add_volinfo(ldev, volume['id'])
        with self.common.volume_info[ldev]['lock'],\
                self.common.volume_info[ldev]['in_use']:
            hostgroups = self._initialize_connection(ldev, connector)
            properties = self._get_properties(volume, hostgroups)
            LOG.debug('Initialize volume_info: %s',
                      self.common.volume_info)

        LOG.debug('HFCDrv: properties=%s', properties)
        return {
            'driver_volume_type': 'fibre_channel',
            'data': properties
        }

    def _terminate_connection(self, ldev, connector, src_hgs):
        """Unmap *ldev* from a copy of *src_hgs*."""
        LOG.debug("Call _terminate_connection(config_group: %s)",
                  self.configuration.config_group)
        hostgroups = src_hgs[:]
        self._delete_lun(hostgroups, ldev)
        LOG.debug("*** _terminate_ ***")

    @fczm_utils.remove_fc_zone
    def terminate_connection(self, volume, connector, **kwargs):
        """Detach *volume*: unmap its LDEV and return zone-removal info."""
        self.do_setup_status.wait()
        ldev = self.common.get_ldev(volume)
        if ldev is None:
            LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
            return

        if 'wwpns' not in connector:
            msg = basic_lib.output_err(650, resource='HBA')
            raise exception.HBSDError(message=msg)
        hostgroups = []
        self._get_hostgroup_info(hostgroups,
                                 connector['wwpns'], login=False)
        if not hostgroups:
            msg = basic_lib.output_err(649)
            raise exception.HBSDError(message=msg)

        self.common.add_volinfo(ldev, volume['id'])
        with self.common.volume_info[ldev]['lock'],\
                self.common.volume_info[ldev]['in_use']:
            self._terminate_connection(ldev, connector, hostgroups)
            properties = self._get_properties(volume, hostgroups,
                                              terminate=True)
            LOG.debug('Terminate volume_info: %s', self.common.volume_info)

        return {
            'driver_volume_type': 'fibre_channel',
            'data': properties
        }

    def pair_initialize_connection(self, ldev):
        """Map *ldev* to the pair hostgroups (no-op with a unit name)."""
        if self.configuration.hitachi_unit_name:
            return
        self._initialize_connection(ldev, None, self.pair_hostgroups)

    def pair_terminate_connection(self, ldev):
        """Unmap *ldev* from the pair hostgroups (no-op with a unit name)."""
        if self.configuration.hitachi_unit_name:
            return
        self._terminate_connection(ldev, None, self.pair_hostgroups)

    def discard_zero_page(self, volume):
        """Reclaim zeroed pages of the volume's LDEV on the array."""
        self.common.command.discard_zero_page(self.common.get_ldev(volume))

    def create_export(self, context, volume, connector):
        pass

    def ensure_export(self, context, volume):
        pass

    def remove_export(self, context, volume):
        pass

    def copy_image_to_volume(self, context, volume, image_service,
                             image_id):
        self.do_setup_status.wait()
        super(HBSDFCDriver, self).copy_image_to_volume(context, volume,
                                                       image_service,
                                                       image_id)
        # Reclaim the zero pages written while copying the image.
        self.discard_zero_page(volume)

    def copy_volume_to_image(self, context, volume, image_service,
                             image_meta):
        self.do_setup_status.wait()
        if volume['volume_attachment']:
            # Refuse to upload a volume that is attached somewhere.
            desc = 'volume %s' % volume['id']
            msg = basic_lib.output_err(660, desc=desc)
            raise exception.HBSDError(message=msg)
        super(HBSDFCDriver, self).copy_volume_to_image(context, volume,
                                                       image_service,
                                                       image_meta)

    def before_volume_copy(self, context, src_vol, dest_vol, remote=None):
        """Driver-specific actions before copyvolume data.

        This method will be called before _copy_volume_data during volume
        migration
        """
        self.do_setup_status.wait()

    def after_volume_copy(self, context, src_vol, dest_vol, remote=None):
        """Driver-specific actions after copyvolume data.

        This method will be called after _copy_volume_data during volume
        migration
        """
        self.discard_zero_page(dest_vol)

    def manage_existing(self, volume, existing_ref):
        return self.common.manage_existing(volume, existing_ref)

    def manage_existing_get_size(self, volume, existing_ref):
        self.do_setup_status.wait()
        return self.common.manage_existing_get_size(volume, existing_ref)

    def unmanage(self, volume):
        self.do_setup_status.wait()
        self.common.unmanage(volume)

File diff suppressed because it is too large Load Diff

View File

@@ -1,432 +0,0 @@
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Cinder volume driver for Hitachi storage.
"""
import os
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import configuration
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common
LOG = logging.getLogger(__name__)

# Accepted values for the hitachi_auth_method option.
CHAP_METHOD = ('None', 'CHAP None', 'CHAP')

# iSCSI-specific configuration options for the HBSD driver.
volume_opts = [
    cfg.BoolOpt('hitachi_add_chap_user',
                default=False,
                help='Add CHAP user'),
    cfg.StrOpt('hitachi_auth_method',
               help='iSCSI authentication method'),
    cfg.StrOpt('hitachi_auth_user',
               default='%sCHAP-user' % basic_lib.NAME_PREFIX,
               help='iSCSI authentication username'),
    cfg.StrOpt('hitachi_auth_password',
               default='%sCHAP-password' % basic_lib.NAME_PREFIX,
               help='iSCSI authentication password', secret=True),
]

CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
VERSION = common.VERSION
# ThirdPartySystems wiki page
CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"]
SUPPORTED = False
def __init__(self, *args, **kwargs):
    # The storage CLI tools this driver shells out to expect the C locale.
    os.environ['LANG'] = 'C'
    super(HBSDISCSIDriver, self).__init__(*args, **kwargs)
    self.db = kwargs.get('db')
    self.common = None
    self.configuration.append_config_values(common.volume_opts)
    self._stats = {}
    self.context = None
    # Set once do_setup() finishes; API entry points wait on it.
    self.do_setup_status = threading.Event()
def _check_param(self):
    """Validate iSCSI-specific configuration options."""
    self.configuration.append_config_values(volume_opts)
    if (self.configuration.hitachi_auth_method and
            self.configuration.hitachi_auth_method not in CHAP_METHOD):
        raise exception.HBSDError(
            message=basic_lib.output_err(601, param='hitachi_auth_method'))
    if self.configuration.hitachi_auth_method == 'None':
        # The string 'None' means CHAP disabled; normalize to None.
        self.configuration.hitachi_auth_method = None
    for opt in volume_opts:
        # Touch every option so missing/invalid values fail fast.
        getattr(self.configuration, opt.name)
def check_param(self):
    """Validate common and iSCSI options, wrapping unexpected errors."""
    try:
        self.common.check_param()
        self._check_param()
    except exception.HBSDError:
        raise
    except Exception as ex:
        msg = basic_lib.output_err(601, param=six.text_type(ex))
        raise exception.HBSDError(message=msg)
def output_param_to_log(self):
    """Log every non-secret iSCSI driver option under the system lock."""
    lock = basic_lib.get_process_lock(self.common.system_lock_file)
    with lock:
        self.common.output_param_to_log('iSCSI')
        for opt in volume_opts:
            if not opt.secret:
                value = getattr(self.configuration, opt.name)
                LOG.info('\t%(name)-35s : %(value)s',
                         {'name': opt.name, 'value': value})
def _delete_lun_iscsi(self, hostgroups, ldev):
    """Unmap *ldev* from the iSCSI targets; a missing LUN is logged."""
    try:
        self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
    except exception.HBSDNotFound:
        # Already unmapped; log and carry on.
        LOG.warning(basic_lib.set_msg(301, ldev=ldev))
def _add_target(self, hostgroups, ldev):
    """Map *ldev* to every iSCSI target in *hostgroups*."""
    self.common.add_lun('autargetmap', hostgroups, ldev)
def _add_initiator(self, hgs, port, gid, host_iqn):
    """Register *host_iqn* with the target and record the mapping."""
    self.common.command.comm_add_initiator(port, gid, host_iqn)
    entry = {'port': port, 'gid': int(gid), 'detected': True}
    hgs.append(entry)
    LOG.debug("Create iSCSI target for %s", hgs)
def _get_unused_gid_iscsi(self, port):
    """Return a free target GID on *port* within the configured range."""
    configured = self.configuration.hitachi_group_range
    group_range = configured or basic_lib.DEFAULT_GROUP_RANGE
    return self.common.command.get_unused_gid_iscsi(group_range, port)
def _delete_iscsi_target(self, port, target_no, target_alias):
    """Delete an iSCSI target, logging (not raising) on failure."""
    ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
        port, target_no, target_alias)
    if not ret:
        return
    LOG.warning(basic_lib.set_msg(
        307, port=port, tno=target_no, alias=target_alias))
def _delete_chap_user(self, port):
    """Delete the CHAP user on *port*, logging (not raising) on failure."""
    ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
    if not ret:
        return
    LOG.warning(basic_lib.set_msg(
        303, user=self.configuration.hitachi_auth_user))
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
    """Collect iSCSI target info for *host_iqn* on the configured ports."""
    target_ports = self.configuration.hitachi_target_ports
    return self.common.command.comm_get_hostgroup_info_iscsi(
        hgs, host_iqn, target_ports)
def _discovery_iscsi_target(self, hostgroups):
    """Fill each hostgroup with its portal address, port and target IQN."""
    for hostgroup in hostgroups:
        port = hostgroup['port']
        ip_addr, ip_port = self.common.command.comm_get_iscsi_ip(port)
        target_iqn = self.common.command.comm_get_target_iqn(
            port, hostgroup['gid'])
        hostgroup['ip_addr'] = ip_addr
        hostgroup['ip_port'] = ip_port
        hostgroup['target_iqn'] = target_iqn
        LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s",
                  {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
    """Create or reuse an iSCSI target on every port, registering *add_iqn*.

    For each port: reuse an existing target matching *target_iqn* /
    *target_alias*, otherwise create one (retrying on GID races).
    Failures on one port are logged and the next port is tried; anything
    created on a failing port is rolled back.
    """
    for port in ports:
        added_hostgroup = False
        added_user = False
        LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
                  'target_iqn: %(tiqn)s target_alias: %(alias)s '
                  'add_iqn: %(aiqn)s)',
                  {'hgs': hgs, 'port': port, 'tiqn': target_iqn,
                   'alias': target_alias, 'aiqn': add_iqn})
        gid = self.common.command.get_gid_from_targetiqn(
            target_iqn, target_alias, port)
        if gid is None:
            for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
                gid = None
                try:
                    gid = self._get_unused_gid_iscsi(port)
                    self.common.command.comm_add_hostgrp_iscsi(
                        port, gid, target_alias, target_iqn)
                    added_hostgroup = True
                except exception.HBSDNotFound:
                    # GID was taken by a concurrent creator; retry.
                    LOG.warning(basic_lib.set_msg(312, resource='GID'))
                    continue
                except Exception as ex:
                    LOG.warning(basic_lib.set_msg(
                        309, port=port, alias=target_alias,
                        reason=ex))
                    break
                else:
                    LOG.debug('Completed to add target'
                              '(port: %(port)s gid: %(gid)d)',
                              {'port': port, 'gid': gid})
                    break
        if gid is None:
            # This port failed entirely; move on to the next one.
            LOG.error('Failed to add target(port: %s)', port)
            continue
        try:
            if added_hostgroup:
                if self.configuration.hitachi_auth_method:
                    added_user = self.common.command.set_chap_authention(
                        port, gid)
                self.common.command.comm_set_hostgrp_reportportal(
                    port, target_alias)
            self._add_initiator(hgs, port, gid, add_iqn)
        except Exception as ex:
            LOG.warning(basic_lib.set_msg(
                316, port=port, reason=ex))
            # Roll back whatever this iteration created on the port.
            if added_hostgroup:
                if added_user:
                    self._delete_chap_user(port)
                self._delete_iscsi_target(port, gid, target_alias)
def add_hostgroup_core(self, hgs, ports, target_iqn,
                       target_alias, add_iqn):
    """Create targets on *ports*; a no-op when there are none."""
    if not ports:
        return
    self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn)
def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports):
    """Ensure iSCSI targets exist for this host on all ports lacking one.

    :raises HBSDError: if no hostgroup could be found or created.
    """
    target_ports = self.configuration.hitachi_target_ports
    group_request = self.configuration.hitachi_group_request
    target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
    if target_ports and group_request:
        target_iqn = '%s.target' % master_iqn

        diff_ports = []
        for port in security_ports:
            for hostgroup in hgs:
                if hostgroup['port'] == port:
                    break
            else:
                # for/else: no existing hostgroup on this port.
                diff_ports.append(port)

        self.add_hostgroup_core(hgs, diff_ports, target_iqn,
                                target_alias, master_iqn)
    if not hgs:
        raise exception.HBSDError(message=basic_lib.output_err(649))
    def add_hostgroup(self):
        """Register this node's initiator IQN with the storage.

        :raises HBSDError: when the local connector exposes no initiator
                           (msg 650).
        """
        properties = utils.brick_get_connector_properties()
        if 'initiator' not in properties:
            raise exception.HBSDError(
                message=basic_lib.output_err(650, resource='HBA'))
        LOG.debug("initiator: %s", properties['initiator'])
        hostgroups = []
        security_ports = self._get_hostgroup_info_iscsi(
            hostgroups, properties['initiator'])
        self.add_hostgroup_master(hostgroups, properties['initiator'],
                                  properties['ip'], security_ports)
def _get_properties(self, volume, hostgroups):
conf = self.configuration
properties = {}
self._discovery_iscsi_target(hostgroups)
hostgroup = hostgroups[0]
properties['target_discovered'] = True
properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'],
hostgroup['ip_port'])
properties['target_iqn'] = hostgroup['target_iqn']
properties['target_lun'] = hostgroup['lun']
if conf.hitachi_auth_method:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = conf.hitachi_auth_user
properties['auth_password'] = conf.hitachi_auth_password
return properties
    def do_setup(self, context):
        """One-time driver initialization.

        Connects to the storage, registers host groups for this node and
        finally signals do_setup_status so that queued volume operations
        may proceed.
        """
        self.context = context
        self.common = common.HBSDCommon(self.configuration, self,
                                        context, self.db)
        msg = _("The HBSD iSCSI driver is deprecated and "
                "will be removed in P release")
        versionutils.report_deprecated_feature(LOG, msg)
        self.check_param()
        self.common.create_lock_file()
        self.common.command.connect_storage()
        # Serialize host group creation across processes using this backend.
        lock = basic_lib.get_process_lock(self.common.service_lock_file)
        with lock:
            self.add_hostgroup()
        self.output_param_to_log()
        # Unblock every method waiting on do_setup_status.wait().
        self.do_setup_status.set()
def check_for_setup_error(self):
pass
    def extend_volume(self, volume, new_size):
        """Grow *volume* to *new_size* GB once setup has completed."""
        self.do_setup_status.wait()
        self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
if refresh:
if self.do_setup_status.isSet():
self.common.output_backend_available_once()
_stats = self.common.update_volume_stats("iSCSI")
if _stats:
self._stats = _stats
return self._stats
def create_volume(self, volume):
self.do_setup_status.wait()
metadata = self.common.create_volume(volume)
return metadata
    def delete_volume(self, volume):
        """Delete the LDEV backing *volume*."""
        self.do_setup_status.wait()
        self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_snapshot(snapshot)
return metadata
    def delete_snapshot(self, snapshot):
        """Delete *snapshot* from the array."""
        self.do_setup_status.wait()
        self.common.delete_snapshot(snapshot)
def create_cloned_volume(self, volume, src_vref):
self.do_setup_status.wait()
metadata = self.common.create_cloned_volume(volume, src_vref)
return metadata
def create_volume_from_snapshot(self, volume, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
return metadata
    def _initialize_connection(self, ldev, connector, src_hgs=None):
        """Map *ldev* to the connector's initiator and return host groups.

        :param src_hgs: optional pre-discovered host groups; copied so the
                        caller's list is not mutated.
        """
        LOG.debug("Call _initialize_connection "
                  "(config_group: %(group)s ldev: %(ldev)d)",
                  {'group': self.configuration.config_group, 'ldev': ldev})
        if src_hgs:
            hostgroups = src_hgs[:]
        else:
            hostgroups = []
            security_ports = self._get_hostgroup_info_iscsi(
                hostgroups, connector['initiator'])
            self.add_hostgroup_master(hostgroups, connector['initiator'],
                                      connector['ip'], security_ports)
        self._add_target(hostgroups, ldev)
        return hostgroups
    def initialize_connection(self, volume, connector):
        """Attach *volume* to *connector* and return iSCSI connection info.

        :raises HBSDError: when the volume has no backing LDEV (msg 619).
        """
        self.do_setup_status.wait()
        ldev = self.common.get_ldev(volume)
        if ldev is None:
            raise exception.HBSDError(
                message=basic_lib.output_err(619, volume_id=volume['id']))
        self.common.add_volinfo(ldev, volume['id'])
        # Hold both the per-LDEV lock and the in_use guard while mapping.
        with self.common.volume_info[ldev]['lock'],\
                self.common.volume_info[ldev]['in_use']:
            hostgroups = self._initialize_connection(ldev, connector)
            protocol = 'iscsi'
            properties = self._get_properties(volume, hostgroups)
            LOG.debug('Initialize volume_info: %s',
                      self.common.volume_info)
            LOG.debug('HFCDrv: properties=%s', properties)
            return {
                'driver_volume_type': protocol,
                'data': properties
            }
    def _terminate_connection(self, ldev, connector, src_hgs):
        """Unmap *ldev* from the host groups in *src_hgs*."""
        LOG.debug("Call _terminate_connection(config_group: %s)",
                  self.configuration.config_group)
        # Work on a copy so the caller's list is left untouched.
        hostgroups = src_hgs[:]
        self._delete_lun_iscsi(hostgroups, ldev)
        LOG.debug("*** _terminate_ ***")
    def terminate_connection(self, volume, connector, **kwargs):
        """Detach *volume* from *connector*.

        A volume with no backing LDEV only logs a warning (msg 302);
        a connector without an initiator raises HBSDError (msg 650).
        """
        self.do_setup_status.wait()
        ldev = self.common.get_ldev(volume)
        if ldev is None:
            LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
            return
        if 'initiator' not in connector:
            raise exception.HBSDError(
                message=basic_lib.output_err(650, resource='HBA'))
        hostgroups = []
        self._get_hostgroup_info_iscsi(hostgroups,
                                       connector['initiator'])
        if not hostgroups:
            raise exception.HBSDError(message=basic_lib.output_err(649))
        self.common.add_volinfo(ldev, volume['id'])
        with self.common.volume_info[ldev]['lock'],\
                self.common.volume_info[ldev]['in_use']:
            self._terminate_connection(ldev, connector, hostgroups)
def create_export(self, context, volume, connector):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def pair_initialize_connection(self, unused_ldev):
pass
def pair_terminate_connection(self, unused_ldev):
pass
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Upload *volume* to the image service.

        :raises HBSDError: when the volume is attached (msg 660).
        """
        self.do_setup_status.wait()
        if volume['volume_attachment']:
            desc = 'volume %s' % volume['id']
            raise exception.HBSDError(
                message=basic_lib.output_err(660, desc=desc))
        super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
                                                          image_service,
                                                          image_meta)
def manage_existing(self, volume, existing_ref):
return self.common.manage_existing(volume, existing_ref)
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size in GB of the LDEV named by *existing_ref*."""
        self.do_setup_status.wait()
        return self.common.manage_existing_get_size(volume, existing_ref)
    def unmanage(self, volume):
        """Remove *volume* from Cinder management, leaving the LDEV intact."""
        self.do_setup_status.wait()
        self.common.unmanage(volume)

File diff suppressed because it is too large Load Diff

View File

@@ -1,483 +0,0 @@
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Hitachi Unified Storage (HUS-HNAS) platform. Backend operations.
"""
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder import ssh_utils
from cinder import utils
LOG = logging.getLogger("cinder.volume.driver")
# Retry count for ssc commands that fail with a connection error.
HNAS_SSC_RETRIES = 5
class HNASSSHBackend(object):
    """Drives HNAS management operations through the 'ssc' CLI over SSH.

    All methods shell out (via an SSH pool) to the SMU and screen-scrape
    the command output, so statement order and parsing indexes matter.
    """
    def __init__(self, backend_opts):
        # Connection options only; the SSH pool is created lazily on the
        # first command in _run_cmd().
        self.mgmt_ip0 = backend_opts.get('mgmt_ip0')
        self.hnas_cmd = backend_opts.get('ssc_cmd', 'ssc')
        self.cluster_admin_ip0 = backend_opts.get('cluster_admin_ip0')
        self.ssh_port = backend_opts.get('ssh_port', '22')
        self.ssh_username = backend_opts.get('username')
        self.ssh_pwd = backend_opts.get('password')
        self.ssh_private_key = backend_opts.get('ssh_private_key')
        # Caches: storage version info, SSH pool, filesystem and target lists.
        self.storage_version = None
        self.sshpool = None
        self.fslist = {}
        self.tgt_list = {}
    @utils.retry(exceptions=exception.HNASConnError, retries=HNAS_SSC_RETRIES,
                 wait_random=True)
    def _run_cmd(self, *args, **kwargs):
        """Runs a command on SMU using SSH.
        Retried up to HNAS_SSC_RETRIES times on HNASConnError.
        :returns: stdout and stderr of the command
        """
        if self.cluster_admin_ip0 is None:
            # Connect to SMU through SSH and run ssc locally
            args = (self.hnas_cmd, 'localhost') + args
        else:
            args = (self.hnas_cmd, '--smuauth', self.cluster_admin_ip0) + args
        utils.check_ssh_injection(args)
        command = ' '.join(args)
        command = command.replace('"', '\\"')
        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(ip=self.mgmt_ip0,
                                             port=int(self.ssh_port),
                                             conn_timeout=None,
                                             login=self.ssh_username,
                                             password=self.ssh_pwd,
                                             privatekey=self.ssh_private_key)
        with self.sshpool.item() as ssh:
            try:
                out, err = putils.ssh_execute(ssh, command,
                                              check_exit_code=True)
                LOG.debug("command %(cmd)s result: out = "
                          "%(out)s - err = %(err)s",
                          {'cmd': self.hnas_cmd, 'out': out, 'err': err})
                return out, err
            except putils.ProcessExecutionError as e:
                # Map known transient SSC failures onto HNASConnError so the
                # retry decorator re-runs the command.
                if 'Failed to establish SSC connection' in e.stderr:
                    msg = _("Failed to establish SSC connection!")
                    LOG.exception(msg)
                    raise exception.HNASConnError(msg)
                elif 'Connection reset' in e.stderr:
                    msg = _("HNAS connection reset!")
                    LOG.exception(msg)
                    raise exception.HNASConnError(msg)
                else:
                    raise
    def get_version(self):
        """Gets version information from the storage unit.
        The result is cached in self.storage_version after the first call.
        :returns: dictionary with HNAS information
        .. code:: python
        storage_version={
        'mac': HNAS MAC ID,
        'model': HNAS model,
        'version': the software version,
        'hardware': the hardware version,
        'serial': HNAS serial number
        }
        """
        if not self.storage_version:
            version_info = {}
            out, err = self._run_cmd("cluster-getmac")
            mac = out.split(':')[1].strip()
            version_info['mac'] = mac
            out, err = self._run_cmd("ver")
            # NOTE(review): fixed line/field positions assume a stable 'ver'
            # output layout — confirm against the target firmware.
            split_out = out.split('\n')
            model = split_out[1].split(':')[1].strip()
            version = split_out[3].split()[1]
            hardware = split_out[5].split(':')[1].strip()
            serial = split_out[12].split()[2]
            version_info['model'] = model
            version_info['version'] = version
            version_info['hardware'] = hardware
            version_info['serial'] = serial
            self.storage_version = version_info
        LOG.debug("version_info: %(info)s", {'info': self.storage_version})
        return self.storage_version
    def get_evs_info(self):
        """Gets the IP addresses of all EVSs in HNAS.
        :returns: dictionary with EVS information
        .. code:: python
        evs_info={
        <IP1>: {evs_number: number identifying the EVS1 on HNAS},
        <IP2>: {evs_number: number identifying the EVS2 on HNAS},
        ...
        }
        """
        evs_info = {}
        out, err = self._run_cmd("evsipaddr", "-l")
        out = out.split('\n')
        for line in out:
            # Skip the admin EVS; only service EVS entries are wanted.
            if 'evs' in line and 'admin' not in line:
                ip = line.split()[3].strip()
                evs_info[ip] = {}
                evs_info[ip]['evs_number'] = line.split()[1].strip()
        return evs_info
    def get_fs_info(self, fs_label):
        """Gets the information of a given FS.
        :param fs_label: Label of the filesystem
        :returns: dictionary with FS information (empty dict when the FS is
                  not mounted or cannot be found)
        .. code:: python
        fs_info={
        'id': a Logical Unit ID,
        'label': a Logical Unit name,
        'evs_id': the ID of the EVS in which the filesystem is created
        (not present if there is a single EVS),
        'total_size': the total size of the FS (in GB),
        'used_size': the size that is already used (in GB),
        'available_size': the free space (in GB)
        }
        """
        def _convert_size(param):
            # NOTE(review): multiplies by units.Mi for the TB case — confirm
            # the intended unit scaling (TB -> GB would be units.Ki).
            size = float(param) * units.Mi
            return six.text_type(size)
        fs_info = {}
        single_evs = True
        # Column indexes of the fields of interest in the 'df -af' output.
        id, lbl, evs, t_sz, u_sz, a_sz = 0, 1, 2, 3, 5, 12
        t_sz_unit, u_sz_unit, a_sz_unit = 4, 6, 13
        out, err = self._run_cmd("df", "-af", fs_label)
        invalid_outs = ['Not mounted', 'Not determined', 'not found']
        for problem in invalid_outs:
            if problem in out:
                return {}
        if 'EVS' in out:
            single_evs = False
        fs_data = out.split('\n')[3].split()
        # Getting only the desired values from the output. If there is a single
        # EVS, its ID is not shown in the output and we have to decrease the
        # indexes to get the right values.
        fs_info['id'] = fs_data[id]
        fs_info['label'] = fs_data[lbl]
        if not single_evs:
            fs_info['evs_id'] = fs_data[evs]
        fs_info['total_size'] = (
            (fs_data[t_sz]) if not single_evs else fs_data[t_sz - 1])
        fs_info['used_size'] = (
            fs_data[u_sz] if not single_evs else fs_data[u_sz - 1])
        fs_info['available_size'] = (
            fs_data[a_sz] if not single_evs else fs_data[a_sz - 1])
        # Converting the sizes if necessary.
        if not single_evs:
            if fs_data[t_sz_unit] == 'TB':
                fs_info['total_size'] = _convert_size(fs_info['total_size'])
            if fs_data[u_sz_unit] == 'TB':
                fs_info['used_size'] = _convert_size(fs_info['used_size'])
            if fs_data[a_sz_unit] == 'TB':
                fs_info['available_size'] = _convert_size(
                    fs_info['available_size'])
        else:
            if fs_data[t_sz_unit - 1] == 'TB':
                fs_info['total_size'] = _convert_size(fs_info['total_size'])
            if fs_data[u_sz_unit - 1] == 'TB':
                fs_info['used_size'] = _convert_size(fs_info['used_size'])
            if fs_data[a_sz_unit - 1] == 'TB':
                fs_info['available_size'] = _convert_size(
                    fs_info['available_size'])
        fs_info['provisioned_capacity'] = 0
        LOG.debug("File system info of %(fs)s (sizes in GB): %(info)s.",
                  {'fs': fs_label, 'info': fs_info})
        return fs_info
    def get_evs(self, fs_label):
        """Gets the EVS ID for the named filesystem.
        :param fs_label: The filesystem label related to the EVS required
        :returns: EVS ID of the filesystem (None when the FS is unknown)
        """
        if not self.fslist:
            self._get_fs_list()
        # When the FS is found in the list of known FS, returns the EVS ID
        for key in self.fslist:
            if fs_label == self.fslist[key]['label']:
                LOG.debug("EVS ID for fs %(fs)s: %(id)s.",
                          {'fs': fs_label, 'id': self.fslist[key]['evsid']})
                return self.fslist[key]['evsid']
        LOG.debug("Can't find EVS ID for fs %(fs)s.", {'fs': fs_label})
    def file_clone(self, fs_label, src, name):
        """Clones NFS files to a new one named 'name'.
        Clone primitive used to support all NFS snapshot/cloning functions.
        :param fs_label: file system label of the new file
        :param src: source file
        :param name: target path of the new created file
        :raises InvalidParameterValue: when fs_label is not a known FS
        """
        fs_list = self._get_fs_list()
        fs = fs_list.get(fs_label)
        if not fs:
            LOG.error("Can't find file %(file)s in FS %(label)s",
                      {'file': src, 'label': fs_label})
            msg = _('FS label: %(fs_label)s') % {'fs_label': fs_label}
            raise exception.InvalidParameterValue(err=msg)
        self._run_cmd("console-context", "--evs", fs['evsid'],
                      'file-clone-create', '-f', fs_label, src, name)
        LOG.debug('file_clone: fs:%(fs_label)s %(src)s/src: -> %(name)s/dst',
                  {'fs_label': fs_label, 'src': src, 'name': name})
    def _get_fs_list(self):
        """Gets a list of file systems configured on the backend.
        The result is cached in self.fslist after the first call.
        :returns: a list with the Filesystems configured on HNAS
        """
        if not self.fslist:
            fslist_out, err = self._run_cmd('evsfs', 'list')
            # Drop the header (first 3 lines) and trailer (last 2 lines).
            list_raw = fslist_out.split('\n')[3:-2]
            for fs_raw in list_raw:
                fs = {}
                fs_raw = fs_raw.split()
                fs['id'] = fs_raw[0]
                fs['label'] = fs_raw[1]
                fs['permid'] = fs_raw[2]
                fs['evsid'] = fs_raw[3]
                fs['evslabel'] = fs_raw[4]
                self.fslist[fs['label']] = fs
        return self.fslist
    def _get_evs_list(self):
        """Gets a list of EVS configured on the backend.
        :returns: a list of the EVS configured on HNAS
        """
        evslist_out, err = self._run_cmd('evs', 'list')
        evslist = {}
        # idx tracks the current line number so that the IP continuation
        # lines following an EVS entry can be scanned.
        idx = 0
        for evs_raw in evslist_out.split('\n'):
            idx += 1
            if 'Service' in evs_raw and 'Online' in evs_raw:
                evs = {}
                evs_line = evs_raw.split()
                evs['node'] = evs_line[0]
                evs['id'] = evs_line[1]
                evs['label'] = evs_line[3]
                evs['ips'] = []
                evs['ips'].append(evs_line[6])
                # Each EVS can have a list of IPs that are displayed in the
                # next lines of the evslist_out. We need to check if the next
                # lines is a new EVS entry or and IP of this current EVS.
                for evs_ip_raw in evslist_out.split('\n')[idx:]:
                    if 'Service' in evs_ip_raw or not evs_ip_raw.split():
                        break
                    ip = evs_ip_raw.split()[0]
                    evs['ips'].append(ip)
                evslist[evs['label']] = evs
        return evslist
    def get_export_list(self):
        """Gets information on each NFS export.
        :returns: a list of the exports configured on HNAS
        """
        nfs_export_out, _ = self._run_cmd('for-each-evs', '-q', 'nfs-export',
                                          'list')
        fs_list = self._get_fs_list()
        evs_list = self._get_evs_list()
        export_list = []
        for export_raw_data in nfs_export_out.split("Export name:")[1:]:
            export_info = {}
            export_data = export_raw_data.split('\n')
            export_info['name'] = export_data[0].strip()
            export_info['path'] = export_data[1].split(':')[1].strip()
            export_info['fs'] = export_data[2].split(':')[1].strip()
            # Unavailable exports are reported with sentinel sizes of -1.
            if "*** not available ***" in export_raw_data:
                export_info['size'] = -1
                export_info['free'] = -1
            else:
                evslbl = fs_list[export_info['fs']]['evslabel']
                export_info['evs'] = evs_list[evslbl]['ips']
                size = export_data[3].split(':')[1].strip().split()[0]
                multiplier = export_data[3].split(':')[1].strip().split()[1]
                # Normalize TB values to GB.
                if multiplier == 'TB':
                    export_info['size'] = float(size) * units.Ki
                else:
                    export_info['size'] = float(size)
                free = export_data[4].split(':')[1].strip().split()[0]
                fmultiplier = export_data[4].split(':')[1].strip().split()[1]
                if fmultiplier == 'TB':
                    export_info['free'] = float(free) * units.Ki
                else:
                    export_info['free'] = float(free)
            export_list.append(export_info)
        LOG.debug("get_export_list: %(exp_list)s", {'exp_list': export_list})
        return export_list
    def _get_file_handler(self, volume_path, _evs_id, fs_label,
                          raise_except):
        """Return the snapshot file handles of a cloned file.

        Returns None when the file is not a clone and raise_except is False.
        """
        try:
            out, err = self._run_cmd("console-context", "--evs", _evs_id,
                                     'file-clone-stat', '-f', fs_label,
                                     volume_path)
        except putils.ProcessExecutionError as e:
            if 'File is not a clone' in e.stderr and raise_except:
                msg = (_("%s is not a clone!") % volume_path)
                raise exception.ManageExistingInvalidReference(
                    existing_ref=volume_path, reason=msg)
            else:
                return
        lines = out.split('\n')
        filehandle_list = []
        for line in lines:
            if "SnapshotFile:" in line and "FileHandle" in line:
                item = line.split(':')
                handler = item[1][:-1].replace(' FileHandle[', "")
                filehandle_list.append(handler)
                LOG.debug("Volume handler found: %(fh)s. Adding to list...",
                          {'fh': handler})
        return filehandle_list
    def get_cloned_file_relatives(self, file_path, fs_label,
                                  raise_except=False):
        """Gets the files related to a clone
        :param file_path: path of the cloned file
        :param fs_label: filesystem of the cloned file
        :param raise_except: If True exception will be raised for files that
        aren't clones. If False, only an error message
        is logged.
        :returns: list with names of the related files
        """
        relatives = []
        _evs_id = self.get_evs(fs_label)
        file_handler_list = self._get_file_handler(file_path, _evs_id,
                                                   fs_label, raise_except)
        if file_handler_list:
            for file_handler in file_handler_list:
                out, err = self._run_cmd('console-context', '--evs', _evs_id,
                                         'file-clone-stat-snapshot-file', '-f',
                                         fs_label, file_handler)
                results = out.split('\n')
                for value in results:
                    # The file itself is excluded from its own relatives.
                    if 'Clone:' in value and file_path not in value:
                        relative = value.split(':')[1]
                        relatives.append(relative)
        else:
            LOG.debug("File %(path)s is not a clone.", {
                'path': file_path})
        return relatives
    def check_snapshot_parent(self, volume_path, snap_name, fs_label):
        """Check if a volume is the snapshot source
        :param volume_path: path of the volume
        :param snap_name: name of the snapshot
        :param fs_label: filesystem label
        :return: True if the volume is the snapshot's source or False otherwise
        """
        lines = self.get_cloned_file_relatives(volume_path, fs_label, True)
        for line in lines:
            if snap_name in line:
                LOG.debug("Snapshot %(snap)s found in children list from "
                          "%(vol)s!", {'snap': snap_name,
                                       'vol': volume_path})
                return True
        LOG.debug("Snapshot %(snap)s was not found in children list from "
                  "%(vol)s, probably it is not the parent!",
                  {'snap': snap_name, 'vol': volume_path})
        return False
    def get_export_path(self, export, fs_label):
        """Gets the path of an export on HNAS
        :param export: the export's name
        :param fs_label: the filesystem name
        :returns: string of the export's path (None when the export is not
                  listed)
        """
        evs_id = self.get_evs(fs_label)
        out, err = self._run_cmd("console-context", "--evs", evs_id,
                                 'nfs-export', 'list', export)
        lines = out.split('\n')
        for line in lines:
            if 'Export path:' in line:
                return line.split('Export path:')[1].strip()

File diff suppressed because it is too large Load Diff

View File

@@ -1,342 +0,0 @@
# Copyright (c) 2016 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code for HNAS drivers
"""
import os
import re
from oslo_config import cfg
from oslo_log import log as logging
import six
from xml.etree import ElementTree as ETree
from cinder import exception
from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
# Fallbacks applied when an optional parameter is absent from both
# cinder.conf and the legacy XML configuration file.
HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc',
                       'chap_enabled': True,
                       'ssh_port': 22}
MAX_HNAS_ISCSI_TARGETS = 32
# Configuration options shared by the HNAS NFS and iSCSI drivers.
drivers_common_opts = [
    cfg.IPOpt('hnas_mgmt_ip0',
              help='Management IP address of HNAS. This can '
                   'be any IP in the admin address on HNAS or '
                   'the SMU IP.'),
    cfg.StrOpt('hnas_ssc_cmd',
               default='ssc',
               help='Command to communicate to HNAS.'),
    cfg.StrOpt('hnas_username',
               help='HNAS username.'),
    cfg.StrOpt('hnas_password',
               secret=True,
               help='HNAS password.'),
    cfg.PortOpt('hnas_ssh_port',
                default=22,
                help='Port to be used for SSH authentication.'),
    cfg.StrOpt('hnas_ssh_private_key',
               help='Path to the SSH private key used to '
                    'authenticate in HNAS SMU.'),
    cfg.StrOpt('hnas_cluster_admin_ip0',
               default=None,
               help='The IP of the HNAS cluster admin. '
                    'Required only for HNAS multi-cluster setups.'),
    cfg.StrOpt('hnas_svc0_pool_name',
               help='Service 0 pool name',
               deprecated_name='hnas_svc0_volume_type'),
    cfg.StrOpt('hnas_svc0_hdp',
               help='Service 0 HDP'),
    cfg.StrOpt('hnas_svc1_pool_name',
               help='Service 1 pool name',
               deprecated_name='hnas_svc1_volume_type'),
    cfg.StrOpt('hnas_svc1_hdp',
               help='Service 1 HDP'),
    cfg.StrOpt('hnas_svc2_pool_name',
               help='Service 2 pool name',
               deprecated_name='hnas_svc2_volume_type'),
    cfg.StrOpt('hnas_svc2_hdp',
               help='Service 2 HDP'),
    cfg.StrOpt('hnas_svc3_pool_name',
               help='Service 3 pool name:',
               deprecated_name='hnas_svc3_volume_type'),
    cfg.StrOpt('hnas_svc3_hdp',
               help='Service 3 HDP')
]
CONF = cfg.CONF
CONF.register_opts(drivers_common_opts, group=configuration.SHARED_CONF_GROUP)
def _check_conf_params(config, pool_name, idx):
    """Validates if the configuration on cinder.conf is complete.
    :param config: Dictionary with the driver configurations
    :param pool_name: The name of the current pool
    :param idx: Index of the current pool
    :raises InvalidParameterValue: when a mandatory option is missing
    """
    # Validating the inputs on cinder.conf
    if config['username'] is None:
        msg = (_("The config parameter hnas_username "
                 "is not set in the cinder.conf."))
        LOG.error(msg)
        raise exception.InvalidParameterValue(err=msg)
    # Either a password or a private key must be present.
    if (config['password'] is None and
            config['ssh_private_key'] is None):
        msg = (_("Credentials configuration parameters "
                 "missing: you need to set hnas_password "
                 "or hnas_ssh_private_key "
                 "in the cinder.conf."))
        LOG.error(msg)
        raise exception.InvalidParameterValue(err=msg)
    if config['mgmt_ip0'] is None:
        msg = (_("The config parameter hnas_mgmt_ip0 "
                 "is not set in the cinder.conf."))
        LOG.error(msg)
        raise exception.InvalidParameterValue(err=msg)
    if config['services'][pool_name]['hdp'] is None:
        msg = (_("The config parameter hnas_svc%(idx)s_hdp is "
                 "not set in the cinder.conf. Note that you need to "
                 "have at least one pool configured.") %
               {'idx': idx})
        LOG.error(msg)
        raise exception.InvalidParameterValue(err=msg)
    if config['services'][pool_name]['pool_name'] is None:
        msg = (_("The config parameter "
                 "hnas_svc%(idx)s_pool_name is not set "
                 "in the cinder.conf. Note that you need to "
                 "have at least one pool configured.") %
               {'idx': idx})
        LOG.error(msg)
        raise exception.InvalidParameterValue(err=msg)
def _xml_read(root, element, check=None):
    """Read an xml element.
    :param root: XML object
    :param element: string desired tag
    :param check: string if present, throw exception if element missing
    :returns: the stripped tag text, '' for an empty svc_N tag, or None
              when the tag is absent and check is not set
    :raises ParameterNotFound: when a checked tag is missing or empty
    """
    val = root.findtext(element)
    # mandatory parameter not found
    if val is None and check:
        LOG.error("Mandatory parameter not found: %(p)s", {'p': element})
        raise exception.ParameterNotFound(param=element)
    # tag not found
    if val is None:
        return None
    svc_tag_pattern = re.compile("svc_[0-3]$")
    # tag found but empty parameter.
    if not val.strip():
        # Empty svc_N container tags are legal and mean "section present".
        if svc_tag_pattern.search(element):
            return ""
        LOG.error("Parameter not found: %(param)s", {'param': element})
        raise exception.ParameterNotFound(param=element)
    LOG.debug("%(element)s: %(val)s",
              {'element': element,
               'val': val if element != 'password' else '***'})
    return val.strip()
def read_xml_config(xml_config_file, svc_params, optional_params):
    """Read Hitachi driver specific xml config file.
    :param xml_config_file: string filename containing XML configuration
    :param svc_params: parameters to configure the services
    .. code:: python
    ['volume_type', 'hdp']
    :param optional_params: parameters to configure that are not mandatory
    .. code:: python
    ['ssc_cmd', 'cluster_admin_ip0', 'chap_enabled']
    :returns: dictionary with the parsed configuration
    :raises ConfigNotFound: when the file is unreadable or unparsable
    :raises ParameterNotFound: when no service section is configured
    """
    if not os.access(xml_config_file, os.R_OK):
        msg = (_("Can't find HNAS configurations on cinder.conf neither "
                 "on the path %(xml)s.") % {'xml': xml_config_file})
        LOG.error(msg)
        raise exception.ConfigNotFound(message=msg)
    else:
        LOG.warning("This XML configuration file %(xml)s is deprecated. "
                    "Please, move all the configurations to the "
                    "cinder.conf file. If you keep both configuration "
                    "files, the options set on cinder.conf will be "
                    "used.", {'xml': xml_config_file})
    try:
        root = ETree.parse(xml_config_file).getroot()
    except ETree.ParseError:
        msg = (_("Error parsing config file: %(xml_config_file)s") %
               {'xml_config_file': xml_config_file})
        LOG.error(msg)
        raise exception.ConfigNotFound(message=msg)
    # mandatory parameters for NFS
    config = {}
    arg_prereqs = ['mgmt_ip0', 'username']
    for req in arg_prereqs:
        config[req] = _xml_read(root, req, 'check')
    # optional parameters for NFS
    for req in optional_params:
        config[req] = _xml_read(root, req)
        if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None:
            config[req] = HNAS_DEFAULT_CONFIG.get(req)
    config['ssh_private_key'] = _xml_read(root, 'ssh_private_key')
    config['password'] = _xml_read(root, 'password')
    if config['ssh_private_key'] is None and config['password'] is None:
        msg = _("Missing authentication option (passw or private key file).")
        LOG.error(msg)
        raise exception.ConfigNotFound(message=msg)
    if _xml_read(root, 'ssh_port') is not None:
        config['ssh_port'] = int(_xml_read(root, 'ssh_port'))
    else:
        config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
    config['fs'] = {}
    config['services'] = {}
    # min one needed
    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
        if _xml_read(root, svc) is None:
            continue
        service = {'label': svc}
        # none optional
        for arg in svc_params:
            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
        # Backward compatibility with volume_type
        service.setdefault('pool_name', service.pop('volume_type', None))
        config['services'][service['pool_name']] = service
        config['fs'][service['hdp']] = service['hdp']
    # at least one service required!
    if not config['services'].keys():
        LOG.error("No service found in xml config file")
        raise exception.ParameterNotFound(param="svc_0")
    return config
def get_pool(config, volume):
    """Get the pool of a volume.

    Resolves the pool through the volume type's ``service_label`` extra
    spec; falls back to ``'default'`` when no configured pool matches.

    :param config: dictionary containing the configuration parameters
    :param volume: dictionary volume reference
    :returns: the pool related to the volume
    """
    if not volume.volume_type:
        return 'default'
    type_id = volume.volume_type_id
    metadata = {}
    if type_id is not None:
        metadata = volume_types.get_volume_type_extra_specs(type_id)
    label = metadata.get('service_label')
    if label and label in config['services'].keys():
        return label
    return 'default'
def read_cinder_conf(config_opts):
    """Reads cinder.conf
    Gets the driver specific information set on cinder.conf configuration
    file.
    :param config_opts: Configuration object that contains the information
    needed by HNAS driver
    :returns: Dictionary with the driver configuration, or None when no
    mandatory hnas_* option is present (legacy XML config should be used)
    """
    config = {}
    config['services'] = {}
    config['fs'] = {}
    mandatory_parameters = ['username', 'password', 'mgmt_ip0']
    optional_parameters = ['ssc_cmd',
                           'ssh_port', 'cluster_admin_ip0',
                           'ssh_private_key']
    # Trying to get the mandatory parameters from cinder.conf
    for opt in mandatory_parameters:
        config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt})
    # If there is at least one of the mandatory parameters in
    # cinder.conf, we assume that we should use the configuration
    # from this file.
    # Otherwise, we use the configuration from the deprecated XML file.
    for param in mandatory_parameters:
        if config[param] is not None:
            break
    else:
        return None
    # Getting the optional parameters from cinder.conf
    for opt in optional_parameters:
        config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt})
    # It's possible to have up to 4 pools configured.
    for i in range(0, 4):
        idx = six.text_type(i)
        svc_pool_name = (config_opts.safe_get(
            'hnas_svc%(idx)s_pool_name' % {'idx': idx}))
        svc_hdp = (config_opts.safe_get(
            'hnas_svc%(idx)s_hdp' % {'idx': idx}))
        # It's mandatory to have at least 1 pool configured (svc_0)
        if (idx == '0' or svc_pool_name is not None or
                svc_hdp is not None):
            config['services'][svc_pool_name] = {}
            config['fs'][svc_hdp] = svc_hdp
            config['services'][svc_pool_name]['hdp'] = svc_hdp
            config['services'][svc_pool_name]['pool_name'] = svc_pool_name
            config['services'][svc_pool_name]['label'] = (
                'svc_%(idx)s' % {'idx': idx})
            # Checking to ensure that the pools configurations are complete
            _check_conf_params(config, svc_pool_name, idx)
    return config

View File

@@ -1,955 +0,0 @@
# Copyright (C) 2016, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common module for Hitachi VSP Driver."""
import abc
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import coordination
from cinder import exception
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import vsp_utils as utils
from cinder.volume import utils as volume_utils
VERSION = '1.0.0'
# Volume copy methods supported by the driver.
_COPY_METHOD = set(['FULL', 'THIN'])
# Generic driver options whose values are inherited from the base driver.
_INHERITED_VOLUME_OPTS = [
    'volume_backend_name',
    'volume_driver',
    'reserved_percentage',
    'use_multipath_for_image_xfer',
    'enforce_multipath_for_image_xfer',
    'num_volume_device_scan_tries',
]
# Configuration options shared by the VSP FC and iSCSI drivers.
common_opts = [
    cfg.StrOpt(
        'vsp_storage_id',
        help='Product number of the storage system.'),
    cfg.StrOpt(
        'vsp_pool',
        help='Pool number or pool name of the DP pool.'),
    cfg.StrOpt(
        'vsp_thin_pool',
        help='Pool number or pool name of the Thin Image pool.'),
    cfg.StrOpt(
        'vsp_ldev_range',
        help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that '
             'can be used by the driver. Values can be in decimal format '
             '(e.g. 1000) or in colon-separated hexadecimal format '
             '(e.g. 00:03:E8).'),
    cfg.StrOpt(
        'vsp_default_copy_method',
        default='FULL',
        choices=['FULL', 'THIN'],
        help='Method of volume copy. FULL indicates full data copy by '
             'Shadow Image and THIN indicates differential data copy by Thin '
             'Image.'),
    cfg.IntOpt(
        'vsp_copy_speed',
        min=1,
        max=15,
        default=3,
        help='Speed at which data is copied by Shadow Image. 1 or 2 indicates '
             'low speed, 3 indicates middle speed, and a value between 4 and '
             '15 indicates high speed.'),
    cfg.IntOpt(
        'vsp_copy_check_interval',
        min=1,
        max=600,
        default=3,
        help='Interval in seconds at which volume pair synchronization status '
             'is checked when volume pairs are created.'),
    cfg.IntOpt(
        'vsp_async_copy_check_interval',
        min=1,
        max=600,
        default=10,
        help='Interval in seconds at which volume pair synchronization status '
             'is checked when volume pairs are deleted.'),
    cfg.ListOpt(
        'vsp_target_ports',
        help='IDs of the storage ports used to attach volumes to the '
             'controller node. To specify multiple ports, connect them by '
             'commas (e.g. CL1-A,CL2-A).'),
    cfg.ListOpt(
        'vsp_compute_target_ports',
        help='IDs of the storage ports used to attach volumes to compute '
             'nodes. To specify multiple ports, connect them by commas '
             '(e.g. CL1-A,CL2-A).'),
    cfg.BoolOpt(
        'vsp_group_request',
        default=False,
        help='If True, the driver will create host groups or iSCSI targets on '
             'storage ports as needed.'),
]
# Options that must be set for the driver to start.
_REQUIRED_COMMON_OPTS = [
    'vsp_storage_id',
    'vsp_pool',
]
CONF = cfg.CONF
CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
MSG = utils.VSPMsg
def _str2int(num):
"""Convert a string into an integer."""
if not num:
return None
if num.isdigit():
return int(num)
if not re.match(r'\w\w:\w\w:\w\w', num):
return None
try:
return int(num.replace(':', ''), 16)
except ValueError:
return None
@six.add_metaclass(abc.ABCMeta)
class VSPCommon(object):
    """Common class for Hitachi VSP Driver.

    Protocol-independent (FC/iSCSI) volume operations.  Storage-specific
    work (LDEV creation, pairing, mapping, CLI execution) is delegated to
    abstract methods implemented by per-interface subclasses.
    """

    def __init__(self, conf, driverinfo, db):
        """Initialize instance variables."""
        self.conf = conf
        self.db = db
        self.ctxt = None
        self.lock = {}
        self.driver_info = driverinfo
        # Runtime view of the backend; populated during do_setup().
        self.storage_info = {
            'protocol': driverinfo['proto'],
            'pool_id': None,
            'ldev_range': [],
            'controller_ports': [],
            'compute_ports': [],
            'pair_ports': [],
            'wwns': {},
            'portals': {},
            'output_first': True,
        }

        self._stats = {}

    def run_and_verify_storage_cli(self, *cmd, **kwargs):
        """Run storage CLI and return the result or raise an exception.

        Keyword args: do_raise (raise VSPError on failure, default True),
        ignore_error (stderr patterns to tolerate), success_code (set of
        acceptable return codes, default {0}).
        """
        do_raise = kwargs.pop('do_raise', True)
        ignore_error = kwargs.get('ignore_error')
        success_code = kwargs.get('success_code', set([0]))
        (ret, stdout, stderr) = self.run_storage_cli(*cmd, **kwargs)
        if (ret not in success_code and
                not utils.check_ignore_error(ignore_error, stderr)):
            msg = utils.output_log(
                MSG.STORAGE_COMMAND_FAILED, cmd=utils.mask_password(cmd),
                ret=ret, out=' '.join(stdout.splitlines()),
                err=' '.join(stderr.splitlines()))
            if do_raise:
                raise exception.VSPError(msg)
        return ret, stdout, stderr

    @abc.abstractmethod
    def run_storage_cli(self, *cmd, **kwargs):
        """Run storage CLI."""
        raise NotImplementedError()

    def get_copy_method(self, metadata):
        """Return copy method(FULL or THIN).

        Volume metadata overrides the configured default; THIN requires
        vsp_thin_pool to be configured.
        """
        method = metadata.get(
            'copy_method', self.conf.vsp_default_copy_method)
        if method not in _COPY_METHOD:
            msg = utils.output_log(MSG.INVALID_PARAMETER_VALUE,
                                   meta='copy_method')
            raise exception.VSPError(msg)
        if method == 'THIN' and not self.conf.vsp_thin_pool:
            msg = utils.output_log(MSG.INVALID_PARAMETER,
                                   param='vsp_thin_pool')
            raise exception.VSPError(msg)
        return method

    def create_volume(self, volume):
        """Create a volume and return its properties."""
        try:
            ldev = self.create_ldev(volume['size'])
        except exception.VSPError:
            with excutils.save_and_reraise_exception():
                utils.output_log(MSG.CREATE_LDEV_FAILED)
        # The LDEV number is persisted as the volume's provider_location.
        return {
            'provider_location': six.text_type(ldev),
        }

    def create_ldev(self, size, is_vvol=False):
        """Create an LDEV and return its LDEV number."""
        ldev = self.get_unused_ldev()
        self.create_ldev_on_storage(ldev, size, is_vvol)
        LOG.debug('Created logical device. (LDEV: %s)', ldev)
        return ldev

    @abc.abstractmethod
    def create_ldev_on_storage(self, ldev, size, is_vvol):
        """Create an LDEV on the storage system."""
        raise NotImplementedError()

    @abc.abstractmethod
    def get_unused_ldev(self):
        """Find an unused LDEV and return its LDEV number."""
        raise NotImplementedError()

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties.

        Shrinking is rejected; growing a THIN copy of a normal LDEV is
        rejected; otherwise the copy is made at the snapshot's size and
        extended afterwards if needed.
        """
        ldev = utils.get_ldev(snapshot)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            msg = utils.output_log(
                MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='snapshot',
                id=snapshot['id'])
            raise exception.VSPError(msg)
        size = volume['size']
        metadata = utils.get_volume_metadata(volume)
        if size < snapshot['volume_size']:
            msg = utils.output_log(
                MSG.INVALID_VOLUME_SIZE_FOR_COPY, type='snapshot',
                volume_id=volume['id'])
            raise exception.VSPError(msg)
        elif (size > snapshot['volume_size'] and not self.check_vvol(ldev) and
                self.get_copy_method(metadata) == "THIN"):
            msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI,
                                   copy_method=utils.THIN,
                                   type='snapshot', volume_id=volume['id'])
            raise exception.VSPError(msg)
        # sync=True forces a full synchronous copy so the pair can be
        # split before the new LDEV is extended.
        sync = size > snapshot['volume_size']
        new_ldev = self._copy_ldev(
            ldev, snapshot['volume_size'], metadata, sync)
        if sync:
            self.delete_pair(new_ldev)
            self.extend_ldev(new_ldev, snapshot['volume_size'], size)
        return {
            'provider_location': six.text_type(new_ldev),
        }

    def _copy_ldev(self, ldev, size, metadata, sync=False):
        """Create a copy of the specified volume and return its properties."""
        try:
            return self.copy_on_storage(ldev, size, metadata, sync)
        except exception.VSPNotSupported:
            # Fall back to a host-side dd copy when the storage cannot
            # copy the LDEV itself.
            return self._copy_on_host(ldev, size)

    def _copy_on_host(self, src_ldev, size):
        """Create a copy of the specified LDEV via host."""
        dest_ldev = self.create_ldev(size)
        try:
            self._copy_with_dd(src_ldev, dest_ldev, size)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # Best-effort cleanup of the half-copied destination.
                    self._delete_ldev(dest_ldev)
                except exception.VSPError:
                    utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=dest_ldev)
        return dest_ldev

    def _copy_with_dd(self, src_ldev, dest_ldev, size):
        """Copy the content of a volume by dd command.

        Both LDEVs are temporarily attached to this host via os-brick.
        """
        src_info = None
        dest_info = None
        properties = cinder_utils.brick_get_connector_properties(
            multipath=self.conf.use_multipath_for_image_xfer,
            enforce_multipath=self.conf.enforce_multipath_for_image_xfer)
        try:
            dest_info = self._attach_ldev(dest_ldev, properties)
            src_info = self._attach_ldev(src_ldev, properties)
            volume_utils.copy_volume(
                src_info['device']['path'], dest_info['device']['path'],
                size * units.Ki, self.conf.volume_dd_blocksize)
        finally:
            if src_info:
                self._detach_ldev(src_info, src_ldev, properties)
            if dest_info:
                self._detach_ldev(dest_info, dest_ldev, properties)
        self.discard_zero_page({'provider_location': six.text_type(dest_ldev)})

    def _attach_ldev(self, ldev, properties):
        """Attach the specified LDEV to the server."""
        volume = {
            'provider_location': six.text_type(ldev),
        }
        conn = self.initialize_connection(volume, properties)
        try:
            connector = cinder_utils.brick_get_connector(
                conn['driver_volume_type'],
                use_multipath=self.conf.use_multipath_for_image_xfer,
                device_scan_attempts=self.conf.num_volume_device_scan_tries,
                conn=conn)
            device = connector.connect_volume(conn['data'])
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                utils.output_log(MSG.CONNECT_VOLUME_FAILED, ldev=ldev,
                                 reason=six.text_type(ex))
                # Undo the storage-side mapping created above.
                self._terminate_connection(volume, properties)
        return {
            'conn': conn,
            'device': device,
            'connector': connector,
        }

    def _detach_ldev(self, attach_info, ldev, properties):
        """Detach the specified LDEV from the server."""
        volume = {
            'provider_location': six.text_type(ldev),
        }
        connector = attach_info['connector']
        try:
            connector.disconnect_volume(
                attach_info['conn']['data'], attach_info['device'])
        except Exception as ex:
            # Log and continue: the storage-side unmap below must still
            # be attempted even if the host-side disconnect failed.
            utils.output_log(MSG.DISCONNECT_VOLUME_FAILED, ldev=ldev,
                             reason=six.text_type(ex))
        self._terminate_connection(volume, properties)

    def _terminate_connection(self, volume, connector):
        """Disconnect the specified volume from the server."""
        try:
            self.terminate_connection(volume, connector)
        except exception.VSPError:
            utils.output_log(MSG.UNMAP_LDEV_FAILED,
                             ldev=utils.get_ldev(volume))

    def copy_on_storage(self, pvol, size, metadata, sync):
        """Create a copy of the specified LDEV on the storage."""
        is_thin = self.get_copy_method(metadata) == "THIN"
        svol = self.create_ldev(size, is_vvol=is_thin)
        try:
            self.create_pair_on_storage(pvol, svol, is_thin)
            if sync:
                self.wait_full_copy_completion(pvol, svol)
        except exception.VSPError:
            with excutils.save_and_reraise_exception():
                try:
                    self._delete_ldev(svol)
                except exception.VSPError:
                    utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
        return svol

    @abc.abstractmethod
    def create_pair_on_storage(self, pvol, svol, is_thin):
        """Create a copy pair on the storage."""
        raise NotImplementedError()

    def _delete_ldev(self, ldev):
        """Delete the specified LDEV.

        Pairs and host mappings are removed before the LDEV itself.
        """
        self.delete_pair(ldev)
        self.unmap_ldev_from_storage(ldev)
        self.delete_ldev_from_storage(ldev)

    def unmap_ldev_from_storage(self, ldev):
        """Delete the connection between the specified LDEV and servers."""
        targets = {
            'list': [],
        }
        self.find_all_mapped_targets_from_storage(targets, ldev)
        self.unmap_ldev(targets, ldev)

    @abc.abstractmethod
    def find_all_mapped_targets_from_storage(self, targets, ldev):
        """Add all port-gids connected with the LDEV to the list."""
        raise NotImplementedError()

    def delete_pair(self, ldev, all_split=True):
        """Disconnect all volume pairs to which the specified LDEV belongs."""
        pair_info = self.get_pair_info(ldev)
        if not pair_info:
            return
        if pair_info['pvol'] == ldev:
            self.delete_pair_based_on_pvol(pair_info, all_split)
        else:
            # An S-VOL belongs to exactly one pair.
            self.delete_pair_based_on_svol(
                pair_info['pvol'], pair_info['svol_info'][0])

    @abc.abstractmethod
    def get_pair_info(self, ldev):
        """Return volume pair info(LDEV number, pair status and pair type)."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_pair_based_on_pvol(self, pair_info, all_split):
        """Disconnect all volume pairs to which the specified P-VOL belongs."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_pair_based_on_svol(self, pvol, svol_info):
        """Disconnect all volume pairs to which the specified S-VOL belongs."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_pair_from_storage(self, pvol, svol, is_thin):
        """Disconnect the volume pair that consists of the specified LDEVs."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_ldev_from_storage(self, ldev):
        """Delete the specified LDEV from the storage."""
        raise NotImplementedError()

    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties.

        Same size/copy-method validation as create_volume_from_snapshot.
        """
        ldev = utils.get_ldev(src_vref)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is not None'.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                                   type='volume', id=src_vref['id'])
            raise exception.VSPError(msg)
        size = volume['size']
        metadata = utils.get_volume_metadata(volume)
        if size < src_vref['size']:
            msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_COPY,
                                   type='volume', volume_id=volume['id'])
            raise exception.VSPError(msg)
        elif (size > src_vref['size'] and not self.check_vvol(ldev) and
                self.get_copy_method(metadata) == "THIN"):
            msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI,
                                   copy_method=utils.THIN, type='volume',
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)
        sync = size > src_vref['size']
        new_ldev = self._copy_ldev(ldev, src_vref['size'], metadata, sync)
        if sync:
            self.delete_pair(new_ldev)
            self.extend_ldev(new_ldev, src_vref['size'], size)
        return {
            'provider_location': six.text_type(new_ldev),
        }

    def delete_volume(self, volume):
        """Delete the specified volume."""
        ldev = utils.get_ldev(volume)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is not None'.
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_DELETION,
                             method='delete_volume', id=volume['id'])
            return
        try:
            self._delete_ldev(ldev)
        except exception.VSPBusy:
            raise exception.VolumeIsBusy(volume_name=volume['name'])

    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        src_vref = snapshot.volume
        ldev = utils.get_ldev(src_vref)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                                   type='volume', id=src_vref['id'])
            raise exception.VSPError(msg)
        size = snapshot['volume_size']
        metadata = utils.get_volume_metadata(src_vref)
        new_ldev = self._copy_ldev(ldev, size, metadata)
        return {
            'provider_location': six.text_type(new_ldev),
        }

    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        ldev = utils.get_ldev(snapshot)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            utils.output_log(
                MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
                id=snapshot['id'])
            return
        try:
            self._delete_ldev(ldev)
        except exception.VSPBusy:
            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])

    def get_volume_stats(self, refresh=False):
        """Return properties, capabilities and current states of the driver."""
        if refresh:
            if self.storage_info['output_first']:
                # Log readiness only on the first stats refresh.
                self.storage_info['output_first'] = False
                utils.output_log(MSG.DRIVER_READY_FOR_USE,
                                 config_group=self.conf.config_group)
            self._update_volume_stats()
        return self._stats

    def _update_volume_stats(self):
        """Update properties, capabilities and current states of the driver."""
        data = {}
        backend_name = self.conf.safe_get('volume_backend_name')
        data['volume_backend_name'] = (
            backend_name or self.driver_info['volume_backend_name'])
        data['vendor_name'] = 'Hitachi'
        data['driver_version'] = VERSION
        data['storage_protocol'] = self.storage_info['protocol']
        try:
            total_gb, free_gb = self.get_pool_info()
        except exception.VSPError:
            # Keep the previous stats if the pool query fails.
            utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED,
                             pool=self.conf.vsp_pool)
            return
        data['total_capacity_gb'] = total_gb
        data['free_capacity_gb'] = free_gb
        data['reserved_percentage'] = self.conf.safe_get('reserved_percentage')
        data['QoS_support'] = False
        data['multiattach'] = False
        LOG.debug("Updating volume status. (%s)", data)
        self._stats = data

    @abc.abstractmethod
    def get_pool_info(self):
        """Return the total and free capacity of the storage pool."""
        raise NotImplementedError()

    @abc.abstractmethod
    def discard_zero_page(self, volume):
        """Return the volume's no-data pages to the storage pool."""
        raise NotImplementedError()

    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size.

        V-VOLs (Thin Image copies) cannot be extended.
        """
        ldev = utils.get_ldev(volume)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)
        if self.check_vvol(ldev):
            msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)
        # Pairs must be split before the LDEV can be resized.
        self.delete_pair(ldev)
        self.extend_ldev(ldev, volume['size'], new_size)

    @abc.abstractmethod
    def check_vvol(self, ldev):
        """Return True if the specified LDEV is V-VOL, False otherwise."""
        raise NotImplementedError()

    @abc.abstractmethod
    def extend_ldev(self, ldev, old_size, new_size):
        """Extend the specified LDEV to the specified new size."""
        raise NotImplementedError()

    def manage_existing(self, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        ldev = _str2int(existing_ref.get('source-id'))
        return {
            'provider_location': six.text_type(ldev),
        }

    def manage_existing_get_size(self, existing_ref):
        """Return the size[GB] of the specified volume."""
        ldev = _str2int(existing_ref.get('source-id'))
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=msg)
        return self.get_ldev_size_in_gigabyte(ldev, existing_ref)

    @abc.abstractmethod
    def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
        """Return the size[GB] of the specified LDEV."""
        raise NotImplementedError()

    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        ldev = utils.get_ldev(volume)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
                             id=volume['id'])
            return
        if self.check_vvol(ldev):
            # A V-VOL cannot be left behind without its pair; refuse.
            utils.output_log(
                MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
                volume_type=utils.NORMAL_LDEV_TYPE)
            raise exception.VolumeIsBusy(volume_name=volume['name'])
        try:
            self.delete_pair(ldev)
        except exception.VSPBusy:
            raise exception.VolumeIsBusy(volume_name=volume['name'])

    def do_setup(self, context):
        """Prepare for the startup of the driver.

        Validates configuration, connects to the storage and pre-creates
        host groups/targets for this controller node.
        """
        self.ctxt = context
        self.check_param()
        self.config_lock()
        self.connect_storage()
        self.init_cinder_hosts()
        self.output_param_to_log()

    def check_param(self):
        """Check parameter values and consistency among them."""
        utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
        utils.check_opts(self.conf, common_opts)
        utils.check_opts(self.conf, self.driver_info['volume_opts'])
        if (self.conf.vsp_default_copy_method == 'THIN' and
                not self.conf.vsp_thin_pool):
            msg = utils.output_log(MSG.INVALID_PARAMETER,
                                   param='vsp_thin_pool')
            raise exception.VSPError(msg)
        if self.conf.vsp_ldev_range:
            self.storage_info['ldev_range'] = self._range2list(
                'vsp_ldev_range')
        # At least one kind of target port list must be configured.
        if (not self.conf.vsp_target_ports and
                not self.conf.vsp_compute_target_ports):
            msg = utils.output_log(MSG.INVALID_PARAMETER,
                                   param='vsp_target_ports or '
                                   'vsp_compute_target_ports')
            raise exception.VSPError(msg)
        for opt in _REQUIRED_COMMON_OPTS:
            if not self.conf.safe_get(opt):
                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
                raise exception.VSPError(msg)
        if self.storage_info['protocol'] == 'iSCSI':
            self.check_param_iscsi()

    def check_param_iscsi(self):
        """Check iSCSI-related parameter values and consistency among them."""
        if self.conf.vsp_use_chap_auth:
            if not self.conf.vsp_auth_user:
                msg = utils.output_log(MSG.INVALID_PARAMETER,
                                       param='vsp_auth_user')
                raise exception.VSPError(msg)
            if not self.conf.vsp_auth_password:
                msg = utils.output_log(MSG.INVALID_PARAMETER,
                                       param='vsp_auth_password')
                raise exception.VSPError(msg)

    def _range2list(self, param):
        """Analyze a 'xxx-xxx' string and return a list of two integers.

        Each bound may be decimal or 'xx:xx:xx' hex; the lower bound must
        not exceed the upper bound.
        """
        values = [_str2int(value) for value in
                  self.conf.safe_get(param).split('-')]
        if (len(values) != 2 or
                values[0] is None or values[1] is None or
                values[0] > values[1]):
            msg = utils.output_log(MSG.INVALID_PARAMETER, param=param)
            raise exception.VSPError(msg)
        return values

    @abc.abstractmethod
    def config_lock(self):
        """Initialize lock resource names."""
        raise NotImplementedError()

    def connect_storage(self):
        """Prepare for using the storage."""
        self.storage_info['pool_id'] = self.get_pool_id()
        # When 'pool_id' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if self.storage_info['pool_id'] is None:
            msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=self.conf.vsp_pool)
            raise exception.VSPError(msg)
        utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
                         value=self.storage_info['pool_id'])

    def check_ports_info(self):
        """Check if available storage ports exist."""
        if (self.conf.vsp_target_ports and
                not self.storage_info['controller_ports']):
            msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
                                   resource="Target ports")
            raise exception.VSPError(msg)
        if (self.conf.vsp_compute_target_ports and
                not self.storage_info['compute_ports']):
            msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
                                   resource="Compute target ports")
            raise exception.VSPError(msg)
        utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
                         value=self.storage_info['controller_ports'])
        utils.output_log(MSG.SET_CONFIG_VALUE,
                         object='compute target port list',
                         value=self.storage_info['compute_ports'])

    def get_pool_id(self):
        """Return the storage pool ID as integer.

        Returns None for a non-numeric pool name; subclasses resolve
        names to IDs.
        """
        pool = self.conf.vsp_pool
        if pool.isdigit():
            return int(pool)
        return None

    def init_cinder_hosts(self, **kwargs):
        """Initialize server-storage connection."""
        targets = kwargs.pop('targets', {'info': {}, 'list': [], 'iqns': {}})
        connector = cinder_utils.brick_get_connector_properties(
            multipath=self.conf.use_multipath_for_image_xfer,
            enforce_multipath=self.conf.enforce_multipath_for_image_xfer)
        target_ports = self.storage_info['controller_ports']

        if target_ports:
            # find_targets_from_storage() returns the number of unmapped
            # ports; create targets only when allowed by configuration.
            if (self.find_targets_from_storage(
                    targets, connector, target_ports) and
                    self.conf.vsp_group_request):
                self.create_mapping_targets(targets, connector)

            utils.require_target_existed(targets)

    @abc.abstractmethod
    def find_targets_from_storage(self, targets, connector, target_ports):
        """Find mapped ports, memorize them and return unmapped port count."""
        raise NotImplementedError()

    def create_mapping_targets(self, targets, connector):
        """Create server-storage connection for all specified storage ports."""
        hba_ids = self.get_hba_ids_from_connector(connector)
        for port in targets['info'].keys():
            if targets['info'][port]:
                continue

            try:
                self._create_target(targets, port, connector, hba_ids)
            except exception.VSPError:
                utils.output_log(
                    self.driver_info['msg_id']['target'], port=port)

        if not targets['list']:
            # Re-scan in case another process created the targets first.
            self.find_targets_from_storage(
                targets, connector, targets['info'].keys())

    def get_hba_ids_from_connector(self, connector):
        """Return the HBA ID stored in the connector."""
        if self.driver_info['hba_id'] in connector:
            return connector[self.driver_info['hba_id']]
        msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
                               resource=self.driver_info['hba_id_type'])
        raise exception.VSPError(msg)

    def _create_target(self, targets, port, connector, hba_ids):
        """Create a host group or an iSCSI target on the storage port."""
        target_name, gid = self.create_target_to_storage(port, connector,
                                                         hba_ids)
        utils.output_log(MSG.OBJECT_CREATED, object='a target',
                         details='port: %(port)s, gid: %(gid)s, target_name: '
                         '%(target)s' %
                         {'port': port, 'gid': gid, 'target': target_name})
        try:
            self.set_target_mode(port, gid)
            self.set_hba_ids(port, gid, hba_ids)
        except exception.VSPError:
            with excutils.save_and_reraise_exception():
                # Roll back the partially configured target.
                self.delete_target_from_storage(port, gid)
        targets['info'][port] = True
        targets['list'].append((port, gid))

    @abc.abstractmethod
    def create_target_to_storage(self, port, connector, hba_ids):
        """Create a host group or an iSCSI target on the specified port."""
        raise NotImplementedError()

    @abc.abstractmethod
    def set_target_mode(self, port, gid):
        """Configure the target to meet the environment."""
        raise NotImplementedError()

    @abc.abstractmethod
    def set_hba_ids(self, port, gid, hba_ids):
        """Connect all specified HBAs with the specified port."""
        raise NotImplementedError()

    @abc.abstractmethod
    def delete_target_from_storage(self, port, gid):
        """Delete the host group or the iSCSI target from the port."""
        raise NotImplementedError()

    def output_param_to_log(self):
        """Output configuration parameter values to the log file."""
        utils.output_log(MSG.OUTPUT_PARAMETER_VALUES,
                         config_group=self.conf.config_group)
        name, version = self.get_storage_cli_info()
        utils.output_storage_cli_info(name, version)
        utils.output_opt_info(self.conf, _INHERITED_VOLUME_OPTS)
        utils.output_opts(self.conf, common_opts)
        utils.output_opts(self.conf, self.driver_info['volume_opts'])

    @abc.abstractmethod
    def get_storage_cli_info(self):
        """Return a tuple of the storage CLI name and its version."""
        raise NotImplementedError()

    # Serialized per storage/host pair to avoid concurrent host-group
    # manipulation for the same host.
    @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-'
                               '{connector[host]}')
    def initialize_connection(self, volume, connector):
        """Initialize connection between the server and the volume."""
        targets = {
            'info': {},
            'list': [],
            'lun': {},
            'iqns': {},
        }
        ldev = utils.get_ldev(volume)
        # When 'ldev' is 0, it should be true.
        # Therefore, it cannot remove 'is None'.
        if ldev is None:
            msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
                                   volume_id=volume['id'])
            raise exception.VSPError(msg)

        target_ports = self.get_target_ports(connector)
        if (self.find_targets_from_storage(
                targets, connector, target_ports) and
                self.conf.vsp_group_request):
            self.create_mapping_targets(targets, connector)

        utils.require_target_existed(targets)

        targets['list'].sort()
        for port in target_ports:
            targets['lun'][port] = False
        target_lun = int(self.map_ldev(targets, ldev))

        return {
            'driver_volume_type': self.driver_info['volume_type'],
            'data': self.get_properties(targets, connector, target_lun),
        }

    def get_target_ports(self, connector):
        """Return a list of ports corresponding to the specified connector.

        The controller node (matching CONF.my_ip) uses the controller
        ports; compute nodes prefer the compute ports when configured.
        """
        if 'ip' in connector and connector['ip'] == CONF.my_ip:
            return self.storage_info['controller_ports']
        return (self.storage_info['compute_ports'] or
                self.storage_info['controller_ports'])

    @abc.abstractmethod
    def map_ldev(self, targets, ldev):
        """Create the path between the server and the LDEV and return LUN."""
        raise NotImplementedError()

    def get_properties(self, targets, connector, target_lun=None):
        """Return server-LDEV connection info."""
        multipath = connector.get('multipath', False)
        if self.storage_info['protocol'] == 'FC':
            data = self.get_properties_fc(targets)
        elif self.storage_info['protocol'] == 'iSCSI':
            data = self.get_properties_iscsi(targets, multipath)
        if target_lun is not None:
            data['target_discovered'] = False
            if not multipath or self.storage_info['protocol'] == 'FC':
                data['target_lun'] = target_lun
            else:
                # iSCSI multipath: one LUN entry per mapped target.
                target_luns = []
                for target in targets['list']:
                    if targets['lun'][target[0]]:
                        target_luns.append(target_lun)
                data['target_luns'] = target_luns
        return data

    def get_properties_fc(self, targets):
        """Return FC-specific server-LDEV connection info."""
        data = {}
        data['target_wwn'] = [
            self.storage_info['wwns'][target[0]] for target in targets['list']
            if targets['lun'][target[0]]]
        return data

    def get_properties_iscsi(self, targets, multipath):
        """Return iSCSI-specific server-LDEV connection info."""
        data = {}
        primary_target = targets['list'][0]
        if not multipath:
            data['target_portal'] = self.storage_info[
                'portals'][primary_target[0]]
            data['target_iqn'] = targets['iqns'][primary_target]
        else:
            data['target_portals'] = [
                self.storage_info['portals'][target[0]] for target in
                targets['list'] if targets['lun'][target[0]]]
            data['target_iqns'] = [
                targets['iqns'][target] for target in targets['list']
                if targets['lun'][target[0]]]
        if self.conf.vsp_use_chap_auth:
            data['auth_method'] = 'CHAP'
            data['auth_username'] = self.conf.vsp_auth_user
            data['auth_password'] = self.conf.vsp_auth_password
        return data

    # Same lock as initialize_connection: map/unmap for one host must
    # not interleave.
    @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-'
                               '{connector[host]}')
    def terminate_connection(self, volume, connector):
        """Terminate connection between the server and the volume."""
        targets = {
            'info': {},
            'list': [],
            'iqns': {},
        }
        mapped_targets = {
            'list': [],
        }
        unmap_targets = {}

        ldev = utils.get_ldev(volume)
        if ldev is None:
            utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
                             volume_id=volume['id'])
            return
        target_ports = self.get_target_ports(connector)
        self.find_targets_from_storage(targets, connector, target_ports)
        if not targets['list']:
            utils.output_log(MSG.NO_CONNECTED_TARGET)
        self.find_mapped_targets_from_storage(
            mapped_targets, ldev, target_ports)

        # Only unmap targets that belong to this connector AND actually
        # map the LDEV.
        unmap_targets['list'] = self.get_unmap_targets_list(
            targets['list'], mapped_targets['list'])
        unmap_targets['list'].sort(reverse=True)
        self.unmap_ldev(unmap_targets, ldev)

        if self.storage_info['protocol'] == 'FC':
            target_wwn = [
                self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]]
                for port_gid in unmap_targets['list']]
            return {'driver_volume_type': self.driver_info['volume_type'],
                    'data': {'target_wwn': target_wwn}}

    @abc.abstractmethod
    def find_mapped_targets_from_storage(self, targets, ldev, target_ports):
        """Find and store IDs of ports used for server-LDEV connection."""
        raise NotImplementedError()

    @abc.abstractmethod
    def get_unmap_targets_list(self, target_list, mapped_list):
        """Return a list of IDs of ports that need to be disconnected."""
        raise NotImplementedError()

    @abc.abstractmethod
    def unmap_ldev(self, targets, ldev):
        """Delete the LUN between the specified LDEV and port-gid."""
        raise NotImplementedError()

    @abc.abstractmethod
    def wait_full_copy_completion(self, pvol, svol):
        """Wait until FULL copy is completed."""
        raise NotImplementedError()

View File

@@ -1,181 +0,0 @@
# Copyright (C) 2016, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fibre channel module for Hitachi VSP Driver."""
from oslo_config import cfg
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.hitachi import vsp_common as common
from cinder.volume.drivers.hitachi import vsp_utils as utils
# FC-specific configuration options for the Hitachi VSP driver.
fc_opts = [
    cfg.BoolOpt(
        'vsp_zoning_request',
        default=False,
        help='If True, the driver will configure FC zoning between the server '
             'and the storage system provided that FC zoning manager is '
             'enabled.'),
]

# Message catalog used by utils.output_log() in this module.
MSG = utils.VSPMsg

# Protocol-specific parameters handed to the common module; selects the
# FC code paths (WWPN-based host groups, 'fibre_channel' connections).
_DRIVER_INFO = {
    'proto': 'FC',
    'hba_id': 'wwpns',
    'hba_id_type': 'World Wide Name',
    'msg_id': {
        'target': MSG.CREATE_HOST_GROUP_FAILED,
    },
    'volume_backend_name': utils.DRIVER_PREFIX + 'FC',
    'volume_opts': fc_opts,
    'volume_type': 'fibre_channel',
}

CONF = cfg.CONF
CONF.register_opts(fc_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class VSPFCDriver(driver.FibreChannelDriver):
"""Fibre channel class for Hitachi VSP Driver.
Version history:
.. code-block:: none
1.0.0 - Initial driver.
"""
VERSION = common.VERSION
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Hitachi_VSP_CI"
SUPPORTED = False
    def __init__(self, *args, **kwargs):
        """Initialize instance variables."""
        utils.output_log(MSG.DRIVER_INITIALIZATION_START,
                         driver=self.__class__.__name__,
                         version=self.get_version())
        super(VSPFCDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(common.common_opts)
        self.configuration.append_config_values(fc_opts)
        # All real work is delegated to the protocol-independent common
        # object instantiated from _DRIVER_INFO.
        self.common = utils.import_object(
            self.configuration, _DRIVER_INFO, kwargs.get('db'))
    def check_for_setup_error(self):
        """Error are checked in do_setup() instead of this method."""
        # Intentionally a no-op: setup validation happens in do_setup().
        pass
    @utils.output_start_end_log
    def create_volume(self, volume):
        """Create a volume and return its properties."""
        # Delegated to the protocol-independent common module.
        return self.common.create_volume(volume)
    @utils.output_start_end_log
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot and return its properties."""
        # Delegated to the protocol-independent common module.
        return self.common.create_volume_from_snapshot(volume, snapshot)
    @utils.output_start_end_log
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume and return its properties."""
        # Delegated to the protocol-independent common module.
        return self.common.create_cloned_volume(volume, src_vref)
    @utils.output_start_end_log
    def delete_volume(self, volume):
        """Delete the specified volume."""
        # Delegated to the protocol-independent common module.
        self.common.delete_volume(volume)
    @utils.output_start_end_log
    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume and return its properties."""
        # Delegated to the protocol-independent common module.
        return self.common.create_snapshot(snapshot)
    @utils.output_start_end_log
    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot."""
        # Delegated to the protocol-independent common module.
        self.common.delete_snapshot(snapshot)
    def get_volume_stats(self, refresh=False):
        """Return properties, capabilities and current states of the driver."""
        # Delegated to the protocol-independent common module.
        return self.common.get_volume_stats(refresh)
    @utils.output_start_end_log
    def update_migrated_volume(
            self, ctxt, volume, new_volume, original_volume_status):
        """Do any remaining jobs after migration."""
        # Reclaim zero pages on the migrated LDEV before the standard
        # post-migration bookkeeping.
        self.common.discard_zero_page(new_volume)
        super(VSPFCDriver, self).update_migrated_volume(
            ctxt, volume, new_volume, original_volume_status)
    @utils.output_start_end_log
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        super(VSPFCDriver, self).copy_image_to_volume(
            context, volume, image_service, image_id)
        # Return any zero-filled pages written during the copy to the pool.
        self.common.discard_zero_page(volume)
    @utils.output_start_end_log
    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size."""
        # Delegated to the protocol-independent common module.
        self.common.extend_volume(volume, new_size)
    @utils.output_start_end_log
    def manage_existing(self, volume, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        # Note: 'volume' is unused here; the LDEV is identified solely by
        # existing_ref['source-id'].
        return self.common.manage_existing(existing_ref)
    @utils.output_start_end_log
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size[GB] of the specified volume."""
        # Delegated to the protocol-independent common module.
        return self.common.manage_existing_get_size(existing_ref)
    @utils.output_start_end_log
    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        # Delegated to the protocol-independent common module.
        self.common.unmanage(volume)
    @utils.output_start_end_log
    def do_setup(self, context):
        """Prepare for the startup of the driver."""
        # Delegated to the protocol-independent common module.
        self.common.do_setup(context)
    def ensure_export(self, context, volume):
        """Synchronously recreate an export for a volume."""
        # Intentionally a no-op: exports are handled per-attach via
        # initialize_connection().
        pass
def create_export(self, context, volume, connector):
"""Export the volume."""