Remove HNAS iSCSI driver

The Hitachi NAS Platform iSCSI driver was deprecated in the previous
release, and this patch removes its code from OpenStack.

DocImpact

Change-Id: I9c5b0443d52ccd1c6fd18b5cfaab8514be17a293
Adriano Rosso 2017-03-03 17:31:02 -03:00
parent cefe9844cb
commit 6c603df9ca
10 changed files with 29 additions and 2215 deletions
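For context, the removed driver followed Cinder's standard deprecation cycle before this deletion: its `__init__` (visible in the removed hnas_iscsi.py below) logged a deprecation warning through oslo.log for a full release. A minimal sketch of that pattern, trimmed from the removed code (the class here is simplified; the real one extends cinder.volume.driver.ISCSIDriver):

```python
from oslo_log import log as logging
from oslo_log import versionutils

LOG = logging.getLogger(__name__)


class HNASISCSIDriver(object):
    SUPPORTED = False  # the driver was already flagged unsupported

    def __init__(self, *args, **kwargs):
        # Warn operators for one release before the code is dropped.
        msg = ("The Hitachi NAS iSCSI driver is deprecated and will be "
               "removed in a future release.")
        versionutils.report_deprecated_feature(LOG, msg)
```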

View File: cinder/opts.py

@@ -107,8 +107,6 @@ from cinder.volume.drivers.hitachi import hbsd_horcm as \
cinder_volume_drivers_hitachi_hbsdhorcm
from cinder.volume.drivers.hitachi import hbsd_iscsi as \
cinder_volume_drivers_hitachi_hbsdiscsi
from cinder.volume.drivers.hitachi import hnas_iscsi as \
cinder_volume_drivers_hitachi_hnasiscsi
from cinder.volume.drivers.hitachi import hnas_nfs as \
cinder_volume_drivers_hitachi_hnasnfs
from cinder.volume.drivers.hitachi import hnas_utils as \
@@ -286,7 +284,6 @@ def list_opts():
cinder_volume_drivers_hitachi_hbsdfc.volume_opts,
cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts,
cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts,
cinder_volume_drivers_hitachi_hnasiscsi.iSCSI_OPTS,
cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS,
cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts,
cinder_volume_drivers_hitachi_vspcommon.common_opts,
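The two hunks above drop the hnas_iscsi module from cinder/opts.py, which aggregates every driver's option list for the sample-config generator. A rough sketch of the shape of that file after this change, assuming the usual itertools.chain aggregation and showing only the remaining HNAS entries:

```python
import itertools

from cinder.volume.drivers.hitachi import hnas_nfs as \
    cinder_volume_drivers_hitachi_hnasnfs
from cinder.volume.drivers.hitachi import hnas_utils as \
    cinder_volume_drivers_hitachi_hnasutils


def list_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             # ... other driver option lists elided ...
             cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS,
             cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts,
         )),
    ]
```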

View File: HNAS backend unit tests

@@ -103,22 +103,6 @@ Export configuration: \n\
127.0.0.1 \n\
\n"
iscsi_one_target = "\n\
Alias : cinder-default \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\
Comment : \n\
Secret : pxr6U37LZZJBoMc \n\
Authentication : Enabled \n\
Logical units : No logical units. \n\
\n\
LUN Logical Unit \n\
---- -------------------------------- \n\
0 cinder-lu \n\
1 volume-99da7ae7-1e7f-4d57-8bf... \n\
\n\
Access configuration: \n\
"
df_f_single_evs = "\n\
ID Label Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
@@ -156,51 +140,27 @@ Node EVS ID Type Label Enabled Status IP Address Port \n
1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\
\n"
iscsilu_list = "Name : cinder-lu \n\
lu_list = "Name : cinder-lu \n\
Comment: \n\
Path : /.cinder/cinder-lu.iscsi \n\
Path : /.cinder/cinder-lu \n\
Size : 2 GB \n\
File System : fs-cinder \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"
iscsilu_list_tb = "Name : test-lu \n\
lu_list_tb = "Name : test-lu \n\
Comment: \n\
Path : /.cinder/test-lu.iscsi \n\
Path : /.cinder/test-lu \n\
Size : 2 TB \n\
File System : fs-cinder \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"
hnas_fs_list = "%(l1)s\n\n%(l2)s\n\n " % {'l1': iscsilu_list,
'l2': iscsilu_list_tb}
hnas_fs_list = "%(l1)s\n\n%(l2)s\n\n " % {'l1': lu_list,
'l2': lu_list_tb}
add_targetsecret = "Target created successfully."
iscsi_target_list = "\n\
Alias : cinder-GoldIsh\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\
Comment :\n\
Secret : None\n\
Authentication : Enabled\n\
Logical units : No logical units.\n\
Access configuration :\n\
\n\
Alias : cinder-default\n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\
Comment :\n\
Secret : pxr6U37LZZJBoMc\n\
Authentication : Enabled\n\
Logical units : Logical units :\n\
\n\
LUN Logical Unit\n\
---- --------------------------------\n\
0 cinder-lu\n\
1 volume-99da7ae7-1e7f-4d57-8bf...\n\
\n\
Access configuration :\n\
"
backend_opts = {'mgmt_ip0': '0.0.0.0',
'cluster_admin_ip0': None,
'ssh_port': '22',
@@ -320,51 +280,6 @@ class HDSHNASBackendTest(test.TestCase):
self.hnas_backend._run_cmd, 'ssh', '0.0.0.0',
'supervisor', 'supervisor', 'df', '-a')
def test_get_targets_empty_list(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=('No targets', ''))
out = self.hnas_backend._get_targets('2')
self.assertEqual([], out)
def test_get_targets_not_found(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(iscsi_target_list, ''))
out = self.hnas_backend._get_targets('2', 'fake-volume')
self.assertEqual([], out)
def test__get_unused_luid_number_0(self):
tgt_info = {
'alias': 'cinder-default',
'secret': 'pxr6U37LZZJBoMc',
'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
'lus': [
{'id': '1',
'name': 'cinder-lu2'},
{'id': '2',
'name': 'volume-test2'}
],
'auth': 'Enabled'
}
out = self.hnas_backend._get_unused_luid(tgt_info)
self.assertEqual(0, out)
def test__get_unused_no_luns(self):
tgt_info = {
'alias': 'cinder-default',
'secret': 'pxr6U37LZZJBoMc',
'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
'lus': [],
'auth': 'Enabled'
}
out = self.hnas_backend._get_unused_luid(tgt_info)
self.assertEqual(0, out)
def test_get_version(self):
expected_out = {
'hardware': 'NAS Platform (M2SEKW1339109)',
@@ -479,7 +394,7 @@ class HDSHNASBackendTest(test.TestCase):
self.assertEqual('fs-cinder', out['label'])
self.assertEqual('228', out['available_size'])
self.assertEqual('250', out['total_size'])
self.assertEqual(2050.0, out['provisioned_capacity'])
self.assertEqual(0, out['provisioned_capacity'])
def test_get_fs_empty_return(self):
self.mock_object(self.hnas_backend, '_run_cmd',
@@ -498,7 +413,7 @@ class HDSHNASBackendTest(test.TestCase):
self.assertEqual('fs-cinder', out['label'])
self.assertEqual('228', out['available_size'])
self.assertEqual('250', out['total_size'])
self.assertEqual(2050.0, out['provisioned_capacity'])
self.assertEqual(0, out['provisioned_capacity'])
def test_get_fs_tb(self):
available_size = float(228 * 1024 ** 2)
@@ -513,7 +428,7 @@ class HDSHNASBackendTest(test.TestCase):
self.assertEqual('fs-cinder', out['label'])
self.assertEqual(str(available_size), out['available_size'])
self.assertEqual(str(total_size), out['total_size'])
self.assertEqual(2050.0, out['provisioned_capacity'])
self.assertEqual(0, out['provisioned_capacity'])
def test_get_fs_single_evs_tb(self):
available_size = float(228 * 1024 ** 2)
@@ -528,288 +443,7 @@ class HDSHNASBackendTest(test.TestCase):
self.assertEqual('fs-cinder', out['label'])
self.assertEqual(str(available_size), out['available_size'])
self.assertEqual(str(total_size), out['total_size'])
self.assertEqual(2050.0, out['provisioned_capacity'])
def test_create_lu(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.create_lu('fs-cinder', '128', 'cinder-lu')
calls = [mock.call('evsfs', 'list'), mock.call('console-context',
'--evs', '2',
'iscsi-lu', 'add',
'-e', 'cinder-lu',
'fs-cinder',
'/.cinder/cinder-lu.'
'iscsi', '128G')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_delete_lu(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.delete_lu('fs-cinder', 'cinder-lu')
calls = [mock.call('evsfs', 'list'), mock.call('console-context',
'--evs', '2',
'iscsi-lu', 'del', '-d',
'-f', 'cinder-lu')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_extend_lu(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.extend_lu('fs-cinder', '128', 'cinder-lu')
calls = [mock.call('evsfs', 'list'), mock.call('console-context',
'--evs', '2',
'iscsi-lu', 'expand',
'cinder-lu', '128G')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_cloned_lu(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.create_cloned_lu('cinder-lu', 'fs-cinder', 'snap')
calls = [mock.call('evsfs', 'list'), mock.call('console-context',
'--evs', '2',
'iscsi-lu', 'clone',
'-e', 'cinder-lu',
'snap',
'/.cinder/snap.iscsi')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_get_existing_lu_info(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsilu_list, '')])
out = self.hnas_backend.get_existing_lu_info('cinder-lu', None, None)
self.assertEqual('cinder-lu', out['name'])
self.assertEqual('fs-cinder', out['filesystem'])
self.assertEqual(2.0, out['size'])
def test_get_existing_lu_info_tb(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsilu_list_tb, '')])
out = self.hnas_backend.get_existing_lu_info('test-lu', None, None)
self.assertEqual('test-lu', out['name'])
self.assertEqual('fs-cinder', out['filesystem'])
self.assertEqual(2048.0, out['size'])
def test_rename_existing_lu(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.rename_existing_lu('fs-cinder', 'cinder-lu',
'new-lu-name')
calls = [mock.call('evsfs', 'list'), mock.call('console-context',
'--evs', '2',
'iscsi-lu', 'mod', '-n',
"'new-lu-name'",
'cinder-lu')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_check_lu(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_target_list, '')])
out = self.hnas_backend.check_lu('cinder-lu', 'fs-cinder')
self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name'])
self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret'])
self.assertTrue(out['mapped'])
calls = [mock.call('evsfs', 'list'), mock.call('console-context',
'--evs', '2',
'iscsi-target', 'list')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_check_lu_not_found(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_target_list, '')])
# pass a volume (fake-volume) that is not mapped
out = self.hnas_backend.check_lu('fake-volume', 'fs-cinder')
self.assertFalse(out['mapped'])
self.assertEqual(0, out['id'])
self.assertIsNone(out['tgt'])
def test_add_iscsi_conn(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_target_list, ''),
(evsfs_list, '')])
out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260,
'cinder-default', 'initiator')
self.assertEqual('cinder-lu', out['lu_name'])
self.assertEqual('fs-cinder', out['fs'])
self.assertEqual('0', out['lu_id'])
self.assertEqual(3260, out['port'])
calls = [mock.call('evsfs', 'list'),
mock.call('console-context', '--evs', '2', 'iscsi-target',
'list')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_add_iscsi_conn_not_mapped_volume(self):
not_mapped = {'mapped': False,
'id': 0,
'tgt': None}
self.mock_object(self.hnas_backend, 'check_lu',
return_value=not_mapped)
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_target_list, ''),
('', '')])
out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260,
'cinder-default', 'initiator')
self.assertEqual('cinder-lu', out['lu_name'])
self.assertEqual('fs-cinder', out['fs'])
self.assertEqual(2, out['lu_id'])
self.assertEqual(3260, out['port'])
calls = [mock.call('evsfs', 'list'),
mock.call('console-context', '--evs', '2', 'iscsi-target',
'list')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_del_iscsi_conn(self):
iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(iscsi_one_target, ''))
self.hnas_backend.del_iscsi_conn('2', iqn, '0')
calls = [mock.call('console-context', '--evs', '2', 'iscsi-target',
'list', iqn),
mock.call('console-context', '--evs', '2', 'iscsi-target',
'dellu', '-f', iqn, '0')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_del_iscsi_conn_volume_not_found(self):
iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-fake'
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(iscsi_one_target, ''))
self.hnas_backend.del_iscsi_conn('2', iqn, '10')
self.hnas_backend._run_cmd.assert_called_with('console-context',
'--evs', '2',
'iscsi-target', 'list',
iqn)
def test_check_target(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_target_list, '')])
out = self.hnas_backend.check_target('fs-cinder', 'cinder-default')
self.assertTrue(out['found'])
self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name'])
self.assertEqual('cinder-default', out['tgt']['alias'])
self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret'])
def test_check_target_not_found(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_target_list, '')])
out = self.hnas_backend.check_target('fs-cinder', 'cinder-fake')
self.assertFalse(out['found'])
self.assertIsNone(out['tgt'])
def test_set_target_secret(self):
targetalias = 'cinder-default'
secret = 'pxr6U37LZZJBoMc'
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret)
calls = [mock.call('evsfs', 'list'),
mock.call('console-context', '--evs', '2', 'iscsi-target',
'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable',
'cinder-default')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_set_target_secret_empty_target_list(self):
targetalias = 'cinder-default'
secret = 'pxr6U37LZZJBoMc'
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
('does not exist', ''),
('', '')])
self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret)
calls = [mock.call('console-context', '--evs', '2', 'iscsi-target',
'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable',
'cinder-default')]
self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
def test_get_target_secret(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_one_target, '')])
out = self.hnas_backend.get_target_secret('cinder-default',
'fs-cinder')
self.assertEqual('pxr6U37LZZJBoMc', out)
self.hnas_backend._run_cmd.assert_called_with('console-context',
'--evs', '2',
'iscsi-target', 'list',
'cinder-default')
def test_get_target_secret_chap_disabled(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(target_chap_disable, '')])
out = self.hnas_backend.get_target_secret('cinder-default',
'fs-cinder')
self.assertEqual('', out)
self.hnas_backend._run_cmd.assert_called_with('console-context',
'--evs', '2',
'iscsi-target', 'list',
'cinder-default')
def test_get_target_iqn(self):
self.mock_object(self.hnas_backend, '_run_cmd',
side_effect=[(evsfs_list, ''),
(iscsi_one_target, ''),
(add_targetsecret, '')])
out = self.hnas_backend.get_target_iqn('cinder-default', 'fs-cinder')
self.assertEqual('iqn.2014-12.10.10.10.10:evstest1.cinder-default',
out)
def test_create_target(self):
self.mock_object(self.hnas_backend, '_run_cmd',
return_value=(evsfs_list, ''))
self.hnas_backend.create_target('cinder-default', 'fs-cinder',
'pxr6U37LZZJBoMc')
self.assertEqual(0, out['provisioned_capacity'])
def test_get_cloned_file_relatives(self):
self.mock_object(self.hnas_backend, '_run_cmd',

View File: HNAS iSCSI driver unit tests (file deleted)

@@ -1,590 +0,0 @@
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oslo_concurrency import processutils as putils
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi.hnas_backend import HNASSSHBackend
from cinder.volume.drivers.hitachi import hnas_iscsi as iscsi
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import volume_types
# The following information is passed to the tests when creating a volume
_VOLUME = {'name': 'volume-cinder',
'id': fake.VOLUME_ID,
'size': 128,
'host': 'host1@hnas-iscsi-backend#default',
'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-'
'4182-afb8-1755025c35b8'}
_VOLUME2 = {'name': 'volume-clone',
'id': fake.VOLUME2_ID,
'size': 150,
'host': 'host1@hnas-iscsi-backend#default',
'provider_location': '83-68-96-AA-DA-5D.volume-8fe1802a-316b-'
'5237-1c57-c35b81755025'}
_SNAPSHOT = {
'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
'id': fake.SNAPSHOT_ID,
'size': 128,
'volume_type': None,
'provider_location': None,
'volume_size': 128,
'volume': _VOLUME,
'volume_name': _VOLUME['name'],
'host': 'host1@hnas-iscsi-backend#silver',
'volume_type_id': fake.VOLUME_TYPE_ID,
}
class HNASiSCSIDriverTest(test.TestCase):
"""Test HNAS iSCSI volume driver."""
def setUp(self):
super(HNASiSCSIDriverTest, self).setUp()
self.context = context.get_admin_context()
self.volume = fake_volume.fake_volume_obj(
self.context, **_VOLUME)
self.volume_clone = fake_volume.fake_volume_obj(
self.context, **_VOLUME2)
self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
self.volume_type = fake_volume.fake_volume_type_obj(
None,
**{'name': 'silver',
'id': fake.VOLUME_TYPE_ID}
)
self.parsed_xml = {
'username': 'supervisor',
'password': 'supervisor',
'hnas_cmd': 'ssc',
'fs': {'fs2': 'fs2'},
'ssh_port': '22',
'port': '3260',
'services': {
'default': {
'hdp': 'fs2',
'iscsi_ip': '172.17.39.132',
'iscsi_port': '3260',
'port': '22',
'pool_name': 'default',
'label': 'svc_0',
'evs': '1',
'tgt': {
'alias': 'test',
'secret': 'itEpgB5gPefGhW2'
}
},
'silver': {
'hdp': 'fs3',
'iscsi_ip': '172.17.39.133',
'iscsi_port': '3260',
'port': '22',
'pool_name': 'silver',
'label': 'svc_1',
'evs': '2',
'tgt': {
'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'
}
}
},
'cluster_admin_ip0': None,
'ssh_private_key': None,
'chap_enabled': True,
'mgmt_ip0': '172.17.44.15',
'ssh_enabled': None
}
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.hds_hnas_iscsi_config_file = 'fake.xml'
self.mock_object(hnas_utils, 'read_cinder_conf',
return_value=self.parsed_xml)
self.driver = iscsi.HNASISCSIDriver(configuration=self.configuration)
@staticmethod
def instantiate_snapshot(snap):
snap = snap.copy()
snap['volume'] = fake_volume.fake_volume_obj(
None, **snap['volume'])
snapshot = fake_snapshot.fake_snapshot_obj(
None, expected_attrs=['volume'], **snap)
return snapshot
def test_get_service_target_chap_enabled(self):
lu_info = {'mapped': False,
'id': 1,
'tgt': {'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'}}
tgt = {'found': True,
'tgt': {
'alias': 'cinder-default',
'secret': 'pxr6U37LZZJBoMc',
'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
'lus': [
{'id': '0',
'name': 'cinder-lu'},
{'id': '1',
'name': 'volume-99da7ae7-1e7f-4d57-8bf...'}
],
'auth': 'Enabled'}}
iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
self.mock_object(HNASSSHBackend, 'get_evs', return_value='1')
self.mock_object(HNASSSHBackend, 'check_lu', return_value=lu_info)
self.mock_object(HNASSSHBackend, 'check_target', return_value=tgt)
self.mock_object(HNASSSHBackend, 'get_target_secret', return_value='')
self.mock_object(HNASSSHBackend, 'set_target_secret')
self.mock_object(HNASSSHBackend, 'get_target_iqn', return_value=iqn)
self.driver._get_service_target(self.volume)
def test_get_service_target_chap_disabled(self):
lu_info = {'mapped': False,
'id': 1,
'tgt': {'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'}}
tgt = {'found': False,
'tgt': {
'alias': 'cinder-default',
'secret': 'pxr6U37LZZJBoMc',
'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
'lus': [
{'id': '0',
'name': 'cinder-lu'},
{'id': '1',
'name': 'volume-99da7ae7-1e7f-4d57-8bf...'}
],
'auth': 'Enabled'}}
iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
self.driver.config['chap_enabled'] = False
self.mock_object(HNASSSHBackend, 'get_evs', return_value='1')
self.mock_object(HNASSSHBackend, 'check_lu', return_value=lu_info)
self.mock_object(HNASSSHBackend, 'check_target', return_value=tgt)
self.mock_object(HNASSSHBackend, 'get_target_iqn', return_value=iqn)
self.mock_object(HNASSSHBackend, 'create_target')
self.driver._get_service_target(self.volume)
def test_get_service_target_no_more_targets_exception(self):
iscsi.MAX_HNAS_LUS_PER_TARGET = 4
lu_info = {'mapped': False, 'id': 1,
'tgt': {'alias': 'iscsi-test', 'secret': 'itEpgB5gPefGhW2'}}
tgt = {'found': True,
'tgt': {
'alias': 'cinder-default', 'secret': 'pxr6U37LZZJBoMc',
'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
'lus': [
{'id': '0', 'name': 'volume-0'},
{'id': '1', 'name': 'volume-1'},
{'id': '2', 'name': 'volume-2'},
{'id': '3', 'name': 'volume-3'}, ],
'auth': 'Enabled'}}
self.mock_object(HNASSSHBackend, 'get_evs', return_value='1')
self.mock_object(HNASSSHBackend, 'check_lu', return_value=lu_info)
self.mock_object(HNASSSHBackend, 'check_target', return_value=tgt)
self.assertRaises(exception.NoMoreTargets,
self.driver._get_service_target, self.volume)
def test_check_pool_and_fs(self):
self.mock_object(hnas_utils, 'get_pool', return_value='default')
self.driver._check_pool_and_fs(self.volume, 'fs2')
def test_check_pool_and_fs_no_default_configured(self):
self.volume.volume_type = self.volume_type
self.mock_object(hnas_utils, 'get_pool', return_value='default')
self.driver.config['services'] = {
'silver': {
'hdp': 'fs3',
'iscsi_ip': '172.17.39.133',
'iscsi_port': '3260',
'port': '22',
'volume_type': 'silver',
'label': 'svc_1',
'evs': '2',
'tgt': {
'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'
}
}
}
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.driver._check_pool_and_fs, self.volume,
'fs-cinder')
def test_check_pool_and_fs_mismatch(self):
self.mock_object(hnas_utils, 'get_pool', return_value='default')
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.driver._check_pool_and_fs, self.volume,
'fs-cinder')
def test_check_pool_and_fs_host_mismatch(self):
self.mock_object(hnas_utils, 'get_pool', return_value='silver')
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.driver._check_pool_and_fs, self.volume,
'fs3')
def test_do_setup(self):
evs_info = {'172.17.39.132': {'evs_number': 1},
'172.17.39.133': {'evs_number': 2},
'172.17.39.134': {'evs_number': 3}}
version_info = {
'mac': '83-68-96-AA-DA-5D',
'model': 'HNAS 4040',
'version': '12.4.3924.11',
'hardware': 'NAS Platform',
'serial': 'B1339109',
}
self.mock_object(HNASSSHBackend, 'get_fs_info', return_value=True)
self.mock_object(HNASSSHBackend, 'get_evs_info', return_value=evs_info)
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
self.driver.do_setup(None)
HNASSSHBackend.get_fs_info.assert_called_with('fs2')
self.assertTrue(HNASSSHBackend.get_evs_info.called)
def test_do_setup_portal_not_found(self):
evs_info = {'172.17.48.132': {'evs_number': 1},
'172.17.39.133': {'evs_number': 2},
'172.17.39.134': {'evs_number': 3}}
version_info = {
'mac': '83-68-96-AA-DA-5D',
'model': 'HNAS 4040',
'version': '12.4.3924.11',
'hardware': 'NAS Platform',
'serial': 'B1339109',
}
self.mock_object(HNASSSHBackend, 'get_fs_info', return_value=True)
self.mock_object(HNASSSHBackend, 'get_evs_info', return_value=evs_info)
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
self.assertRaises(exception.InvalidParameterValue,
self.driver.do_setup, None)
def test_do_setup_umounted_filesystem(self):
self.mock_object(HNASSSHBackend, 'get_fs_info', return_value=False)
self.assertRaises(exception.ParameterNotFound, self.driver.do_setup,
None)
def test_initialize_connection(self):
lu_info = {'mapped': True,
'id': 1,
'tgt': {'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'}}
conn = {'lun_name': 'cinder-lu',
'initiator': 'initiator',
'hdp': 'fs-cinder',
'lu_id': '0',
'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
'port': 3260}
connector = {'initiator': 'fake_initiator'}
self.mock_object(HNASSSHBackend, 'get_evs', return_value=2)
self.mock_object(HNASSSHBackend, 'check_lu', return_value=lu_info)
self.mock_object(HNASSSHBackend, 'add_iscsi_conn', return_value=conn)
self.driver.initialize_connection(self.volume, connector)
HNASSSHBackend.add_iscsi_conn.assert_called_with(self.volume.name,
'fs2', '22',
'iscsi-test',
connector[
'initiator'])
def test_initialize_connection_command_error(self):
lu_info = {'mapped': True,
'id': 1,
'tgt': {'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'}}
connector = {'initiator': 'fake_initiator'}
self.mock_object(HNASSSHBackend, 'get_evs', return_value=2)
self.mock_object(HNASSSHBackend, 'check_lu', return_value=lu_info)
self.mock_object(HNASSSHBackend, 'add_iscsi_conn',
side_effect=putils.ProcessExecutionError)
self.assertRaises(exception.ISCSITargetAttachFailed,
self.driver.initialize_connection, self.volume,
connector)
def test_terminate_connection(self):
connector = {}
lu_info = {'mapped': True,
'id': 1,
'tgt': {'alias': 'iscsi-test',
'secret': 'itEpgB5gPefGhW2'}}
self.mock_object(HNASSSHBackend, 'get_evs', return_value=2)
self.mock_object(HNASSSHBackend, 'check_lu', return_value=lu_info)
self.mock_object(HNASSSHBackend, 'del_iscsi_conn')
self.driver.terminate_connection(self.volume, connector)
HNASSSHBackend.del_iscsi_conn.assert_called_with('1',
'iscsi-test',
lu_info['id'])
def test_get_volume_stats(self):
self.driver.pools = [{'pool_name': 'default',
'service_label': 'svc_0',
'fs': '172.17.39.132:/fs2'},
{'pool_name': 'silver',
'service_label': 'svc_1',
'fs': '172.17.39.133:/fs3'}]
fs_cinder = {
'evs_id': '2',
'total_size': '250',
'label': 'fs-cinder',
'available_size': '228',
'used_size': '21.4',
'id': '1025',
'provisioned_capacity': 0.0
}
self.mock_object(HNASSSHBackend, 'get_fs_info', return_value=fs_cinder)
stats = self.driver.get_volume_stats(refresh=True)
self.assertEqual('5.0.0', stats['driver_version'])
self.assertEqual('Hitachi', stats['vendor_name'])
self.assertEqual('iSCSI', stats['storage_protocol'])
def test_create_volume(self):
version_info = {'mac': '83-68-96-AA-DA-5D'}
expected_out = {
'provider_location': version_info['mac'] + '.' + self.volume.name
}
self.mock_object(HNASSSHBackend, 'create_lu')
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
out = self.driver.create_volume(self.volume)
self.assertEqual(expected_out, out)
HNASSSHBackend.create_lu.assert_called_with('fs2', u'128',
self.volume.name)
def test_create_volume_missing_fs(self):
self.volume.host = 'host1@hnas-iscsi-backend#missing'
self.assertRaises(exception.ParameterNotFound,
self.driver.create_volume, self.volume)
def test_delete_volume(self):
self.mock_object(HNASSSHBackend, 'delete_lu')
self.driver.delete_volume(self.volume)
HNASSSHBackend.delete_lu.assert_called_once_with(
self.parsed_xml['fs']['fs2'], self.volume.name)
def test_extend_volume(self):
new_size = 200
self.mock_object(HNASSSHBackend, 'extend_lu')
self.driver.extend_volume(self.volume, new_size)
HNASSSHBackend.extend_lu.assert_called_once_with(
self.parsed_xml['fs']['fs2'], new_size,
self.volume.name)
def test_create_cloned_volume(self):
clone_name = self.volume_clone.name
version_info = {'mac': '83-68-96-AA-DA-5D'}
expected_out = {
'provider_location':
version_info['mac'] + '.' + self.volume_clone.name
}
self.mock_object(HNASSSHBackend, 'create_cloned_lu')
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
self.mock_object(HNASSSHBackend, 'extend_lu')
out = self.driver.create_cloned_volume(self.volume_clone, self.volume)
self.assertEqual(expected_out, out)
HNASSSHBackend.create_cloned_lu.assert_called_with(self.volume.name,
'fs2',
clone_name)
def test_functions_with_pass(self):
self.driver.check_for_setup_error()
self.driver.ensure_export(None, self.volume)
self.driver.create_export(None, self.volume, 'connector')
self.driver.remove_export(None, self.volume)
def test_create_snapshot(self):
lu_info = {'lu_mounted': 'No',
'name': 'cinder-lu',
'fs_mounted': 'YES',
'filesystem': 'FS-Cinder',
'path': '/.cinder/cinder-lu.iscsi',
'size': 2.0}
version_info = {'mac': '83-68-96-AA-DA-5D'}
expected_out = {
'provider_location': version_info['mac'] + '.' + self.snapshot.name
}
self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
return_value=lu_info)
self.mock_object(volume_types, 'get_volume_type',
return_value=self.volume_type)
self.mock_object(HNASSSHBackend, 'create_cloned_lu')
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
out = self.driver.create_snapshot(self.snapshot)
self.assertEqual(expected_out, out)
def test_delete_snapshot(self):
lu_info = {'filesystem': 'FS-Cinder'}
self.mock_object(volume_types, 'get_volume_type',
return_value=self.volume_type)
self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
return_value=lu_info)
self.mock_object(HNASSSHBackend, 'delete_lu')
self.driver.delete_snapshot(self.snapshot)
def test_create_volume_from_snapshot(self):
version_info = {'mac': '83-68-96-AA-DA-5D'}
expected_out = {
'provider_location': version_info['mac'] + '.' + self.snapshot.name
}
self.mock_object(HNASSSHBackend, 'create_cloned_lu')
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
out = self.driver.create_volume_from_snapshot(self.volume,
self.snapshot)
self.assertEqual(expected_out, out)
HNASSSHBackend.create_cloned_lu.assert_called_with(self.snapshot.name,
'fs2',
self.volume.name)
def test_manage_existing_get_size(self):
existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'}
lu_info = {
'name': 'volume-cinder',
'comment': None,
'path': ' /.cinder/volume-cinder',
'size': 128,
'filesystem': 'fs-cinder',
'fs_mounted': 'Yes',
'lu_mounted': 'Yes'
}
self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
return_value=lu_info)
out = self.driver.manage_existing_get_size(self.volume,
existing_vol_ref)
self.assertEqual(lu_info['size'], out)
HNASSSHBackend.get_existing_lu_info.assert_called_with(
'volume-cinder', lu_info['filesystem'])
def test_manage_existing_get_size_no_source_name(self):
existing_vol_ref = {}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self.volume,
existing_vol_ref)
def test_manage_existing_get_size_wrong_source_name(self):
existing_vol_ref = {'source-name': 'fs-cinder/volume/cinder'}
self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
return_value={})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self.volume,
existing_vol_ref)
def test_manage_existing_get_size_volume_not_found(self):
existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'}
self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
return_value={})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self.volume,
existing_vol_ref)
def test_manage_existing(self):
self.volume.volume_type = self.volume_type
existing_vol_ref = {'source-name': 'fs2/volume-cinder'}
metadata = {'service_label': 'default'}
version_info = {'mac': '83-68-96-AA-DA-5D'}
expected_out = {
'provider_location': version_info['mac'] + '.' + self.volume.name
}
self.mock_object(HNASSSHBackend, 'rename_existing_lu')
self.mock_object(volume_types, 'get_volume_type_extra_specs',
return_value=metadata)
self.mock_object(HNASSSHBackend, 'get_version',
return_value=version_info)
out = self.driver.manage_existing(self.volume, existing_vol_ref)
self.assertEqual(expected_out, out)
HNASSSHBackend.rename_existing_lu.assert_called_with('fs2',
'volume-cinder',
self.volume.name)
def test_unmanage(self):
self.mock_object(HNASSSHBackend, 'rename_existing_lu')
self.driver.unmanage(self.volume)
HNASSSHBackend.rename_existing_lu.assert_called_with(
self.parsed_xml['fs']['fs2'],
self.volume.name, 'unmanage-' + self.volume.name)

View File: HNAS utils unit tests

@@ -14,7 +14,6 @@
# under the License.
#
import copy
import ddt
import os
@@ -26,7 +25,6 @@ from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hnas_iscsi
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import volume_types
@@ -38,14 +36,13 @@ _VOLUME = {'name': 'cinder-volume',
'provider_location': 'hnas'}
service_parameters = ['volume_type', 'hdp']
optional_parameters = ['ssc_cmd', 'cluster_admin_ip0', 'iscsi_ip']
optional_parameters = ['ssc_cmd', 'cluster_admin_ip0']
config_from_cinder_conf = {
'username': 'supervisor',
'fs': {'easy-stack': 'easy-stack',
'silver': 'silver'},
'ssh_port': 22,
'chap_enabled': True,
'cluster_admin_ip0': None,
'ssh_private_key': None,
'mgmt_ip0': '172.24.44.15',
@@ -70,12 +67,10 @@ valid_XML_str = '''
<ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
<svc_0>
<volume_type>default</volume_type>
<iscsi_ip>172.24.49.21</iscsi_ip>
<hdp>easy-stack</hdp>
</svc_0>
<svc_1>
<volume_type>silver</volume_type>
<iscsi_ip>172.24.49.32</iscsi_ip>
<hdp>FS-CinderDev1</hdp>
</svc_1>
</config>
@@ -98,7 +93,6 @@ XML_empty_authentication_param = '''
<ssh_private_key></ssh_private_key>
<svc_0>
<volume_type>default</volume_type>
<iscsi_ip>172.24.49.21</iscsi_ip>
<hdp>easy-stack</hdp>
</svc_0>
</config>
@@ -112,7 +106,6 @@ XML_without_mandatory_params = '''
<ssh_enabled>False</ssh_enabled>
<svc_0>
<volume_type>default</volume_type>
<iscsi_ip>172.24.49.21</iscsi_ip>
<hdp>easy-stack</hdp>
</svc_0>
</config>
@@ -130,7 +123,7 @@ XML_no_services_configured = '''
'''
parsed_xml = {'username': 'supervisor', 'password': 'supervisor',
'ssc_cmd': 'ssc', 'iscsi_ip': None, 'ssh_port': 22,
'ssc_cmd': 'ssc', 'ssh_port': 22,
'fs': {'easy-stack': 'easy-stack',
'FS-CinderDev1': 'FS-CinderDev1'},
'cluster_admin_ip0': None,
@@ -159,17 +152,14 @@ class HNASUtilsTest(test.TestCase):
super(HNASUtilsTest, self).setUp()
self.fake_conf = conf.Configuration(hnas_utils.CONF)
self.fake_conf.append_config_values(hnas_iscsi.iSCSI_OPTS)
self.override_config('hnas_username', 'supervisor')
self.override_config('hnas_password', 'supervisor')
self.override_config('hnas_mgmt_ip0', '172.24.44.15')
self.override_config('hnas_svc0_pool_name', 'default')
self.override_config('hnas_svc0_hdp', 'easy-stack')
self.override_config('hnas_svc0_iscsi_ip', '172.24.49.21')
self.override_config('hnas_svc1_pool_name', 'FS-CinderDev1')
self.override_config('hnas_svc1_hdp', 'silver')
self.override_config('hnas_svc1_iscsi_ip', '172.24.49.32')
self.context = context.get_admin_context()
self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
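The setUp above exercises the cinder.conf options that replaced the legacy XML file. A minimal sketch of how such hnas_* options could be declared with oslo.config; the option names are taken from the overrides above, while the types and help strings are assumptions:

```python
from oslo_config import cfg

hnas_opts = [
    cfg.StrOpt('hnas_username', help='HNAS management user name'),
    cfg.StrOpt('hnas_password', secret=True,
               help='HNAS management user password'),
    cfg.IPOpt('hnas_mgmt_ip0', help='HNAS management interface IP'),
    cfg.StrOpt('hnas_svc0_pool_name', help='Service 0 pool name'),
    cfg.StrOpt('hnas_svc0_hdp', help='Service 0 file system (HDP)'),
]

CONF = cfg.CONF
CONF.register_opts(hnas_opts)
```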
@@ -286,32 +276,22 @@ class HNASUtilsTest(test.TestCase):
self.assertEqual('default', out)
def test_read_cinder_conf_nfs(self):
out = hnas_utils.read_cinder_conf(self.fake_conf, 'nfs')
out = hnas_utils.read_cinder_conf(self.fake_conf)
self.assertEqual(config_from_cinder_conf, out)
def test_read_cinder_conf_iscsi(self):
local_config = copy.deepcopy(config_from_cinder_conf)
local_config['services']['FS-CinderDev1']['iscsi_ip'] = '172.24.49.32'
local_config['services']['default']['iscsi_ip'] = '172.24.49.21'
out = hnas_utils.read_cinder_conf(self.fake_conf, 'iscsi')
self.assertEqual(local_config, out)
def test_read_cinder_conf_break(self):
self.override_config('hnas_username', None)
self.override_config('hnas_password', None)
self.override_config('hnas_mgmt_ip0', None)
out = hnas_utils.read_cinder_conf(self.fake_conf, 'nfs')
out = hnas_utils.read_cinder_conf(self.fake_conf)
self.assertIsNone(out)
@ddt.data('hnas_username', 'hnas_password',
'hnas_mgmt_ip0', 'hnas_svc0_iscsi_ip', 'hnas_svc0_pool_name',
'hnas_mgmt_ip0', 'hnas_svc0_pool_name',
'hnas_svc0_hdp', )
def test_init_invalid_conf_parameters(self, attr_name):
self.override_config(attr_name, None)
self.assertRaises(exception.InvalidParameterValue,
hnas_utils.read_cinder_conf, self.fake_conf, 'iscsi')
hnas_utils.read_cinder_conf, self.fake_conf)

View File: cinder/volume/drivers/hitachi/hnas_backend.py

@@ -220,19 +220,7 @@ class HNASSSHBackend(object):
fs_info['available_size'] = _convert_size(
fs_info['available_size'])
# Get the iSCSI LUs in the FS
evs_id = self.get_evs(fs_label)
out, err = self._run_cmd('console-context', '--evs', evs_id,
'iscsi-lu', 'list')
all_lus = [self._parse_lu_info(lu_raw)
for lu_raw in out.split('\n\n')[:-1]]
provisioned_cap = 0
for lu in all_lus:
if lu['filesystem'] == fs_label:
provisioned_cap += lu['size']
fs_info['provisioned_capacity'] = provisioned_cap
fs_info['provisioned_capacity'] = 0
LOG.debug("File system info of %(fs)s (sizes in GB): %(info)s.",
{'fs': fs_label, 'info': fs_info})
@@ -256,119 +244,6 @@ class HNASSSHBackend(object):
return self.fslist[key]['evsid']
LOG.debug("Can't find EVS ID for fs %(fs)s.", {'fs': fs_label})
def _get_targets(self, evs_id, tgt_alias=None, refresh=False):
"""Gets the target list of an EVS.
Gets the target list of an EVS. Optionally can return the information
of a specific target.
:returns: Target list or Target info (EVS ID) or empty list
"""
LOG.debug("Getting target list for evs %(evs)s, tgtalias: %(tgt)s.",
{'evs': evs_id, 'tgt': tgt_alias})
if (refresh or
evs_id not in self.tgt_list.keys() or
tgt_alias is not None):
self.tgt_list[evs_id] = []
out, err = self._run_cmd("console-context", "--evs", evs_id,
'iscsi-target', 'list')
if 'No targets' in out:
LOG.debug("No targets found in EVS %(evsid)s.",
{'evsid': evs_id})
return self.tgt_list[evs_id]
tgt_raw_list = out.split('Alias')[1:]
for tgt_raw_info in tgt_raw_list:
tgt = {}
tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop()
tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop()
tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop()
tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop()
lus = []
tgt_raw_info = tgt_raw_info.split('\n\n')[1]
tgt_raw_list = tgt_raw_info.split('\n')[2:]
for lu_raw_line in tgt_raw_list:
lu_raw_line = lu_raw_line.strip()
lu_raw_line = lu_raw_line.split(' ')
lu = {}
lu['id'] = lu_raw_line[0]
lu['name'] = lu_raw_line.pop()
lus.append(lu)
tgt['lus'] = lus
if tgt_alias == tgt['alias']:
return tgt
self.tgt_list[evs_id].append(tgt)
if tgt_alias is not None:
# We tried to find 'tgt_alias' but it does not exist. Return an
# empty list.
LOG.debug("There's no target %(alias)s in EVS %(evsid)s.",
{'alias': tgt_alias, 'evsid': evs_id})
return []
LOG.debug("Targets in EVS %(evs)s: %(tgtl)s.",
{'evs': evs_id, 'tgtl': self.tgt_list[evs_id]})
return self.tgt_list[evs_id]
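As a rough illustration of the positional parsing above: each target block is isolated by splitting on 'Alias', and each field is taken as the last whitespace-separated token of a fixed line. A self-contained sketch against text modeled on the test fixtures earlier in this commit:

```python
sample = """Alias               : cinder-default
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default
Comment             :
Secret              : pxr6U37LZZJBoMc
Authentication      : Enabled
Logical units       : No logical units.
"""

for raw in sample.split('Alias')[1:]:
    lines = raw.split('\n')
    alias = lines[0].split(' ').pop()    # 'cinder-default'
    iqn = lines[1].split(' ').pop()      # the full IQN
    secret = lines[3].split(' ').pop()   # 'pxr6U37LZZJBoMc'
    auth = lines[4].split(' ').pop()     # 'Enabled'
    print(alias, iqn, secret, auth)
```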
def _get_unused_luid(self, tgt_info):
"""Gets a free logical unit id number to be used.
:param tgt_info: dictionary with the target information
:returns: a free logical unit id number
"""
if len(tgt_info['lus']) == 0:
return 0
free_lu = 0
for lu in tgt_info['lus']:
if int(lu['id']) == free_lu:
free_lu += 1
if int(lu['id']) > free_lu:
# Found a free LU number
break
LOG.debug("Found the free LU ID: %(lu)s.", {'lu': free_lu})
return free_lu
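A standalone copy of that scan, for illustration only; it assumes the LU list is sorted by ascending id, which is how the backend returns it. The first two calls mirror the unit tests removed above:

```python
def get_unused_luid(lus):
    """Return the first free logical unit id (sketch of the method above)."""
    free_lu = 0
    for lu in lus:
        if int(lu['id']) == free_lu:
            free_lu += 1
        if int(lu['id']) > free_lu:
            break  # found a hole in the id sequence
    return free_lu

assert get_unused_luid([]) == 0
assert get_unused_luid([{'id': '1'}, {'id': '2'}]) == 0  # id 0 is free
assert get_unused_luid([{'id': '0'}, {'id': '1'}]) == 2  # next free slot
```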
def create_lu(self, fs_label, size, lu_name):
"""Creates a new Logical Unit.
If the operation cannot be performed for some reason, utils.execute()
raises an error and aborts the operation. Used for iSCSI only.
:param fs_label: data pool where the Logical Unit will be created
:param size: Size (GB) of the new Logical Unit
:param lu_name: name of the Logical Unit
"""
evs_id = self.get_evs(fs_label)
self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'add',
"-e", lu_name, fs_label, '/.cinder/' + lu_name +
'.iscsi', size + 'G')
LOG.debug('Created %(size)s GB LU: %(name)s FS: %(fs)s.',
{'size': size, 'name': lu_name, 'fs': fs_label})
def delete_lu(self, fs_label, lu_name):
"""Deletes a Logical Unit.
:param fs_label: data pool of the Logical Unit
:param lu_name: id of the Logical Unit being deleted
"""
evs_id = self.get_evs(fs_label)
self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'del',
'-d', '-f', lu_name)
LOG.debug('LU %(lu)s deleted.', {'lu': lu_name})
def file_clone(self, fs_label, src, name):
"""Clones NFS files to a new one named 'name'.
@@ -391,319 +266,6 @@ class HNASSSHBackend(object):
LOG.debug('file_clone: fs:%(fs_label)s %(src)s/src: -> %(name)s/dst',
{'fs_label': fs_label, 'src': src, 'name': name})
def extend_lu(self, fs_label, new_size, lu_name):
"""Extends an iSCSI volume.
:param fs_label: data pool of the Logical Unit
:param new_size: new size of the Logical Unit
:param lu_name: name of the Logical Unit
"""
evs_id = self.get_evs(fs_label)
size = six.text_type(new_size)
self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'expand',
lu_name, size + 'G')
LOG.debug('LU %(lu)s extended.', {'lu': lu_name})
@utils.retry(putils.ProcessExecutionError, retries=HNAS_SSC_RETRIES,
wait_random=True)
def add_iscsi_conn(self, lu_name, fs_label, port, tgt_alias, initiator):
"""Sets up the Logical Unit on the specified target port.
:param lu_name: name of the Logical Unit being mapped
:param fs_label: data pool of the Logical Unit
:param port: iSCSI port
:param tgt_alias: iSCSI qualified name
:param initiator: initiator address
:returns: dictionary (conn_info) with the connection information
conn_info={
'lu_id': Logical Unit ID,
'iqn': iSCSI qualified name,
'lu_name': Logical Unit name,
'initiator': iSCSI initiator,
'fs': file system to connect,
'port': port to make the iSCSI connection
}
"""
conn_info = {}
lu_info = self.check_lu(lu_name, fs_label)
_evs_id = self.get_evs(fs_label)
if not lu_info['mapped']:
tgt = self._get_targets(_evs_id, tgt_alias)
lu_id = self._get_unused_luid(tgt)
conn_info['lu_id'] = lu_id
conn_info['iqn'] = tgt['iqn']
# In busy situations where 2 or more instances of the driver are
# trying to map an LU, 2 hosts can retrieve the same 'lu_id',
# and try to map the LU in the same LUN. To handle that we
# capture the ProcessExecutionError exception, backoff for some
# seconds and retry it.
self._run_cmd("console-context", "--evs", _evs_id, 'iscsi-target',
'addlu', tgt_alias, lu_name, six.text_type(lu_id))
else:
conn_info['lu_id'] = lu_info['id']
conn_info['iqn'] = lu_info['tgt']['iqn']
conn_info['lu_name'] = lu_name
conn_info['initiator'] = initiator
conn_info['fs'] = fs_label
conn_info['port'] = port
LOG.debug('add_iscsi_conn: LU %(lu)s added to %(tgt)s.',
{'lu': lu_name, 'tgt': tgt_alias})
LOG.debug('conn_info: %(conn_info)s', {'conn_info': conn_info})
return conn_info
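The race described in the comment above is handled by the @utils.retry decorator on this method (HNAS_SSC_RETRIES attempts with a randomized wait). A minimal sketch of that retry-with-random-backoff idea; the real implementation is cinder.utils.retry, and the constants here are placeholders:

```python
import functools
import random
import time


def retry(exc_type, retries=5, wait_random=True):
    """Re-run func when exc_type is raised, sleeping a random interval."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    if attempt == retries - 1:
                        raise  # out of attempts: propagate the failure
                    # Randomized backoff makes it unlikely that two racing
                    # drivers pick the same free LUN id again next time.
                    time.sleep(random.uniform(0, 2) if wait_random else 1)
        return wrapper
    return decorator
```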
def del_iscsi_conn(self, evs_id, iqn, lu_id):
"""Removes the Logical Unit on the specified target port.
:param evs_id: EVS ID of the file system
:param iqn: iSCSI qualified name
:param lu_id: Logical Unit id
"""
found = False
out, err = self._run_cmd("console-context", "--evs", evs_id,
'iscsi-target', 'list', iqn)
# see if LU is already detached
lines = out.split('\n')
for line in lines:
if line.startswith(' '):
lu_line = line.split()[0]
if lu_line[0].isdigit() and lu_line == lu_id:
found = True
break
# LU wasn't found
if not found:
LOG.debug("del_iscsi_conn: LU already deleted from "
"target %(iqn)s", {'lu': lu_id, 'iqn': iqn})
return
# remove the LU from the target
self._run_cmd("console-context", "--evs", evs_id, 'iscsi-target',
'dellu', '-f', iqn, lu_id)
LOG.debug("del_iscsi_conn: LU: %(lu)s successfully deleted from "
"target %(iqn)s", {'lu': lu_id, 'iqn': iqn})
def get_target_iqn(self, tgt_alias, fs_label):
"""Obtains the target full iqn
Returns the target's full iqn rather than its alias.
:param tgt_alias: alias of the target
:param fs_label: data pool of the Logical Unit
:returns: string with full IQN
"""
_evs_id = self.get_evs(fs_label)
out, err = self._run_cmd("console-context", "--evs", _evs_id,
'iscsi-target', 'list', tgt_alias)
lines = out.split('\n')
# returns the first iqn
for line in lines:
if 'Globally unique name' in line:
full_iqn = line.split()[3]
LOG.debug('get_target_iqn: %(iqn)s', {'iqn': full_iqn})
return full_iqn
LOG.debug("Could not find iqn for alias %(alias)s on fs %(fs_label)s",
{'alias': tgt_alias, 'fs_label': fs_label})
def set_target_secret(self, targetalias, fs_label, secret):
"""Sets the chap secret for the specified target.
:param targetalias: alias of the target
:param fs_label: data pool of the Logical Unit
:param secret: CHAP secret of the target
"""
_evs_id = self.get_evs(fs_label)
self._run_cmd("console-context", "--evs", _evs_id, 'iscsi-target',
'mod', '-s', secret, '-a', 'enable', targetalias)
LOG.debug("set_target_secret: Secret set on target %(tgt)s.",
{'tgt': targetalias})
def get_target_secret(self, targetalias, fs_label):
"""Gets the chap secret for the specified target.
:param targetalias: alias of the target
:param fs_label: data pool of the Logical Unit
:returns: CHAP secret of the target
"""
_evs_id = self.get_evs(fs_label)
out, err = self._run_cmd("console-context", "--evs", _evs_id,
'iscsi-target', 'list', targetalias)
enabled = ""
secret = ""
lines = out.split('\n')
for line in lines:
if 'Secret' in line:
if len(line.split()) > 2:
secret = line.split()[2]
if 'Authentication' in line:
enabled = line.split()[2]
if enabled == 'Enabled':
return secret
else:
return ""
def check_target(self, fs_label, target_alias):
"""Checks if a given target exists and gets its info.
:param fs_label: pool name used
:param target_alias: alias of the target
:returns: dictionary (tgt_info)
tgt_info={
'alias': The alias of the target,
'found': boolean to inform if the target was found or not,
'tgt': dictionary with the target information
}
"""
tgt_info = {}
_evs_id = self.get_evs(fs_label)
_tgt_list = self._get_targets(_evs_id)
for tgt in _tgt_list:
if tgt['alias'] == target_alias:
attached_lus = len(tgt['lus'])
tgt_info['found'] = True
tgt_info['tgt'] = tgt
LOG.debug("Target %(tgt)s has %(lu)s volumes.",
{'tgt': target_alias, 'lu': attached_lus})
return tgt_info
tgt_info['found'] = False
tgt_info['tgt'] = None
LOG.debug("check_target: Target %(tgt)s does not exist.",
{'tgt': target_alias})
return tgt_info
def check_lu(self, vol_name, fs_label):
"""Checks if a given LU is already mapped
:param vol_name: name of the LU
:param fs_label: storage pool of the LU
:returns: dictionary (lu_info) with LU information
lu_info={
'mapped': LU state (mapped or not),
'id': ID of the LU,
'tgt': the iSCSI target dictionary
}
"""
lu_info = {}
evs_id = self.get_evs(fs_label)
tgt_list = self._get_targets(evs_id, refresh=True)
for tgt in tgt_list:
if len(tgt['lus']) == 0:
continue
for lu in tgt['lus']:
lu_id = lu['id']
lu_name = lu['name']
if lu_name[:29] == vol_name[:29]:
lu_info['mapped'] = True
lu_info['id'] = lu_id
lu_info['tgt'] = tgt
LOG.debug("LU %(lu)s attached on %(luid)s, "
"target: %(tgt)s.",
{'lu': vol_name, 'luid': lu_id, 'tgt': tgt})
return lu_info
lu_info['mapped'] = False
lu_info['id'] = 0
lu_info['tgt'] = None
LOG.debug("LU %(lu)s not attached. lu_info: %(lu_info)s",
{'lu': vol_name, 'lu_info': lu_info})
return lu_info
def _parse_lu_info(self, output):
lu_info = {}
if 'does not exist.' not in output:
aux = output.split('\n')
lu_info['name'] = aux[0].split(':')[1].strip()
lu_info['comment'] = aux[1].split(':')[1].strip()
lu_info['path'] = aux[2].split(':')[1].strip()
lu_info['size'] = aux[3].split(':')[1].strip()
lu_info['filesystem'] = aux[4].split(':')[1].strip()
lu_info['fs_mounted'] = aux[5].split(':')[1].strip()
lu_info['lu_mounted'] = aux[6].split(':')[1].strip()
if 'TB' in lu_info['size']:
sz_convert = float(lu_info['size'].split()[0]) * units.Ki
lu_info['size'] = sz_convert
elif 'MB' in lu_info['size']:
sz_convert = float(lu_info['size'].split()[0]) / units.Ki
lu_info['size'] = sz_convert
else:
lu_info['size'] = float(lu_info['size'].split()[0])
return lu_info
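The size normalization above keeps GB as the base unit, matching the 2 GB / 2 TB fixtures (lu_list, lu_list_tb) earlier in this commit. A self-contained sketch of just that conversion:

```python
from oslo_utils import units


def parse_lu_size(size_field):
    """Normalize an 'iscsi-lu list' Size field to GB (sketch)."""
    value = float(size_field.split()[0])
    if 'TB' in size_field:
        return value * units.Ki  # 2 TB -> 2048.0 GB
    if 'MB' in size_field:
        return value / units.Ki  # 512 MB -> 0.5 GB
    return value                 # already GB

assert parse_lu_size('2 GB') == 2.0
assert parse_lu_size('2 TB') == 2048.0  # as in test_get_existing_lu_info_tb
```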
def get_existing_lu_info(self, lu_name, fs_label=None, evs_id=None):
"""Gets the information for the specified Logical Unit.
Returns the information of an existing Logical Unit on HNAS, according
to the name provided.
:param lu_name: label of the Logical Unit
:param fs_label: label of the file system
:param evs_id: ID of the EVS where the LU is located
:returns: dictionary (lu_info) with LU information
lu_info={
'name': A Logical Unit name,
'comment': A comment about the LU, not used for Cinder,
'path': Path to LU inside filesystem,
'size': Logical Unit size returned always in GB (volume size),
'filesystem': File system where the Logical Unit was created,
'fs_mounted': Information about the state of file system
(mounted or not),
'lu_mounted': Information about the state of Logical Unit
(mounted or not)
}
"""
if evs_id is None:
evs_id = self.get_evs(fs_label)
lu_name = "'{}'".format(lu_name)
out, err = self._run_cmd("console-context", "--evs", evs_id,
'iscsi-lu', 'list', lu_name)
lu_info = self._parse_lu_info(out)
LOG.debug('get_existing_lu_info: LU info: %(lu)s', {'lu': lu_info})
return lu_info
def rename_existing_lu(self, fs_label, vol_name, new_name):
"""Renames the specified Logical Unit.
Renames an existing Logical Unit on HNAS according to the new name
provided.
:param fs_label: label of the file system
:param vol_name: current name of the existing volume
:param new_name: new name to the existing volume
"""
new_name = "'{}'".format(new_name)
evs_id = self.get_evs(fs_label)
self._run_cmd("console-context", "--evs", evs_id, "iscsi-lu", "mod",
"-n", new_name, vol_name)
LOG.debug('rename_existing_lu_info:'
'LU %(old)s was renamed to %(new)s',
{'old': vol_name, 'new': new_name})
def _get_fs_list(self):
"""Gets a list of file systems configured on the backend.
@@ -804,37 +366,6 @@ class HNASSSHBackend(object):
LOG.debug("get_export_list: %(exp_list)s", {'exp_list': export_list})
return export_list
def create_cloned_lu(self, src_lu, fs_label, clone_name):
"""Clones a Logical Unit
Clone primitive used to support all iSCSI snapshot/cloning functions.
:param src_lu: name of the source Logical Unit being cloned
:param fs_label: data pool of the Logical Unit
:param clone_name: name of the snapshot
"""
evs_id = self.get_evs(fs_label)
self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'clone',
'-e', src_lu, clone_name,
'/.cinder/' + clone_name + '.iscsi')
LOG.debug('LU %(lu)s cloned.', {'lu': clone_name})
def create_target(self, tgt_alias, fs_label, secret):
"""Creates a new iSCSI target
:param tgt_alias: the alias with which the target will be created
:param fs_label: the label of the file system to create the target
:param secret: the secret for authentication of the target
"""
_evs_id = self.get_evs(fs_label)
self._run_cmd("console-context", "--evs", _evs_id,
'iscsi-target', 'add', tgt_alias, secret)
self._get_targets(_evs_id, refresh=True)
LOG.debug("create_target: alias: %(alias)s fs_label: %(fs_label)s",
{'alias': tgt_alias, 'fs_label': fs_label})
def _get_file_handler(self, volume_path, _evs_id, fs_label,
raise_except):

View File: cinder/volume/drivers/hitachi/hnas_iscsi.py (file deleted)

@@ -1,724 +0,0 @@
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform.
"""
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import interface
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hnas_backend
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import utils
HNAS_ISCSI_VERSION = '5.0.0'
LOG = logging.getLogger(__name__)
iSCSI_OPTS = [
cfg.StrOpt('hds_hnas_iscsi_config_file',
default='/opt/hds/hnas/cinder_iscsi_conf.xml',
help='Legacy configuration file for HNAS iSCSI Cinder '
'plugin. This is not needed if you fill all '
'configuration on cinder.conf',
deprecated_for_removal=True),
cfg.BoolOpt('hnas_chap_enabled',
default=True,
help='Whether the chap authentication is enabled in the '
'iSCSI target or not.'),
cfg.IPOpt('hnas_svc0_iscsi_ip',
help='Service 0 iSCSI IP'),
cfg.IPOpt('hnas_svc1_iscsi_ip',
help='Service 1 iSCSI IP'),
cfg.IPOpt('hnas_svc2_iscsi_ip',
help='Service 2 iSCSI IP'),
cfg.IPOpt('hnas_svc3_iscsi_ip',
help='Service 3 iSCSI IP')
]
CONF = cfg.CONF
CONF.register_opts(iSCSI_OPTS)
HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc',
'chap_enabled': True,
'ssh_port': 22}
MAX_HNAS_ISCSI_TARGETS = 32
MAX_HNAS_LUS_PER_TARGET = 32
@interface.volumedriver
class HNASISCSIDriver(driver.ISCSIDriver):
"""HNAS iSCSI volume driver.
Version history:
.. code-block:: none
Version 1.0.0: Initial driver version
Version 2.2.0: Added support to SSH authentication
Version 3.2.0: Added pool aware scheduling
Fixed concurrency errors
Version 3.3.0: Fixed iSCSI target limitation error
Version 4.0.0: Added manage/unmanage features
Version 4.1.0: Fixed XML parser checks on blank options
Version 4.2.0: Fixed SSH and cluster_admin_ip0 verification
Version 4.3.0: Fixed attachment with os-brick 1.0.0
Version 5.0.0: Code cleaning up
New communication interface between the driver and HNAS
Removed the option to use local SSC (ssh_enabled=False)
Updated to use versioned objects
Changed the class name to HNASISCSIDriver
Deprecated XML config file
Fixed driver stats reporting
"""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Hitachi_HNAS_CI"
VERSION = HNAS_ISCSI_VERSION
SUPPORTED = False
def __init__(self, *args, **kwargs):
"""Initializes and reads different config parameters."""
super(HNASISCSIDriver, self).__init__(*args, **kwargs)
msg = _("The Hitachi NAS iSCSI driver is deprecated and will be "
"removed in a future release.")
versionutils.report_deprecated_feature(LOG, msg)
self.configuration = kwargs.get('configuration', None)
self.context = {}
self.config = {}
service_parameters = ['volume_type', 'hdp', 'iscsi_ip']
optional_parameters = ['ssc_cmd', 'cluster_admin_ip0',
'chap_enabled']
if self.configuration:
self.configuration.append_config_values(
hnas_utils.drivers_common_opts)
self.configuration.append_config_values(iSCSI_OPTS)
# Trying to get HNAS configuration from cinder.conf
self.config = hnas_utils.read_cinder_conf(
self.configuration, 'iscsi')
# If the HNAS configuration is not set in cinder.conf, fall back to
# the deprecated XML configuration file
if not self.config:
self.config = hnas_utils.read_xml_config(
self.configuration.hds_hnas_iscsi_config_file,
service_parameters,
optional_parameters)
self.reserved_percentage = (
self.configuration.safe_get('reserved_percentage'))
self.max_osr = (
self.configuration.safe_get('max_over_subscription_ratio'))
self.backend = hnas_backend.HNASSSHBackend(self.config)
def _get_service(self, volume):
"""Gets the available service parameters.
Get the available service parameters for a given volume using its
type.
:param volume: dictionary volume reference
:returns: HDP (file system) related to the service or error if no
configuration is found.
:raises: ParameterNotFound
"""
LOG.debug("Available services: %(svc)s.",
{'svc': self.config['services'].keys()})
label = utils.extract_host(volume.host, level='pool')
if label in self.config['services'].keys():
svc = self.config['services'][label]
LOG.info(_LI("Using service label: %(lbl)s."), {'lbl': label})
return svc['hdp']
else:
LOG.error(_LE("No configuration found for service: %(lbl)s."),
{'lbl': label})
raise exception.ParameterNotFound(param=label)
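The pool label comes from the '#pool' suffix of the volume's host string, e.g. 'host1@hnas-iscsi-backend#default' in the test fixtures. A quick illustration with the real helper used above:

```python
from cinder.volume import utils

# level='pool' returns the part after '#'; here that is 'default',
# which indexes into self.config['services'].
utils.extract_host('host1@hnas-iscsi-backend#default', level='pool')
# -> 'default'
```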
def _get_service_target(self, volume):
"""Gets the available service parameters
Gets the available service parameters for a given volume using its
type.
:param volume: dictionary volume reference
:returns: service target information or raises error
:raises: NoMoreTargets
"""
fs_label = self._get_service(volume)
evs_id = self.backend.get_evs(fs_label)
svc_label = utils.extract_host(volume.host, level='pool')
svc = self.config['services'][svc_label]
lu_info = self.backend.check_lu(volume.name, fs_label)
# The volume is already mapped to a LU, so no need to create any
# targets
if lu_info['mapped']:
service = (
svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'],
fs_label, lu_info['tgt']['alias'], lu_info['tgt']['secret'])
LOG.info(_LI("Volume %(vol_name)s already mapped on target "
"%(tgt)s to LUN %(lunid)s."),
{'vol_name': volume.name, 'tgt': lu_info['tgt']['alias'],
'lunid': lu_info['id']})
return service
# Each EVS can have up to 32 targets. Each target can have up to 32
# LUs attached and is named 'evs<id>-tgt<0-N>'. We scan from the first
# 'evs1-tgt0' until we find a target that either does not exist yet on
# the backend or exists with free slots for new LUs.
tgt_alias = ''
for i in range(0, MAX_HNAS_ISCSI_TARGETS):
tgt_alias = 'evs' + evs_id + '-tgt' + six.text_type(i)
tgt = self.backend.check_target(fs_label, tgt_alias)
if (tgt['found'] and
len(tgt['tgt']['lus']) < MAX_HNAS_LUS_PER_TARGET or
not tgt['found']):
# Either the target exists and has free slots, or it does not
# exist yet. Proceed and use it, or create a new target under
# this name.
break
else:
# If we get here, we ran out of targets: raise and give up.
LOG.error(_LE("No more targets available."))
raise exception.NoMoreTargets(param=tgt_alias)
LOG.info(_LI("Using target label: %(tgt)s."), {'tgt': tgt_alias})
# Check if we have a secret stored for this target so we don't have
# to query the backend every time
if 'targets' not in self.config.keys():
self.config['targets'] = {}
if tgt_alias not in self.config['targets'].keys():
self.config['targets'][tgt_alias] = {}
tgt_info = self.config['targets'][tgt_alias]
# HNAS one-time lookup: check whether the client supports CHAP
# authentication and whether iscsi_secret has already been set.
# Retrieve the secret if available; otherwise generate and store
# one.
if self.config['chap_enabled']:
# CHAP support is enabled. Try to get the target secret.
if 'iscsi_secret' not in tgt_info.keys():
LOG.info(_LI("Retrieving secret for service: %(tgt)s."),
{'tgt': tgt_alias})
out = self.backend.get_target_secret(tgt_alias, fs_label)
tgt_info['iscsi_secret'] = out
# CHAP is supported but the target has no secret yet, so a
# secret is created for the target
if tgt_info['iscsi_secret'] == "":
random_secret = utils.generate_password()[0:15]
tgt_info['iscsi_secret'] = random_secret
LOG.info(_LI("Set tgt CHAP secret for service: %(tgt)s."),
{'tgt': tgt_alias})
else:
# We set a blank password when the client does not
# support CHAP. Later on, if the client tries to create a new
# target that does not exist in the backend, we check for this
# value and use a temporary dummy password.
if 'iscsi_secret' not in tgt_info.keys():
# Warn only the first time
LOG.info(_LI("CHAP authentication disabled."))
tgt_info['iscsi_secret'] = "''"
# If the target does not exist, it should be created
if not tgt['found']:
self.backend.create_target(tgt_alias, fs_label,
tgt_info['iscsi_secret'])
elif (tgt['tgt']['secret'] == "" and
self.config['chap_enabled']):
# The target exists, has no secret, and CHAP is enabled
self.backend.set_target_secret(tgt_alias, fs_label,
tgt_info['iscsi_secret'])
if 'tgt_iqn' not in tgt_info:
LOG.info(_LI("Retrieving IQN for service: %(tgt)s."),
{'tgt': tgt_alias})
out = self.backend.get_target_iqn(tgt_alias, fs_label)
tgt_info['tgt_iqn'] = out
self.config['targets'][tgt_alias] = tgt_info
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'],
fs_label, tgt_alias, tgt_info['iscsi_secret'])
return service
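# A sketch of the tuple returned above; all values are invented for
# illustration:
#
#   ('192.0.2.10',      # iscsi_ip
#    '3260',            # iscsi_port
#    '1',               # evs
#    '0',               # port
#    'fs-cinder',       # file system label
#    'evs1-tgt0',       # target alias
#    'dummysecret123')  # CHAP secret, or "''" when CHAP is disabled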
def _get_stats(self):
"""Get FS stats from HNAS.
:returns: dictionary with the stats from HNAS
_stats['pools'] = {
'total_capacity_gb': total size of the pool,
'free_capacity_gb': the available size,
'QoS_support': bool to indicate if QoS is supported,
'reserved_percentage': percentage of size reserved,
'max_over_subscription_ratio': oversubscription rate,
'thin_provisioning_support': thin support (True)
}
"""
hnas_stat = {}
be_name = self.configuration.safe_get('volume_backend_name')
hnas_stat["volume_backend_name"] = be_name or 'HNASISCSIDriver'
hnas_stat["vendor_name"] = 'Hitachi'
hnas_stat["driver_version"] = HNAS_ISCSI_VERSION
hnas_stat["storage_protocol"] = 'iSCSI'
for pool in self.pools:
fs_info = self.backend.get_fs_info(pool['fs'])
pool['provisioned_capacity_gb'] = fs_info['provisioned_capacity']
pool['total_capacity_gb'] = (float(fs_info['total_size']))
pool['free_capacity_gb'] = (
float(fs_info['total_size']) - float(fs_info['used_size']))
pool['QoS_support'] = False
pool['reserved_percentage'] = self.reserved_percentage
pool['max_over_subscription_ratio'] = self.max_osr
pool['thin_provisioning_support'] = True
hnas_stat['pools'] = self.pools
LOG.debug("stats: %(stat)s.", {'stat': hnas_stat})
return hnas_stat
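# For illustration, a single entry of hnas_stat['pools'] then looks
# roughly like this (numbers invented):
#
#   {'pool_name': 'default', 'service_label': 'default',
#    'fs': 'fs-cinder', 'provisioned_capacity_gb': 128.0,
#    'total_capacity_gb': 250.0, 'free_capacity_gb': 228.0,
#    'QoS_support': False, 'reserved_percentage': 0,
#    'max_over_subscription_ratio': 20.0,
#    'thin_provisioning_support': True}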
def _check_fs_list(self):
"""Verifies the FSs list in HNAS.
Verify that all FSs specified in the configuration files actually
exist on the storage.
"""
fs_list = self.config['fs'].keys()
for fs in fs_list:
if not self.backend.get_fs_info(fs):
msg = (_("File system not found or not mounted: %(fs)s") %
{'fs': fs})
LOG.error(msg)
raise exception.ParameterNotFound(param=msg)
def _check_pool_and_fs(self, volume, fs_label):
"""Validates pool and file system of a volume being managed.
Checks if the file system for the volume-type chosen matches the
one passed in the volume reference. Also, checks if the pool
for the volume type matches the pool for the host passed.
:param volume: Reference to the volume.
:param fs_label: Label of the file system.
:raises: ManageExistingVolumeTypeMismatch
"""
pool_from_vol_type = hnas_utils.get_pool(self.config, volume)
if (pool_from_vol_type == 'default' and
'default' not in self.config['services']):
msg = (_("Failed to manage existing volume %(volume)s because the "
"chosen volume type %(vol_type)s does not have a "
"service_label configured in its extra-specs and there "
"is no pool configured with hnas_svcX_volume_type as "
"'default' in cinder.conf.") %
{'volume': volume.id,
'vol_type': getattr(volume.volume_type, 'id', None)})
LOG.error(msg)
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
pool = self.config['services'][pool_from_vol_type]['hdp']
if pool != fs_label:
msg = (_("Failed to manage existing volume because the "
"pool %(pool)s of the volume type chosen does not "
"match the file system %(fs_label)s passed in the "
"volume reference.")
% {'pool': pool, 'fs_label': fs_label})
LOG.error(msg)
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
pool_from_host = utils.extract_host(volume.host, level='pool')
if pool_from_host != pool_from_vol_type:
msg = (_("Failed to manage existing volume because the pool "
"%(pool)s of the volume type chosen does not match the "
"pool %(pool_host)s of the host.") %
{'pool': pool_from_vol_type, 'pool_host': pool_from_host})
LOG.error(msg)
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
def _get_info_from_vol_ref(self, vol_ref):
"""Gets information from the volume reference.
Returns the information (File system and volume name) taken from
the volume reference.
:param vol_ref: existing volume to take under management
:returns: the file system label and the volume name, or raises an error
:raises: ManageExistingInvalidReference
"""
vol_info = vol_ref.strip().split('/')
if len(vol_info) == 2 and '' not in vol_info:
fs_label = vol_info[0]
vol_name = vol_info[1]
return fs_label, vol_name
else:
msg = _("The reference to the volume in the backend should have "
"the format file_system/volume_name (volume_name cannot "
"contain '/')")
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=msg)
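# Examples of how the reference is parsed above (names are
# hypothetical): 'fs-cinder/my-lu' yields ('fs-cinder', 'my-lu'),
# while 'fs-cinder/', '/my-lu' and 'fs-cinder/a/b' all raise
# ManageExistingInvalidReference.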
def check_for_setup_error(self):
pass
def do_setup(self, context):
"""Sets up and verify Hitachi HNAS storage connection."""
self.context = context
self._check_fs_list()
version_info = self.backend.get_version()
LOG.info(_LI("HNAS iSCSI driver."))
LOG.info(_LI("HNAS model: %(mdl)s"), {'mdl': version_info['model']})
LOG.info(_LI("HNAS version: %(version)s"),
{'version': version_info['version']})
LOG.info(_LI("HNAS hardware: %(hw)s"),
{'hw': version_info['hardware']})
LOG.info(_LI("HNAS S/N: %(sn)s"), {'sn': version_info['serial']})
service_list = self.config['services'].keys()
for svc in service_list:
svc = self.config['services'][svc]
pool = {}
pool['pool_name'] = svc['pool_name']
pool['service_label'] = svc['pool_name']
pool['fs'] = svc['hdp']
self.pools.append(pool)
LOG.debug("Configured pools: %(pool)s", {'pool': self.pools})
evs_info = self.backend.get_evs_info()
LOG.info(_LI("Configured EVSs: %(evs)s"), {'evs': evs_info})
for svc in self.config['services'].keys():
svc_ip = self.config['services'][svc]['iscsi_ip']
if svc_ip in evs_info.keys():
LOG.info(_LI("iSCSI portal found for service: %(svc_ip)s"),
{'svc_ip': svc_ip})
self.config['services'][svc]['evs'] = (
evs_info[svc_ip]['evs_number'])
self.config['services'][svc]['iscsi_port'] = '3260'
self.config['services'][svc]['port'] = '0'
else:
LOG.error(_LE("iSCSI portal not found "
"for service: %(svc)s"), {'svc': svc_ip})
raise exception.InvalidParameterValue(err=svc_ip)
LOG.info(_LI("HNAS iSCSI Driver loaded successfully."))
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
@cinder_utils.trace
def create_volume(self, volume):
"""Creates a LU on HNAS.
:param volume: dictionary volume reference
:returns: the volume provider location
"""
fs = self._get_service(volume)
size = six.text_type(volume.size)
self.backend.create_lu(fs, size, volume.name)
return {'provider_location': self._get_provider_location(volume)}
@cinder_utils.trace
def create_cloned_volume(self, dst, src):
"""Creates a clone of a volume.
:param dst: dictionary destination volume reference
:param src: dictionary source volume reference
:returns: the provider location of the cloned volume
"""
fs_label = self._get_service(dst)
self.backend.create_cloned_lu(src.name, fs_label, dst.name)
if src.size < dst.size:
LOG.debug("Increasing dest size from %(old_size)s to "
"%(new_size)s",
{'old_size': src.size, 'new_size': dst.size})
self.extend_volume(dst, dst.size)
return {'provider_location': self._get_provider_location(dst)}
@cinder_utils.trace
def extend_volume(self, volume, new_size):
"""Extends an existing volume.
:param volume: dictionary volume reference
:param new_size: int size in GB to extend
"""
fs = self._get_service(volume)
self.backend.extend_lu(fs, new_size, volume.name)
@cinder_utils.trace
def delete_volume(self, volume):
"""Deletes the volume on HNAS.
:param volume: dictionary volume reference
"""
fs = self._get_service(volume)
self.backend.delete_lu(fs, volume.name)
@cinder_utils.synchronized('volume_mapping')
@cinder_utils.trace
def initialize_connection(self, volume, connector):
"""Maps the created volume to connector['initiator'].
:param volume: dictionary volume reference
:param connector: dictionary connector reference
:returns: The connection information
:raises: ISCSITargetAttachFailed
"""
service_info = self._get_service_target(volume)
(ip, ipp, evs, port, _fs, tgtalias, secret) = service_info
try:
conn = self.backend.add_iscsi_conn(volume.name, _fs, port,
tgtalias,
connector['initiator'])
except processutils.ProcessExecutionError:
msg = (_("Error attaching volume %(vol)s. "
"Target limit might be reached!") % {'vol': volume.id})
LOG.error(msg)
raise exception.ISCSITargetAttachFailed(volume_id=volume.id)
hnas_portal = ip + ':' + ipp
lu_id = six.text_type(conn['lu_id'])
fulliqn = conn['iqn']
tgt = (hnas_portal + ',' + tgtalias + ',' +
volume.provider_location + ',' + evs + ',' +
port + ',' + lu_id)
LOG.info(_LI("initiate: connection %(tgt)s"), {'tgt': tgt})
properties = {}
properties['provider_location'] = tgt
properties['target_discovered'] = False
properties['target_portal'] = hnas_portal
properties['target_iqn'] = fulliqn
properties['target_lun'] = int(lu_id)
properties['volume_id'] = volume.id
properties['auth_username'] = connector['initiator']
if self.config['chap_enabled']:
properties['auth_method'] = 'CHAP'
properties['auth_password'] = secret
conn_info = {'driver_volume_type': 'iscsi', 'data': properties}
return conn_info
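# A sketch of the conn_info returned above with CHAP enabled (IQN,
# portal and secret are invented):
#
#   {'driver_volume_type': 'iscsi',
#    'data': {'target_portal': '192.0.2.10:3260',
#             'target_iqn': 'iqn.2014-12.example:evs1-tgt0',
#             'target_lun': 0, 'target_discovered': False,
#             'auth_method': 'CHAP',
#             'auth_username': <initiator IQN>,
#             'auth_password': <target secret>, ...}}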
@cinder_utils.synchronized('volume_mapping')
@cinder_utils.trace
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate a connection to a volume.
:param volume: dictionary volume reference
:param connector: dictionary connector reference
"""
service_info = self._get_service_target(volume)
(ip, ipp, evs, port, fs, tgtalias, secret) = service_info
lu_info = self.backend.check_lu(volume.name, fs)
self.backend.del_iscsi_conn(evs, tgtalias, lu_info['id'])
@cinder_utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
:param volume: dictionary volume reference
:param snapshot: dictionary snapshot reference
:returns: the provider location of the snapshot
"""
fs = self._get_service(volume)
self.backend.create_cloned_lu(snapshot.name, fs, volume.name)
return {'provider_location': self._get_provider_location(snapshot)}
@cinder_utils.trace
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: dictionary snapshot reference
:returns: the provider location of the snapshot
"""
fs = self._get_service(snapshot.volume)
self.backend.create_cloned_lu(snapshot.volume_name, fs, snapshot.name)
return {'provider_location': self._get_provider_location(snapshot)}
@cinder_utils.trace
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: dictionary snapshot reference
"""
fs = self._get_service(snapshot.volume)
self.backend.delete_lu(fs, snapshot.name)
def get_volume_stats(self, refresh=False):
"""Gets the volume driver stats.
:param refresh: if refresh is True, the driver_stats is updated
:returns: the driver stats
"""
if refresh:
self.driver_stats = self._get_stats()
return self.driver_stats
@cinder_utils.trace
def manage_existing_get_size(self, volume, existing_vol_ref):
"""Gets the size to manage_existing.
Returns the size of the volume to be managed by manage_existing.
:param volume: cinder volume to manage
:param existing_vol_ref: existing volume to take under management
:returns: the size of the volume to be managed, or raises an error
:raises: ManageExistingInvalidReference
"""
# Check if the reference is valid.
if 'source-name' not in existing_vol_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_vol_ref, reason=reason)
fs_label, vol_name = (
self._get_info_from_vol_ref(existing_vol_ref['source-name']))
LOG.debug("File System: %(fs_label)s "
"Volume name: %(vol_name)s.",
{'fs_label': fs_label, 'vol_name': vol_name})
if utils.check_already_managed_volume(vol_name):
raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)
lu_info = self.backend.get_existing_lu_info(vol_name, fs_label)
if lu_info != {}:
return lu_info['size']
else:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_vol_ref,
reason=_('Volume not found on configured storage backend. '
'If your volume name contains "/", please rename it '
'and try to manage again.'))
@cinder_utils.trace
def manage_existing(self, volume, existing_vol_ref):
"""Manages an existing volume.
Brings an existing backend storage object under Cinder management.
The driver verifies its existence and then renames it to the new
Cinder volume name. The existing volume reference is expected to
be in the format file_system/volume_name;
e.g., openstack/vol_to_manage
:param volume: cinder volume to manage
:param existing_vol_ref: driver specific information used to identify a
volume
:returns: the provider location of the volume managed
"""
LOG.info(_LI("Asked to manage ISCSI volume %(vol)s, with vol "
"ref %(ref)s."), {'vol': volume.id,
'ref': existing_vol_ref['source-name']})
fs_label, vol_name = (
self._get_info_from_vol_ref(existing_vol_ref['source-name']))
if volume.volume_type is not None:
self._check_pool_and_fs(volume, fs_label)
self.backend.rename_existing_lu(fs_label, vol_name, volume.name)
LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."),
{'name': volume.name})
return {'provider_location': self._get_provider_location(volume)}
@cinder_utils.trace
def unmanage(self, volume):
"""Unmanages a volume from cinder.
Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. A log entry
will be made to notify the admin that the volume is no longer being
managed.
:param volume: cinder volume to unmanage
"""
fslabel = self._get_service(volume)
new_name = 'unmanage-' + volume.name
vol_path = fslabel + '/' + volume.name
self.backend.rename_existing_lu(fslabel, volume.name, new_name)
LOG.info(_LI("The volume with path %(old)s is no longer being managed "
"by Cinder. However, it was not deleted and can be found "
"with the new name %(cr)s on backend."),
{'old': vol_path, 'cr': new_name})
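# E.g. (hypothetical name): unmanaging 'volume-1234' on 'fs-cinder'
# renames the LU to 'unmanage-volume-1234' and logs the old path
# 'fs-cinder/volume-1234'; the backing data itself is left untouched.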
def _get_provider_location(self, volume):
"""Gets the provider location of a given volume
:param volume: dictionary volume reference
:returns: the provider_location related to the volume
"""
return self.backend.get_version()['mac'] + '.' + volume.name
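# E.g., with an invented MAC address and volume name:
#
#   '83-68-96-AA-DA-5D' + '.' + 'volume-1234'
#   # -> '83-68-96-AA-DA-5D.volume-1234'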
@ -105,7 +105,7 @@ class HNASNFSDriver(nfs.NfsDriver):
# Trying to get HNAS configuration from cinder.conf
self.config = hnas_utils.read_cinder_conf(
self.configuration, 'nfs')
self.configuration)
# If the HNAS configuration is not set in cinder.conf, try to use
# the deprecated XML configuration file
@ -86,7 +86,7 @@ CONF = cfg.CONF
CONF.register_opts(drivers_common_opts)
def _check_conf_params(config, pool_name, dv_type, idx):
def _check_conf_params(config, pool_name, idx):
"""Validates if the configuration on cinder.conf is complete.
:param config: Dictionary with the driver configurations
@ -134,15 +134,6 @@ def _check_conf_params(config, pool_name, dv_type, idx):
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
if (dv_type == 'iscsi' and
config['services'][pool_name]['iscsi_ip'] is None):
msg = (_("The config parameter "
"hnas_svc%(idx)s_iscsi_ip is not set "
"in the cinder.conf. Note that you need to "
"have at least one pool configured.") % {'idx': idx})
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
def _xml_read(root, element, check=None):
"""Read an xml element.
@ -183,7 +174,7 @@ def read_xml_config(xml_config_file, svc_params, optional_params):
:param xml_config_file: string filename containing XML configuration
:param svc_params: parameters to configure the services
['volume_type', 'hdp', 'iscsi_ip']
['volume_type', 'hdp']
:param optional_params: parameters to configure that are not mandatory
['ssc_cmd', 'cluster_admin_ip0', 'chap_enabled']
"""
@ -208,13 +199,13 @@ def read_xml_config(xml_config_file, svc_params, optional_params):
LOG.error(msg)
raise exception.ConfigNotFound(message=msg)
# mandatory parameters for NFS and iSCSI
# mandatory parameters for NFS
config = {}
arg_prereqs = ['mgmt_ip0', 'username']
for req in arg_prereqs:
config[req] = _xml_read(root, req, 'check')
# optional parameters for NFS and iSCSI
# optional parameters for NFS
for req in optional_params:
config[req] = _xml_read(root, req)
if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None:
@ -279,7 +270,7 @@ def get_pool(config, volume):
return 'default'
def read_cinder_conf(config_opts, dv_type):
def read_cinder_conf(config_opts):
"""Reads cinder.conf
Gets the driver specific information set on cinder.conf configuration
@ -295,7 +286,7 @@ def read_cinder_conf(config_opts, dv_type):
config['services'] = {}
config['fs'] = {}
mandatory_parameters = ['username', 'password', 'mgmt_ip0']
optional_parameters = ['ssc_cmd', 'chap_enabled',
optional_parameters = ['ssc_cmd',
'ssh_port', 'cluster_admin_ip0',
'ssh_private_key']
@ -334,14 +325,9 @@ def read_cinder_conf(config_opts, dv_type):
config['services'][svc_pool_name]['hdp'] = svc_hdp
config['services'][svc_pool_name]['pool_name'] = svc_pool_name
if dv_type == 'iscsi':
svc_ip = (config_opts.safe_get(
'hnas_svc%(idx)s_iscsi_ip' % {'idx': idx}))
config['services'][svc_pool_name]['iscsi_ip'] = svc_ip
config['services'][svc_pool_name]['label'] = (
'svc_%(idx)s' % {'idx': idx})
# Checking to ensure that the pools configurations are complete
_check_conf_params(config, svc_pool_name, dv_type, idx)
_check_conf_params(config, svc_pool_name, idx)
return config
@ -147,12 +147,8 @@ CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.ibm.xiv_ds8k':
'cinder.volume.drivers.ibm.ibm_storage',
'cinder.volume.drivers.emc.scaleio':
@ -0,0 +1,4 @@
---
upgrade:
- The Hitachi NAS Platform iSCSI driver was marked as not supported in the
Ocata release and has now been removed.