HNAS drivers refactoring
The HNAS NFS and iSCSI drivers need refactoring to make the code more readable and easier to maintain. This patch refactors the HNAS driver code, deprecates the old paths, and increases unit test coverage.

Co-Authored-By: Alyson Rosa <alyson.rodrigues.rosa@gmail.com>
Also-By: Erlon Cruz <sombrafam@gmail.com>
DocImpact
Change-Id: I1b5d5155306c39c528af4037a5f54cf3d4dc1ffa
This commit is contained in:
parent 178ea591c2
commit 6c61bdda46
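For reviewers, the shape of the change is easier to see outside the diff: XML parsing moves out of hnas_nfs.py into the shared hnas_utils.read_config() helper, and the driver class is renamed from HDSNFSDriver to HNASNFSDriver. The following is a minimal sketch of the new initialization path, using only names that appear in the diffs below; the literal file path is just the option's default, not a required location.

    # Sketch only -- names and signatures taken from the diffs below,
    # not a drop-in snippet.
    from cinder.volume.drivers.hitachi import hnas_utils

    service_parameters = ['volume_type', 'hdp']
    optional_parameters = ['hnas_cmd', 'cluster_admin_ip0']

    # Per the diff, read_config() raises NotFound if the file is unreadable,
    # ConfigNotFound on XML parse errors, and ParameterNotFound when a
    # mandatory tag (mgmt_ip0, username, at least one svc_N block) is missing.
    config = hnas_utils.read_config('/opt/hds/hnas/cinder_nfs_conf.xml',
                                    service_parameters,
                                    optional_parameters)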
@@ -14,550 +14,490 @@
# under the License.
#

import os
import tempfile

import mock
import six
import os

from oslo_concurrency import processutils as putils
import socket

from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hnas_backend as backend
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
from cinder.volume.drivers import nfs as drivernfs
from cinder.volume.drivers import remotefs
from cinder.volume import volume_types
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume.drivers import nfs as base_nfs

SHARESCONF = """172.17.39.132:/cinder
172.17.39.133:/cinder"""

HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder</hdp>
  </svc_1>
</config>
"""

HNAS_WRONG_CONF1 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <volume_type>default</volume_type>
  <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
</config>
"""

HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
  </svc_1>
</config>
"""

HNAS_WRONG_CONF3 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username> </username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder</hdp>
  </svc_1>
</config>
"""

HNAS_WRONG_CONF4 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>super</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_4>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder</hdp>
  </svc_4>
</config>
"""

HNAS_FULL_CONF = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>super</username>
  <password>supervisor</password>
  <ssh_enabled>True</ssh_enabled>
  <ssh_port>2222</ssh_port>
  <chap_enabled>True</chap_enabled>
  <ssh_private_key>/etc/cinder/ssh_priv</ssh_private_key>
  <cluster_admin_ip0>10.0.0.1</cluster_admin_ip0>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder/silver </hdp>
  </svc_1>
  <svc_2>
    <volume_type>gold</volume_type>
    <hdp>172.17.39.133:/cinder/gold</hdp>
  </svc_2>
  <svc_3>
    <volume_type>platinum</volume_type>
    <hdp>172.17.39.133:/cinder/platinum</hdp>
  </svc_3>
</config>
"""


# The following information is passed on to tests, when creating a volume
_SERVICE = ('Test_hdp', 'Test_path', 'Test_label')
_SHARE = '172.17.39.132:/cinder'
_SHARE2 = '172.17.39.133:/cinder'
_EXPORT = '/cinder'
_VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
           'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190',
_VOLUME = {'name': 'cinder-volume',
           'id': fake.VOLUME_ID,
           'size': 128,
           'volume_type': 'silver',
           'volume_type_id': 'test',
           'metadata': [{'key': 'type',
                         'service_label': 'silver'}],
           'provider_location': None,
           'id': 'bcc48c61-9691-4e5f-897c-793686093190',
           'status': 'available',
           'host': 'host1@hnas-iscsi-backend#silver'}
_SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
               'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc',
               'size': 128,
               'volume_type': None,
               'provider_location': None,
               'volume_size': 128,
               'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
               'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191',
               'host': 'host1@hnas-iscsi-backend#silver'}
           'host': 'host1@hnas-nfs-backend#default',
           'volume_type': 'default',
           'provider_location': 'hnas'}

_VOLUME_NFS = {'name': 'volume-61da3-8d23-4bb9-3136-ca819d89e7fc',
               'id': '61da3-8d23-4bb9-3136-ca819d89e7fc',
               'size': 4,
               'metadata': [{'key': 'type',
                             'service_label': 'silver'}],
               'volume_type': 'silver',
               'volume_type_id': 'silver',
               'provider_location': '172.24.44.34:/silver/',
               'volume_size': 128,
               'host': 'host1@hnas-nfs#silver'}

GET_ID_VOL = {
    ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME],
    ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME]
_SNAPSHOT = {
    'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
    'id': fake.SNAPSHOT_ID,
    'size': 128,
    'volume_type': None,
    'provider_location': None,
    'volume_size': 128,
    'volume': _VOLUME,
    'volume_name': _VOLUME['name'],
    'host': 'host1@hnas-iscsi-backend#silver',
    'volume_type_id': fake.VOLUME_TYPE_ID,
}


def id_to_vol(arg):
    return GET_ID_VOL.get(arg)


class SimulatedHnasBackend(object):
    """Simulation Back end. Talks to HNAS."""

    # these attributes are shared across object instances
    start_lun = 0

    def __init__(self):
        self.type = 'HNAS'
        self.out = ''

    def file_clone(self, cmd, ip0, user, pw, fslabel, source_path,
                   target_path):
        return ""

    def get_version(self, ver, cmd, ip0, user, pw):
        self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \
                   "version: 11.2.3319.09 LU: 256 " \
                   "RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01"
        return self.out

    def get_hdp_info(self, ip0, user, pw):
        self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 " \
                   "Normal fs1\n" \
                   "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 " \
                   "Normal fs2"
        return self.out

    def get_nfs_info(self, cmd, ip0, user, pw):
        self.out = "Export: /cinder Path: /volumes HDP: fs1 FSID: 1024 " \
                   "EVS: 1 IPS: 172.17.39.132\n" \
                   "Export: /cinder Path: /volumes HDP: fs2 FSID: 1025 " \
                   "EVS: 1 IPS: 172.17.39.133"
        return self.out


class HDSNFSDriverTest(test.TestCase):
class HNASNFSDriverTest(test.TestCase):
    """Test HNAS NFS volume driver."""

    def __init__(self, *args, **kwargs):
        super(HDSNFSDriverTest, self).__init__(*args, **kwargs)
        super(HNASNFSDriverTest, self).__init__(*args, **kwargs)

    @mock.patch.object(nfs, 'factory_bend')
    def setUp(self, m_factory_bend):
        super(HDSNFSDriverTest, self).setUp()
    def instantiate_snapshot(self, snap):
        snap = snap.copy()
        snap['volume'] = fake_volume.fake_volume_obj(
            None, **snap['volume'])
        snapshot = fake_snapshot.fake_snapshot_obj(
            None, expected_attrs=['volume'], **snap)
        return snapshot

        self.backend = SimulatedHnasBackend()
        m_factory_bend.return_value = self.backend
    def setUp(self):
        super(HNASNFSDriverTest, self).setUp()
        self.context = context.get_admin_context()

        self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
        self.addCleanup(self.config_file.close)
        self.config_file.write(HNASCONF)
        self.config_file.flush()
        self.volume = fake_volume.fake_volume_obj(
            self.context,
            **_VOLUME)

        self.shares_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
        self.addCleanup(self.shares_file.close)
        self.shares_file.write(SHARESCONF)
        self.shares_file.flush()
        self.snapshot = self.instantiate_snapshot(_SNAPSHOT)

        self.volume_type = fake_volume.fake_volume_type_obj(
            None,
            **{'name': 'silver'}
        )
        self.clone = fake_volume.fake_volume_obj(
            None,
            **{'id': fake.VOLUME2_ID,
               'size': 128,
               'host': 'host1@hnas-nfs-backend#default',
               'volume_type': 'default',
               'provider_location': 'hnas'})

        # xml parsed from utils
        self.parsed_xml = {
            'username': 'supervisor',
            'password': 'supervisor',
            'hnas_cmd': 'ssc',
            'ssh_port': '22',
            'services': {
                'default': {
                    'hdp': '172.24.49.21:/fs-cinder',
                    'volume_type': 'default',
                    'label': 'svc_0',
                    'ctl': '1',
                    'export': {
                        'fs': 'fs-cinder',
                        'path': '/export-cinder/volume'
                    }
                },
            },
            'cluster_admin_ip0': None,
            'ssh_private_key': None,
            'chap_enabled': 'True',
            'mgmt_ip0': '172.17.44.15',
            'ssh_enabled': None
        }

        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.hds_hnas_nfs_config_file = 'fake.xml'

        self.mock_object(hnas_utils, 'read_config',
                         mock.Mock(return_value=self.parsed_xml))

        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.max_over_subscription_ratio = 20.0
        self.configuration.reserved_percentage = 0
        self.configuration.hds_hnas_nfs_config_file = self.config_file.name
        self.configuration.nfs_shares_config = self.shares_file.name
        self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt'
        self.configuration.nfs_mount_options = None
        self.configuration.nas_host = None
        self.configuration.nas_share_path = None
        self.configuration.nas_mount_options = None
        self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml'
        self.configuration.nfs_shares_config = 'fake_nfs_share.xml'
        self.configuration.num_shell_tries = 2

        self.driver = nfs.HDSNFSDriver(configuration=self.configuration)
        self.driver.do_setup("")
        self.driver = nfs.HNASNFSDriver(configuration=self.configuration)

    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os, 'access')
    def test_read_config(self, m_access, m_open):
        # Test exception when file is not found
        m_access.return_value = False
        m_open.return_value = six.StringIO(HNASCONF)
        self.assertRaises(exception.NotFound, nfs._read_config, '')
    def test_check_pool_and_share_mismatch_exception(self):
        # passing a share that does not exist in the config should raise an
        # exception
        nfs_shares = '172.24.49.21:/nfs_share'

        # Test exception when config file has parsing errors
        # due to missing <svc> tag
        m_access.return_value = True
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF1)
        self.assertRaises(exception.ConfigNotFound, nfs._read_config, '')

        # Test exception when config file has parsing errors
        # due to missing <hdp> tag
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF2)
        self.configuration.hds_hnas_iscsi_config_file = ''
        self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')

        # Test exception when config file has parsing errors
        # due to blank tag
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF3)
        self.configuration.hds_hnas_iscsi_config_file = ''
        self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')

        # Test when config file has parsing errors due invalid svc_number
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF4)
        self.configuration.hds_hnas_iscsi_config_file = ''
        config = nfs._read_config('')
        self.assertEqual(1, len(config['services']))

        # Test config with full options
        # due invalid svc_number
        m_open.return_value = six.StringIO(HNAS_FULL_CONF)
        self.configuration.hds_hnas_iscsi_config_file = ''
        config = nfs._read_config('')
        self.assertEqual(4, len(config['services']))

    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_snapshot(self, m_get_volume_location, m_get_export_path,
                             m_get_provider_location, m_id_to_vol):
        svol = _SNAPVOLUME.copy()
        m_id_to_vol.return_value = svol

        m_get_provider_location.return_value = _SHARE
        m_get_volume_location.return_value = _SHARE
        m_get_export_path.return_value = _EXPORT

        loc = self.driver.create_snapshot(svol)
        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))

    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_cloned_volume(self, m_get_volume_location,
                                  m_get_provider_location, m_id_to_vol,
                                  m_get_service):
        vol = _VOLUME.copy()
        svol = _SNAPVOLUME.copy()

        m_get_service.return_value = _SERVICE
        m_get_provider_location.return_value = _SHARE
        m_get_volume_location.return_value = _SHARE

        loc = self.driver.create_cloned_volume(vol, svol)

        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))

    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    @mock.patch.object(nfs.HDSNFSDriver, 'extend_volume')
    def test_create_cloned_volume_larger(self, m_extend_volume,
                                         m_get_volume_location,
                                         m_get_provider_location,
                                         m_id_to_vol, m_get_service):
        vol = _VOLUME.copy()
        svol = _SNAPVOLUME.copy()

        m_get_service.return_value = _SERVICE
        m_get_provider_location.return_value = _SHARE
        m_get_volume_location.return_value = _SHARE

        svol['size'] = 256

        loc = self.driver.create_cloned_volume(svol, vol)

        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))
        m_extend_volume.assert_called_once_with(svol, svol['size'])

    @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted')
    @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_volume(self, m_get_volume_location,
                           m_get_provider_location, m_id_to_vol,
                           m_do_create_volume, m_ensure_shares_mounted):

        vol = _VOLUME.copy()

        m_get_provider_location.return_value = _SHARE2
        m_get_volume_location.return_value = _SHARE2

        loc = self.driver.create_volume(vol)

        out = "{'provider_location': \'" + _SHARE2 + "'}"
        self.assertEqual(str(loc), out)

    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_volume_not_present')
    def test_delete_snapshot(self, m_volume_not_present,
                             m_get_provider_location, m_id_to_vol):
        svol = _SNAPVOLUME.copy()

        m_id_to_vol.return_value = svol
        m_get_provider_location.return_value = _SHARE

        m_volume_not_present.return_value = True

        self.driver.delete_snapshot(svol)
        self.assertIsNone(svol['provider_location'])

    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_volume_from_snapshot(self, m_get_volume_location,
                                         m_get_export_path,
                                         m_get_provider_location, m_id_to_vol,
                                         m_get_service):
        vol = _VOLUME.copy()
        svol = _SNAPVOLUME.copy()

        m_get_service.return_value = _SERVICE
        m_get_provider_location.return_value = _SHARE
        m_get_export_path.return_value = _EXPORT
        m_get_volume_location.return_value = _SHARE

        loc = self.driver.create_volume_from_snapshot(vol, svol)
        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       return_value={'key': 'type', 'service_label': 'silver'})
    def test_get_pool(self, m_ext_spec):
        vol = _VOLUME.copy()

        self.assertEqual('silver', self.driver.get_pool(vol))

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.24.44.34')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing(self, m_ensure_shares, m_resolve, m_mount_point,
                             m_isfile, m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        with mock.patch.object(self.driver, '_execute'):
            out = self.driver.manage_existing(vol, existing_vol_ref)

        loc = {'provider_location': '172.17.39.133:/cinder'}
        self.assertEqual(loc, out)

        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_move_fails(self, m_ensure_shares, m_resolve,
                                        m_mount_point, m_isfile,
                                        m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver._execute = mock.Mock(side_effect=OSError)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing, vol, existing_vol_ref)
        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_invalid_pool(self, m_ensure_shares, m_resolve,
                                          m_mount_point, m_isfile,
                                          m_get_extra_specs):
        vol = _VOLUME_NFS.copy()
        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'gold'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver._execute = mock.Mock(side_effect=OSError)
        self.mock_object(hnas_utils, 'get_pool',
                         mock.Mock(return_value='default'))

        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver.manage_existing, vol, existing_vol_ref)
        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()
                          self.driver._check_pool_and_share, self.volume,
                          nfs_shares)

    @mock.patch.object(utils, 'get_file_size', return_value=4000000000)
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_get_size(self, m_ensure_shares, m_resolve,
                                      m_mount_point,
                                      m_isfile, m_file_size):
    def test_check_pool_and_share_type_mismatch_exception(self):
        nfs_shares = '172.24.49.21:/fs-cinder'
        self.volume.host = 'host1@hnas-nfs-backend#gold'

        vol = _VOLUME_NFS.copy()
        # returning a pool different from 'default' should raise an exception
        self.mock_object(hnas_utils, 'get_pool',
                         mock.Mock(return_value='default'))

        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_share, self.volume,
                          nfs_shares)

        out = self.driver.manage_existing_get_size(vol, existing_vol_ref)
    def test_do_setup(self):
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }
        export_list = [
            {'fs': 'fs-cinder',
             'name': '/fs-cinder',
             'free': 228.0,
             'path': '/fs-cinder',
             'evs': ['172.24.49.21'],
             'size': 250.0}
        ]

        self.assertEqual(vol['size'], out)
        m_file_size.assert_called_once_with('/mnt/gold/volume-test')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()
        showmount = "Export list for 172.24.49.21: \n\
/fs-cinder * \n\
/shares/9bcf0bcc-8cc8-437e38bcbda9 127.0.0.1,10.1.0.5,172.24.44.141 \n\
"

    @mock.patch.object(utils, 'get_file_size', return_value='badfloat')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_get_size_error(self, m_ensure_shares, m_resolve,
                                            m_mount_point,
                                            m_isfile, m_file_size):
        vol = _VOLUME_NFS.copy()
        self.mock_object(backend.HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        self.mock_object(self.driver, '_load_shares_config')
        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
                         mock.Mock(return_value=export_list))
        self.mock_object(self.driver, '_execute',
                         mock.Mock(return_value=(showmount, '')))

        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver.do_setup(None)

        self.driver._execute.assert_called_with('showmount', '-e',
                                                '172.24.49.21')
        self.assertTrue(backend.HNASSSHBackend.get_export_list.called)

    def test_do_setup_execute_exception(self):
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }

        export_list = [
            {'fs': 'fs-cinder',
             'name': '/fs-cinder',
             'free': 228.0,
             'path': '/fs-cinder',
             'evs': ['172.24.49.21'],
             'size': 250.0}
        ]

        self.mock_object(backend.HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        self.mock_object(self.driver, '_load_shares_config')
        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
                         mock.Mock(return_value=export_list))
        self.mock_object(self.driver, '_execute',
                         mock.Mock(side_effect=putils.ProcessExecutionError))

        self.assertRaises(putils.ProcessExecutionError, self.driver.do_setup,
                          None)

    def test_do_setup_missing_export(self):
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }
        export_list = [
            {'fs': 'fs-cinder',
             'name': '/wrong-fs',
             'free': 228.0,
             'path': '/fs-cinder',
             'evs': ['172.24.49.21'],
             'size': 250.0}
        ]

        showmount = "Export list for 172.24.49.21: \n\
/fs-cinder * \n\
"

        self.mock_object(backend.HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        self.mock_object(self.driver, '_load_shares_config')
        self.mock_object(backend.HNASSSHBackend, 'get_export_list',
                         mock.Mock(return_value=export_list))
        self.mock_object(self.driver, '_execute',
                         mock.Mock(return_value=(showmount, '')))

        self.assertRaises(exception.InvalidParameterValue,
                          self.driver.do_setup, None)

    def test_create_volume(self):
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(self.driver, '_do_create_volume')

        out = self.driver.create_volume(self.volume)

        self.assertEqual('172.24.49.21:/fs-cinder', out['provider_location'])
        self.assertTrue(self.driver._ensure_shares_mounted.called)

    def test_create_volume_exception(self):
        # pool 'original' does not exist in the services configuration
        self.volume.host = 'host1@hnas-nfs-backend#original'

        self.mock_object(self.driver, '_ensure_shares_mounted')

        self.assertRaises(exception.ParameterNotFound,
                          self.driver.create_volume, self.volume)

    def test_create_cloned_volume(self):
        self.volume.size = 150

        self.mock_object(self.driver, 'extend_volume')
        self.mock_object(backend.HNASSSHBackend, 'file_clone')

        out = self.driver.create_cloned_volume(self.volume, self.clone)

        self.assertEqual('hnas', out['provider_location'])

    def test_get_volume_stats(self):
        self.driver.pools = [{'pool_name': 'default',
                              'service_label': 'default',
                              'fs': '172.24.49.21:/easy-stack'},
                             {'pool_name': 'cinder_svc',
                              'service_label': 'cinder_svc',
                              'fs': '172.24.49.26:/MNT-CinderTest2'}]

        self.mock_object(self.driver, '_update_volume_stats')
        self.mock_object(self.driver, '_get_capacity_info',
                         mock.Mock(return_value=(150, 50, 100)))

        out = self.driver.get_volume_stats()

        self.assertEqual('5.0.0', out['driver_version'])
        self.assertEqual('Hitachi', out['vendor_name'])
        self.assertEqual('NFS', out['storage_protocol'])

    def test_create_volume_from_snapshot(self):
        self.mock_object(backend.HNASSSHBackend, 'file_clone')

        self.driver.create_volume_from_snapshot(self.volume, self.snapshot)

    def test_create_snapshot(self):
        self.mock_object(backend.HNASSSHBackend, 'file_clone')
        self.driver.create_snapshot(self.snapshot)

    def test_delete_snapshot(self):
        self.mock_object(self.driver, '_execute')

        self.driver.delete_snapshot(self.snapshot)

    def test_delete_snapshot_execute_exception(self):
        self.mock_object(self.driver, '_execute',
                         mock.Mock(side_effect=putils.ProcessExecutionError))

        self.driver.delete_snapshot(self.snapshot)

    def test_extend_volume(self):
        share_mount_point = '/fs-cinder'
        data = image_utils.imageutils.QemuImgInfo
        data.virtual_size = 200 * 1024 ** 3

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         mock.Mock(return_value=share_mount_point))
        self.mock_object(image_utils, 'qemu_img_info',
                         mock.Mock(return_value=data))

        self.driver.extend_volume(self.volume, 200)

        self.driver._get_mount_point_for_share.assert_called_with('hnas')

    def test_extend_volume_resizing_exception(self):
        share_mount_point = '/fs-cinder'
        data = image_utils.imageutils.QemuImgInfo
        data.virtual_size = 2048 ** 3

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         mock.Mock(return_value=share_mount_point))
        self.mock_object(image_utils, 'qemu_img_info',
                         mock.Mock(return_value=data))

        self.mock_object(image_utils, 'resize_image')

        self.assertRaises(exception.InvalidResults,
                          self.driver.extend_volume, self.volume, 200)

    def test_manage_existing(self):
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(os.path, 'isfile', mock.Mock(return_value=True))
        self.mock_object(self.driver, '_get_mount_point_for_share',
                         mock.Mock(return_value='/fs-cinder/cinder-volume'))
        self.mock_object(utils, 'resolve_hostname',
                         mock.Mock(return_value='172.24.49.21'))
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(self.driver, '_execute')

        out = self.driver.manage_existing(self.volume, existing_vol_ref)

        loc = {'provider_location': '172.24.49.21:/fs-cinder'}
        self.assertEqual(loc, out)

        os.path.isfile.assert_called_once_with('/fs-cinder/cinder-volume/')
        self.driver._get_mount_point_for_share.assert_called_once_with(
            '172.24.49.21:/fs-cinder')
        utils.resolve_hostname.assert_called_with('172.24.49.21')
        self.driver._ensure_shares_mounted.assert_called_once_with()

    def test_manage_existing_name_matches(self):
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         mock.Mock(return_value=('172.24.49.21:/fs-cinder',
                                                 '/mnt/silver',
                                                 self.volume.name)))

        out = self.driver.manage_existing(self.volume, existing_vol_ref)

        loc = {'provider_location': '172.24.49.21:/fs-cinder'}
        self.assertEqual(loc, out)

    def test_manage_existing_exception(self):
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         mock.Mock(return_value=('172.24.49.21:/fs-cinder',
                                                 '/mnt/silver',
                                                 'cinder-volume')))
        self.mock_object(self.driver, '_execute',
                         mock.Mock(side_effect=putils.ProcessExecutionError))

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_get_size, vol,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)
        m_file_size.assert_called_once_with('/mnt/gold/volume-test')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    def test_manage_existing_get_size_without_source_name(self):
        vol = _VOLUME.copy()
        existing_vol_ref = {
            'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'}
    def test_manage_existing_missing_source_name(self):
        # empty source-name should raise an exception
        existing_vol_ref = {}

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, vol,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)

    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    def test_unmanage(self, m_mount_point):
        with mock.patch.object(self.driver, '_execute'):
            vol = _VOLUME_NFS.copy()
            self.driver.unmanage(vol)
    def test_manage_existing_missing_volume_in_backend(self):
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}

        m_mount_point.assert_called_once_with('172.24.44.34:/silver/')
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(utils, 'resolve_hostname',
                         mock.Mock(side_effect=['172.24.49.21',
                                                '172.24.49.22']))

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing, self.volume,
                          existing_vol_ref)

    def test_manage_existing_get_size(self):
        existing_vol_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
        }
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
        expected_size = 1

        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(utils, 'resolve_hostname',
                         mock.Mock(return_value='172.24.49.21'))
        self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
                         mock.Mock(return_value='/mnt/silver'))
        self.mock_object(os.path, 'isfile',
                         mock.Mock(return_value=True))
        self.mock_object(utils, 'get_file_size',
                         mock.Mock(return_value=expected_size))

        out = self.driver.manage_existing_get_size(self.volume,
                                                   existing_vol_ref)

        self.assertEqual(1, out)
        utils.get_file_size.assert_called_once_with(
            '/mnt/silver/cinder-volume')
        utils.resolve_hostname.assert_called_with('172.24.49.21')

    def test_manage_existing_get_size_exception(self):
        existing_vol_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
        }
        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']

        self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
                         mock.Mock(return_value=('172.24.49.21:/fs-cinder',
                                                 '/mnt/silver',
                                                 'cinder-volume')))

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)

    def test_manage_existing_get_size_resolving_hostname_exception(self):
        existing_vol_ref = {
            'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
        }

        self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']

        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(utils, 'resolve_hostname',
                         mock.Mock(side_effect=socket.gaierror))

        self.assertRaises(socket.gaierror,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)

    def test_unmanage(self):
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
        vol_str = 'volume-' + self.volume.id
        vol_path = os.path.join(path, vol_str)
        new_path = os.path.join(path, 'unmanage-' + vol_str)

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         mock.Mock(return_value=path))
        self.mock_object(self.driver, '_execute')

        self.driver.unmanage(self.volume)

        self.driver._execute.assert_called_with('mv', vol_path, new_path,
                                                run_as_root=False,
                                                check_exit_code=True)
        self.driver._get_mount_point_for_share.assert_called_with(
            self.volume.provider_location)

    def test_unmanage_volume_exception(self):
        path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'

        self.mock_object(self.driver, '_get_mount_point_for_share',
                         mock.Mock(return_value=path))
        self.mock_object(self.driver, '_execute',
                         mock.Mock(side_effect=ValueError))

        self.driver.unmanage(self.volume)
@@ -0,0 +1,259 @@
# Copyright (c) 2016 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
import os

from oslo_config import cfg
from xml.etree import ElementTree as ETree

from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_volume
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import volume_types


_VOLUME = {'name': 'cinder-volume',
           'id': fake_constants.VOLUME_ID,
           'size': 128,
           'host': 'host1@hnas-nfs-backend#default',
           'volume_type': 'default',
           'provider_location': 'hnas'}

service_parameters = ['volume_type', 'hdp']
optional_parameters = ['hnas_cmd', 'cluster_admin_ip0', 'iscsi_ip']

config_from_cinder_conf = {
    'username': 'supervisor',
    'fs': {'silver': 'silver',
           'easy-stack': 'easy-stack'},
    'ssh_port': '22',
    'chap_enabled': None,
    'cluster_admin_ip0': None,
    'ssh_private_key': None,
    'mgmt_ip0': '172.24.44.15',
    'services': {
        'default': {
            'label': u'svc_0',
            'volume_type': 'default',
            'hdp': 'easy-stack'},
        'FS-CinderDev1': {
            'label': u'svc_1',
            'volume_type': 'FS-CinderDev1',
            'hdp': 'silver'}},
    'password': 'supervisor',
    'hnas_cmd': 'ssc'}

valid_XML_str = '''
<config>
  <mgmt_ip0>172.24.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <ssh_enabled>False</ssh_enabled>
  <ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
  <svc_0>
    <volume_type>default</volume_type>
    <iscsi_ip>172.24.49.21</iscsi_ip>
    <hdp>easy-stack</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <iscsi_ip>172.24.49.32</iscsi_ip>
    <hdp>FS-CinderDev1</hdp>
  </svc_1>
</config>
'''

XML_no_authentication = '''
<config>
  <mgmt_ip0>172.24.44.15</mgmt_ip0>
  <username>supervisor</username>
  <ssh_enabled>False</ssh_enabled>
</config>
'''

XML_empty_authentication_param = '''
<config>
  <mgmt_ip0>172.24.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password></password>
  <ssh_enabled>False</ssh_enabled>
  <ssh_private_key></ssh_private_key>
  <svc_0>
    <volume_type>default</volume_type>
    <iscsi_ip>172.24.49.21</iscsi_ip>
    <hdp>easy-stack</hdp>
  </svc_0>
</config>
'''

# missing mgmt_ip0
XML_without_mandatory_params = '''
<config>
  <username>supervisor</username>
  <password>supervisor</password>
  <ssh_enabled>False</ssh_enabled>
  <svc_0>
    <volume_type>default</volume_type>
    <iscsi_ip>172.24.49.21</iscsi_ip>
    <hdp>easy-stack</hdp>
  </svc_0>
</config>
'''

XML_no_services_configured = '''
<config>
  <mgmt_ip0>172.24.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <ssh_enabled>False</ssh_enabled>
  <ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
</config>
'''

parsed_xml = {'username': 'supervisor', 'password': 'supervisor',
              'hnas_cmd': 'ssc', 'iscsi_ip': None, 'ssh_port': '22',
              'fs': {'easy-stack': 'easy-stack',
                     'FS-CinderDev1': 'FS-CinderDev1'},
              'cluster_admin_ip0': None,
              'ssh_private_key': '/home/ubuntu/.ssh/id_rsa',
              'services': {
                  'default': {'hdp': 'easy-stack', 'volume_type': 'default',
                              'label': 'svc_0'},
                  'silver': {'hdp': 'FS-CinderDev1', 'volume_type': 'silver',
                             'label': 'svc_1'}},
              'mgmt_ip0': '172.24.44.15'}

valid_XML_etree = ETree.XML(valid_XML_str)
invalid_XML_etree_no_authentication = ETree.XML(XML_no_authentication)
invalid_XML_etree_empty_parameter = ETree.XML(XML_empty_authentication_param)
invalid_XML_etree_no_mandatory_params = ETree.XML(XML_without_mandatory_params)
invalid_XML_etree_no_service = ETree.XML(XML_no_services_configured)

CONF = cfg.CONF


class HNASUtilsTest(test.TestCase):

    def __init__(self, *args, **kwargs):
        super(HNASUtilsTest, self).__init__(*args, **kwargs)

    def setUp(self):
        super(HNASUtilsTest, self).setUp()
        self.context = context.get_admin_context()
        self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
        self.volume_type = (fake_volume.fake_volume_type_obj(None, **{
            'id': fake_constants.VOLUME_TYPE_ID, 'name': 'silver'}))

    def test_read_config(self):

        self.mock_object(os, 'access', mock.Mock(return_value=True))
        self.mock_object(ETree, 'parse',
                         mock.Mock(return_value=ETree.ElementTree))
        self.mock_object(ETree.ElementTree, 'getroot',
                         mock.Mock(return_value=valid_XML_etree))

        xml_path = 'xml_file_found'
        out = hnas_utils.read_config(xml_path,
                                     service_parameters,
                                     optional_parameters)

        self.assertEqual(parsed_xml, out)

    def test_read_config_parser_error(self):
        xml_file = 'hnas_nfs.xml'
        self.mock_object(os, 'access', mock.Mock(return_value=True))
        self.mock_object(ETree, 'parse',
                         mock.Mock(side_effect=ETree.ParseError))

        self.assertRaises(exception.ConfigNotFound, hnas_utils.read_config,
                          xml_file, service_parameters, optional_parameters)

    def test_read_config_not_found(self):
        self.mock_object(os, 'access', mock.Mock(return_value=False))

        xml_path = 'xml_file_not_found'
        self.assertRaises(exception.NotFound, hnas_utils.read_config,
                          xml_path, service_parameters, optional_parameters)

    def test_read_config_without_services_configured(self):
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', mock.Mock(return_value=True))
        self.mock_object(ETree, 'parse',
                         mock.Mock(return_value=ETree.ElementTree))
        self.mock_object(ETree.ElementTree, 'getroot',
                         mock.Mock(return_value=invalid_XML_etree_no_service))

        self.assertRaises(exception.ParameterNotFound, hnas_utils.read_config,
                          xml_file, service_parameters, optional_parameters)

    def test_read_config_empty_authentication_parameter(self):
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', mock.Mock(return_value=True))
        self.mock_object(ETree, 'parse',
                         mock.Mock(return_value=ETree.ElementTree))
        self.mock_object(ETree.ElementTree, 'getroot',
                         mock.Mock(return_value=
                                   invalid_XML_etree_empty_parameter))

        self.assertRaises(exception.ParameterNotFound, hnas_utils.read_config,
                          xml_file, service_parameters, optional_parameters)

    def test_read_config_mandatory_parameters_missing(self):
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', mock.Mock(return_value=True))
        self.mock_object(ETree, 'parse',
                         mock.Mock(return_value=ETree.ElementTree))
        self.mock_object(ETree.ElementTree, 'getroot',
                         mock.Mock(return_value=
                                   invalid_XML_etree_no_mandatory_params))

        self.assertRaises(exception.ParameterNotFound, hnas_utils.read_config,
                          xml_file, service_parameters, optional_parameters)

    def test_read_config_XML_without_authentication_parameter(self):
        xml_file = 'hnas_nfs.xml'

        self.mock_object(os, 'access', mock.Mock(return_value=True))
        self.mock_object(ETree, 'parse',
                         mock.Mock(return_value=ETree.ElementTree))
        self.mock_object(ETree.ElementTree, 'getroot',
                         mock.Mock(return_value=
                                   invalid_XML_etree_no_authentication))

        self.assertRaises(exception.ConfigNotFound, hnas_utils.read_config,
                          xml_file, service_parameters, optional_parameters)

    def test_get_pool_with_vol_type(self):
        self.mock_object(volume_types, 'get_volume_type_extra_specs',
                         mock.Mock(return_value={'service_label': 'silver'}))

        self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID
        self.volume.volume_type = self.volume_type

        out = hnas_utils.get_pool(parsed_xml, self.volume)

        self.assertEqual('silver', out)

    def test_get_pool_without_vol_type(self):
        out = hnas_utils.get_pool(parsed_xml, self.volume)
        self.assertEqual('default', out)
@ -14,21 +14,18 @@
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Volume driver for HDS HNAS NFS storage.
|
||||
Volume driver for HNAS NFS storage.
|
||||
"""
|
||||
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
import six
|
||||
import socket
|
||||
import time
|
||||
from xml.etree import ElementTree as ETree
|
||||
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import units
|
||||
import six
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _, _LE, _LI
|
||||
@ -36,19 +33,19 @@ from cinder.image import image_utils
|
||||
from cinder import interface
|
||||
from cinder import utils as cutils
|
||||
from cinder.volume.drivers.hitachi import hnas_backend
|
||||
from cinder.volume.drivers.hitachi import hnas_utils
|
||||
from cinder.volume.drivers import nfs
|
||||
from cinder.volume import utils
|
||||
from cinder.volume import volume_types
|
||||
|
||||
|
||||
HDS_HNAS_NFS_VERSION = '4.1.0'
|
||||
HNAS_NFS_VERSION = '5.0.0'
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
NFS_OPTS = [
|
||||
cfg.StrOpt('hds_hnas_nfs_config_file',
|
||||
default='/opt/hds/hnas/cinder_nfs_conf.xml',
|
||||
help='Configuration file for HDS NFS cinder plugin'), ]
|
||||
help='Configuration file for HNAS NFS cinder plugin'), ]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(NFS_OPTS)
|
||||
@ -56,151 +53,46 @@ CONF.register_opts(NFS_OPTS)
|
||||
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'ssh_port': '22'}
|
||||
|
||||
|
||||
def _xml_read(root, element, check=None):
|
||||
"""Read an xml element."""
|
||||
|
||||
val = root.findtext(element)
|
||||
|
||||
# mandatory parameter not found
|
||||
if val is None and check:
|
||||
raise exception.ParameterNotFound(param=element)
|
||||
|
||||
# tag not found
|
||||
if val is None:
|
||||
return None
|
||||
|
||||
svc_tag_pattern = re.compile("svc_.$")
|
||||
# tag found but empty parameter.
|
||||
if not val.strip():
|
||||
if svc_tag_pattern.search(element):
|
||||
return ""
|
||||
raise exception.ParameterNotFound(param=element)
|
||||
|
||||
LOG.debug(_LI("%(element)s: %(val)s"),
|
||||
{'element': element,
|
||||
'val': val if element != 'password' else '***'})
|
||||
|
||||
return val.strip()
|
||||
|
||||
|
||||
def _read_config(xml_config_file):
|
||||
"""Read hds driver specific xml config file.
|
||||
|
||||
:param xml_config_file: string filename containing XML configuration
|
||||
"""
|
||||
|
||||
if not os.access(xml_config_file, os.R_OK):
|
||||
msg = (_("Can't open config file: %s") % xml_config_file)
|
||||
raise exception.NotFound(message=msg)
|
||||
|
||||
try:
|
||||
root = ETree.parse(xml_config_file).getroot()
|
||||
except Exception:
|
||||
msg = (_("Error parsing config file: %s") % xml_config_file)
|
||||
raise exception.ConfigNotFound(message=msg)
|
||||
|
||||
# mandatory parameters
|
||||
config = {}
|
||||
arg_prereqs = ['mgmt_ip0', 'username']
|
||||
for req in arg_prereqs:
|
||||
config[req] = _xml_read(root, req, True)
|
||||
|
||||
# optional parameters
|
||||
opt_parameters = ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0']
|
||||
for req in opt_parameters:
|
||||
config[req] = _xml_read(root, req)
|
||||
|
||||
if config['ssh_enabled'] == 'True':
|
||||
config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True)
|
||||
config['password'] = _xml_read(root, 'password')
|
||||
config['ssh_port'] = _xml_read(root, 'ssh_port')
|
||||
if config['ssh_port'] is None:
|
||||
config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
|
||||
else:
|
||||
# password is mandatory when not using SSH
|
||||
config['password'] = _xml_read(root, 'password', True)
|
||||
|
||||
if config['hnas_cmd'] is None:
|
||||
config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd']
|
||||
|
||||
config['hdp'] = {}
|
||||
config['services'] = {}
|
||||
|
||||
# min one needed
|
||||
for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
|
||||
if _xml_read(root, svc) is None:
|
||||
continue
|
||||
service = {'label': svc}
|
||||
|
||||
# none optional
|
||||
for arg in ['volume_type', 'hdp']:
|
||||
service[arg] = _xml_read(root, svc + '/' + arg, True)
|
||||
config['services'][service['volume_type']] = service
|
||||
config['hdp'][service['hdp']] = service['hdp']
|
||||
|
||||
# at least one service required!
|
||||
if config['services'].keys() is None:
|
||||
raise exception.ParameterNotFound(param="No service found")
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def factory_bend(drv_config):
|
||||
"""Factory over-ride in self-tests."""
|
||||
|
||||
return hnas_backend.HnasBackend(drv_config)
|
||||
|
||||
|
||||
@interface.volumedriver
|
||||
class HDSNFSDriver(nfs.NfsDriver):
|
||||
class HNASNFSDriver(nfs.NfsDriver):
|
||||
"""Base class for Hitachi NFS driver.
|
||||
|
||||
Executes commands relating to Volumes.
|
||||
|
||||
.. code-block:: none
|
||||
Version history:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
Version 1.0.0: Initial driver version
|
||||
Version 2.2.0: Added support to SSH authentication
|
||||
Version 3.0.0: Added pool aware scheduling
|
||||
Version 4.0.0: Added manage/unmanage features
|
||||
Version 4.1.0: Fixed XML parser checks on blank options
|
||||
Version 5.0.0: Remove looping in driver initialization
|
||||
Code cleaning up
|
||||
New communication interface between the driver and HNAS
|
||||
Removed the option to use local SSC (ssh_enabled=False)
|
||||
Updated to use versioned objects
|
||||
Changed the class name to HNASNFSDriver
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# NOTE(vish): db is set by Manager
|
||||
self._execute = None
|
||||
self.context = None
|
||||
self.configuration = kwargs.get('configuration', None)
|
||||
|
||||
service_parameters = ['volume_type', 'hdp']
|
||||
optional_parameters = ['hnas_cmd', 'cluster_admin_ip0']
|
||||
|
||||
if self.configuration:
|
||||
self.configuration.append_config_values(NFS_OPTS)
|
||||
self.config = _read_config(
|
||||
self.configuration.hds_hnas_nfs_config_file)
|
||||
self.config = hnas_utils.read_config(
|
||||
self.configuration.hds_hnas_nfs_config_file,
|
||||
service_parameters,
|
||||
optional_parameters)
|
||||
|
||||
super(HDSNFSDriver, self).__init__(*args, **kwargs)
|
||||
self.bend = factory_bend(self.config)
|
||||
|
||||
def _array_info_get(self):
|
||||
"""Get array parameters."""
|
||||
|
||||
out = self.bend.get_version(self.config['hnas_cmd'],
|
||||
HDS_HNAS_NFS_VERSION,
|
||||
self.config['mgmt_ip0'],
|
||||
self.config['username'],
|
||||
self.config['password'])
|
||||
|
||||
inf = out.split()
|
||||
return inf[1], 'nfs_' + inf[1], inf[6]
|
||||
|
||||
def _id_to_vol(self, volume_id):
|
||||
"""Given the volume id, retrieve the volume object from database.
|
||||
|
||||
:param volume_id: string volume id
|
||||
"""
|
||||
|
||||
vol = self.db.volume_get(self.context, volume_id)
|
||||
|
||||
return vol
|
||||
super(HNASNFSDriver, self).__init__(*args, **kwargs)
|
||||
self.backend = hnas_backend.HNASSSHBackend(self.config)
|
||||
|
||||
     def _get_service(self, volume):
         """Get service parameters.
 
@@ -209,21 +101,24 @@ class HDSNFSDriver(nfs.NfsDriver):
         its type.
 
         :param volume: dictionary volume reference
         :returns: Tuple containing the service parameters (label,
            export path and export file system) or error if no configuration
            is found.
         :raises: ParameterNotFound
         """
-        LOG.debug("_get_service: volume: %s", volume)
-        label = utils.extract_host(volume['host'], level='pool')
+        LOG.debug("_get_service: volume: %(vol)s", {'vol': volume})
+        label = utils.extract_host(volume.host, level='pool')
 
         if label in self.config['services'].keys():
             svc = self.config['services'][label]
-            LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
-                     {'lbl': label, 'svc': svc['fslabel']})
-            service = (svc['hdp'], svc['path'], svc['fslabel'])
+            LOG.info(_LI("_get_service: %(lbl)s->%(svc)s"),
+                     {'lbl': label, 'svc': svc['export']['fs']})
+            service = (svc['hdp'], svc['export']['path'], svc['export']['fs'])
         else:
-            LOG.info(_LI("Available services: %s"),
-                     self.config['services'].keys())
-            LOG.error(_LE("No configuration found for service: %s"),
-                      label)
+            LOG.info(_LI("Available services: %(svc)s"),
+                     {'svc': self.config['services'].keys()})
+            LOG.error(_LE("No configuration found for service: %(lbl)s"),
+                      {'lbl': label})
             raise exception.ParameterNotFound(param=label)
 
         return service
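
# A minimal sketch (illustration only, not from the patch): once do_setup()
# has attached the matching HNAS export to a service, the lookup above yields
# a (hdp, export path, file system) tuple. Values here are hypothetical:
svc = {'hdp': '10.0.0.1:/cinder',
       'export': {'path': '/cinder', 'fs': 'fs-cinder'}}
service = (svc['hdp'], svc['export']['path'], svc['export']['fs'])
# -> ('10.0.0.1:/cinder', '/cinder', 'fs-cinder')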
 
@@ -233,30 +128,26 @@ class HDSNFSDriver(nfs.NfsDriver):
 
         :param volume: dictionary volume reference
         :param new_size: int size in GB to extend
         :raises: InvalidResults
         """
-        nfs_mount = self._get_provider_location(volume['id'])
-        path = self._get_volume_path(nfs_mount, volume['name'])
+        nfs_mount = volume.provider_location
+        path = self._get_volume_path(nfs_mount, volume.name)
 
         # Resize the image file on share to new size.
         LOG.debug("Checking file for resize")
 
-        if self._is_file_size_equal(path, new_size):
-            return
-        else:
-            LOG.info(_LI("Resizing file to %sG"), new_size)
+        if not self._is_file_size_equal(path, new_size):
+            LOG.info(_LI("Resizing file to %(sz)sG"), {'sz': new_size})
             image_utils.resize_image(path, new_size)
-            if self._is_file_size_equal(path, new_size):
-                LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
-                         {'id': volume['id'], 'size': new_size})
-                return
-            else:
-                raise exception.InvalidResults(
-                    _("Resizing image file failed."))
+
+        if self._is_file_size_equal(path, new_size):
+            LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
+                     {'id': volume.id, 'size': new_size})
+        else:
+            raise exception.InvalidResults(_("Resizing image file failed."))
 
     def _is_file_size_equal(self, path, size):
         """Checks if file size at path is equal to size."""
         data = image_utils.qemu_img_info(path)
         virt_size = data.virtual_size / units.Gi
 
@@ -266,22 +157,16 @@ class HDSNFSDriver(nfs.NfsDriver):
         return False
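
# A minimal sketch (illustration only, not from the patch): the check above
# compares the virtual size reported by qemu-img, converted to GiB, against
# the requested size. Standalone equivalent with a hypothetical byte count:
from oslo_utils import units

virtual_size = 2 * units.Gi   # bytes, as qemu_img_info() would report
size = 2                      # requested size in GB
assert virtual_size / units.Gi == size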
 
     def create_volume_from_snapshot(self, volume, snapshot):
-        """Creates a volume from a snapshot."""
-        LOG.debug("create_volume_from %s", volume)
-        vol_size = volume['size']
-        snap_size = snapshot['volume_size']
+        """Creates a volume from a snapshot.
 
-        if vol_size != snap_size:
-            msg = _("Cannot create volume of size %(vol_size)s from "
-                    "snapshot of size %(snap_size)s")
-            msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size}
-            raise exception.CinderException(msg % msg_fmt)
+        :param volume: volume to be created
+        :param snapshot: source snapshot
+        :returns: the provider_location of the volume created
+        """
+        LOG.debug("create_volume_from %(vol)s", {'vol': volume})
 
-        self._clone_volume(snapshot['name'],
-                           volume['name'],
-                           snapshot['volume_id'])
-        share = self._get_volume_location(snapshot['volume_id'])
+        self._clone_volume(snapshot.volume, volume.name, snapshot.name)
+        share = snapshot.volume.provider_location
 
         return {'provider_location': share}
 
@@ -289,13 +174,12 @@ class HDSNFSDriver(nfs.NfsDriver):
         """Create a snapshot.
 
         :param snapshot: dictionary snapshot reference
+        :returns: the provider_location of the snapshot created
         """
-        self._clone_volume(snapshot['volume_name'],
-                           snapshot['name'],
-                           snapshot['volume_id'])
-        share = self._get_volume_location(snapshot['volume_id'])
-        LOG.debug('Share: %s', share)
+        self._clone_volume(snapshot.volume, snapshot.name)
+
+        share = snapshot.volume.provider_location
+        LOG.debug('Share: %(shr)s', {'shr': share})
 
         # returns the mount point (not path)
         return {'provider_location': share}
 
@@ -306,133 +190,81 @@ class HDSNFSDriver(nfs.NfsDriver):
 
         :param snapshot: dictionary snapshot reference
         """
-        nfs_mount = self._get_provider_location(snapshot['volume_id'])
+        nfs_mount = snapshot.volume.provider_location
 
-        if self._volume_not_present(nfs_mount, snapshot['name']):
+        if self._volume_not_present(nfs_mount, snapshot.name):
             return True
 
-        self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']),
+        self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
                       run_as_root=True)
 
-    def _get_volume_location(self, volume_id):
-        """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>.
-
-        :param volume_id: string volume id
-        """
-        nfs_server_ip = self._get_host_ip(volume_id)
-        export_path = self._get_export_path(volume_id)
-
-        return nfs_server_ip + ':' + export_path
-
-    def _get_provider_location(self, volume_id):
-        """Returns provider location for given volume.
-
-        :param volume_id: string volume id
-        """
-        volume = self.db.volume_get(self.context, volume_id)
-
-        # same format as _get_volume_location
-        return volume.provider_location
-
-    def _get_host_ip(self, volume_id):
-        """Returns IP address for the given volume.
-
-        :param volume_id: string volume id
-        """
-        return self._get_provider_location(volume_id).split(':')[0]
-
-    def _get_export_path(self, volume_id):
-        """Returns NFS export path for the given volume.
-
-        :param volume_id: string volume id
-        """
-        return self._get_provider_location(volume_id).split(':')[1]
 
     def _volume_not_present(self, nfs_mount, volume_name):
-        """Check if volume exists.
+        """Check if volume does not exist.
 
+        :param nfs_mount: string path of the nfs share
+        :param volume_name: string volume name
+        :returns: boolean (true for volume not present and false otherwise)
         """
         try:
-            self._try_execute('ls', self._get_volume_path(nfs_mount,
-                                                          volume_name))
+            self._try_execute('ls',
+                              self._get_volume_path(nfs_mount, volume_name))
         except processutils.ProcessExecutionError:
             # If the volume isn't present
             return True
 
         return False
 
     def _try_execute(self, *command, **kwargs):
         # NOTE(vish): Volume commands can partially fail due to timing, but
         #             running them a second time on failure will usually
         #             recover nicely.
         tries = 0
         while True:
             try:
                 self._execute(*command, **kwargs)
                 return True
             except processutils.ProcessExecutionError:
                 tries += 1
                 if tries >= self.configuration.num_shell_tries:
                     raise
                 LOG.exception(_LE("Recovering from a failed execute. "
                                   "Try number %s"), tries)
                 time.sleep(tries ** 2)
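
# A minimal sketch (illustration only, not from the patch): _try_execute()
# retries with a quadratically growing pause (1s, 4s, 9s, ...). Standalone
# equivalent, assuming a callable `run` that raises on failure:
import time

def try_execute(run, max_tries=3):
    tries = 0
    while True:
        try:
            run()
            return True
        except Exception:
            tries += 1
            if tries >= max_tries:
                raise
            time.sleep(tries ** 2)  # back off before the next attempt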
 
     def _get_volume_path(self, nfs_share, volume_name):
         """Get volume path (local fs path) for given name on given nfs share.
 
         :param nfs_share string, example 172.18.194.100:/var/nfs
         :param volume_name string,
-        example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
+            example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
+        :returns: the local path according to the parameters
         """
         return os.path.join(self._get_mount_point_for_share(nfs_share),
                             volume_name)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume.
 
-        :param volume: dictionary volume reference
-        :param src_vref: dictionary src_vref reference
+        :param volume: reference to the volume being created
+        :param src_vref: reference to the source volume
+        :returns: the provider_location of the cloned volume
         """
-        vol_size = volume['size']
-        src_vol_size = src_vref['size']
+        vol_size = volume.size
+        src_vol_size = src_vref.size
 
-        if vol_size < src_vol_size:
-            msg = _("Cannot create clone of size %(vol_size)s from "
-                    "volume of size %(src_vol_size)s")
-            msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size}
-            raise exception.CinderException(msg % msg_fmt)
+        self._clone_volume(src_vref, volume.name, src_vref.name)
 
-        self._clone_volume(src_vref['name'], volume['name'], src_vref['id'])
+        share = src_vref.provider_location
 
-        share = self._get_volume_location(src_vref['id'])
+        if vol_size > src_vol_size:
+            volume.provider_location = share
+            self.extend_volume(volume, vol_size)
 
         return {'provider_location': share}
 
     def get_volume_stats(self, refresh=False):
         """Get volume stats.
 
-        if 'refresh' is True, update the stats first.
+        :param refresh: if it is True, update the stats first.
+        :returns: dictionary with the stats from HNAS
+
+        _stats['pools'] = {
+            'total_capacity_gb': total size of the pool,
+            'free_capacity_gb': the available size,
+            'allocated_capacity_gb': current allocated size,
+            'QoS_support': bool to indicate if QoS is supported,
+            'reserved_percentage': percentage of size reserved
+            }
         """
-        _stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
-        _stats["vendor_name"] = 'HDS'
-        _stats["driver_version"] = HDS_HNAS_NFS_VERSION
+        _stats = super(HNASNFSDriver, self).get_volume_stats(refresh)
+        _stats["vendor_name"] = 'Hitachi'
+        _stats["driver_version"] = HNAS_NFS_VERSION
         _stats["storage_protocol"] = 'NFS'
 
         for pool in self.pools:
-            capacity, free, used = self._get_capacity_info(pool['hdp'])
+            capacity, free, used = self._get_capacity_info(pool['fs'])
             pool['total_capacity_gb'] = capacity / float(units.Gi)
             pool['free_capacity_gb'] = free / float(units.Gi)
             pool['allocated_capacity_gb'] = used / float(units.Gi)
 
@@ -441,79 +273,61 @@ class HDSNFSDriver(nfs.NfsDriver):
 
         _stats['pools'] = self.pools
 
-        LOG.info(_LI('Driver stats: %s'), _stats)
+        LOG.info(_LI('Driver stats: %(stat)s'), {'stat': _stats})
 
         return _stats
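
# A minimal sketch (illustration only, not from the patch): capacities come
# back from HNAS in bytes and are reported per pool in GB. Hypothetical
# figures:
from oslo_utils import units

capacity, free, used = 500 * units.Gi, 200 * units.Gi, 300 * units.Gi
pool = {'pool_name': 'default',
        'total_capacity_gb': capacity / float(units.Gi),   # 500.0
        'free_capacity_gb': free / float(units.Gi),        # 200.0
        'allocated_capacity_gb': used / float(units.Gi)}   # 300.0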
 
-    def _get_nfs_info(self):
-        out = self.bend.get_nfs_info(self.config['hnas_cmd'],
-                                     self.config['mgmt_ip0'],
-                                     self.config['username'],
-                                     self.config['password'])
-        lines = out.split('\n')
-
-        # dict based on NFS exports addresses
-        conf = {}
-        for line in lines:
-            if 'Export' in line:
-                inf = line.split()
-                (export, path, fslabel, hdp, ip1) = \
-                    inf[1], inf[3], inf[5], inf[7], inf[11]
-                # 9, 10, etc are IP addrs
-                key = ip1 + ':' + export
-                conf[key] = {}
-                conf[key]['path'] = path
-                conf[key]['hdp'] = hdp
-                conf[key]['fslabel'] = fslabel
-                LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s "
-                             "FSID: %(hdp)s"),
-                         {'key': key, 'path': path,
-                          'fslabel': fslabel, 'hdp': hdp})
-
-        return conf
 
     def do_setup(self, context):
         """Perform internal driver setup."""
+        version_info = self.backend.get_version()
+        LOG.info(_LI("HNAS Array NFS driver"))
+        LOG.info(_LI("HNAS model: %s"), version_info['model'])
+        LOG.info(_LI("HNAS version: %s"), version_info['version'])
+        LOG.info(_LI("HNAS hardware: %s"), version_info['hardware'])
+        LOG.info(_LI("HNAS S/N: %s"), version_info['serial'])
+
         self.context = context
-        self._load_shares_config(getattr(self.configuration,
-                                         self.driver_prefix +
-                                         '_shares_config'))
-        LOG.info(_LI("Review shares: %s"), self.shares)
+        self._load_shares_config(
+            getattr(self.configuration, self.driver_prefix + '_shares_config'))
+        LOG.info(_LI("Review shares: %(shr)s"), {'shr': self.shares})
 
-        nfs_info = self._get_nfs_info()
+        elist = self.backend.get_export_list()
 
-        LOG.debug("nfs_info: %s", nfs_info)
+        # Check all configured exports
+        for svc_name, svc_info in self.config['services'].items():
+            server_ip = svc_info['hdp'].split(':')[0]
+            mountpoint = svc_info['hdp'].split(':')[1]
 
-        for share in self.shares:
-            if share in nfs_info.keys():
-                LOG.info(_LI("share: %(share)s -> %(info)s"),
-                         {'share': share, 'info': nfs_info[share]['path']})
+            # Ensure the export is configured in HNAS
+            export_configured = False
+            for export in elist:
+                if mountpoint == export['name'] and server_ip in export['evs']:
+                    svc_info['export'] = export
+                    export_configured = True
 
-                for svc in self.config['services'].keys():
-                    if share == self.config['services'][svc]['hdp']:
-                        self.config['services'][svc]['path'] = \
-                            nfs_info[share]['path']
-                        # don't overwrite HDP value
-                        self.config['services'][svc]['fsid'] = \
-                            nfs_info[share]['hdp']
-                        self.config['services'][svc]['fslabel'] = \
-                            nfs_info[share]['fslabel']
-                        LOG.info(_LI("Save service info for"
-                                     " %(svc)s -> %(hdp)s, %(path)s"),
-                                 {'svc': svc, 'hdp': nfs_info[share]['hdp'],
-                                  'path': nfs_info[share]['path']})
-                        break
-                if share != self.config['services'][svc]['hdp']:
-                    LOG.error(_LE("NFS share %(share)s has no service entry:"
-                                  " %(svc)s -> %(hdp)s"),
-                              {'share': share, 'svc': svc,
-                               'hdp': self.config['services'][svc]['hdp']})
-                    raise exception.ParameterNotFound(param=svc)
-            else:
-                LOG.info(_LI("share: %s incorrect entry"), share)
+            # Ensure the export is reachable
+            try:
+                out, err = self._execute('showmount', '-e', server_ip)
+            except processutils.ProcessExecutionError:
+                LOG.error(_LE("NFS server %(srv)s not reachable!"),
+                          {'srv': server_ip})
+                raise
 
-        LOG.debug("self.config['services'] = %s", self.config['services'])
+            export_list = out.split('\n')[1:]
+            export_list.pop()
+            mountpoint_not_found = mountpoint not in map(
+                lambda x: x.split()[0], export_list)
+            if (len(export_list) < 1 or
+                    mountpoint_not_found or
+                    not export_configured):
+                LOG.error(_LE("Configured share %(share)s is not present"
+                              " in %(srv)s."),
+                          {'share': mountpoint, 'srv': server_ip})
+                msg = _('Section: %s') % svc_name
+                raise exception.InvalidParameterValue(err=msg)
+
+        LOG.debug("Loading services: %(svc)s", {
+            'svc': self.config['services']})
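
# A minimal sketch (illustration only, not from the patch): the reachability
# check above parses `showmount -e` output, dropping the header line and the
# trailing empty string left by the final newline. Assuming a captured sample
# of stdout:
out = ("Export list for 172.24.49.21:\n"
       "/cinder *\n"
       "/scratch *\n")
export_list = out.split('\n')[1:]
export_list.pop()  # drop the trailing empty string
mountpoint_not_found = '/cinder' not in map(lambda x: x.split()[0],
                                            export_list)
assert not mountpoint_not_found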
 
         service_list = self.config['services'].keys()
         for svc in service_list:
@@ -521,74 +335,57 @@ class HDSNFSDriver(nfs.NfsDriver):
             pool = {}
             pool['pool_name'] = svc['volume_type']
             pool['service_label'] = svc['volume_type']
-            pool['hdp'] = svc['hdp']
+            pool['fs'] = svc['hdp']
 
             self.pools.append(pool)
 
-        LOG.info(_LI("Configured pools: %s"), self.pools)
+        LOG.info(_LI("Configured pools: %(pool)s"), {'pool': self.pools})
 
-    def _clone_volume(self, volume_name, clone_name, volume_id):
+    def _clone_volume(self, src_vol, clone_name, src_name=None):
         """Clones mounted volume using the HNAS file_clone.
 
-        :param volume_name: string volume name
+        :param src_vol: object source volume
         :param clone_name: string clone name (or snapshot)
-        :param volume_id: string volume id
+        :param src_name: name of the source volume.
         """
-        export_path = self._get_export_path(volume_id)
+        # when the source is a snapshot, we need to pass the source name and
+        # use the information of the volume that originated the snapshot to
+        # get the clone path.
+        if not src_name:
+            src_name = src_vol.name
 
-        # volume-ID snapshot-ID, /cinder
-        LOG.info(_LI("Cloning with volume_name %(vname)s clone_name %(cname)s"
-                     " export_path %(epath)s"), {'vname': volume_name,
-                                                 'cname': clone_name,
-                                                 'epath': export_path})
+        LOG.info(_LI("Cloning with volume_name %(vname)s, clone_name "
                     "%(cname)s, export_path %(epath)s"),
+                 {'vname': src_name, 'cname': clone_name,
+                  'epath': src_vol.provider_location})
 
-        source_vol = self._id_to_vol(volume_id)
-        # sps; added target
-        (_hdp, _path, _fslabel) = self._get_service(source_vol)
-        target_path = '%s/%s' % (_path, clone_name)
-        source_path = '%s/%s' % (_path, volume_name)
-        out = self.bend.file_clone(self.config['hnas_cmd'],
-                                   self.config['mgmt_ip0'],
-                                   self.config['username'],
-                                   self.config['password'],
-                                   _fslabel, source_path, target_path)
+        (fs, path, fs_label) = self._get_service(src_vol)
 
-        return out
+        target_path = '%s/%s' % (path, clone_name)
+        source_path = '%s/%s' % (path, src_name)
 
-    def get_pool(self, volume):
-        if not volume['volume_type']:
-            return 'default'
-        else:
-            metadata = {}
-            type_id = volume['volume_type_id']
-            if type_id is not None:
-                metadata = volume_types.get_volume_type_extra_specs(type_id)
-            if not metadata.get('service_label'):
-                return 'default'
-            else:
-                if metadata['service_label'] not in \
-                        self.config['services'].keys():
-                    return 'default'
-                else:
-                    return metadata['service_label']
+        self.backend.file_clone(fs_label, source_path, target_path)
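
# A minimal sketch (illustration only, not from the patch): source/target
# path construction for the HNAS file clone, with hypothetical names on an
# export path /cinder:
path = '/cinder'
src_name = 'volume-91ee65ec-c473-4391-8c09-162b00c68a8c'
clone_name = 'snapshot-3f9abec5-5a9f-4e7e-a3e5-0a254848a4a7'  # hypothetical
source_path = '%s/%s' % (path, src_name)
target_path = '%s/%s' % (path, clone_name)
# file_clone then creates target_path as a clone of source_path on fs_label.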
 
     def create_volume(self, volume):
         """Creates a volume.
 
         :param volume: volume reference
         :returns: the volume provider_location
         """
         self._ensure_shares_mounted()
 
-        (_hdp, _path, _fslabel) = self._get_service(volume)
+        (fs_id, path, fslabel) = self._get_service(volume)
 
-        volume['provider_location'] = _hdp
+        volume.provider_location = fs_id
 
         LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"),
-                 {'label': _fslabel, 'loc': volume['provider_location']})
+                 {'label': fslabel, 'loc': volume.provider_location})
 
         self._do_create_volume(volume)
 
-        return {'provider_location': volume['provider_location']}
+        return {'provider_location': fs_id}
 
     def _convert_vol_ref_share_name_to_share_ip(self, vol_ref):
         """Converts the share point name to an IP address.
 
@@ -596,8 +393,10 @@ class HDSNFSDriver(nfs.NfsDriver):
         The volume reference may have a DNS name portion in the share name.
         Convert that to an IP address and then restore the entire path.
 
-        :param vol_ref: driver-specific information used to identify a volume
-        :returns: a volume reference where share is in IP format
+        :param vol_ref: driver-specific information used to identify a volume
+        :returns: a volume reference where share is in IP format or raises
+            error
+        :raises: e.strerror
         """
         # First strip out share and convert to IP format.
 
@@ -608,7 +407,7 @@ class HDSNFSDriver(nfs.NfsDriver):
         except socket.gaierror as e:
             LOG.error(_LE('Invalid hostname %(host)s'),
                       {'host': share_split[0]})
-            LOG.debug('error: %s', e.strerror)
+            LOG.debug('error: %(err)s', {'err': e.strerror})
             raise
 
         # Now place back into volume reference.
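
# A minimal sketch (illustration only, not from the patch): resolving the
# host portion of a share to an IP, as the method above does, with a
# hypothetical reference:
import socket

share_split = 'localhost:/openstack'.split(':')
ip = socket.gethostbyname(share_split[0])   # '127.0.0.1'
vol_ref_share = ip + ':' + share_split[1]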
 
@@ -624,7 +423,8 @@ class HDSNFSDriver(nfs.NfsDriver):
         if unsuccessful.
 
         :param vol_ref: driver-specific information used to identify a volume
-        :returns: NFS Share, NFS mount, volume path or raise error
+        :returns: NFS Share, NFS mount, volume path or raise error
+        :raises: ManageExistingInvalidReference
         """
         # Check that the reference is valid.
         if 'source-name' not in vol_ref:
 
@@ -677,30 +477,34 @@ class HDSNFSDriver(nfs.NfsDriver):
         e.g., 10.10.32.1:/openstack/vol_to_manage
         or 10.10.32.1:/openstack/some_directory/vol_to_manage
 
-        :param volume: cinder volume to manage
-        :param existing_vol_ref: driver-specific information used to identify a
-        volume
+        :param volume: cinder volume to manage
+        :param existing_vol_ref: driver-specific information used to identify a
+            volume
+        :returns: the provider location
+        :raises: VolumeBackendAPIException
         """
         # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
-        (nfs_share, nfs_mount, vol_path
+        (nfs_share, nfs_mount, vol_name
         ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
 
         LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s.",
-                  {'vol': volume['id'],
+                  {'vol': volume.id,
                    'ref': existing_vol_ref['source-name']})
 
         self._check_pool_and_share(volume, nfs_share)
-        if vol_path == volume['name']:
-            LOG.debug("New Cinder volume %s name matches reference name: "
-                      "no need to rename.", volume['name'])
+
+        if vol_name == volume.name:
+            LOG.debug("New Cinder volume %(vol)s name matches reference name: "
+                      "no need to rename.", {'vol': volume.name})
         else:
-            src_vol = os.path.join(nfs_mount, vol_path)
-            dst_vol = os.path.join(nfs_mount, volume['name'])
+            src_vol = os.path.join(nfs_mount, vol_name)
+            dst_vol = os.path.join(nfs_mount, volume.name)
             try:
-                self._execute("mv", src_vol, dst_vol, run_as_root=False,
-                              check_exit_code=True)
-                LOG.debug("Setting newly managed Cinder volume name to %s.",
-                          volume['name'])
+                self._try_execute("mv", src_vol, dst_vol, run_as_root=False,
+                                  check_exit_code=True)
+                LOG.debug("Setting newly managed Cinder volume name "
+                          "to %(vol)s.", {'vol': volume.name})
                 self._set_rw_permissions_for_all(dst_vol)
             except (OSError, processutils.ProcessExecutionError) as err:
                 exception_msg = (_("Failed to manage existing volume "
 
@@ -718,20 +522,20 @@ class HDSNFSDriver(nfs.NfsDriver):
         one passed in the volume reference. Also, checks if the pool
         for the volume type matches the pool for the host passed.
 
-        :param volume: cinder volume reference
+        :param volume: cinder volume reference
         :param nfs_share: NFS share passed to manage
+        :raises: ManageExistingVolumeTypeMismatch
         """
-        pool_from_vol_type = self.get_pool(volume)
+        pool_from_vol_type = hnas_utils.get_pool(self.config, volume)
 
-        pool_from_host = utils.extract_host(volume['host'], level='pool')
+        pool_from_host = utils.extract_host(volume.host, level='pool')
 
         if self.config['services'][pool_from_vol_type]['hdp'] != nfs_share:
             msg = (_("Failed to manage existing volume because the pool of "
                      "the volume type chosen does not match the NFS share "
                      "passed in the volume reference."),
-                   {'Share passed': nfs_share,
-                    'Share for volume type':
-                        self.config['services'][pool_from_vol_type]['hdp']})
+                   {'Share passed': nfs_share, 'Share for volume type':
+                       self.config['services'][pool_from_vol_type]['hdp']})
             raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
 
         if pool_from_host != pool_from_vol_type:
@@ -739,7 +543,7 @@ class HDSNFSDriver(nfs.NfsDriver):
                      "the volume type chosen does not match the pool of "
                      "the host."),
                    {'Pool of the volume type': pool_from_vol_type,
                     'Pool of the host': pool_from_host})
             raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
 
     def manage_existing_get_size(self, volume, existing_vol_ref):
@@ -747,19 +551,24 @@ class HDSNFSDriver(nfs.NfsDriver):
 
         When calculating the size, round up to the next GB.
 
-        :param volume: cinder volume to manage
+        :param volume: cinder volume to manage
         :param existing_vol_ref: existing volume to take under management
+        :returns: the size of the volume or raise error
+        :raises: VolumeBackendAPIException
         """
         # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
-        (nfs_share, nfs_mount, vol_path
+        (nfs_share, nfs_mount, vol_name
         ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
 
-        try:
-            LOG.debug("Asked to get size of NFS vol_ref %s.",
-                      existing_vol_ref['source-name'])
+        LOG.debug("Asked to get size of NFS vol_ref %(ref)s.",
+                  {'ref': existing_vol_ref['source-name']})
 
-            file_path = os.path.join(nfs_mount, vol_path)
+        if utils.check_already_managed_volume(vol_name):
+            raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)
+
+        try:
+            file_path = os.path.join(nfs_mount, vol_name)
             file_size = float(cutils.get_file_size(file_path)) / units.Gi
             vol_size = int(math.ceil(file_size))
         except (OSError, ValueError):
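
# A minimal sketch (illustration only, not from the patch): the managed size
# is the backing file size rounded up to the next GB, e.g. a 1.2 GiB file is
# reported as a 2 GB volume:
import math
from oslo_utils import units

file_size = float(1.2 * units.Gi) / units.Gi   # 1.2
vol_size = int(math.ceil(file_size))           # 2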
 
@@ -783,8 +592,8 @@ class HDSNFSDriver(nfs.NfsDriver):
 
         :param volume: cinder volume to unmanage
         """
-        vol_str = CONF.volume_name_template % volume['id']
-        path = self._get_mount_point_for_share(volume['provider_location'])
+        vol_str = CONF.volume_name_template % volume.id
+        path = self._get_mount_point_for_share(volume.provider_location)
 
         new_str = "unmanage-" + vol_str
 
@@ -792,8 +601,8 @@ class HDSNFSDriver(nfs.NfsDriver):
         new_path = os.path.join(path, new_str)
 
         try:
-            self._execute("mv", vol_path, new_path,
-                          run_as_root=False, check_exit_code=True)
+            self._try_execute("mv", vol_path, new_path,
+                              run_as_root=False, check_exit_code=True)
 
             LOG.info(_LI("Cinder NFS volume with current path %(cr)s is "
                          "no longer being managed."), {'cr': new_path})

152  cinder/volume/drivers/hitachi/hnas_utils.py (new file)
@@ -0,0 +1,152 @@
+# Copyright (c) 2016 Hitachi Data Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Shared code for HNAS drivers
+"""
+
+import os
+import re
+
+from oslo_log import log as logging
+from xml.etree import ElementTree as ETree
+
+from cinder import exception
+from cinder.i18n import _, _LI
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc',
+                       'chap_enabled': 'True',
+                       'ssh_port': '22'}
+
+MAX_HNAS_ISCSI_TARGETS = 32
+
+
+def _xml_read(root, element, check=None):
+    """Read an xml element.
+
+    :param root: XML object
+    :param element: string desired tag
+    :param check: string if present, throw exception if element missing
+    """
+    val = root.findtext(element)
+
+    # mandatory parameter not found
+    if val is None and check:
+        raise exception.ParameterNotFound(param=element)
+
+    # tag not found
+    if val is None:
+        return None
+
+    svc_tag_pattern = re.compile("svc_[0-3]$")
+    # tag found but empty parameter.
+    if not val.strip():
+        if svc_tag_pattern.search(element):
+            return ""
+        raise exception.ParameterNotFound(param=element)
+
+    LOG.debug(_LI("%(element)s: %(val)s"),
+              {'element': element,
+               'val': val if element != 'password' else '***'})
+
+    return val.strip()
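
# A minimal sketch (illustration only, not from the patch): _xml_read() is a
# thin wrapper over ElementTree findtext(), which also accepts nested paths.
# With a hypothetical document:
from xml.etree import ElementTree as ETree

root = ETree.fromstring('<config><mgmt_ip0>10.0.0.1</mgmt_ip0>'
                        '<svc_0><hdp>10.0.0.1:/cinder</hdp></svc_0>'
                        '</config>')
assert root.findtext('mgmt_ip0') == '10.0.0.1'
assert root.findtext('svc_0/hdp') == '10.0.0.1:/cinder'
assert root.findtext('missing') is None  # optional parameter absent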
+
+
+def read_config(xml_config_file, svc_params, optional_params):
+    """Read Hitachi driver specific xml config file.
+
+    :param xml_config_file: string filename containing XML configuration
+    :param svc_params: parameters to configure the services
+        ['volume_type', 'hdp', 'iscsi_ip']
+    :param optional_params: parameters to configure that are not mandatory
+        ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0', 'chap_enabled']
+    """
+    if not os.access(xml_config_file, os.R_OK):
+        msg = (_("Can't open config file: %s") % xml_config_file)
+        raise exception.NotFound(message=msg)
+
+    try:
+        root = ETree.parse(xml_config_file).getroot()
+    except ETree.ParseError:
+        msg = (_("Error parsing config file: %s") % xml_config_file)
+        raise exception.ConfigNotFound(message=msg)
+
+    # mandatory parameters for NFS and iSCSI
+    config = {}
+    arg_prereqs = ['mgmt_ip0', 'username']
+    for req in arg_prereqs:
+        config[req] = _xml_read(root, req, 'check')
+
+    # optional parameters for NFS and iSCSI
+    for req in optional_params:
+        config[req] = _xml_read(root, req)
+        if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None:
+            config[req] = HNAS_DEFAULT_CONFIG.get(req)
+
+    config['ssh_private_key'] = _xml_read(root, 'ssh_private_key')
+    config['password'] = _xml_read(root, 'password')
+
+    if config['ssh_private_key'] is None and config['password'] is None:
+        msg = (_("Missing authentication option (password or private "
+                 "key file)."))
+        raise exception.ConfigNotFound(message=msg)
+
+    config['ssh_port'] = _xml_read(root, 'ssh_port')
+    if config['ssh_port'] is None:
+        config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
+
+    config['fs'] = {}
+    config['services'] = {}
+
+    # min one needed
+    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
+        if _xml_read(root, svc) is None:
+            continue
+        service = {'label': svc}
+
+        # none optional
+        for arg in svc_params:
+            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
+        config['services'][service['volume_type']] = service
+        config['fs'][service['hdp']] = service['hdp']
+
+    # at least one service required!
+    if not config['services'].keys():
+        msg = (_("svc_0"))
+        raise exception.ParameterNotFound(param=msg)
+
+    return config
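
# A minimal sketch (illustration only, not from the patch): how the NFS
# driver calls read_config() above; the file name mirrors the driver's
# default hds_hnas_nfs_config_file and the parameter lists come from its
# __init__:
svc_params = ['volume_type', 'hdp']
optional_params = ['hnas_cmd', 'cluster_admin_ip0']
config = read_config('/opt/hds/hnas/cinder_nfs_conf.xml',
                     svc_params, optional_params)
# config['services'] maps each <volume_type> label to its service dict;
# config['fs'] maps each <hdp> export to itself.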
+
+
+def get_pool(config, volume):
+    """Get the pool of a volume.
+
+    :param config: dictionary containing the configuration parameters
+    :param volume: dictionary volume reference
+    :returns: the pool related to the volume
+    """
+    if volume.volume_type:
+        metadata = {}
+        type_id = volume.volume_type_id
+        if type_id is not None:
+            metadata = volume_types.get_volume_type_extra_specs(type_id)
+        if metadata.get('service_label'):
+            if metadata['service_label'] in config['services'].keys():
+                return metadata['service_label']
+    return 'default'
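
# A minimal sketch (illustration only, not from the patch): get_pool() falls
# back to 'default' unless the volume type carries a service_label extra spec
# matching a configured service. With a hypothetical untyped volume:
class _FakeVolume(object):
    volume_type = None
    volume_type_id = None

config = {'services': {'default': {}, 'silver': {}}}
assert get_pool(config, _FakeVolume()) == 'default'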

@@ -140,9 +140,13 @@ MAPPING = {
     'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
     'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
     'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
-    'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
+    'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
     'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
-    'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
+    'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
+    'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
+    'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
+    'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
+    'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
     'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
     'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
     'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
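
# A minimal sketch (illustration only, not from the patch): entries in the
# MAPPING table above let a deployment that still references a deprecated
# volume_driver path load the renamed class transparently:
old = 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver'
new = MAPPING.get(old, old)  # '...hnas_nfs.HNASNFSDriver'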

@@ -0,0 +1,7 @@
+upgrade:
+  - HNAS drivers have new configuration paths. Users should now use
+    ``cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS
+    driver and ``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver``
+    for HNAS iSCSI driver.
+deprecations:
+  - The old HNAS drivers configuration paths have been marked for deprecation.
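
(Illustration only, not from the patch: in practice this means updating the
backend section of cinder.conf, replacing
volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver
with
volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
before the deprecated path is removed.)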