FalconStor: New Cinder driver in Newton
This driver supports the following APIs:
- Volume Create/Delete
- Volume Attach/Detach
- Snapshot Create/Delete
- Create Volume from Snapshot
- Get Volume Stats
- Clone Volume
- Extend Volume
- Consistency Group Create/Delete/Update
- Consistency Group Snapshot Create/Delete
- Manage/Unmanage existing volume

DocImpact
Change-Id: Iea3ff7f1b8a055979da10d2d484c8a2ba0c48bac
Implements: blueprint falconstor-freestor-cinder-driver
Parent: ccd410b0aa
Commit: a6f48a55eb
@@ -85,6 +85,8 @@ from cinder.volume.drivers.emc import scaleio as \
from cinder.volume.drivers.emc import xtremio as \
    cinder_volume_drivers_emc_xtremio
from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx
from cinder.volume.drivers.falconstor import fss_common as \
    cinder_volume_drivers_falconstor_fsscommon
from cinder.volume.drivers.fujitsu import eternus_dx_common as \
    cinder_volume_drivers_fujitsu_eternusdxcommon
from cinder.volume.drivers import glusterfs as cinder_volume_drivers_glusterfs
@@ -182,8 +184,8 @@ def list_opts():
    return [
        ('FC-ZONE-MANAGER',
            itertools.chain(
                cinder_zonemanager_fczonemanager.zone_manager_opts,
                cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts,
                cinder_zonemanager_fczonemanager.zone_manager_opts,
                cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts,
            )),
        ('KEYMGR',
@@ -269,6 +271,7 @@ def list_opts():
                cinder_volume_drivers_xio.XIO_OPTS,
                cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc.
                storwize_svc_fc_opts,
                cinder_volume_drivers_falconstor_fsscommon.FSS_OPTS,
                cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS,
                cinder_volume_driver.volume_opts,
                cinder_volume_driver.iser_opts,
|
cinder/tests/unit/test_falconstor_fss.py (new file, 895 lines)
@ -0,0 +1,895 @@
|
||||
# Copyright (c) 2016 FalconStor, Inc.
|
||||
# All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from copy import deepcopy
|
||||
import mock
|
||||
import time
|
||||
|
||||
from cinder import context
|
||||
from cinder import exception
|
||||
from cinder import test
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.falconstor import fc
|
||||
from cinder.volume.drivers.falconstor import iscsi
|
||||
from cinder.volume.drivers.falconstor import rest_proxy as proxy
|
||||
|
||||
|
||||
DRIVER_PATH = "cinder.volume.drivers.falconstor"
|
||||
BASE_DRIVER = DRIVER_PATH + ".fss_common.FalconstorBaseDriver"
|
||||
ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver"
|
||||
|
||||
PRIMARY_IP = '10.0.0.1'
|
||||
SECONDARY_IP = '10.0.0.2'
|
||||
FAKE_ID = 123
|
||||
FAKE = 'fake'
|
||||
FAKE_HOST = 'fakehost'
|
||||
API_RESPONSE = {'rc': 0}
|
||||
ISCSI_VOLUME_BACKEND_NAME = "FSSISCSIDriver"
|
||||
SESSION_ID = "a76d506c-abcd-1234-efgh-710e1fd90527"
|
||||
VOLUME_ID = '6068ea6d-f221-4213-bde9-f1b50aecdf36'
|
||||
ADD_VOLUME_ID = '6068ed7f-f231-4283-bge9-f1b51aecdf36'
|
||||
GROUP_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
|
||||
|
||||
PORTAL_RESPONSE = {'rc': 0, 'ipaddress': FAKE}
|
||||
VOLUME_METADATA = {'metadata': {'FSS-vid': 1}}
|
||||
EXTENT_NEW_SIZE = 3
|
||||
DATA_SERVER_INFO = 0, {'metadata': {'vendor': 'FalconStor', 'version': '1.5'}}
|
||||
|
||||
FSS_SINGLE_TYPE = 'single'
|
||||
RAWTIMESTAMP = '1324975390'
|
||||
|
||||
VOLUME = {'id': VOLUME_ID,
|
||||
'name': "volume-" + VOLUME_ID,
|
||||
'display_name': 'fake_volume',
|
||||
'display_description': '',
|
||||
'size': 1,
|
||||
'host': "hostname@backend#%s" % FAKE_ID,
|
||||
'volume_type': None,
|
||||
'volume_type_id': None,
|
||||
'consistencygroup_id': None,
|
||||
'volume_metadata': [],
|
||||
'metadata': {"Type": "work"}}
|
||||
|
||||
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
|
||||
SRC_VOL = {
|
||||
"name": "volume-" + SRC_VOL_ID,
|
||||
"id": SRC_VOL_ID,
|
||||
"display_name": "fake_src_vol",
|
||||
"size": 1,
|
||||
"host": "hostname@backend#%s" % FAKE_ID,
|
||||
"volume_type": None,
|
||||
"volume_type_id": None,
|
||||
"volume_size": 1
|
||||
}
|
||||
|
||||
VOLUME_NAME = 'cinder-' + VOLUME['id']
|
||||
SRC_VOL_NAME = 'cinder-' + SRC_VOL['id']
|
||||
DATA_OUTPUT = VOLUME_NAME, VOLUME_METADATA
|
||||
SNAPSHOT_METADATA = {'fss-tm-comment': None}
|
||||
|
||||
ADD_VOLUME_IN_CG = {
|
||||
'id': ADD_VOLUME_ID,
|
||||
'display_name': 'abc123',
|
||||
'display_description': '',
|
||||
'size': 1,
|
||||
'consistencygroup_id': GROUP_ID,
|
||||
'status': 'available',
|
||||
'host': "hostname@backend#%s" % FAKE_ID}
|
||||
|
||||
REMOVE_VOLUME_IN_CG = {
|
||||
'id': 'fe2dbc515810451dab2f8c8a48d15bee',
|
||||
'display_name': 'fe2dbc515810451dab2f8c8a48d15bee',
|
||||
'display_description': '',
|
||||
'size': 1,
|
||||
'consistencygroup_id': GROUP_ID,
|
||||
'status': 'available',
|
||||
'host': "hostname@backend#%s" % FAKE_ID}
|
||||
|
||||
CONSISTGROUP = {'id': GROUP_ID,
|
||||
'name': 'fake_group',
|
||||
'description': 'fake_group_des',
|
||||
'status': ''}
|
||||
CG_SNAPSHOT = {
|
||||
'consistencygroup_id': GROUP_ID,
|
||||
'id': '3c61b0f9-842e-46bf-b061-5e0031d8083f',
|
||||
'name': 'cgsnapshot1',
|
||||
'description': 'cgsnapshot1',
|
||||
'status': ''}
|
||||
|
||||
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
|
||||
SNAPSHOT = {'name': "snapshot-" + SNAPSHOT_ID,
|
||||
'id': SNAPSHOT_ID,
|
||||
'volume_id': VOLUME_ID,
|
||||
'volume_name': "volume-" + VOLUME_ID,
|
||||
'volume_size': 2,
|
||||
'display_name': "fake_snapshot",
|
||||
'display_description': '',
|
||||
'volume': VOLUME,
|
||||
'metadata': SNAPSHOT_METADATA,
|
||||
'status': ''}
|
||||
|
||||
INITIATOR_IQN = 'iqn.2015-08.org.falconstor:01:fss'
|
||||
TARGET_IQN = "iqn.2015-06.com.falconstor:freestor.fss-12345abc"
|
||||
TARGET_PORT = "3260"
|
||||
ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"]
|
||||
ISCSI_IPS = ["10.0.0." + str(i + 1) for i in range(len(ISCSI_PORT_NAMES))]
|
||||
|
||||
ISCSI_PORTS = {"iqn": TARGET_IQN, "lun": 1}
|
||||
ISCSI_CONNECTOR = {'initiator': INITIATOR_IQN,
|
||||
'host': "hostname@backend#%s" % FAKE_ID}
|
||||
ISCSI_INFO = {
|
||||
'driver_volume_type': 'iscsi',
|
||||
'data': {
|
||||
'target_discovered': True,
|
||||
'discard': True,
|
||||
'encrypted': False,
|
||||
'qos_specs': None,
|
||||
'access_mode': 'rw',
|
||||
'volume_id': VOLUME_ID,
|
||||
'target_iqn': ISCSI_PORTS['iqn'],
|
||||
'target_portal': ISCSI_IPS[0] + ':' + TARGET_PORT,
|
||||
'target_lun': 1
|
||||
},
|
||||
}
|
||||
|
||||
ISCSI_MULTIPATH_INFO = {
|
||||
'driver_volume_type': 'iscsi',
|
||||
'data''data': {
|
||||
'target_discovered': False,
|
||||
'discard': True,
|
||||
'encrypted': False,
|
||||
'qos_specs': None,
|
||||
'access_mode': 'rw',
|
||||
'volume_id': VOLUME_ID,
|
||||
'target_iqns': [ISCSI_PORTS['iqn']],
|
||||
'target_portals': [ISCSI_IPS[0] + ':' + TARGET_PORT],
|
||||
'target_luns': [1]
|
||||
},
|
||||
}
|
||||
|
||||
FC_INITIATOR_WWPNS = ['2100000d778301c3', '2101000d77a301c3']
|
||||
FC_TARGET_WWPNS = ['11000024ff2d2ca4', '11000024ff2d2ca5',
|
||||
'11000024ff2d2c23', '11000024ff2d2c24']
|
||||
FC_WWNS = ['20000024ff2d2ca4', '20000024ff2d2ca5',
|
||||
'20000024ff2d2c23', '20000024ff2d2c24']
|
||||
FC_CONNECTOR = {'ip': '10.10.0.1',
|
||||
'initiator': 'iqn.1988-08.org.oracle:568eb4ccbbcc',
|
||||
'wwpns': FC_INITIATOR_WWPNS,
|
||||
'wwnns': FC_WWNS,
|
||||
'host': FAKE_HOST,
|
||||
'multipath': False}
|
||||
FC_INITIATOR_TARGET_MAP = {
|
||||
FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
|
||||
FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
|
||||
}
|
||||
FC_DEVICE_MAPPING = {
|
||||
"fabric": {
|
||||
'initiator_port_wwn_list': FC_INITIATOR_WWPNS,
|
||||
'target_port_wwn_list': FC_WWNS
|
||||
}
|
||||
}
|
||||
|
||||
FC_INFO = {
|
||||
'driver_volume_type': 'fibre_channel',
|
||||
'data': {
|
||||
'target_discovered': True,
|
||||
'volume_id': VOLUME_ID,
|
||||
'target_lun': 1,
|
||||
'target_wwn': FC_TARGET_WWPNS,
|
||||
'initiator_target_map': FC_INITIATOR_TARGET_MAP
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def Fake_sleep(time):
|
||||
pass
|
||||
|
||||
|
||||
class FSSDriverTestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(FSSDriverTestCase, self).setUp()
|
||||
self.mock_config = mock.Mock()
|
||||
self.mock_config.san_ip = PRIMARY_IP
|
||||
self.mock_config.san_login = FAKE
|
||||
self.mock_config.san_password = FAKE
|
||||
self.mock_config.fss_pool = FAKE_ID
|
||||
self.mock_config.san_is_local = False
|
||||
self.mock_config.fss_debug = False
|
||||
self.mock_config.additional_retry_list = False
|
||||
self.stubs.Set(time, 'sleep', Fake_sleep)
|
||||
|
||||
|
||||
class TestFSSISCSIDriver(FSSDriverTestCase):
|
||||
def __init__(self, method):
|
||||
super(TestFSSISCSIDriver, self).__init__(method)
|
||||
|
||||
def setUp(self):
|
||||
super(TestFSSISCSIDriver, self).setUp()
|
||||
self.mock_config.use_chap_auth = False
|
||||
self.mock_config.use_multipath_for_image_xfer = False
|
||||
self.mock_config.volume_backend_name = ISCSI_VOLUME_BACKEND_NAME
|
||||
self.driver = iscsi.FSSISCSIDriver(configuration=self.mock_config)
|
||||
self.mock_utils = mock.Mock()
|
||||
self.driver.driver_utils = self.mock_utils
|
||||
|
||||
def tearDown(self):
|
||||
super(TestFSSISCSIDriver, self).tearDown()
|
||||
|
||||
def test_initialized_should_set_fss_info(self):
|
||||
self.assertEqual(self.driver.proxy.fss_host,
|
||||
self.driver.configuration.san_ip)
|
||||
self.assertEqual(self.driver.proxy.fss_username,
|
||||
self.driver.configuration.san_login)
|
||||
self.assertEqual(self.driver.proxy.fss_password,
|
||||
self.driver.configuration.san_password)
|
||||
self.assertEqual(self.driver.proxy.fss_defined_pool,
|
||||
self.driver.configuration.fss_pool)
|
||||
|
||||
def test_check_for_setup_error(self):
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.check_for_setup_error)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_vdev',
|
||||
return_value=DATA_OUTPUT)
|
||||
def test_create_volume(self, mock_create_vdev):
|
||||
self.driver.create_volume(VOLUME)
|
||||
mock_create_vdev.assert_called_once_with(VOLUME)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name',
|
||||
return_value=VOLUME_NAME)
|
||||
def test_extend_volume(self, mock__get_fss_volume_name):
|
||||
"""Volume extended_volume successfully."""
|
||||
self.driver.proxy.extend_vdev = mock.Mock()
|
||||
result = self.driver.extend_volume(VOLUME, EXTENT_NEW_SIZE)
|
||||
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
|
||||
self.driver.proxy.extend_vdev.assert_called_once_with(VOLUME_NAME,
|
||||
VOLUME["size"],
|
||||
EXTENT_NEW_SIZE)
|
||||
self.assertIsNone(result)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
|
||||
def test_clone_volume(self, mock__get_fss_volume_name):
|
||||
mock__get_fss_volume_name.side_effect = [VOLUME_NAME, SRC_VOL_NAME]
|
||||
self.driver.proxy.clone_volume = mock.Mock(
|
||||
return_value=VOLUME_METADATA)
|
||||
self.driver.proxy.extend_vdev = mock.Mock()
|
||||
|
||||
self.driver.create_cloned_volume(VOLUME, SRC_VOL)
|
||||
self.driver.proxy.clone_volume.assert_called_with(VOLUME_NAME,
|
||||
SRC_VOL_NAME)
|
||||
|
||||
mock__get_fss_volume_name.assert_any_call(VOLUME)
|
||||
mock__get_fss_volume_name.assert_any_call(SRC_VOL)
|
||||
self.assertEqual(2, mock__get_fss_volume_name.call_count)
|
||||
|
||||
self.driver.proxy.extend_vdev(VOLUME_NAME, VOLUME["size"],
|
||||
SRC_VOL["size"])
|
||||
self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME,
|
||||
VOLUME["size"],
|
||||
SRC_VOL["size"])
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'delete_vdev')
|
||||
def test_delete_volume(self, mock_delete_vdev):
|
||||
result = self.driver.delete_volume(VOLUME)
|
||||
mock_delete_vdev.assert_called_once_with(VOLUME)
|
||||
self.assertIsNone(result)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_snapshot',
|
||||
return_value=API_RESPONSE)
|
||||
def test_create_snapshot(self, mock_create_snapshot):
|
||||
snap_name = SNAPSHOT.get('display_name')
|
||||
SNAPSHOT_METADATA["fss-tm-comment"] = snap_name
|
||||
result = self.driver.create_snapshot(SNAPSHOT)
|
||||
mock_create_snapshot.assert_called_once_with(SNAPSHOT)
|
||||
self.assertEqual(result, {'metadata': SNAPSHOT_METADATA})
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'delete_snapshot',
|
||||
return_value=API_RESPONSE)
|
||||
def test_delete_snapshot(self, mock_delete_snapshot):
|
||||
result = self.driver.delete_snapshot(SNAPSHOT)
|
||||
mock_delete_snapshot.assert_called_once_with(SNAPSHOT)
|
||||
self.assertIsNone(result)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_volume_from_snapshot',
|
||||
return_value=(VOLUME_NAME, VOLUME_METADATA))
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name',
|
||||
return_value=VOLUME_NAME)
|
||||
def test_create_volume_from_snapshot(self, mock__get_fss_volume_name,
|
||||
mock_create_volume_from_snapshot):
|
||||
vol_size = VOLUME['size']
|
||||
snap_size = SNAPSHOT['volume_size']
|
||||
self.driver.proxy.extend_vdev = mock.Mock()
|
||||
|
||||
self.assertEqual(
|
||||
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT),
|
||||
dict(metadata=VOLUME_METADATA))
|
||||
mock_create_volume_from_snapshot.assert_called_once_with(VOLUME,
|
||||
SNAPSHOT)
|
||||
|
||||
if vol_size != snap_size:
|
||||
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
|
||||
self.driver.proxy.extend_vdev(VOLUME_NAME, snap_size, vol_size)
|
||||
self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME,
|
||||
snap_size,
|
||||
vol_size)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_group')
|
||||
def test_create_consistency_group(self, mock_create_group):
|
||||
ctxt = context.get_admin_context()
|
||||
model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
|
||||
mock_create_group.assert_called_once_with(CONSISTGROUP)
|
||||
self.assertDictMatch({'status': 'available'}, model_update)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'destroy_group')
|
||||
@mock.patch(BASE_DRIVER + ".delete_volume", autospec=True)
|
||||
def test_delete_consistency_group(self, mock_delete_vdev,
|
||||
mock_destroy_group):
|
||||
mock_cgroup = mock.MagicMock()
|
||||
mock_cgroup.id = FAKE_ID
|
||||
mock_cgroup['status'] = "deleted"
|
||||
mock_context = mock.Mock()
|
||||
mock_volume = mock.MagicMock()
|
||||
expected_volume_updates = [{
|
||||
'id': mock_volume.id,
|
||||
'status': 'deleted'
|
||||
}]
|
||||
model_update, volumes = self.driver.delete_consistencygroup(
|
||||
mock_context, mock_cgroup, [mock_volume])
|
||||
|
||||
mock_destroy_group.assert_called_with(mock_cgroup)
|
||||
self.assertEqual(expected_volume_updates, volumes)
|
||||
self.assertEqual(mock_cgroup['status'], model_update['status'])
|
||||
mock_delete_vdev.assert_called_with(self.driver, mock_volume)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'set_group')
|
||||
def test_update_consistency_group(self, mock_set_group):
|
||||
ctxt = context.get_admin_context()
|
||||
add_vols = [
|
||||
{'name': 'vol1', 'id': 'vol1', 'display_name': ''},
|
||||
{'name': 'vol2', 'id': 'vol2', 'display_name': ''}
|
||||
]
|
||||
remove_vols = [
|
||||
{'name': 'vol3', 'id': 'vol3', 'display_name': ''},
|
||||
{'name': 'vol4', 'id': 'vol4', 'display_name': ''}
|
||||
]
|
||||
|
||||
expected_addvollist = ["cinder-%s" % volume['id'] for volume in
|
||||
add_vols]
|
||||
expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols]
|
||||
|
||||
self.driver.update_consistencygroup(ctxt, CONSISTGROUP,
|
||||
add_volumes=add_vols,
|
||||
remove_volumes=remove_vols)
|
||||
mock_set_group.assert_called_with(GROUP_ID,
|
||||
addvollist=expected_addvollist,
|
||||
remvollist=expected_remvollist)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_cgsnapshot')
|
||||
def test_create_cgsnapshot(self, mock_create_cgsnapshot):
|
||||
mock_cgsnap = CG_SNAPSHOT
|
||||
mock_context = mock.Mock()
|
||||
mock_snap = mock.MagicMock()
|
||||
model_update, snapshots = self.driver.create_cgsnapshot(mock_context,
|
||||
mock_cgsnap,
|
||||
[mock_snap])
|
||||
mock_create_cgsnapshot.assert_called_once_with(mock_cgsnap)
|
||||
self.assertEqual({'status': 'available'}, model_update)
|
||||
expected_snapshot_update = [{
|
||||
'id': mock_snap.id,
|
||||
'status': 'available'
|
||||
}]
|
||||
self.assertEqual(expected_snapshot_update, snapshots)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'delete_cgsnapshot')
|
||||
def test_delete_cgsnapshot(self, mock_delete_cgsnapshot):
|
||||
mock_cgsnap = mock.Mock()
|
||||
mock_cgsnap.id = FAKE_ID
|
||||
mock_cgsnap.status = 'deleted'
|
||||
mock_context = mock.Mock()
|
||||
mock_snap = mock.MagicMock()
|
||||
|
||||
model_update, snapshots = self.driver.delete_cgsnapshot(mock_context,
|
||||
mock_cgsnap,
|
||||
[mock_snap])
|
||||
mock_delete_cgsnapshot.assert_called_once_with(mock_cgsnap)
|
||||
self.assertEqual({'status': mock_cgsnap.status}, model_update)
|
||||
|
||||
expected_snapshot_update = [dict(id=mock_snap.id, status='deleted')]
|
||||
self.assertEqual(expected_snapshot_update, snapshots)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi',
|
||||
return_value=ISCSI_PORTS)
|
||||
def test_initialize_connection(self, mock_initialize_connection_iscsi):
|
||||
FSS_HOSTS = []
|
||||
FSS_HOSTS.append(PRIMARY_IP)
|
||||
ret = self.driver.initialize_connection(VOLUME, ISCSI_CONNECTOR)
|
||||
mock_initialize_connection_iscsi.assert_called_once_with(
|
||||
VOLUME,
|
||||
ISCSI_CONNECTOR,
|
||||
FSS_HOSTS)
|
||||
result = deepcopy(ISCSI_INFO)
|
||||
self.assertDictMatch(result, ret)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi')
|
||||
@mock.patch(ISCSI_DRIVER + "._check_multipath", autospec=True)
|
||||
def test_initialize_connection_multipath(self, mock__check_multipath,
|
||||
mock_initialize_connection_iscsi):
|
||||
fss_hosts = []
|
||||
fss_hosts.append(self.mock_config.san_ip)
|
||||
mock_initialize_connection_iscsi.return_value = ISCSI_PORTS
|
||||
mock__check_multipath.retuen_value = True
|
||||
|
||||
self.mock_config.use_multipath_for_image_xfer = True
|
||||
self.mock_config.san_secondary_ip = SECONDARY_IP
|
||||
multipath_connector = deepcopy(ISCSI_CONNECTOR)
|
||||
multipath_connector["multipath"] = True
|
||||
fss_hosts.append(SECONDARY_IP)
|
||||
|
||||
self.driver.initialize_connection(VOLUME, multipath_connector)
|
||||
mock_initialize_connection_iscsi.assert_called_once_with(
|
||||
VOLUME,
|
||||
multipath_connector,
|
||||
fss_hosts)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi')
|
||||
def test_terminate_connection(self, mock_terminate_connection_iscsi):
|
||||
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
|
||||
mock_terminate_connection_iscsi.assert_called_once_with(
|
||||
VOLUME,
|
||||
ISCSI_CONNECTOR)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_manage_existing_volume')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid')
|
||||
def test_manage_existing(self, mock__get_existing_volume_ref_vid,
|
||||
mock__manage_existing_volume):
|
||||
ref_vid = 1
|
||||
volume_ref = {'source-id': ref_vid}
|
||||
self.driver.manage_existing(VOLUME, volume_ref)
|
||||
mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref)
|
||||
mock__manage_existing_volume.assert_called_once_with(
|
||||
volume_ref['source-id'], VOLUME)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid',
|
||||
return_value=5120)
|
||||
def test_manage_existing_get_size(self, mock__get_existing_volume_ref_vid):
|
||||
ref_vid = 1
|
||||
volume_ref = {'source-id': ref_vid}
|
||||
expected_size = 5
|
||||
size = self.driver.manage_existing_get_size(VOLUME, volume_ref)
|
||||
mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref)
|
||||
self.assertEqual(expected_size, size)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'unmanage')
|
||||
def test_unmanage(self, mock_unmanage):
|
||||
self.driver.unmanage(VOLUME)
|
||||
mock_unmanage.assert_called_once_with(VOLUME)
|
||||
|
||||
|
||||
class TestFSSFCDriver(FSSDriverTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestFSSFCDriver, self).setUp()
|
||||
self.driver = fc.FSSFCDriver(configuration=self.mock_config)
|
||||
self.driver._lookup_service = mock.Mock()
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'fc_initialize_connection')
|
||||
def test_initialize_connection(self, mock_fc_initialize_connection):
|
||||
fss_hosts = []
|
||||
fss_hosts.append(PRIMARY_IP)
|
||||
self.driver.initialize_connection(VOLUME, FC_CONNECTOR)
|
||||
mock_fc_initialize_connection.assert_called_once_with(
|
||||
VOLUME,
|
||||
FC_CONNECTOR,
|
||||
fss_hosts)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_check_fc_host_devices_empty',
|
||||
return_value=False)
|
||||
@mock.patch.object(proxy.RESTProxy, 'fc_terminate_connection',
|
||||
return_value=FAKE_ID)
|
||||
def test_terminate_connection(self, mock_fc_terminate_connection,
|
||||
mock__check_fc_host_devices_empty):
|
||||
self.driver.terminate_connection(VOLUME, FC_CONNECTOR)
|
||||
mock_fc_terminate_connection.assert_called_once_with(
|
||||
VOLUME,
|
||||
FC_CONNECTOR)
|
||||
mock__check_fc_host_devices_empty.assert_called_once_with(FAKE_ID)
|
||||
|
||||
|
||||
class TestRESTProxy(test.TestCase):
|
||||
"""Test REST Proxy Driver."""
|
||||
|
||||
def setUp(self):
|
||||
super(TestRESTProxy, self).setUp()
|
||||
configuration = mock.Mock(conf.Configuration)
|
||||
configuration.san_ip = FAKE
|
||||
configuration.san_login = FAKE
|
||||
configuration.san_password = FAKE
|
||||
configuration.fss_pool = FAKE_ID
|
||||
configuration.fss_debug = False
|
||||
configuration.additional_retry_list = None
|
||||
|
||||
self.proxy = proxy.RESTProxy(configuration)
|
||||
self.FSS_MOCK = mock.MagicMock()
|
||||
self.proxy.FSS = self.FSS_MOCK
|
||||
self.FSS_MOCK._fss_request.return_value = API_RESPONSE
|
||||
self.stubs.Set(time, 'sleep', Fake_sleep)
|
||||
|
||||
def tearDown(self):
|
||||
super(TestRESTProxy, self).tearDown()
|
||||
|
||||
def test_do_setup(self):
|
||||
self.proxy.do_setup()
|
||||
self.FSS_MOCK.fss_login.assert_called_once_with()
|
||||
self.assertNotEqual(self.proxy.session_id, SESSION_ID)
|
||||
|
||||
def test_create_volume(self):
|
||||
sizemb = self.proxy._convert_size_to_mb(VOLUME['size'])
|
||||
volume_name = self.proxy._get_fss_volume_name(VOLUME)
|
||||
|
||||
params = dict(storagepoolid=self.proxy.fss_defined_pool,
|
||||
sizemb=sizemb,
|
||||
category="virtual",
|
||||
name=volume_name)
|
||||
self.proxy.create_vdev(VOLUME)
|
||||
self.FSS_MOCK.create_vdev.assert_called_once_with(params)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
|
||||
return_value=FAKE_ID)
|
||||
def test_extend_volume(self, mock__get_fss_vid_from_name):
|
||||
size = self.proxy._convert_size_to_mb(EXTENT_NEW_SIZE - VOLUME['size'])
|
||||
params = dict(
|
||||
action='expand',
|
||||
sizemb=size
|
||||
)
|
||||
volume_name = self.proxy._get_fss_volume_name(VOLUME)
|
||||
self.proxy.extend_vdev(volume_name, VOLUME["size"], EXTENT_NEW_SIZE)
|
||||
|
||||
mock__get_fss_vid_from_name.assert_called_once_with(volume_name,
|
||||
FSS_SINGLE_TYPE)
|
||||
self.FSS_MOCK.extend_vdev.assert_called_once_with(FAKE_ID, params)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
|
||||
return_value=FAKE_ID)
|
||||
def test_delete_volume(self, mock__get_fss_vid_from_name):
|
||||
volume_name = self.proxy._get_fss_volume_name(VOLUME)
|
||||
self.proxy.delete_vdev(VOLUME)
|
||||
mock__get_fss_vid_from_name.assert_called_once_with(volume_name,
|
||||
FSS_SINGLE_TYPE)
|
||||
self.FSS_MOCK.delete_vdev.assert_called_once_with(FAKE_ID)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
|
||||
return_value=FAKE_ID)
|
||||
def test_clone_volume(self, mock__get_fss_vid_from_name):
|
||||
self.FSS_MOCK.create_mirror.return_value = API_RESPONSE
|
||||
self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE
|
||||
mirror_params = dict(
|
||||
category='virtual',
|
||||
selectioncriteria='anydrive',
|
||||
mirrortarget="virtual",
|
||||
storagepoolid=self.proxy.fss_defined_pool
|
||||
)
|
||||
ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME)
|
||||
|
||||
self.FSS_MOCK.create_mirror.assert_called_once_with(FAKE_ID,
|
||||
mirror_params)
|
||||
self.FSS_MOCK.sync_mirror.assert_called_once_with(FAKE_ID)
|
||||
self.FSS_MOCK.promote_mirror.assert_called_once_with(FAKE_ID,
|
||||
VOLUME_NAME)
|
||||
self.assertNotEqual(ret, VOLUME_METADATA)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
|
||||
return_value=FAKE_ID)
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap',
|
||||
return_value=VOLUME_NAME)
|
||||
def test_create_snapshot(self, mock__get_vol_name_from_snap,
|
||||
mock__get_fss_vid_from_name,
|
||||
mock_create_vdev_snapshot):
|
||||
self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = [
|
||||
False, False, SNAPSHOT['volume_size']]
|
||||
|
||||
self.proxy.create_snapshot(SNAPSHOT)
|
||||
self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_once_with(
|
||||
FAKE_ID)
|
||||
sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size'])
|
||||
mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb)
|
||||
self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
|
||||
FAKE_ID,
|
||||
storagepoolid=self.proxy.fss_defined_pool)
|
||||
self.FSS_MOCK.create_timemark.assert_called_once_with(
|
||||
FAKE_ID,
|
||||
SNAPSHOT["display_name"])
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_timestamp',
|
||||
return_value=RAWTIMESTAMP)
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
|
||||
return_value=FAKE_ID)
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap',
|
||||
return_value=VOLUME_NAME)
|
||||
def test_delete_snapshot(self, mock__get_vol_name_from_snap,
|
||||
mock__get_fss_vid_from_name,
|
||||
mock__get_timestamp):
|
||||
timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP)
|
||||
|
||||
self.proxy.delete_snapshot(SNAPSHOT)
|
||||
mock__get_vol_name_from_snap.assert_called_once_with(SNAPSHOT)
|
||||
self.FSS_MOCK.delete_timemark.assert_called_once_with(timestamp)
|
||||
self.FSS_MOCK.get_timemark.assert_any_call(FAKE_ID)
|
||||
self.assertEqual(2, self.FSS_MOCK.get_timemark.call_count)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_timestamp')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap')
|
||||
def test_create_volume_from_snapshot(self, mock__get_vol_name_from_snap,
|
||||
mock__get_fss_vid_from_name,
|
||||
mock__get_timestamp):
|
||||
tm_info = {"rc": 0,
|
||||
"data":
|
||||
{
|
||||
"guid": "497bad5e-e589-bb0a-e0e7-00004eeac169",
|
||||
"name": "SANDisk-001",
|
||||
"total": "1",
|
||||
"timemark": [
|
||||
{
|
||||
"size": 131072,
|
||||
"comment": "123test456",
|
||||
"hastimeview": False,
|
||||
"priority": "low",
|
||||
"quiescent": "yes",
|
||||
"timeviewdata": "notkept",
|
||||
"rawtimestamp": "1324975390",
|
||||
"timestamp": "2015-10-11 16:43:10"
|
||||
}]
|
||||
}
|
||||
}
|
||||
mock__get_vol_name_from_snap.return_value = VOLUME_NAME
|
||||
new_vol_name = self.proxy._get_fss_volume_name(VOLUME)
|
||||
mock__get_fss_vid_from_name.return_value = FAKE_ID
|
||||
|
||||
self.FSS_MOCK.get_timemark.return_value = tm_info
|
||||
mock__get_timestamp.return_value = RAWTIMESTAMP
|
||||
timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP)
|
||||
|
||||
self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT)
|
||||
self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID)
|
||||
mock__get_timestamp.assert_called_once_with(tm_info,
|
||||
SNAPSHOT['display_name'])
|
||||
self.FSS_MOCK.copy_timemark.assert_called_once_with(
|
||||
timestamp,
|
||||
storagepoolid=self.proxy.fss_defined_pool,
|
||||
name=new_vol_name)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
|
||||
def test_create_consistency_group(self, mock__get_group_name_from_id):
|
||||
|
||||
mock__get_group_name_from_id.return_value = CONSISTGROUP['name']
|
||||
params = dict(name=CONSISTGROUP['name'])
|
||||
self.proxy.create_group(CONSISTGROUP)
|
||||
self.FSS_MOCK.create_group.assert_called_once_with(params)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
|
||||
def test_delete_consistency_group(self, mock__get_group_name_from_id,
|
||||
mock__get_fss_gid_from_name):
|
||||
mock__get_group_name_from_id.return_value = CONSISTGROUP['name']
|
||||
mock__get_fss_gid_from_name.return_value = FAKE_ID
|
||||
|
||||
self.proxy.destroy_group(CONSISTGROUP)
|
||||
mock__get_group_name_from_id.assert_called_once_with(
|
||||
CONSISTGROUP['id'])
|
||||
mock__get_fss_gid_from_name.assert_called_once_with(
|
||||
CONSISTGROUP['name'])
|
||||
self.FSS_MOCK.destroy_group.assert_called_once_with(FAKE_ID)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
|
||||
def test_update_consistency_group(self, mock__get_group_name_from_id,
|
||||
mock__get_fss_gid_from_name,
|
||||
mock__get_fss_vid_from_name):
|
||||
join_vid_list = [1, 2]
|
||||
leave_vid_list = [3, 4]
|
||||
mock__get_group_name_from_id.return_value = CONSISTGROUP['name']
|
||||
mock__get_fss_gid_from_name.return_value = FAKE_ID
|
||||
mock__get_fss_vid_from_name.side_effect = [join_vid_list,
|
||||
leave_vid_list]
|
||||
add_vols = [
|
||||
{'name': 'vol1', 'id': 'vol1'},
|
||||
{'name': 'vol2', 'id': 'vol2'}
|
||||
]
|
||||
remove_vols = [
|
||||
{'name': 'vol3', 'id': 'vol3'},
|
||||
{'name': 'vol4', 'id': 'vol4'}
|
||||
]
|
||||
expected_addvollist = ["cinder-%s" % volume['id'] for volume in
|
||||
add_vols]
|
||||
expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols]
|
||||
|
||||
self.proxy.set_group(CONSISTGROUP, addvollist=expected_addvollist,
|
||||
remvollist=expected_remvollist)
|
||||
|
||||
if expected_addvollist:
|
||||
mock__get_fss_vid_from_name.assert_any_call(expected_addvollist)
|
||||
|
||||
if expected_remvollist:
|
||||
mock__get_fss_vid_from_name.assert_any_call(expected_remvollist)
|
||||
self.assertEqual(2, mock__get_fss_vid_from_name.call_count)
|
||||
|
||||
join_params = dict()
|
||||
leave_params = dict()
|
||||
|
||||
join_params.update(
|
||||
action='join',
|
||||
virtualdevices=join_vid_list
|
||||
)
|
||||
leave_params.update(
|
||||
action='leave',
|
||||
virtualdevices=leave_vid_list
|
||||
)
|
||||
self.FSS_MOCK.set_group.assert_called_once_with(FAKE_ID, join_params,
|
||||
leave_params)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot')
|
||||
@mock.patch.object(proxy.RESTProxy, 'create_group_timemark')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_vdev_id_from_group_id')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
|
||||
def test_create_cgsnapshot(self, mock__get_group_name_from_id,
|
||||
mock__get_fss_gid_from_name,
|
||||
mock__get_vdev_id_from_group_id,
|
||||
mock_create_group_timemark,
|
||||
mock_create_vdev_snapshot
|
||||
):
|
||||
vid_list = [1]
|
||||
|
||||
group_name = "cinder-consisgroup-%s" % CG_SNAPSHOT[
|
||||
'consistencygroup_id']
|
||||
mock__get_group_name_from_id.return_value = group_name
|
||||
mock__get_fss_gid_from_name.return_value = FAKE_ID
|
||||
mock__get_vdev_id_from_group_id.return_value = vid_list
|
||||
gsnap_name = self.proxy._encode_name(CG_SNAPSHOT['id'])
|
||||
self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = (
|
||||
False,
|
||||
False,
|
||||
1024)
|
||||
|
||||
self.proxy.create_cgsnapshot(CG_SNAPSHOT)
|
||||
mock__get_group_name_from_id.assert_called_once_with(
|
||||
CG_SNAPSHOT['consistencygroup_id'])
|
||||
mock__get_fss_gid_from_name.assert_called_once_with(group_name)
|
||||
mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID)
|
||||
|
||||
for vid in vid_list:
|
||||
self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid)
|
||||
mock_create_vdev_snapshot.assert_called_once_with(vid, 1024)
|
||||
self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
|
||||
vid,
|
||||
storagepoolid=self.proxy.fss_defined_pool)
|
||||
|
||||
mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'delete_group_timemark')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_group_membercount')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
|
||||
def test_delete_cgsnapshot(self, mock__get_group_name_from_id,
|
||||
mock__get_fss_gid_from_name,
|
||||
mock__get_fss_group_membercount,
|
||||
mock_delete_group_timemark):
|
||||
tm_info = {
|
||||
"rc": 0,
|
||||
"data":
|
||||
{
|
||||
"name": "GroupTestABC",
|
||||
"total": 1,
|
||||
"timemark": [{
|
||||
"size": 65536,
|
||||
"comment": "cinder-PGGwaaaaaaaar+wYV4AMdgIPw",
|
||||
"priority": "low",
|
||||
"quiescent": "yes",
|
||||
"hastimeview": "false",
|
||||
"timeviewdata": "notkept",
|
||||
"rawtimestamp": "1324974940",
|
||||
"timestamp": "2015-10-15 16:35:40"}]
|
||||
}
|
||||
}
|
||||
final_tm_data = {
|
||||
"rc": 0,
|
||||
"data":
|
||||
{"name": "GroupTestABC",
|
||||
"total": 1,
|
||||
"timemark": []
|
||||
}}
|
||||
|
||||
mock__get_group_name_from_id.return_value = CG_SNAPSHOT[
|
||||
'consistencygroup_id']
|
||||
mock__get_fss_gid_from_name.return_value = FAKE_ID
|
||||
self.FSS_MOCK.get_group_timemark.side_effect = [tm_info, final_tm_data]
|
||||
encode_snap_name = self.proxy._encode_name(CG_SNAPSHOT['id'])
|
||||
self.proxy.delete_cgsnapshot(CG_SNAPSHOT)
|
||||
mock__get_fss_group_membercount.assert_called_once_with(FAKE_ID)
|
||||
|
||||
self.assertEqual(2, self.FSS_MOCK.get_group_timemark.call_count)
|
||||
self.FSS_MOCK.get_group_timemark.assert_any_call(FAKE_ID)
|
||||
rawtimestamp = self.proxy._get_timestamp(tm_info, encode_snap_name)
|
||||
timestamp = '%s_%s' % (FAKE_ID, rawtimestamp)
|
||||
mock_delete_group_timemark.assert_called_once_with(timestamp)
|
||||
self.FSS_MOCK.delete_group_timemark_policy.assert_called_once_with(
|
||||
FAKE_ID)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi')
|
||||
def test_iscsi_initialize_connection(self,
|
||||
mock_initialize_connection_iscsi):
|
||||
fss_hosts = []
|
||||
fss_hosts.append(PRIMARY_IP)
|
||||
self.proxy.initialize_connection_iscsi(VOLUME, ISCSI_CONNECTOR,
|
||||
fss_hosts)
|
||||
mock_initialize_connection_iscsi.assert_called_once_with(
|
||||
VOLUME,
|
||||
ISCSI_CONNECTOR,
|
||||
fss_hosts)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi')
|
||||
def test_iscsi_terminate_connection(self, mock_terminate_connection_iscsi):
|
||||
self.FSS_MOCK._get_target_info.return_value = (FAKE_ID, INITIATOR_IQN)
|
||||
|
||||
self.proxy.terminate_connection_iscsi(VOLUME, ISCSI_CONNECTOR)
|
||||
mock_terminate_connection_iscsi.assert_called_once_with(
|
||||
VOLUME,
|
||||
ISCSI_CONNECTOR)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'rename_vdev')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
|
||||
def test_manage_existing(self, mock__get_fss_volume_name,
|
||||
mock_rename_vdev):
|
||||
new_vol_name = 'rename-vol'
|
||||
mock__get_fss_volume_name.return_value = new_vol_name
|
||||
|
||||
self.proxy._manage_existing_volume(FAKE_ID, VOLUME)
|
||||
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
|
||||
mock_rename_vdev.assert_called_once_with(FAKE_ID, new_vol_name)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'list_volume_info')
|
||||
def test_manage_existing_get_size(self, mock_list_volume_info):
|
||||
volume_ref = {'source-id': FAKE_ID}
|
||||
vdev_info = {
|
||||
"rc": 0,
|
||||
"data": {
|
||||
"name": "cinder-2ab1f70a-6c89-432c-84e3-5fa6c187fb92",
|
||||
"type": "san",
|
||||
"category": "virtual",
|
||||
"sizemb": 1020
|
||||
}}
|
||||
|
||||
mock_list_volume_info.return_value = vdev_info
|
||||
self.proxy._get_existing_volume_ref_vid(volume_ref)
|
||||
mock_list_volume_info.assert_called_once_with(FAKE_ID)
|
||||
|
||||
@mock.patch.object(proxy.RESTProxy, 'rename_vdev')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name')
|
||||
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
|
||||
def test_unmanage(self, mock__get_fss_volume_name,
|
||||
mock__get_fss_vid_from_name,
|
||||
mock_rename_vdev):
|
||||
|
||||
mock__get_fss_volume_name.return_value = VOLUME_NAME
|
||||
mock__get_fss_vid_from_name.return_value = FAKE_ID
|
||||
unmanaged_vol_name = VOLUME_NAME + "-unmanaged"
|
||||
|
||||
self.proxy.unmanage(VOLUME)
|
||||
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
|
||||
mock__get_fss_vid_from_name.assert_called_once_with(VOLUME_NAME,
|
||||
FSS_SINGLE_TYPE)
|
||||
mock_rename_vdev.assert_called_once_with(FAKE_ID, unmanaged_vol_name)
|
cinder/volume/drivers/falconstor/__init__.py (new file, 0 lines)
cinder/volume/drivers/falconstor/fc.py (new file, 110 lines)
@@ -0,0 +1,110 @@
# Copyright (c) 2016 FalconStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fibre channel Cinder volume driver for FalconStor FSS storage system.

This driver requires FSS-8.00-8865 or later.
"""

from oslo_log import log as logging

from cinder import exception
from cinder.i18n import _, _LE
from cinder import interface
import cinder.volume.driver
from cinder.volume.drivers.falconstor import fss_common
from cinder.zonemanager import utils as fczm_utils

LOG = logging.getLogger(__name__)


@interface.volumedriver
class FSSFCDriver(fss_common.FalconstorBaseDriver,
                  cinder.volume.driver.FibreChannelDriver):
    """Implements commands for FalconStor FSS FC management.

    To enable the driver add the following line to the cinder configuration:
    volume_driver=cinder.volume.drivers.falconstor.fc.FSSFCDriver

    Version history:
        1.0.0 - Initial driver

    """

    VERSION = '1.0.0'

    def __init__(self, *args, **kwargs):
        super(FSSFCDriver, self).__init__(*args, **kwargs)
        self.gateway_fc_wwns = []
        self._storage_protocol = "FC"
        self._backend_name = (
            self.configuration.safe_get('volume_backend_name') or
            self.__class__.__name__)
        self._lookup_service = fczm_utils.create_lookup_service()

    def do_setup(self, context):
        """Any initialization the driver does while starting."""
        super(FSSFCDriver, self).do_setup(context)
        self.gateway_fc_wwns = self.proxy.list_fc_target_wwpn()

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        super(FSSFCDriver, self).check_for_setup_error()
        if len(self.gateway_fc_wwns) == 0:
            msg = _('No FC targets found')
            raise exception.InvalidHost(reason=msg)

    def validate_connector(self, connector):
        """Check connector for at least one enabled FC protocol."""
        if 'FC' == self._storage_protocol and 'wwpns' not in connector:
            LOG.error(_LE('The connector does not contain the required '
                          'information.'))
            raise exception.InvalidConnectorException(missing='wwpns')

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        fss_hosts = []
        fss_hosts.append(self.configuration.san_ip)
        target_info = self.proxy.fc_initialize_connection(volume, connector,
                                                          fss_hosts)
        init_targ_map = self._build_initiator_target_map(
            target_info['available_initiator'])

        fc_info = {'driver_volume_type': 'fibre_channel',
                   'data': {'target_lun': int(target_info['lun']),
                            'target_discovered': True,
                            'target_wwn': self.gateway_fc_wwns,
                            'initiator_target_map': init_targ_map,
                            'volume_id': volume['id'],
                            }
                   }
        return fc_info

    def _build_initiator_target_map(self, initiator_wwns):
        """Build the target_wwns and the initiator target map."""
        init_targ_map = dict.fromkeys(initiator_wwns, self.gateway_fc_wwns)
        return init_targ_map

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        host_id = self.proxy.fc_terminate_connection(volume, connector)
        fc_info = {"driver_volume_type": "fibre_channel", "data": {}}
        if self.proxy._check_fc_host_devices_empty(host_id):
            available_initiator, fc_initiators_info = (
                self.proxy._get_fc_client_initiators(connector))
            init_targ_map = self._build_initiator_target_map(
                available_initiator)
            fc_info["data"] = {"target_wwn": self.gateway_fc_wwns,
                               "initiator_target_map": init_targ_map}
        return fc_info
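For readers unfamiliar with the FC zoning data above, here is a minimal sketch (not part of the change; the initiator WWPNs are the fake values from the unit tests, and the gateway target list is an assumed example) of what _build_initiator_target_map returns:

# Each initiator WWPN is mapped to the full list of FSS gateway target WWPNs.
initiator_wwns = ['2100000d778301c3', '2101000d77a301c3']
gateway_fc_wwns = ['11000024ff2d2ca4', '11000024ff2d2ca5']  # assumed gateway targets
init_targ_map = dict.fromkeys(initiator_wwns, gateway_fc_wwns)
# {'2100000d778301c3': ['11000024ff2d2ca4', '11000024ff2d2ca5'],
#  '2101000d77a301c3': ['11000024ff2d2ca4', '11000024ff2d2ca5']}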
cinder/volume/drivers/falconstor/fss_common.py (new file, 399 lines)
@@ -0,0 +1,399 @@
# Copyright (c) 2016 FalconStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for FalconStor FSS storage system.

This driver requires FSS-8.00-8865 or later.
"""

import math
import re

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume.drivers.falconstor import rest_proxy
from cinder.volume.drivers.san import san

LOG = logging.getLogger(__name__)

FSS_OPTS = [
    cfg.IntOpt('fss_pool',
               default='',
               help='FSS pool id in which FalconStor volumes are stored.'),
    cfg.BoolOpt('fss_debug',
                default=False,
                help="Enable HTTP debugging to FSS"),
    cfg.StrOpt('additional_retry_list',
               default='',
               help='FSS additional retry list, separate by ;')
]

CONF = cfg.CONF
CONF.register_opts(FSS_OPTS)


class FalconstorBaseDriver(san.SanDriver):

    def __init__(self, *args, **kwargs):
        super(FalconstorBaseDriver, self).__init__(*args, **kwargs)
        if self.configuration:
            self.configuration.append_config_values(FSS_OPTS)

        self.proxy = rest_proxy.RESTProxy(self.configuration)
        self._backend_name = (
            self.configuration.safe_get('volume_backend_name') or 'FalconStor')
        self._storage_protocol = 'iSCSI'

    def do_setup(self, context):
        self.proxy.do_setup()
        LOG.info(_LI('Activate FalconStor cinder volume driver.'))

    def check_for_setup_error(self):
        if self.proxy.session_id is None:
            msg = (_('FSS cinder volume driver not ready: Unable to determine '
                     'session id.'))
            raise exception.VolumeBackendAPIException(data=msg)

        if not self.configuration.fss_pool:
            msg = _('Pool is not available in the cinder configuration '
                    'fields.')
            raise exception.InvalidHost(reason=msg)

        self._pool_checking(self.configuration.fss_pool)

    def _pool_checking(self, pool_id):
        pool_count = 0
        try:
            output = self.proxy.list_pool_info(pool_id)
            if "name" in output['data']:
                pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
                                            output['data']['name']))
            if pool_count is 0:
                msg = (_('The given pool info must include the storage pool '
                         'and naming start with OpenStack-'))
                raise exception.VolumeBackendAPIException(data=msg)
        except Exception:
            msg = (_('Unexpected exception during pool checking.'))
            LOG.exception(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def _check_multipath(self):
        if self.configuration.use_multipath_for_image_xfer:
            if not self.configuration.san_secondary_ip:
                msg = (_('The san_secondary_ip param is null.'))
                raise exception.VolumeBackendAPIException(data=msg)
            output = self.proxy._check_iocluster_state()
            if not output:
                msg = (_('FSS do not support multipathing.'))
                raise exception.VolumeBackendAPIException(data=msg)
            return output
        else:
            return False

    def create_volume(self, volume):
        """Creates a volume.

        We use the metadata of the volume to create variety volume.

        Create a thin provisioned volume :
        [Usage] create --volume-type FSS --metadata thinprovisioned=true
        thinsize=<thin-volume-size>

        Create a LUN that is a Timeview of another LUN at a specified CDP tag:
        [Usage] create --volume-type FSS --metadata timeview=<vid>
        cdptag=<tag> volume-size

        Create a LUN that is a Timeview of another LUN at a specified Timemark:
        [Usage] create --volume-type FSS --metadata timeview=<vid>
        rawtimestamp=<rawtimestamp> volume-size

        """

        volume_metadata = self._get_volume_metadata(volume)
        if not volume_metadata:
            volume_name, fss_metadata = self.proxy.create_vdev(volume)
        else:
            if ("timeview" in volume_metadata and
                    ("cdptag" in volume_metadata) or
                    ("rawtimestamp" in volume_metadata)):
                volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag(
                    volume_metadata, volume)
            elif ("thinprovisioned" in volume_metadata and
                    "thinsize" in volume_metadata):
                volume_name, fss_metadata = self.proxy.create_thin_vdev(
                    volume_metadata, volume)
            else:
                volume_name, fss_metadata = self.proxy.create_vdev(volume)
            fss_metadata.update(volume_metadata)

        if type(volume['metadata']) is dict:
            fss_metadata.update(volume['metadata'])
        if volume['consistencygroup_id']:
            self.proxy._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                volume_name
            )
        return {'metadata': fss_metadata}

    def _get_volume_metadata(self, volume):
        volume_metadata = {}
        if 'volume_metadata' in volume:
            for metadata in volume['volume_metadata']:
                volume_metadata[metadata['key']] = metadata['value']
        return volume_metadata

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        new_vol_name = self.proxy._get_fss_volume_name(volume)
        src_name = self.proxy._get_fss_volume_name(src_vref)
        vol_size = volume["size"]
        src_size = src_vref["size"]
        fss_metadata = self.proxy.clone_volume(new_vol_name, src_name)
        self.proxy.extend_vdev(new_vol_name, src_size, vol_size)

        if volume['consistencygroup_id']:
            self.proxy._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                new_vol_name
            )
        volume_metadata = self._get_volume_metadata(volume)
        fss_metadata.update(volume_metadata)

        if type(volume['metadata']) is dict:
            fss_metadata.update(volume['metadata'])
        return {'metadata': fss_metadata}

    def extend_volume(self, volume, new_size):
        """Extend volume to new_size."""
        volume_name = self.proxy._get_fss_volume_name(volume)
        self.proxy.extend_vdev(volume_name, volume["size"], new_size)

    def delete_volume(self, volume):
        """Disconnect all hosts and delete the volume"""
        try:
            self.proxy.delete_vdev(volume)
        except rest_proxy.FSSHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                LOG.warning(_LW("Volume deletion failed with message: %s"),
                            err.reason)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        snap_metadata = snapshot["metadata"]
        metadata = self.proxy.create_snapshot(snapshot)
        snap_metadata.update(metadata)
        return {'metadata': snap_metadata}

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        try:
            self.proxy.delete_snapshot(snapshot)
        except rest_proxy.FSSHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                LOG.error(
                    _LE("Snapshot deletion failed with message: %s"),
                    err.reason)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        vol_size = volume['size']
        snap_size = snapshot['volume_size']
        volume_name, fss_metadata = self.proxy.create_volume_from_snapshot(
            volume, snapshot)

        if vol_size != snap_size:
            try:
                extend_volume_name = self.proxy._get_fss_volume_name(volume)
                self.proxy.extend_vdev(extend_volume_name, snap_size, vol_size)
            except rest_proxy.FSSHTTPError as err:
                with excutils.save_and_reraise_exception() as ctxt:
                    ctxt.reraise = False
                    LOG.error(_LE(
                        "Resizing %(id)s failed with message: %(msg)s. "
                        "Cleaning volume."), {'id': volume["id"],
                                              'msg': err.reason})

        if type(volume['metadata']) is dict:
            fss_metadata.update(volume['metadata'])

        if volume['consistencygroup_id']:
            self.proxy._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                volume_name)
        return {'metadata': fss_metadata}

    def ensure_export(self, context, volume):
        pass

    def create_export(self, context, volume, connector):
        pass

    def remove_export(self, context, volume):
        pass

    # Attach/detach volume to instance/host
    def attach_volume(self, context, volume, instance_uuid, host_name,
                      mountpoint):
        pass

    def detach_volume(self, context, volume, attachment=None):
        pass

    def get_volume_stats(self, refresh=False):
        total_capacity = 0
        free_space = 0
        if refresh:
            try:
                info = self.proxy._get_pools_info()
                if info:
                    total_capacity = int(info['total_capacity_gb'])
                    used_space = int(info['used_gb'])
                    free_space = int(total_capacity - used_space)

                data = {"vendor_name": "FalconStor",
                        "volume_backend_name": self._backend_name,
                        "driver_version": self.VERSION,
                        "storage_protocol": self._storage_protocol,
                        "total_capacity_gb": total_capacity,
                        "free_capacity_gb": free_space,
                        "reserved_percentage": 0,
                        "consistencygroup_support": True
                        }

                self._stats = data

            except Exception as exc:
                LOG.error(_LE('Cannot get volume status %(exc)s.'),
                          {'exc': exc})
        return self._stats

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        self.proxy.create_group(group)
        model_update = {'status': 'available'}
        return model_update

    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group."""
        self.proxy.destroy_group(group)
        volume_updates = []
        for volume in volumes:
            self.delete_volume(volume)
            volume_updates.append({
                'id': volume.id,
                'status': 'deleted'
            })

        model_update = {'status': group['status']}
        return model_update, volume_updates

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        addvollist = []
        remvollist = []
        if add_volumes:
            for volume in add_volumes:
                addvollist.append(self.proxy._get_fss_volume_name(volume))
        if remove_volumes:
            for volume in remove_volumes:
                remvollist.append(self.proxy._get_fss_volume_name(volume))

        self.proxy.set_group(group['id'], addvollist=addvollist,
                             remvollist=remvollist)
        return None, None, None

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot."""
        cgsnapshot_id = cgsnapshot['id']
        try:
            self.proxy.create_cgsnapshot(cgsnapshot)
        except Exception as e:
            msg = _('Failed to create cg snapshot %(id)s '
                    'due to %(reason)s.') % {'id': cgsnapshot_id,
                                             'reason': six.text_type(e)}
            raise exception.VolumeBackendAPIException(data=msg)

        snapshot_updates = []
        for snapshot in snapshots:
            snapshot_updates.append({
                'id': snapshot.id,
                'status': 'available'
            })
        model_update = {'status': 'available'}
        return model_update, snapshot_updates

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot."""
        cgsnapshot_id = cgsnapshot.id
        try:
            self.proxy.delete_cgsnapshot(cgsnapshot)
        except Exception as e:
            msg = _('Failed to delete cgsnapshot %(id)s '
                    'due to %(reason)s.') % {'id': cgsnapshot_id,
                                             'reason': six.text_type(e)}
            raise exception.VolumeBackendAPIException(data=msg)

        snapshot_updates = []
        for snapshot in snapshots:
            snapshot_updates.append({
                'id': snapshot.id,
                'status': 'deleted',
            })
        model_update = {'status': cgsnapshot.status}
        return model_update, snapshot_updates

    def manage_existing(self, volume, existing_ref):
        """Convert an existing FSS volume to a Cinder volume.

        We expect a volume id in the existing_ref that matches one in FSS.
        """
        volume_metadata = {}
        self.proxy._get_existing_volume_ref_vid(existing_ref)
        self.proxy._manage_existing_volume(existing_ref['source-id'], volume)
        volume_metadata['FSS-vid'] = existing_ref['source-id']
        updates = {'metadata': volume_metadata}
        return updates

    def manage_existing_get_size(self, volume, existing_ref):
        """Get size of an existing FSS volume.

        We expect a volume id in the existing_ref that matches one in FSS.
        """
        sizemb = self.proxy._get_existing_volume_ref_vid(existing_ref)
        size = int(math.ceil(float(sizemb) / units.Ki))
        return size

    def unmanage(self, volume):
        """Remove Cinder management from FSS volume"""
        self.proxy.unmanage(volume)

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        with image_utils.temporary_file() as tmp:
            image_utils.fetch_verify_image(context, image_service,
                                           image_id, tmp)
            image_utils.fetch_to_raw(context,
                                     image_service,
                                     image_id,
                                     tmp,
                                     self.configuration.volume_dd_blocksize,
                                     size=volume['size'])
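A small worked example of the size conversion in manage_existing_get_size (a sketch for illustration only, not part of the change): FSS reports volume sizes in MB and the driver rounds up to whole GB, which is why the unit test that returns 5120 MB expects a size of 5.

import math
from oslo_utils import units

sizemb = 5120                                        # MB reported by FSS
size_gb = int(math.ceil(float(sizemb) / units.Ki))   # units.Ki == 1024
assert size_gb == 5                                  # a 1020 MB vdev would likewise report as 1 GB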
cinder/volume/drivers/falconstor/iscsi.py (new file, 102 lines)
@@ -0,0 +1,102 @@
# Copyright (c) 2016 FalconStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for FalconStor FSS storage system.

This driver requires FSS-8.00-8865 or later.
"""

from cinder import interface
import cinder.volume.driver
from cinder.volume.drivers.falconstor import fss_common

DEFAULT_ISCSI_PORT = 3260


@interface.volumedriver
class FSSISCSIDriver(fss_common.FalconstorBaseDriver,
                     cinder.volume.driver.ISCSIDriver):

    """Implements commands for FalconStor FSS ISCSI management.

    To enable the driver add the following line to the cinder configuration:
    volume_driver=cinder.volume.drivers.falconstor.iscsi.FSSISCSIDriver

    Version history:
        1.0.0 - Initial driver
        1.0.1 - Fix copy_image_to_volume error.
        1.0.2 - Closes-Bug #1554184, add lun id type conversion in
                initialize_connection
        1.03 - merge source code
        1.04 - Fixed create_volume_from_snapshot(), create_cloned_volume()
               metadata TypeError
        2.0.0 - Mitaka driver
                -- fixed consisgroup commands error.
        2.0.1 -- fixed bugs
        2.0.2 -- support Multipath
        3.0.0 - Newton driver

    """

    VERSION = '3.0.0'

    def __init__(self, *args, **kwargs):
        super(FSSISCSIDriver, self).__init__(*args, **kwargs)
        self._storage_protocol = "iSCSI"
        self._backend_name = (
            self.configuration.safe_get('volume_backend_name') or
            self.__class__.__name__)

    def initialize_connection(self, volume, connector, initiator_data=None):
        fss_hosts = []
        target_portal = []
        multipath = connector.get('multipath', False)
        fss_hosts.append(self.configuration.san_ip)

        if multipath:
            if self._check_multipath():
                fss_hosts.append(self.configuration.san_secondary_ip)
            else:
                multipath = False

        for host in fss_hosts:
            iscsi_ip_port = "%s:%d" % (host, DEFAULT_ISCSI_PORT)
            target_portal.append(iscsi_ip_port)

        target_info = self.proxy.initialize_connection_iscsi(volume,
                                                             connector,
                                                             fss_hosts)
        properties = {}
        properties['target_discovered'] = True
        properties['discard'] = True
        properties['encrypted'] = False
        properties['qos_specs'] = None
        properties['access_mode'] = 'rw'
        properties['volume_id'] = volume['id']
        properties['target_iqn'] = target_info['iqn']
        properties['target_portal'] = target_portal[0]
        properties['target_lun'] = int(target_info['lun'])

        if multipath:
            properties['target_iqns'] = [target_info['iqn'],
                                         target_info['iqn']]
            properties['target_portals'] = target_portal
            properties['target_luns'] = [int(target_info['lun']),
                                         int(target_info['lun'])]

        return {'driver_volume_type': 'iscsi', 'data': properties}

    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection."""
        self.proxy.terminate_connection_iscsi(volume, connector)
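As a rough sketch (example addresses only, not part of the change) of the portal list that initialize_connection builds when multipath is in use: both the primary san_ip and san_secondary_ip get the default iSCSI port appended, and the resulting list is returned as target_portals with the same IQN and LUN repeated per portal.

DEFAULT_ISCSI_PORT = 3260
fss_hosts = ['10.0.0.1', '10.0.0.2']  # san_ip plus san_secondary_ip (example values)
target_portals = ["%s:%d" % (host, DEFAULT_ISCSI_PORT) for host in fss_hosts]
# ['10.0.0.1:3260', '10.0.0.2:3260']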
cinder/volume/drivers/falconstor/rest_proxy.py (new file, 1530 lines)
(File diff suppressed because it is too large.)
@@ -0,0 +1,4 @@
---
features:
  - Added backend driver for FalconStor FreeStor.