Extend FalconStor driver to utilize multiple FSS storage pools

Changed the pool format from an integer to key-value pairs in the
configuration file. Use A for a single storage pool, or P/O for
multiple pools.
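
An illustrative cinder.conf snippet (the backend section name and pool
ids are placeholders, not part of this change):

[fss-backend]
fss_pools = A:1
# or, with a primary pool plus a pool for all other devices:
# fss_pools = P:1,O:2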

DocImpact
Implements: blueprint falconstor-extend-cinder-driver
Change-Id: Ic5362c8284f2d69989d820173c306e5856df111b
Soffie Huang 2017-01-19 16:26:03 +08:00
parent 6ad77cc4ef
commit 213001f931
5 changed files with 202 additions and 65 deletions


@@ -32,6 +32,8 @@ ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver"
PRIMARY_IP = '10.0.0.1'
SECONDARY_IP = '10.0.0.2'
FAKE_ID = 123
FAKE_SINGLE_POOLS = {'A': 1}
FAKE_MULTIPLE_POOLS = {'P': 1, 'O': 2}
FAKE = 'fake'
FAKE_HOST = 'fakehost'
API_RESPONSE = {'rc': 0}
@@ -214,7 +216,7 @@ class FSSDriverTestCase(test.TestCase):
self.mock_config.san_ip = PRIMARY_IP
self.mock_config.san_login = FAKE
self.mock_config.san_password = FAKE
self.mock_config.fss_pool = FAKE_ID
self.mock_config.fss_pools = FAKE_SINGLE_POOLS
self.mock_config.san_is_local = False
self.mock_config.fss_debug = False
self.mock_config.additional_retry_list = False
@@ -237,8 +239,8 @@ class TestFSSISCSIDriver(FSSDriverTestCase):
def test_initialized_should_set_fss_info(self):
self.assertEqual(self.driver.proxy.fss_host,
self.driver.configuration.san_ip)
self.assertEqual(self.driver.proxy.fss_defined_pool,
self.driver.configuration.fss_pool)
self.assertEqual(self.driver.proxy.fss_defined_pools,
self.driver.configuration.fss_pools)
def test_check_for_setup_error(self):
self.assertRaises(exception.VolumeBackendAPIException,
@@ -527,7 +529,7 @@ class TestRESTProxy(test.TestCase):
configuration.san_ip = FAKE
configuration.san_login = FAKE
configuration.san_password = FAKE
configuration.fss_pool = FAKE_ID
configuration.fss_pools = FAKE_SINGLE_POOLS
configuration.fss_debug = False
configuration.additional_retry_list = None
@@ -545,8 +547,9 @@ class TestRESTProxy(test.TestCase):
def test_create_volume(self):
sizemb = self.proxy._convert_size_to_mb(VOLUME['size'])
volume_name = self.proxy._get_fss_volume_name(VOLUME)
_pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "P")
params = dict(storagepoolid=self.proxy.fss_defined_pool,
params = dict(storagepoolid=_pool_id,
sizemb=sizemb,
category="virtual",
name=volume_name)
@@ -582,11 +585,12 @@ class TestRESTProxy(test.TestCase):
def test_clone_volume(self, mock__get_fss_vid_from_name):
self.FSS_MOCK.create_mirror.return_value = API_RESPONSE
self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE
_pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")
mirror_params = dict(
category='virtual',
selectioncriteria='anydrive',
mirrortarget="virtual",
storagepoolid=self.proxy.fss_defined_pool
storagepoolid=_pool_id
)
ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME)
@@ -613,9 +617,10 @@ class TestRESTProxy(test.TestCase):
FAKE_ID)
sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size'])
mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb)
_pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")
self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
FAKE_ID,
storagepoolid=self.proxy.fss_defined_pool)
storagepoolid=_pool_id)
self.FSS_MOCK.create_timemark.assert_called_once_with(
FAKE_ID,
SNAPSHOT["display_name"])
@@ -669,6 +674,7 @@ class TestRESTProxy(test.TestCase):
self.FSS_MOCK.get_timemark.return_value = tm_info
mock__get_timestamp.return_value = RAWTIMESTAMP
timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP)
_pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")
self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID)
@@ -676,7 +682,7 @@ class TestRESTProxy(test.TestCase):
SNAPSHOT['display_name'])
self.FSS_MOCK.copy_timemark.assert_called_once_with(
timestamp,
storagepoolid=self.proxy.fss_defined_pool,
storagepoolid=_pool_id,
name=new_vol_name)
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
@@ -778,13 +784,14 @@ class TestRESTProxy(test.TestCase):
CG_SNAPSHOT['consistencygroup_id'])
mock__get_fss_gid_from_name.assert_called_once_with(group_name)
mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID)
_pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")
for vid in vid_list:
self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid)
mock_create_vdev_snapshot.assert_called_once_with(vid, 1024)
self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
vid,
storagepoolid=self.proxy.fss_defined_pool)
storagepoolid=_pool_id)
mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name)


@@ -36,7 +36,20 @@ LOG = logging.getLogger(__name__)
FSS_OPTS = [
cfg.IntOpt('fss_pool',
default='',
help='FSS pool id in which FalconStor volumes are stored.'),
help='DEPRECATED: FSS pool id in which FalconStor volumes are '
'stored.',
deprecated_since='Pike',
deprecated_reason='This option will be removed once Queens '
'development opens up. Please use fss_pools '
'instead.'),
cfg.DictOpt('fss_pools',
default={},
help='FSS pool id list in which FalconStor volumes are stored.'
' If you have only one pool, use A:<pool-id>. '
'You can also have up to two storage pools, '
'P for primary and O for all supporting devices. '
'The usage is P:<primary-pool-id>,O:<others-pool-id>',
deprecated_name='fss_pool'),
cfg.StrOpt('fss_san_secondary_ip',
default='',
help='Specifies FSS secondary management IP to be used '
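
A minimal sketch of how the new option is parsed, assuming oslo.config's
standard key:value,key:value syntax for DictOpt (the standalone
ConfigOpts instance below is for illustration only; the driver receives
the option through cinder's configuration object):

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([cfg.DictOpt('fss_pools', default={})])

# "fss_pools = P:1,O:2" in cinder.conf parses to a dict of *string*
# values, which is why the REST proxy compares six.text_type(item['id'])
# against fss_defined_pools.values().
CONF.set_override('fss_pools', {'P': '1', 'O': '2'})
print(CONF.fss_pools)  # {'P': '1', 'O': '2'}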
@@ -54,12 +67,20 @@ CONF.register_opts(FSS_OPTS)
class FalconstorBaseDriver(san.SanDriver):
def __init__(self, *args, **kwargs):
super(FalconstorBaseDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(FSS_OPTS)
if self.configuration.fss_pool:
self.configuration.fss_pools = {'A': str(
self.configuration.fss_pool)}
LOG.warning("'fss_pool=<pool-id>' is deprecated. Using the "
"fss_pools=A:<pool-id> for single pool or "
"fss_pools=P:<pool-id>,O:<other-pool-id> instead "
"as old format will be removed once Queens development"
" opens up.")
self.proxy = rest_proxy.RESTProxy(self.configuration)
self._backend_name = (
self.configuration.safe_get('volume_backend_name') or 'FalconStor')
@@ -71,54 +92,93 @@ class FalconstorBaseDriver(san.SanDriver):
def check_for_setup_error(self):
if self.proxy.session_id is None:
msg = (_('FSS cinder volume driver not ready: Unable to determine '
'session id.'))
msg = _('FSS cinder volume driver not ready: Unable to determine '
'session id.')
raise exception.VolumeBackendAPIException(data=msg)
if not self.configuration.fss_pool:
if self.configuration.fss_pool:
self.configuration.fss_pools = {'A': six.text_type(
self.configuration.fss_pool)}
# The fss_pool is deprecated.
LOG.warning("'fss_pool=<pool-id>' is deprecated. Using the "
"fss_pools=A:<pool-id> for single pool or "
"fss_pools=P:<pool-id>,O:<other-pool-id> instead "
"as old format will be removed once Queens development"
" opens up.")
if not self.configuration.fss_pools:
msg = _('Pool is not available in the cinder configuration '
'fields.')
raise exception.InvalidHost(reason=msg)
self._pool_checking(self.configuration.fss_pools)
self._pool_checking(self.configuration.fss_pool)
if self.configuration.san_thin_provision:
if not self.configuration.max_over_subscription_ratio:
msg = _('The max_over_subscription_ratio must be set '
'when thin provisioning is enabled.')
raise exception.InvalidConfigurationValue(reason=msg)
def _pool_checking(self, pool_id):
def _pool_checking(self, pool_info):
pool_count = 0
try:
output = self.proxy.list_pool_info(pool_id)
if "name" in output['data']:
pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
output['data']['name']))
if pool_count is 0:
msg = (_('The given pool info must include the storage pool '
'and naming start with OpenStack-'))
raise exception.VolumeBackendAPIException(data=msg)
if len(pool_info) == 1:
_pool_state = self._is_single_pool(pool_info)
if not _pool_state:
msg = _('The given pool info does not match the '
'single-pool A:<pool-id> format.')
raise exception.VolumeBackendAPIException(data=msg)
else:
_pool_state = self._is_multi_pool(pool_info)
if not _pool_state:
msg = _('The given pool info does not match the '
'multi-pool P:<pool-id>,O:<other-pool-id> format.')
raise exception.VolumeBackendAPIException(data=msg)
for index, pool_id in pool_info.items():
output = self.proxy.list_pool_info(pool_id)
if "name" in output['data']:
pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
output['data']['name']))
if pool_count == 0:
msg = _('The given pool info must include a storage '
'pool whose name starts with OpenStack-')
raise exception.VolumeBackendAPIException(data=msg)
except Exception:
msg = (_('Unexpected exception during pool checking.'))
msg = _('Unexpected exception during pool checking.')
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _check_multipath(self):
if self.configuration.use_multipath_for_image_xfer:
if not self.configuration.fss_san_secondary_ip:
msg = (_('The san_secondary_ip param is null.'))
msg = _('The fss_san_secondary_ip parameter is not set.')
raise exception.VolumeBackendAPIException(data=msg)
output = self.proxy._check_iocluster_state()
if not output:
msg = (_('FSS do not support multipathing.'))
msg = _('FSS does not support multipathing.')
raise exception.VolumeBackendAPIException(data=msg)
return output
else:
return False
def _is_single_pool(self, pool_info):
return len(pool_info) == 1 and "A" in pool_info
def _is_multi_pool(self, pool_info):
return len(pool_info) == 2 and "P" in pool_info and "O" in pool_info
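# Illustration (not part of the change): the only pool layouts accepted
# by _pool_checking() are
#   {'A': <pool-id>}                        -> single-pool mode
#   {'P': <pool-id>, 'O': <other-pool-id>}  -> primary/others mode
# Any other key combination raises VolumeBackendAPIException.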
def create_volume(self, volume):
"""Creates a volume.
We use the volume metadata to create a variety of volume types.
Create a thin provisioned volume:
[Usage] create --volume-type FSS --metadata thinprovisioned=true
thinsize=<thin-volume-size>
[Usage] create --volume-type FSS-THIN
--metadata thinsize=<thin-volume-size> volume-size
Create a LUN that is a Timeview of another LUN at a specified CDP tag:
[Usage] create --volume-type FSS --metadata timeview=<vid>
@@ -128,20 +188,25 @@ class FalconstorBaseDriver(san.SanDriver):
[Usage] create --volume-type FSS --metadata timeview=<vid>
rawtimestamp=<rawtimestamp> volume-size
Create a mirrored volume:
[Usage] create --volume-type FSS --metadata mirrored=true
"""
volume_metadata = self._get_volume_metadata(volume)
if not volume_metadata:
volume_name, fss_metadata = self.proxy.create_vdev(volume)
else:
if ("timeview" in volume_metadata and
if self.configuration.san_thin_provision:
volume_name, fss_metadata = self.proxy.create_thin_vdev(
volume_metadata, volume)
elif ("timeview" in volume_metadata and
("cdptag" in volume_metadata) or
("rawtimestamp" in volume_metadata)):
volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag(
volume_metadata, volume)
elif ("thinprovisioned" in volume_metadata and
"thinsize" in volume_metadata):
volume_name, fss_metadata = self.proxy.create_thin_vdev(
elif 'mirrored' in volume_metadata:
volume_name, fss_metadata = self.proxy.create_vdev_with_mirror(
volume_metadata, volume)
else:
volume_name, fss_metadata = self.proxy.create_vdev(volume)
@@ -265,6 +330,8 @@ class FalconstorBaseDriver(san.SanDriver):
def get_volume_stats(self, refresh=False):
total_capacity = 0
free_space = 0
# Thin provisioning
thin_enabled = self.configuration.san_thin_provision
if refresh:
try:
info = self.proxy._get_pools_info()
@@ -280,9 +347,15 @@ class FalconstorBaseDriver(san.SanDriver):
"total_capacity_gb": total_capacity,
"free_capacity_gb": free_space,
"reserved_percentage": 0,
"consistencygroup_support": True
"consistencygroup_support": True,
"thin_provisioning_support": thin_enabled,
"thick_provisioning_support": not thin_enabled
}
if thin_enabled:
provisioned_capacity = int(info['used_gb'])
data['provisioned_capacity_gb'] = provisioned_capacity
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio)
self._stats = data
except Exception as exc:
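
A rough sketch of how the thin-provisioning stats above are consumed;
the formula mirrors the Cinder scheduler's capacity filter as an
assumption, and the numbers are made up:

# Backend reports: 100 GB total, ratio 2.0, 150 GB already provisioned.
total_capacity_gb = 100
max_over_subscription_ratio = 2.0
provisioned_capacity_gb = 150

# With thin_provisioning_support=True, admission is judged against the
# oversubscribed (virtual) capacity rather than raw free space:
virtual_free_gb = (total_capacity_gb * max_over_subscription_ratio
                   - provisioned_capacity_gb)
print(virtual_free_gb)  # 50.0 -> a 40 GB volume still fits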


@@ -41,15 +41,18 @@ class FSSISCSIDriver(fss_common.FalconstorBaseDriver,
1.03 - merge source code
1.04 - Fixed create_volume_from_snapshot(), create_cloned_volume()
metadata TypeError
2.0.0 - Mitaka driver
-- fixed consisgroup commands error.
2.0.0 - Newton driver
-- fixed consisgroup commands error
2.0.1 -- fixed bugs
2.0.2 -- support Multipath
3.0.0 - Newton driver
3.0.0 - Ocata driver
-- fixed bugs
4.0.0 - Pike driver
-- extend Cinder driver to utilize multiple FSS storage pools
"""
VERSION = '3.0.0'
VERSION = '4.0.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "FalconStor_CI"


@@ -16,6 +16,7 @@
import base64
import json
import random
import six
import time
import uuid
@@ -75,7 +76,7 @@ LOG = logging.getLogger(__name__)
class RESTProxy(object):
def __init__(self, config):
self.fss_host = config.san_ip
self.fss_defined_pool = config.fss_pool
self.fss_defined_pools = config.fss_pools
if config.additional_retry_list:
RETRY_LIST.append(config.additional_retry_list)
@@ -117,15 +118,17 @@ class RESTProxy(object):
def _get_pools_info(self):
qpools = []
poolinfo = {}
total_capacity_gb = 0
used_gb = 0
try:
output = self.list_pool_info()
if output and "storagepools" in output['data']:
for item in output['data']['storagepools']:
if item['name'].startswith(GROUP_PREFIX) and (
self.fss_defined_pool == item['id']):
six.text_type(item['id']) in
self.fss_defined_pools.values()):
poolid = int(item['id'])
qpools.append(poolid)
break
if not qpools:
msg = _('The storage pool information is empty or not correct')
@@ -134,18 +137,20 @@ class RESTProxy(object):
# Query pool detail information
for poolid in qpools:
output = self.list_pool_info(poolid)
poolinfo['pool_name'] = output['data']['name']
poolinfo['total_capacity_gb'] = (
total_capacity_gb += (
self._convert_size_to_gb(output['data']['size']))
poolinfo['used_gb'] = (
self._convert_size_to_gb(output['data']['used']))
poolinfo['QoS_support'] = False
poolinfo['reserved_percentage'] = 0
used_gb += (self._convert_size_to_gb(output['data']['used']))
except Exception:
msg = (_('Unexpected exception during get pools info.'))
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
poolinfo['total_capacity_gb'] = total_capacity_gb
poolinfo['used_gb'] = used_gb
poolinfo['QoS_support'] = False
poolinfo['reserved_percentage'] = 0
return poolinfo
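# Illustration (not part of the change): with fss_pools = P:1,O:2, where
# pool 1 is 100 GB (30 GB used) and pool 2 is 200 GB (50 GB used), the
# driver now reports total_capacity_gb = 300 and used_gb = 80 instead of
# a single pool's numbers.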
def list_pool_info(self, pool_id=None):
@@ -163,13 +168,26 @@ class RESTProxy(object):
adapter_type = physicaladapters['type']
return adapter_type
def _selected_pool_id(self, pool_info, pool_type=None):
_pool_id = 0
if len(pool_info) == 1 and "A" in pool_info:
_pool_id = pool_info['A']
elif len(pool_info) == 2 and "P" in pool_info and "O" in pool_info:
if pool_type:
if pool_type == "P":
_pool_id = pool_info['P']
elif pool_type == "O":
_pool_id = pool_info['O']
return _pool_id
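# Selection convention used by the callers of this helper (values are
# illustrative, not part of the change):
#   _selected_pool_id({'A': 1}, "P")          -> 1  (one pool serves all)
#   _selected_pool_id({'P': 1, 'O': 2}, "P")  -> 1  (new and copied volumes)
#   _selected_pool_id({'P': 1, 'O': 2}, "O")  -> 2  (mirrors, timeviews,
#                                                    snapshots, timemark
#                                                    policies)
# An unrecognized layout falls through and returns 0.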
def create_vdev(self, volume):
sizemb = self._convert_size_to_mb(volume["size"])
volume_name = self._get_fss_volume_name(volume)
params = dict(storagepoolid=self.fss_defined_pool,
category="virtual",
params = dict(category="virtual",
sizemb=sizemb,
name=volume_name)
pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
params.update(storagepoolid=pool_id)
return volume_name, self.FSS.create_vdev(params)
def create_tv_from_cdp_tag(self, volume_metadata, volume):
@@ -186,13 +204,13 @@ class RESTProxy(object):
volume_name = self._get_fss_volume_name(volume)
sizemb = self._convert_size_to_mb(volume['size'])
params = dict(name=volume_name,
storage=dict(storagepoolid=self.fss_defined_pool,
sizemb=sizemb),
automaticexpansion=dict(enabled=False),
timeviewcopy=True)
if cdp_tag:
params.update(cdpjournaltag=cdp_tag)
pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
params.update(storage={'storagepoolid': pool_id, 'sizemb': sizemb})
metadata = self.FSS.create_timeview(tv_vid, params)
return volume_name, metadata
@@ -200,8 +218,7 @@ class RESTProxy(object):
thin_size = 0
size = volume["size"]
sizemb = self._convert_size_to_mb(size)
params = dict(storagepoolid=self.fss_defined_pool,
category="virtual")
params = {'category': 'virtual'}
if 'thinprovisioned' in volume_metadata:
if volume_metadata['thinprovisioned'] is False:
@@ -232,10 +249,40 @@ class RESTProxy(object):
params.update(thinprovisioning=thin_disk)
params.update(sizemb=thin_size)
pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
params.update(storagepoolid=pool_id)
volume_name = self._get_fss_volume_name(volume)
params.update(name=volume_name)
return volume_name, self.FSS.create_vdev(params)
def create_vdev_with_mirror(self, volume_metadata, volume):
if 'mirrored' in volume_metadata:
if volume_metadata['mirrored'] is False:
msg = _('The "mirrored" metadata value must be True to '
'create a mirrored volume.')
raise exception.VolumeBackendAPIException(data=msg)
sizemb = self._convert_size_to_mb(volume["size"])
volume_name = self._get_fss_volume_name(volume)
params = {'category': 'virtual', 'sizemb': sizemb, 'name': volume_name}
pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
params.update(storagepoolid=pool_id)
metadata = self.FSS.create_vdev(params)
if metadata:
vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)
mirror_params = {'category': 'virtual',
'selectioncriteria': 'anydrive',
'mirrortarget': "virtual"}
pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
mirror_params.update(storagepoolid=pool_id)
ret = self.FSS.create_mirror(vid, mirror_params)
if ret:
return volume_name, metadata
def _get_fss_vid_from_name(self, volume_name, fss_type=None):
vid = []
output = self.FSS.list_fss_volume_info()
@@ -282,7 +329,6 @@ class RESTProxy(object):
return vidlist
def clone_volume(self, new_vol_name, source_volume_name):
params = dict(storagepoolid=self.fss_defined_pool)
volume_metadata = {}
new_vid = ''
vid = self._get_fss_vid_from_name(source_volume_name, FSS_SINGLE_TYPE)
@@ -291,7 +337,8 @@ class RESTProxy(object):
selectioncriteria='anydrive',
mirrortarget="virtual"
)
mirror_params.update(params)
pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
mirror_params.update(storagepoolid=pool_id)
ret1 = self.FSS.create_mirror(vid, mirror_params)
if ret1:
@@ -331,12 +378,11 @@ class RESTProxy(object):
(snap, tm_policy, vdev_size) = (self.FSS.
_check_if_snapshot_tm_exist(vid))
if not snap:
self.create_vdev_snapshot(vid, self._convert_size_to_mb(size))
if not tm_policy:
self.FSS.create_timemark_policy(
vid, storagepoolid=self.fss_defined_pool)
pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
self.FSS.create_timemark_policy(vid, storagepoolid=pool_id)
if not snap_name:
snap_name = "snap-%s" % time.strftime('%Y%m%d%H%M%S')
@@ -409,8 +455,9 @@ class RESTProxy(object):
raise exception.VolumeBackendAPIException(data=msg)
timestamp = '%s_%s' % (vid, rawtimestamp)
pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
output = self.FSS.copy_timemark(
timestamp, storagepoolid=self.fss_defined_pool, name=new_vol_name)
timestamp, storagepoolid=pool_id, name=new_vol_name)
if output['rc'] == 0:
vid = output['id']
self.FSS._random_sleep()
@@ -468,12 +515,13 @@ class RESTProxy(object):
return self.create_vdev_snapshot(vid, self._convert_size_to_mb(size))
def create_vdev_snapshot(self, vid, size):
pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
params = dict(
idlist=[vid],
selectioncriteria='anydrive',
policy='alwayswrite',
policy='preserveall',
sizemb=size,
storagepoolid=self.fss_defined_pool
storagepoolid=pool_id
)
return self.FSS.create_vdev_snapshot(params)
@@ -518,6 +566,7 @@ class RESTProxy(object):
gsnap_name = self._encode_name(cgsnapshot['id'])
gid = self._get_fss_gid_from_name(group_name)
vidlist = self._get_vdev_id_from_group_id(gid)
pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
for vid in vidlist:
(snap, tm_policy, sizemb) = (self.FSS.
@@ -525,8 +574,7 @@ class RESTProxy(object):
if not snap:
self.create_vdev_snapshot(vid, sizemb)
if not tm_policy:
self.FSS.create_timemark_policy(
vid, storagepoolid=self.fss_defined_pool)
self.FSS.create_timemark_policy(vid, storagepoolid=pool_id)
group_tm_policy = self.FSS._check_if_group_tm_enabled(gid)
if not group_tm_policy:
@@ -1146,7 +1194,8 @@ class FSSRestCommon(object):
params = dict(
idlist=[vid],
automatic=dict(enabled=False),
maxtimemarkcount=MAXSNAPSHOTS
maxtimemarkcount=MAXSNAPSHOTS,
retentionpolicy=dict(mode='all'),
)
if kwargs.get('storagepoolid'):
params.update(kwargs)


@@ -0,0 +1,5 @@
---
features:
- Added ability to specify multiple storage pools in the FalconStor driver.
deprecations:
- The fss_pool option is deprecated and will be removed once Queens development opens up. Use fss_pools instead.