NetApp SolidFire: Thin provisioning scheduler support

Thin provisioning[1] is a feature supported by SolidFire hardware but
in the current driver implementation it is not managed by Cinder.
Therefore, scheduler options to control thin provisioning would not
work in this driver.

In this patch, we make the SolidFire driver compliant with Cinder thin
provisioning framework and also fix a few values that were not being
properly reported.

To keep the driver behavior consistent and avoid breaking rolling
upgrades, a new option is added. So, by default, the driver will
behave in the old way, and in the next releases this new option will
be the new default.

[1] https://specs.openstack.org/openstack/cinder-specs/specs/kilo/over-subscription-in-thin-provisioning.html

Change-Id: Ibba1dcf3deef27a9e1272d4e806f3e09ebd4ca4a
Closes-bug: #1784042
This commit is contained in:
Erlon R. Cruz 2018-10-19 09:49:57 -03:00
parent 1e698dbf39
commit 7bd44ded01
3 changed files with 72 additions and 12 deletions

View File

@ -78,6 +78,7 @@ class SolidFireVolumeTestCase(test.TestCase):
self.configuration.sf_volume_prefix = 'UUID-'
self.configuration.sf_enable_vag = False
self.configuration.replication_device = []
self.configuration.max_over_subscription_ratio = 2
super(SolidFireVolumeTestCase, self).setUp()
self.mock_object(solidfire.SolidFireDriver,
@ -151,7 +152,8 @@ class SolidFireVolumeTestCase(test.TestCase):
'usedSpace': 1073741824,
'compressionPercent': 100,
'deDuplicationPercent': 100,
'thinProvisioningPercent': 100}}}
'thinProvisioningPercent': 100,
'maxUsedSpace': 53687091200}}}
return data
elif method is 'GetClusterInfo':
@ -265,10 +267,12 @@ class SolidFireVolumeTestCase(test.TestCase):
'attributes': {'uuid': f_uuid[1]},
'qos': None,
'iqn': test_name}]}}
for v in result['result']['volumes']:
if int(v['volumeID']) == int(params['startVolumeID']):
break
return v
if params and params['startVolumeID']:
volumes = result['result']['volumes']
selected_volumes = [v for v in volumes if v.get('volumeID')
!= params['startVolumeID']]
result['result']['volumes'] = selected_volumes
return result
elif method is 'DeleteSnapshot':
return {'result': {}}
elif method is 'GetClusterVersionInfo':
@ -1019,6 +1023,25 @@ class SolidFireVolumeTestCase(test.TestCase):
self.assertEqual(99.0, sfv.cluster_stats['free_capacity_gb'])
self.assertEqual(100.0, sfv.cluster_stats['total_capacity_gb'])
sfv.configuration.sf_provisioning_calc = 'usedSpace'
sfv._update_cluster_status()
self.assertEqual(49.0, sfv.cluster_stats['free_capacity_gb'])
self.assertEqual(50.0, sfv.cluster_stats['total_capacity_gb'])
self.assertTrue(sfv.cluster_stats['thin_provisioning_support'])
self.assertEqual(self.configuration.max_over_subscription_ratio,
sfv.cluster_stats['max_over_subscription_ratio'])
def test_get_provisioned_capacity(self):
    """Verify _get_provisioned_capacity sums the mocked volume sizes."""
    self.mock_object(solidfire.SolidFireDriver, '_issue_api_request',
                     self.fake_issue_api_request)
    driver = solidfire.SolidFireDriver(configuration=self.configuration)
    # The fake ListVolumes response carries two volumes, each with a
    # totalSize of int(1.75 * units.Gi), so the expected total is
    # 2 * 1879048192 = 3758096384 bytes.
    expected_bytes = 3758096384
    self.assertEqual(expected_bytes, driver._get_provisioned_capacity())
def test_update_cluster_status_mvip_unreachable(self):
self.mock_object(solidfire.SolidFireDriver,
'_issue_api_request',
@ -1927,8 +1950,7 @@ class SolidFireVolumeTestCase(test.TestCase):
def test_delete_cgsnapshot_by_name_rainy(self):
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
with mock.patch.object(sfv,
'_get_group_snapshot_by_name',
with mock.patch.object(sfv, '_get_group_snapshot_by_name',
return_value=None):
self.assertRaises(exception.SolidFireDriverException,
sfv._delete_cgsnapshot_by_name,

View File

@ -95,7 +95,15 @@ sf_opts = [
cfg.BoolOpt('sf_enable_vag',
default=False,
help='Utilize volume access groups on a per-tenant basis.')]
help='Utilize volume access groups on a per-tenant basis.'),
cfg.StrOpt('sf_provisioning_calc',
default='maxProvisionedSpace',
choices=['maxProvisionedSpace', 'usedSpace'],
help='Change how SolidFire reports used space and '
'provisioning calculations. If this parameter is set to '
'\'usedSpace\', the driver will report correct '
'values as expected by Cinder '
'thin provisioning.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP)
@ -1872,6 +1880,7 @@ class SolidFireDriver(san.SanISCSIDriver):
except exception.SolidFireAPIException:
pass
LOG.debug("SolidFire cluster_stats: %s", self.cluster_stats)
return self.cluster_stats
def extend_volume(self, volume, new_size):
@ -1894,6 +1903,17 @@ class SolidFireDriver(san.SanISCSIDriver):
self._issue_api_request('ModifyVolume',
params, version='5.0')
def _get_provisioned_capacity(self):
    """Return the total provisioned capacity of the cluster, in bytes.

    Issues a ListVolumes API request (version 8.0) and totals the
    'totalSize' field of every volume returned.

    :returns: sum of all volumes' ``totalSize`` values (bytes); 0 when
              the cluster reports no volumes.
    """
    response = self._issue_api_request('ListVolumes', {}, version='8.0')
    volumes = response['result']['volumes']
    LOG.debug("%s volumes present in cluster", len(volumes))
    # Idiomatic sum() over a generator replaces the manual
    # accumulation loop; behavior is unchanged.
    return sum(vol['totalSize'] for vol in volumes)
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
@ -1923,11 +1943,22 @@ class SolidFireDriver(san.SanISCSIDriver):
return
results = results['result']['clusterCapacity']
free_capacity = (
results['maxProvisionedSpace'] - results['usedSpace'])
data['total_capacity_gb'] = (
float(results['maxProvisionedSpace'] / units.Gi))
if self.configuration.sf_provisioning_calc == 'usedSpace':
free_capacity = (
results['maxUsedSpace'] - results['usedSpace'])
data['total_capacity_gb'] = results['maxUsedSpace'] / units.Gi
data['thin_provisioning_support'] = True
data['provisioned_capacity_gb'] = (
self._get_provisioned_capacity() / units.Gi)
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio
)
else:
free_capacity = (
results['maxProvisionedSpace'] - results['usedSpace'])
data['total_capacity_gb'] = (
results['maxProvisionedSpace'] / units.Gi)
data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['compression_percent'] = (

View File

@ -0,0 +1,7 @@
---
fixes:
- |
    Fixed SolidFire ``free_capacity_gb`` reporting and added reporting of
    ``thin_provisioning_support=True``. This allows the use of Cinder
    scheduler parameters for thin provisioning on the SolidFire platform.