NetApp SolidFire: Adding new fields to scheduler data
The NetApp SolidFire driver now reports additional fields to the scheduler. These
fields allow operators to take QoS and efficiency rates into account when
filtering and weighing backends for volume placement.

Implements: blueprint sf-qos-on-scheduler-stats
Change-Id: Id77b532187ecd5f3f42171f1f94b6f0246d07be5
parent 45dc00518c
commit eb4e6c9246
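The new keys land in the backend capabilities that cinder's DriverFilter and GoodnessWeigher expose to operator-defined filter_function / goodness_function expressions. The sketch below is illustrative only and not part of this change: it approximates, in plain Python, the kind of weighting an operator might build from the new fields; the helper name, the 0.7/0.3 weights, and the sample values are invented.

# Illustrative sketch only -- not part of this change. It mimics the sort of
# goodness calculation an operator could express over the new SolidFire
# fields (provisioned_iops, max_iops, thin_provision_percent). The helper
# name, the 0.7/0.3 weights and the sample values are invented.
def sample_goodness(capabilities):
    # More unprovisioned IOPS headroom and a higher thin-provisioning ratio
    # both push the 0-100 score up.
    iops_headroom = 1.0 - (capabilities['provisioned_iops'] /
                           float(capabilities['max_iops']))
    efficiency = min(capabilities['thin_provision_percent'], 500.0) / 500.0
    return round(100 * (0.7 * iops_headroom + 0.3 * efficiency))


if __name__ == '__main__':
    backend = {'provisioned_iops': 120000,
               'max_iops': 200000,
               'thin_provision_percent': 958.2}
    print(sample_goodness(backend))  # prints 58 for this sample backend

In a real deployment the same idea would normally be written as a goodness_function string in cinder.conf (referencing capabilities.provisioned_iops and friends), which the GoodnessWeigher shown below evaluates per backend.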
@@ -75,6 +75,16 @@ class DriverFilter(filters.BaseBackendFilter):
         qos_specs = stats['qos_specs']
         volume_stats = stats['volume_stats']
 
+        LOG.debug('Running evaluator: extra_specs: %(extra)s\n'
+                  'stats: %(stats)s\n'
+                  'capabilities: %(capabilities)s\n'
+                  'volume: %(volume)s\n'
+                  'qos: %(qos)s', {'extra': extra_specs,
+                                   'stats': backend_stats,
+                                   'capabilities': backend_caps,
+                                   'volume': volume_stats,
+                                   'qos': qos_specs})
+
         result = evaluator.evaluate(
             func,
             extra=extra_specs,
@@ -44,8 +44,8 @@ class GoodnessWeigher(weights.BaseHostWeigher):
         stats = self._generate_stats(host_state, weight_properties)
         LOG.debug("Checking host '%s'", stats['host_stats']['host'])
         result = self._check_goodness_function(stats)
-        LOG.debug("Goodness: %s", result)
-        LOG.debug("Done checking host '%s'", stats['host_stats']['host'])
+        LOG.debug("Goodness weight for %(host)s: %(res)s",
+                  {'res': result, 'host': stats['host_stats']['host']})
 
         return result
@@ -86,6 +86,10 @@ class SolidFireVolumeTestCase(test.TestCase):
                          '_issue_api_request',
                          self.fake_issue_api_request)
 
+        self.mock_object(solidfire.SolidFireDriver,
+                         '_get_provisioned_capacity_iops',
+                         return_value=(0, 0))
+
         self.expected_qos_results = {'minIOPS': 1000,
                                      'maxIOPS': 10000,
                                      'burstIOPS': 20000}
@@ -146,14 +150,41 @@ class SolidFireVolumeTestCase(test.TestCase):
 
     def fake_issue_api_request(obj, method, params, version='1.0',
                                endpoint=None):
-        if method is 'GetClusterCapacity' and version == '1.0':
-            data = {'result':
-                    {'clusterCapacity': {'maxProvisionedSpace': 107374182400,
-                                         'usedSpace': 1073741824,
-                                         'compressionPercent': 100,
-                                         'deDuplicationPercent': 100,
-                                         'thinProvisioningPercent': 100,
-                                         'maxUsedSpace': 53687091200}}}
+        if method is 'GetClusterCapacity':
+            data = {}
+            if version == '1.0':
+                data = {'result': {'clusterCapacity': {
+                    'maxProvisionedSpace': 107374182400,
+                    'usedSpace': 1073741824,
+                    'compressionPercent': 100,
+                    'deDuplicationPercent': 100,
+                    'thinProvisioningPercent': 100,
+                    'maxUsedSpace': 53687091200}}}
+            elif version == '8.0':
+                data = {'result': {'clusterCapacity': {
+                    'usedMetadataSpaceInSnapshots': 16476454912,
+                    'maxUsedMetadataSpace': 432103337164,
+                    'activeBlockSpace': 616690857535,
+                    'uniqueBlocksUsedSpace': 628629229316,
+                    'totalOps': 7092186135,
+                    'peakActiveSessions': 0,
+                    'uniqueBlocks': 519489473,
+                    'maxOverProvisionableSpace': 276546135777280,
+                    'zeroBlocks': 8719571984,
+                    'provisionedSpace': 19938551005184,
+                    'maxUsedSpace': 8402009333760,
+                    'peakIOPS': 0,
+                    'timestamp': '2019-04-24T12:08:22Z',
+                    'currentIOPS': 0,
+                    'usedSpace': 628629229316,
+                    'activeSessions': 0,
+                    'nonZeroBlocks': 1016048624,
+                    'maxProvisionedSpace': 55309227155456,
+                    'usedMetadataSpace': 16476946432,
+                    'averageIOPS': 0,
+                    'snapshotNonZeroBlocks': 1606,
+                    'maxIOPS': 200000,
+                    'clusterRecentIOSize': 0}}}
+            return data
 
         elif method is 'GetClusterInfo':
@@ -1152,34 +1183,42 @@ class SolidFireVolumeTestCase(test.TestCase):
         self.mock_object(solidfire.SolidFireDriver,
                          '_issue_api_request',
                          self.fake_issue_api_request)
 
+        driver_defined_stats = ['volume_backend_name', 'vendor_name',
+                                'driver_version', 'storage_protocol',
+                                'consistencygroup_support',
+                                'consistent_group_snapshot_enabled',
+                                'replication_enabled', 'active_cluster_mvip',
+                                'reserved_percentage', 'QoS_support',
+                                'multiattach', 'total_capacity_gb',
+                                'free_capacity_gb', 'compression_percent',
+                                'deduplicaton_percent',
+                                'thin_provision_percent', 'provisioned_iops',
+                                'current_iops', 'average_iops', 'max_iops',
+                                'peak_iops']
+
         sfv = solidfire.SolidFireDriver(configuration=self.configuration)
         sfv._update_cluster_status()
-        self.assertEqual(99.0, sfv.cluster_stats['free_capacity_gb'])
-        self.assertEqual(100.0, sfv.cluster_stats['total_capacity_gb'])
 
+        for key in driver_defined_stats:
+            if sfv.cluster_stats.get(key, None) is None:
+                msg = 'Key %s should be present at driver stats.' % key
+                raise exception.CinderException(message=msg)
+
         sfv.configuration.sf_provisioning_calc = 'usedSpace'
         sfv._update_cluster_status()
-        self.assertEqual(49.0, sfv.cluster_stats['free_capacity_gb'])
-        self.assertEqual(50.0, sfv.cluster_stats['total_capacity_gb'])
-        self.assertTrue(sfv.cluster_stats['thin_provisioning_support'])
-        self.assertEqual(self.configuration.max_over_subscription_ratio,
-                         sfv.cluster_stats['max_over_subscription_ratio'])
+        driver_defined_stats += ['thin_provisioning_support',
+                                 'provisioned_capacity_gb',
+                                 'max_over_subscription_ratio']
 
-    def test_get_provisioned_capacity(self):
-        self.mock_object(solidfire.SolidFireDriver,
-                         '_issue_api_request',
-                         self.fake_issue_api_request)
-
-        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
-        prov_cap = sfv._get_provisioned_capacity()
-        # Sum of totalSize of the volumes mocked is
-        # (int(1.75 * units.Gi)) * 2 = 3758096384
-        self.assertEqual(3758096384, prov_cap)
+        for key in driver_defined_stats:
+            self.assertIn(key, driver_defined_stats)
 
     def test_update_cluster_status_mvip_unreachable(self):
         self.mock_object(solidfire.SolidFireDriver,
                          '_issue_api_request',
                          self.fake_issue_api_request)
 
         sfv = solidfire.SolidFireDriver(configuration=self.configuration)
         with mock.patch.object(sfv,
                                '_issue_api_request',
@@ -1937,16 +1937,20 @@ class SolidFireDriver(san.SanISCSIDriver):
         self._issue_api_request('ModifyVolume',
                                 params, version='5.0')
 
-    def _get_provisioned_capacity(self):
+    def _get_provisioned_capacity_iops(self):
         response = self._issue_api_request('ListVolumes', {}, version='8.0')
         volumes = response['result']['volumes']
 
         LOG.debug("%s volumes present in cluster", len(volumes))
-        provisioned = 0
-        for vol in volumes:
-            provisioned += vol['totalSize']
 
-        return provisioned
+        provisioned_cap = 0
+        provisioned_iops = 0
+
+        for vol in volumes:
+            provisioned_cap += vol['totalSize']
+            provisioned_iops += vol['qos']['minIOPS']
+
+        return provisioned_cap, provisioned_iops
 
     def _update_cluster_status(self):
         """Retrieve status info for the Cluster."""
@@ -1969,7 +1973,8 @@ class SolidFireDriver(san.SanISCSIDriver):
             data['multiattach'] = True
 
         try:
-            results = self._issue_api_request('GetClusterCapacity', params)
+            results = self._issue_api_request('GetClusterCapacity', params,
+                                              version='8.0')
         except SolidFireAPIException:
             data['total_capacity_gb'] = 0
             data['free_capacity_gb'] = 0
@@ -1977,14 +1982,14 @@ class SolidFireDriver(san.SanISCSIDriver):
             return
 
         results = results['result']['clusterCapacity']
+        prov_cap, prov_iops = self._get_provisioned_capacity_iops()
 
         if self.configuration.sf_provisioning_calc == 'usedSpace':
             free_capacity = (
                 results['maxUsedSpace'] - results['usedSpace'])
             data['total_capacity_gb'] = results['maxUsedSpace'] / units.Gi
             data['thin_provisioning_support'] = True
-            data['provisioned_capacity_gb'] = (
-                self._get_provisioned_capacity() / units.Gi)
+            data['provisioned_capacity_gb'] = prov_cap / units.Gi
             data['max_over_subscription_ratio'] = (
                 self.configuration.max_over_subscription_ratio
             )
@@ -1995,12 +2000,30 @@ class SolidFireDriver(san.SanISCSIDriver):
                 results['maxProvisionedSpace'] / units.Gi)
 
         data['free_capacity_gb'] = float(free_capacity / units.Gi)
-        data['compression_percent'] = (
-            results['compressionPercent'])
-        data['deduplicaton_percent'] = (
-            results['deDuplicationPercent'])
-        data['thin_provision_percent'] = (
-            results['thinProvisioningPercent'])
+
+        if (results['uniqueBlocksUsedSpace'] == 0 or
+                results['uniqueBlocks'] == 0 or
+                results['zeroBlocks'] == 0):
+            data['compression_percent'] = 100
+            data['deduplicaton_percent'] = 100
+            data['thin_provision_percent'] = 100
+        else:
+            data['compression_percent'] = (
+                (float(results['uniqueBlocks'] * 4096) /
+                 results['uniqueBlocksUsedSpace']) * 100)
+            data['deduplicaton_percent'] = (
+                float(results['nonZeroBlocks'] /
+                      results['uniqueBlocks']) * 100)
+            data['thin_provision_percent'] = (
+                (float(results['nonZeroBlocks'] + results['zeroBlocks']) /
+                 results['nonZeroBlocks']) * 100)
+
+        data['provisioned_iops'] = prov_iops
+        data['current_iops'] = results['currentIOPS']
+        data['average_iops'] = results['averageIOPS']
+        data['max_iops'] = results['maxIOPS']
+        data['peak_iops'] = results['peakIOPS']
 
         data['shared_targets'] = False
         self.cluster_stats = data
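For a concrete sense of the efficiency math above, the standalone sketch below (not part of the change) evaluates the same three expressions against the values from the mocked version 8.0 GetClusterCapacity response used in the unit tests; the variable names mirror the stats keys, including the driver's 'deduplicaton_percent' spelling.

# Standalone sketch (not part of the change): applies the efficiency formulas
# from _update_cluster_status to the mocked 8.0 GetClusterCapacity values.
# Assumes Python 3 (true division).
results = {'uniqueBlocks': 519489473,
           'uniqueBlocksUsedSpace': 628629229316,
           'nonZeroBlocks': 1016048624,
           'zeroBlocks': 8719571984}

compression_percent = (float(results['uniqueBlocks'] * 4096) /
                       results['uniqueBlocksUsedSpace']) * 100
deduplicaton_percent = float(results['nonZeroBlocks'] /
                             results['uniqueBlocks']) * 100
thin_provision_percent = (float(results['nonZeroBlocks'] +
                                results['zeroBlocks']) /
                          results['nonZeroBlocks']) * 100

# Unique 4 KiB blocks vs. the space they occupy; logical non-zero blocks per
# unique stored block; logical blocks (zero + non-zero) per non-zero block.
print(round(compression_percent, 1))     # ~338.5
print(round(deduplicaton_percent, 1))    # ~195.6
print(round(thin_provision_percent, 1))  # ~958.2

The zero checks in the driver code above pin all three values to 100 when a counter is zero, so the divisions are skipped on an empty or freshly initialized cluster.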
@@ -0,0 +1,5 @@
+features:
+  - |
+    NetApp SolidFire now reports QoS and efficiency stats allowing operators
+    to use those values in consideration for weighting and filtering of their
+    backends.
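The weighing sketch near the top has a filtering counterpart; the sketch below is again illustrative only (not part of the change) and models a boolean predicate an operator could base on the new fields, for example skipping backends whose committed minimum IOPS leave little QoS headroom. The threshold and helper name are invented.

# Illustrative sketch only -- not part of the change. A filter-style predicate
# over the new SolidFire stats: reject backends whose committed minimum IOPS
# (provisioned_iops) already consume more than 90% of the cluster max IOPS.
def sample_filter(capabilities, threshold=0.9):
    committed = capabilities['provisioned_iops']
    ceiling = capabilities['max_iops']
    return committed < threshold * ceiling


print(sample_filter({'provisioned_iops': 120000, 'max_iops': 200000}))  # True
print(sample_filter({'provisioned_iops': 195000, 'max_iops': 200000}))  # False

In practice this would typically be written as a filter_function expression in cinder.conf and evaluated by the DriverFilter touched in the first hunk.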