[NetApp] Implement cached pool status
In order to optimize the NetApp ONTAP driver, this patch caches the status of
the driver pools and reuses it for each share server, since pools are not
separated per share server. The new option
`netapp_cached_aggregates_status_lifetime` controls how long the cached values
are considered valid.

Closes-Bug: #1900469
Change-Id: I14a059615fc29c7c173c035bb51d39e0bbb8b70a
parent 9b835f03d5
commit b2e1797889
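For context, here is a minimal standalone sketch of the caching pattern this change introduces: the pool status is computed once per backend and reused for every share server until a configured lifetime elapses. The names below are illustrative only and are not part of the driver's API.

import time


class PoolStatusCache(object):
    """Illustrative time-based cache; the real patch uses a DataCache helper."""

    def __init__(self, lifetime):
        self._lifetime = lifetime       # seconds a cached value stays valid
        self._updated_at = None
        self._pools = None

    def get_pools(self, refresh):
        now = time.monotonic()
        expired = (self._updated_at is None or
                   now - self._updated_at > self._lifetime)
        if expired:
            self._pools = refresh()     # stand-in for the expensive backend query
            self._updated_at = now
        return self._pools


cache = PoolStatusCache(lifetime=60)
pools_a = cache.get_pools(lambda: [{'pool_name': 'aggr1'}])  # queries backend
pools_b = cache.get_pools(lambda: [{'pool_name': 'aggr1'}])  # served from cache
assert pools_a is pools_b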
@@ -155,6 +155,7 @@ class NetAppCmodeFileStorageLibrary(object):
        self._cluster_info = {}
        self._default_nfs_config = None
        self.is_nfs_config_supported = False
        self._cache_pool_status = None

        self._app_version = kwargs.get('app_version', 'unknown')

@@ -188,6 +189,9 @@ class NetAppCmodeFileStorageLibrary(object):
        LOG.debug('The default NFS configuration: %s',
                  self._default_nfs_config)

        self._cache_pool_status = na_utils.DataCache(
            self.configuration.netapp_cached_aggregates_status_lifetime)

    @na_utils.trace
    def _set_cluster_info(self):
        self._cluster_info['nve_support'] = (
@@ -390,13 +394,18 @@ class NetAppCmodeFileStorageLibrary(object):

        :param share_server: ShareServer class instance.
        """
        if self._cache_pool_status.is_expired():
            return self._get_pools()

        return self._cache_pool_status.get_data()

    @na_utils.trace
    def _get_pools(self, filter_function=None, goodness_function=None):
        """Retrieve list of pools available to this backend."""

        pools = []
        cached_pools = []
        aggr_space = self._get_aggregate_space()
        aggregates = aggr_space.keys()

@@ -427,8 +436,8 @@ class NetAppCmodeFileStorageLibrary(object):

            pool = {
                'pool_name': aggr_name,
                'filter_function': filter_function,
                'goodness_function': goodness_function,
                'filter_function': None,
                'goodness_function': None,
                'total_capacity_gb': total_capacity_gb,
                'free_capacity_gb': free_capacity_gb,
                'allocated_capacity_gb': allocated_capacity_gb,
@@ -455,7 +464,14 @@ class NetAppCmodeFileStorageLibrary(object):
                aggr_name)
            pool['utilization'] = na_utils.round_down(utilization)

            pools.append(pool)
            cached_pools.append(pool)
            pool_with_func = copy.deepcopy(pool)
            pool_with_func['filter_function'] = filter_function
            pool_with_func['goodness_function'] = goodness_function

            pools.append(pool_with_func)

        self._cache_pool_status.update_data(cached_pools)

        return pools

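The cached entries deliberately keep `filter_function` and `goodness_function` unset: the per-request functions are applied only to a deep copy, so the dict handed back to the caller can be customized or later mutated without leaking into the cached pool. A small sketch of that behaviour, with an illustrative pool dict and filter expression:

import copy

cached_pool = {'pool_name': 'aggr1', 'filter_function': None,
               'goodness_function': None}

pool_with_func = copy.deepcopy(cached_pool)
pool_with_func['filter_function'] = 'capabilities.utilization < 70'

# The cached copy is untouched and stays valid for any share server.
assert cached_pool['filter_function'] is None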
@@ -134,7 +134,13 @@ netapp_provisioning_opts = [
               default='fpolicy_policy_%(share_id)s'),
    cfg.StrOpt('netapp_fpolicy_event_name_template',
               help='NetApp FPolicy policy name template.',
               default='fpolicy_event_%(protocol)s_%(share_id)s'), ]
               default='fpolicy_event_%(protocol)s_%(share_id)s'),
    cfg.IntOpt('netapp_cached_aggregates_status_lifetime',
               min=0,
               default=60,
               help='The maximum time in seconds that the cached aggregates '
                    'status will be considered valid. Trying to read the '
                    'expired cache leads to refreshing it.'), ]

netapp_cluster_opts = [
    cfg.StrOpt('netapp_vserver',
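A hedged sketch of how the new option behaves once registered with oslo.config: left unset it defaults to 60 seconds, and `min=0` rejects negative values (0 effectively disables reuse, since the cache expires immediately). The standalone registration below is for illustration only; the driver reads the value through `self.configuration`.

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([
    cfg.IntOpt('netapp_cached_aggregates_status_lifetime', min=0, default=60),
])
CONF(args=[])  # parse with no arguments so the default applies

print(CONF.netapp_cached_aggregates_status_lifetime)  # -> 60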
@@ -22,6 +22,7 @@ import re

from oslo_concurrency import processutils as putils
from oslo_log import log
from oslo_utils import timeutils
import six

from manila import exception
@@ -238,3 +239,29 @@ class OpenStackInfo(object):
        return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % {
            'version': self._version, 'release': self._release,
            'vendor': self._vendor, 'platform': self._platform}


class DataCache(object):
    """DataCache class for caching NetApp information.

    The cache validity is measured by a stop watch that is
    not thread-safe.
    """

    def __init__(self, duration):
        self._stop_watch = timeutils.StopWatch(duration)
        self._cached_data = None

    def is_expired(self):
        return not self._stop_watch.has_started() or self._stop_watch.expired()

    def get_data(self):
        return self._cached_data

    def update_data(self, cached_data):
        if not self._stop_watch.has_started():
            self._stop_watch.start()
        else:
            self._stop_watch.restart()

        self._cached_data = cached_data
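A short usage sketch of the helper added above, assuming manila is installed so it can be imported as `manila.share.drivers.netapp.utils.DataCache`; `load_pools()` is a stand-in for the driver's real `_get_pools()` call.

from manila.share.drivers.netapp import utils as na_utils


def load_pools():
    # Stand-in for the expensive backend query.
    return [{'pool_name': 'aggr1'}]


cache = na_utils.DataCache(60)   # cached data stays valid for 60 seconds

if cache.is_expired():           # True before the first update_data() call
    cache.update_data(load_pools())

pools = cache.get_data()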
@@ -489,6 +489,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        self.mock_object(self.library,
                         '_get_pools',
                         mock.Mock(return_value=fake.POOLS))
        self.library._cache_pool_status = na_utils.DataCache(60)

        result = self.library.get_share_server_pools(fake.SHARE_SERVER)

@@ -499,6 +500,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        self.mock_object(
            self.library, '_get_aggregate_space',
            mock.Mock(return_value=fake.AGGREGATE_CAPACITIES))
        self.library._cache_pool_status = na_utils.DataCache(60)
        self.library._have_cluster_creds = True
        self.library._revert_to_snapshot_support = True
        self.library._cluster_info = fake.CLUSTER_INFO
@@ -516,6 +518,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        self.mock_object(
            self.library, '_get_aggregate_space',
            mock.Mock(return_value=fake.AGGREGATE_CAPACITIES_VSERVER_CREDS))
        self.library._cache_pool_status = na_utils.DataCache(60)
        self.library._have_cluster_creds = False
        self.library._revert_to_snapshot_support = True
        self.library._cluster_info = fake.CLUSTER_INFO

@@ -394,3 +394,50 @@ class OpenstackInfoTestCase(test.TestCase):
        info._update_openstack_info()

        self.assertTrue(mock_updt_from_dpkg.called)


@ddt.ddt
class DataCacheTestCase(test.TestCase):

    def setUp(self):
        super(DataCacheTestCase, self).setUp()

        self.cache = na_utils.DataCache(60)
        self.cache._stop_watch = mock.Mock()

    @ddt.data(True, False)
    def test_is_expired(self, is_expired):
        not_expired = not is_expired
        self.mock_object(
            self.cache._stop_watch, 'has_started',
            mock.Mock(return_value=not_expired))

        self.mock_object(
            self.cache._stop_watch, 'expired',
            mock.Mock(return_value=is_expired))

        self.assertEqual(is_expired, self.cache.is_expired())

    def test_get_data(self):
        fake_data = 10
        self.cache._cached_data = fake_data
        self.assertEqual(fake_data, self.cache.get_data())

    @ddt.data(True, False)
    def test_update_data(self, started):
        self.mock_object(
            self.cache._stop_watch, 'has_started',
            mock.Mock(return_value=started))
        mock_start = self.mock_object(self.cache._stop_watch, 'start',
                                      mock.Mock())
        mock_restart = self.mock_object(self.cache._stop_watch, 'restart',
                                        mock.Mock())
        fake_data = 10

        self.cache.update_data(fake_data)

        self.assertEqual(self.cache._cached_data, fake_data)
        if not started:
            mock_start.assert_called_once()
        else:
            mock_restart.assert_called_once()
@@ -0,0 +1,10 @@
---
fixes:
  - |
    In order to optimize the NetApp ONTAP driver, the status of the driver
    pools is now cached and reused for each share server, since pools are
    not separated per share server. The new option
    `netapp_cached_aggregates_status_lifetime` controls how long the cached
    values are considered valid. Please refer to the
    `Launchpad bug #1900469 <https://bugs.launchpad.net/manila/+bug/1900469>`_
    for more details.