diff --git a/cinder/api/contrib/scheduler_stats.py b/cinder/api/contrib/scheduler_stats.py
index 23778f3d16b..c3613734f26 100644
--- a/cinder/api/contrib/scheduler_stats.py
+++ b/cinder/api/contrib/scheduler_stats.py
@@ -20,6 +20,8 @@ from cinder.api.views import scheduler_stats as scheduler_stats_view
 from cinder.scheduler import rpcapi
 from cinder import utils
 
+GET_POOL_NAME_FILTER_MICRO_VERSION = '3.28'
+
 
 def authorize(context, action_name):
     action = 'scheduler_stats:%s' % action_name
@@ -40,9 +42,16 @@ class SchedulerStatsController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context, 'get_pools')
 
-        # TODO(zhiteng) Add filters support
         detail = utils.get_bool_param('detail', req.params)
-        pools = self.scheduler_api.get_pools(context, filters=None)
+
+        req_version = req.api_version_request
+
+        if req_version.matches(GET_POOL_NAME_FILTER_MICRO_VERSION):
+            filters = req.params.copy()
+            filters.pop('detail', None)
+            pools = self.scheduler_api.get_pools(context, filters=filters)
+        else:
+            pools = self.scheduler_api.get_pools(context, filters=None)
 
         return self._view_builder.pools(req, pools, detail)
 
diff --git a/cinder/api/openstack/api_version_request.py b/cinder/api/openstack/api_version_request.py
index b3a1ab5cc27..ded5af870c3 100644
--- a/cinder/api/openstack/api_version_request.py
+++ b/cinder/api/openstack/api_version_request.py
@@ -78,6 +78,8 @@ REST_API_VERSION_HISTORY = """
    * 3.26 - Add failover action and cluster listings accept new filters and
             return new data.
    * 3.27 - Add attachment API
+   * 3.28 - Add filters support to get_pools
+
 """
 
 # The minimum and maximum versions of the API supported
@@ -85,7 +87,7 @@ REST_API_VERSION_HISTORY = """
 # minimum version of the API supported.
 # Explicitly using /v1 or /v2 enpoints will still work
 _MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.27"
+_MAX_API_VERSION = "3.28"
 _LEGACY_API_VERSION1 = "1.0"
 _LEGACY_API_VERSION2 = "2.0"
 
diff --git a/cinder/api/openstack/rest_api_version_history.rst b/cinder/api/openstack/rest_api_version_history.rst
index 1f699c5b2da..c8555596325 100644
--- a/cinder/api/openstack/rest_api_version_history.rst
+++ b/cinder/api/openstack/rest_api_version_history.rst
@@ -284,3 +284,7 @@ user documentation.
 3.27 (Maximum in Ocata)
 -----------------------
   Added new attachment API's
+
+3.28
+----
+  Add filters support to get_pools
diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py
index 0f91e8dc3e7..660e1e7763b 100644
--- a/cinder/scheduler/filter_scheduler.py
+++ b/cinder/scheduler/filter_scheduler.py
@@ -199,8 +199,7 @@ class FilterScheduler(driver.Scheduler):
         return top_backend.obj
 
     def get_pools(self, context, filters):
-        # TODO(zhiteng) Add filters support
-        return self.host_manager.get_pools(context)
+        return self.host_manager.get_pools(context, filters)
 
     def _post_select_populate_filter_properties(self, filter_properties,
                                                 backend_state):
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index 90ab4d66941..c429aef477d 100644
--- a/cinder/scheduler/host_manager.py
+++ b/cinder/scheduler/host_manager.py
@@ -22,6 +22,7 @@ import collections
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import importutils
+from oslo_utils import strutils
 from oslo_utils import timeutils
 
 from cinder.common import constants
@@ -628,20 +629,38 @@ class HostManager(object):
 
         return all_pools.values()
 
-    def get_pools(self, context):
+    def get_pools(self, context, filters=None):
         """Returns a dict of all pools on all hosts HostManager knows about."""
 
         self._update_backend_state_map(context)
 
         all_pools = []
+        name = None
+        if filters:
+            name = filters.pop('name', None)
+
         for backend_key, state in self.backend_state_map.items():
             for key in state.pools:
+                filtered = False
                 pool = state.pools[key]
                 # use backend_key.pool_name to make sure key is unique
                 pool_key = vol_utils.append_host(backend_key, pool.pool_name)
                 new_pool = dict(name=pool_key)
                 new_pool.update(dict(capabilities=pool.capabilities))
-                all_pools.append(new_pool)
+
+                if name and new_pool.get('name') != name:
+                    continue
+
+                if filters:
+                    # filter all other items in capabilities
+                    for (attr, value) in filters.items():
+                        cap = new_pool.get('capabilities').get(attr)
+                        if not self._equal_after_convert(cap, value):
+                            filtered = True
+                            break
+
+                if not filtered:
+                    all_pools.append(new_pool)
 
         return all_pools
 
@@ -761,3 +780,17 @@ class HostManager(object):
                 vol_utils.notify_about_capacity_usage(
                     context, u, u['type'], None, None)
             LOG.debug("Publish storage capacity: %s.", usage)
+
+    def _equal_after_convert(self, capability, value):
+
+        if isinstance(value, type(capability)) or capability is None:
+            return value == capability
+
+        if isinstance(capability, bool):
+            return capability == strutils.bool_from_string(value)
+
+        # The value parameter's type cannot be checked or converted
+        # anywhere else, so if the capability and the value are not
+        # of the same type, we fall back to comparing their string
+        # representations.
+        return str(value) == str(capability)
diff --git a/cinder/tests/unit/api/contrib/test_scheduler_stats.py b/cinder/tests/unit/api/contrib/test_scheduler_stats.py
index 96fe4862598..54a8520f165 100644
--- a/cinder/tests/unit/api/contrib/test_scheduler_stats.py
+++ b/cinder/tests/unit/api/contrib/test_scheduler_stats.py
@@ -17,6 +17,7 @@ import mock
 
 from cinder.api.contrib import scheduler_stats
+from cinder.api.openstack import api_version_request as api_version
 from cinder import context
 from cinder import exception
 from cinder import test
@@ -44,8 +45,6 @@ def schedule_rpcapi_get_pools(self, context, filters=None):
     return all_pools
 
 
-@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
-            schedule_rpcapi_get_pools)
 class SchedulerStatsAPITest(test.TestCase):
     def setUp(self):
         super(SchedulerStatsAPITest, self).setUp()
@@ -53,7 +52,9 @@ class SchedulerStatsAPITest(test.TestCase):
         self.controller = scheduler_stats.SchedulerStatsController()
         self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
 
-    def test_get_pools_summery(self):
+    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
+                schedule_rpcapi_get_pools)
+    def test_get_pools_summary(self):
         req = fakes.HTTPRequest.blank('/v2/%s/scheduler_stats' %
                                       fake.PROJECT_ID)
         req.environ['cinder.context'] = self.ctxt
@@ -74,6 +75,55 @@ class SchedulerStatsAPITest(test.TestCase):
 
         self.assertDictEqual(expected, res)
 
+    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools')
+    def test_get_pools_summary_filter_name(self, mock_rpcapi):
+        req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?name=pool1' %
+                                      fake.PROJECT_ID)
+        mock_rpcapi.return_value = [dict(name='pool1',
+                                         capabilities=dict(foo='bar'))]
+        req.api_version_request = api_version.APIVersionRequest('3.28')
+        req.environ['cinder.context'] = self.ctxt
+        res = self.controller.get_pools(req)
+
+        expected = {
+            'pools': [
+                {
+                    'name': 'pool1',
+                }
+            ]
+        }
+
+        self.assertDictEqual(expected, res)
+        filters = {'name': 'pool1'}
+        mock_rpcapi.assert_called_with(mock.ANY, filters=filters)
+
+    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools')
+    def test_get_pools_summary_filter_capabilities(self, mock_rpcapi):
+        req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?detail=True'
+                                      '&foo=bar' % fake.PROJECT_ID)
+        mock_rpcapi.return_value = [dict(name='pool1',
+                                         capabilities=dict(foo='bar'))]
+        req.api_version_request = api_version.APIVersionRequest('3.28')
+        req.environ['cinder.context'] = self.ctxt
+        res = self.controller.get_pools(req)
+
+        expected = {
+            'pools': [
+                {
+                    'name': 'pool1',
+                    'capabilities': {
+                        'foo': 'bar'
+                    }
+                }
+            ]
+        }
+
+        self.assertDictEqual(expected, res)
+        filters = {'foo': 'bar'}
+        mock_rpcapi.assert_called_with(mock.ANY, filters=filters)
+
+    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
+                schedule_rpcapi_get_pools)
     def test_get_pools_detail(self):
         req = fakes.HTTPRequest.blank('/v2/%s/scheduler_stats?detail=True' %
                                       fake.PROJECT_ID)
diff --git a/cinder/tests/unit/scheduler/test_host_manager.py b/cinder/tests/unit/scheduler/test_host_manager.py
index 89c8bc04e76..593a5f9031d 100644
--- a/cinder/tests/unit/scheduler/test_host_manager.py
+++ b/cinder/tests/unit/scheduler/test_host_manager.py
@@ -18,6 +18,7 @@ Tests For HostManager
 from datetime import datetime
 from datetime import timedelta
 
+import ddt
 import mock
 from oslo_serialization import jsonutils
 
@@ -45,6 +46,7 @@ class FakeFilterClass2(filters.BaseBackendFilter):
     pass
 
 
+@ddt.ddt
 class HostManagerTestCase(test.TestCase):
     """Test case for HostManager class."""
class.""" @@ -965,6 +967,158 @@ class HostManagerTestCase(test.TestCase): self.assertEqual(sorted(expected2, key=sort_func), sorted(res2, key=sort_func)) + @mock.patch('cinder.db.service_get_all') + @mock.patch('cinder.objects.service.Service.is_up', + new_callable=mock.PropertyMock) + def test_get_pools_filter_name(self, _mock_service_is_up, + _mock_service_get_all_by_topic): + context = 'fake_context' + + services = [ + dict(id=1, host='host1', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=2, host='host2@back1', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()) + ] + + mocked_service_states = { + 'host1': dict(volume_backend_name='AAA', + total_capacity_gb=512, free_capacity_gb=200, + timestamp=None, reserved_percentage=0, + provisioned_capacity_gb=312), + 'host2@back1': dict(volume_backend_name='BBB', + total_capacity_gb=256, free_capacity_gb=100, + timestamp=None, reserved_percentage=0, + provisioned_capacity_gb=156) + } + + _mock_service_get_all_by_topic.return_value = services + _mock_service_is_up.return_value = True + _mock_warning = mock.Mock() + host_manager.LOG.warn = _mock_warning + + with mock.patch.dict(self.host_manager.service_states, + mocked_service_states): + filters = {'name': 'host1#AAA'} + res = self.host_manager.get_pools(context, filters=filters) + + expected = [ + { + 'name': 'host1#AAA', + 'capabilities': { + 'timestamp': None, + 'volume_backend_name': 'AAA', + 'free_capacity_gb': 200, + 'driver_version': None, + 'total_capacity_gb': 512, + 'reserved_percentage': 0, + 'vendor_name': None, + 'storage_protocol': None, + 'provisioned_capacity_gb': 312}, + } + ] + + self.assertEqual(expected, res) + + @mock.patch('cinder.db.service_get_all') + @mock.patch('cinder.objects.service.Service.is_up', + new_callable=mock.PropertyMock) + def test_get_pools_filter_mulitattach(self, _mock_service_is_up, + _mock_service_get_all_by_topic): + context = 'fake_context' + + services = [ + dict(id=1, host='host1', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=2, host='host2@back1', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()) + ] + + mocked_service_states = { + 'host1': dict(volume_backend_name='AAA', + total_capacity_gb=512, free_capacity_gb=200, + timestamp=None, reserved_percentage=0, + multiattach=True), + 'host2@back1': dict(volume_backend_name='BBB', + total_capacity_gb=256, free_capacity_gb=100, + timestamp=None, reserved_percentage=0, + multiattach=False) + } + + _mock_service_get_all_by_topic.return_value = services + _mock_service_is_up.return_value = True + _mock_warning = mock.Mock() + host_manager.LOG.warn = _mock_warning + + with mock.patch.dict(self.host_manager.service_states, + mocked_service_states): + filters_t = {'multiattach': 'true'} + filters_f = {'multiattach': False} + res_t = self.host_manager.get_pools(context, filters=filters_t) + res_f = self.host_manager.get_pools(context, filters=filters_f) + + expected_t = [ + { + 'name': 'host1#AAA', + 'capabilities': { + 'timestamp': None, + 'volume_backend_name': 'AAA', + 'free_capacity_gb': 200, + 'driver_version': None, + 'total_capacity_gb': 512, + 'reserved_percentage': 0, + 'vendor_name': None, + 'storage_protocol': None, + 'multiattach': True}, + } + ] + expected_f = [ + { + 'name': 'host2@back1#BBB', + 'capabilities': { + 'timestamp': None, + 'volume_backend_name': 'BBB', + 'free_capacity_gb': 100, + 
+                        'driver_version': None,
+                        'total_capacity_gb': 256,
+                        'reserved_percentage': 0,
+                        'vendor_name': None,
+                        'storage_protocol': None,
+                        'multiattach': False},
+                }
+            ]
+
+            self.assertEqual(expected_t, res_t)
+            self.assertEqual(expected_f, res_f)
+
+    @ddt.data(
+        (None, None, True),
+        (None, 'value', False),
+        ('cap', None, False),
+        (False, 'True', False),
+        (True, 'True', True),
+        (True, True, True),
+        (False, 'false', True),
+        (1.1, '1.1', True),
+        (0, '0', True),
+        (1.1, '1.11', False),
+        ('str', 'str', True),
+        ('str1', 'str2', False),
+        ('str', 'StR', False),
+        ([], [], True),
+        (['hdd', 'ssd'], ['ssd'], False),
+        (['hdd', 'ssd'], ['ssd', 'hdd'], False),
+        (['hdd', 'ssd'], "['hdd', 'ssd']", True),
+        ({}, {}, True),
+        ({'a': 'a', 'b': 'b'}, {'b': 'b', 'a': 'a'}, True),
+        ({'a': 'a', 'b': 'b'}, {'b': 'b'}, False),
+        ({'a': 'a'}, "{'a': 'a'}", True),
+    )
+    @ddt.unpack
+    def test_equal_after_convert(self, cap, value, ret_value):
+        self.assertEqual(ret_value,
+                         self.host_manager._equal_after_convert(cap, value))
+
 
 class BackendStateTestCase(test.TestCase):
     """Test case for BackendState class."""
diff --git a/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml b/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml
new file mode 100644
index 00000000000..d13f70c6c63
--- /dev/null
+++ b/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml
@@ -0,0 +1,3 @@
+---
+features:
+  - Added filters support to the get_pools API in microversion 3.28.
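
Reviewer note (not part of the patch): a standalone sketch of the comparison
semantics added by _equal_after_convert, runnable without a Cinder checkout.
parse_bool below stands in for oslo_utils.strutils.bool_from_string, which the
patch actually uses; the rest mirrors the logic added to HostManager.

    # equal_after_convert_sketch.py -- illustrative only, not part of the diff.

    def parse_bool(value):
        """Loosely interpret common string spellings of a boolean."""
        return str(value).strip().lower() in ('1', 't', 'true', 'on', 'y', 'yes')

    def equal_after_convert(capability, value):
        # Same type (or capability is None): compare directly.
        if isinstance(value, type(capability)) or capability is None:
            return value == capability
        # Boolean capabilities accept string query values such as 'true'/'false'.
        if isinstance(capability, bool):
            return capability == parse_bool(value)
        # Query parameters always arrive as strings, so fall back to comparing
        # string representations for every other capability type.
        return str(value) == str(capability)

    if __name__ == '__main__':
        assert equal_after_convert(True, 'true')       # bool matches its string form
        assert equal_after_convert(1.1, '1.1')         # numbers match their string form
        assert not equal_after_convert(False, 'True')  # mismatched boolean does not match

On the wire, a client on microversion 3.28 would exercise the new behavior with
something like GET /v3/{project_id}/scheduler-stats/get_pools?name=host1%23AAA
and an "OpenStack-API-Version: volume 3.28" header (route shown here under the
assumption of the standard scheduler-stats mapping); per the controller change
above, every query parameter except detail is passed to the scheduler as a filter.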