diff --git a/designate/api/middleware.py b/designate/api/middleware.py
index c906faf98..49cfc024a 100644
--- a/designate/api/middleware.py
+++ b/designate/api/middleware.py
@@ -91,6 +91,12 @@ class ContextMiddleware(base.Middleware):
                 value = request.GET.pop(i)
                 ctxt.all_tenants = strutils.bool_from_string(value)
 
+    def _extract_dns_hide_counts(self, ctxt, request):
+        ctxt.hide_counts = False
+        value = request.headers.get('OpenStack-DNS-Hide-Counts')
+        if value:
+            ctxt.hide_counts = strutils.bool_from_string(value)
+
     def _extract_edit_managed_records(self, ctxt, request):
         ctxt.edit_managed_records = False
         if 'edit_managed_records' in request.GET:
@@ -111,6 +117,7 @@ class ContextMiddleware(base.Middleware):
             self._extract_sudo(ctxt, request)
             self._extract_all_projects(ctxt, request)
             self._extract_edit_managed_records(ctxt, request)
+            self._extract_dns_hide_counts(ctxt, request)
         finally:
             request.environ['context'] = ctxt
         return ctxt
diff --git a/designate/api/v2/controllers/blacklists.py b/designate/api/v2/controllers/blacklists.py
index 6ddd25b33..fcf4ef0f7 100644
--- a/designate/api/v2/controllers/blacklists.py
+++ b/designate/api/v2/controllers/blacklists.py
@@ -52,7 +52,7 @@ class BlacklistsController(rest.RestController):
 
         # Extract the pagination params
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params
         accepted_filters = ('pattern', )
diff --git a/designate/api/v2/controllers/common.py b/designate/api/v2/controllers/common.py
index d500c712f..573b9310c 100644
--- a/designate/api/v2/controllers/common.py
+++ b/designate/api/v2/controllers/common.py
@@ -24,19 +24,22 @@ def retrieve_matched_rrsets(context, controller_obj, zone_id, **params):
 
     # Extract the pagination params
     marker, limit, sort_key, sort_dir = utils.get_paging_params(
-        params, controller_obj.SORT_KEYS)
+        context, params, controller_obj.SORT_KEYS)
 
     # Extract any filter params.
     accepted_filters = (
-        'name', 'type', 'ttl', 'data', 'status', 'description', )
+        'name', 'type', 'ttl', 'data', 'status', 'description',)
 
     criterion = controller_obj._apply_filter_params(
         params, accepted_filters, {})
 
+    # Use DB index for better performance in the case of cross zone search
+    force_index = True
     if zone_id:
         criterion['zone_id'] = zone_id
+        force_index = False
 
     recordsets = controller_obj.central_api.find_recordsets(
-        context, criterion, marker, limit, sort_key, sort_dir)
+        context, criterion, marker, limit, sort_key, sort_dir, force_index)
 
     return recordsets
diff --git a/designate/api/v2/controllers/pools.py b/designate/api/v2/controllers/pools.py
index f595771f6..72129060a 100644
--- a/designate/api/v2/controllers/pools.py
+++ b/designate/api/v2/controllers/pools.py
@@ -50,7 +50,7 @@ class PoolsController(rest.RestController):
 
         # Extract the pagination params
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         accepted_filters = ('name', )
diff --git a/designate/api/v2/controllers/recordsets.py b/designate/api/v2/controllers/recordsets.py
index 9233f8ba7..dd67d658e 100644
--- a/designate/api/v2/controllers/recordsets.py
+++ b/designate/api/v2/controllers/recordsets.py
@@ -26,8 +26,8 @@ LOG = logging.getLogger(__name__)
 
 
 class RecordSetsViewController(rest.RestController):
-    SORT_KEYS = ['created_at', 'id', 'updated_at', 'zone_id', 'tenant_id',
-                 'name', 'type', 'ttl', 'records']
+    SORT_KEYS = ['created_at', 'updated_at', 'zone_id', 'tenant_id',
+                 'name', 'type', 'ttl']
 
     @pecan.expose(template='json:', content_type='application/json')
     @utils.validate_uuid('recordset_id')
diff --git a/designate/api/v2/controllers/service_status.py b/designate/api/v2/controllers/service_status.py
index 253bd9aeb..c352d0ff8 100644
--- a/designate/api/v2/controllers/service_status.py
+++ b/designate/api/v2/controllers/service_status.py
@@ -32,7 +32,7 @@ class ServiceStatusController(rest.RestController):
         context = pecan.request.environ['context']
 
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         accepted_filters = ["hostname", "service_name", "status"]
         criterion = self._apply_filter_params(
diff --git a/designate/api/v2/controllers/tlds.py b/designate/api/v2/controllers/tlds.py
index 1665ee031..b25483e2e 100644
--- a/designate/api/v2/controllers/tlds.py
+++ b/designate/api/v2/controllers/tlds.py
@@ -50,7 +50,7 @@ class TldsController(rest.RestController):
 
         # Extract the pagination params
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         accepted_filters = ('name', )
diff --git a/designate/api/v2/controllers/tsigkeys.py b/designate/api/v2/controllers/tsigkeys.py
index 36de5c8b2..c5d77e66f 100644
--- a/designate/api/v2/controllers/tsigkeys.py
+++ b/designate/api/v2/controllers/tsigkeys.py
@@ -52,7 +52,7 @@ class TsigKeysController(rest.RestController):
 
         # Extract the pagination params
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params
         accepted_filters = ('name', 'algorithm', 'scope')
diff --git a/designate/api/v2/controllers/zones/__init__.py b/designate/api/v2/controllers/zones/__init__.py
index ab64d730b..cc17cf24a 100644
--- a/designate/api/v2/controllers/zones/__init__.py
+++ b/designate/api/v2/controllers/zones/__init__.py
@@ -68,7 +68,7 @@ class ZonesController(rest.RestController):
         context = request.environ['context']
 
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         accepted_filters = ('name', 'type', 'email', 'status',
diff --git a/designate/api/v2/controllers/zones/tasks/exports.py b/designate/api/v2/controllers/zones/tasks/exports.py
index 697cc20ae..a4a1d5be4 100644
--- a/designate/api/v2/controllers/zones/tasks/exports.py
+++ b/designate/api/v2/controllers/zones/tasks/exports.py
@@ -99,7 +99,7 @@ class ZoneExportsController(rest.RestController):
         request = pecan.request
         context = request.environ['context']
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         accepted_filters = ('status', 'message', 'zone_id', )
diff --git a/designate/api/v2/controllers/zones/tasks/imports.py b/designate/api/v2/controllers/zones/tasks/imports.py
index c2d6044c0..6ccaf669f 100644
--- a/designate/api/v2/controllers/zones/tasks/imports.py
+++ b/designate/api/v2/controllers/zones/tasks/imports.py
@@ -54,7 +54,7 @@ class ZoneImportController(rest.RestController):
         request = pecan.request
         context = request.environ['context']
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         accepted_filters = ('status', 'message', 'zone_id', )
diff --git a/designate/api/v2/controllers/zones/tasks/transfer_accepts.py b/designate/api/v2/controllers/zones/tasks/transfer_accepts.py
index 8c883cda2..12775a0a3 100644
--- a/designate/api/v2/controllers/zones/tasks/transfer_accepts.py
+++ b/designate/api/v2/controllers/zones/tasks/transfer_accepts.py
@@ -55,7 +55,7 @@ class TransferAcceptsController(rest.RestController):
 
         # Extract the pagination params
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         criterion = self._apply_filter_params(params, ('status',), {})
diff --git a/designate/api/v2/controllers/zones/tasks/transfer_requests.py b/designate/api/v2/controllers/zones/tasks/transfer_requests.py
index 1cd16f2ed..01f06bc5c 100644
--- a/designate/api/v2/controllers/zones/tasks/transfer_requests.py
+++ b/designate/api/v2/controllers/zones/tasks/transfer_requests.py
@@ -59,7 +59,7 @@ class TransferRequestsController(rest.RestController):
 
         # Extract the pagination params
         marker, limit, sort_key, sort_dir = utils.get_paging_params(
-            params, self.SORT_KEYS)
+            context, params, self.SORT_KEYS)
 
         # Extract any filter params.
         criterion = self._apply_filter_params(params, ('status',), {})
diff --git a/designate/central/rpcapi.py b/designate/central/rpcapi.py
index 61a44a937..924a7e189 100644
--- a/designate/central/rpcapi.py
+++ b/designate/central/rpcapi.py
@@ -61,8 +61,9 @@ class CentralAPI(object):
         5.6 - Changed 'purge_zones' function args
         6.0 - Renamed domains to zones
         6.1 - Add ServiceStatus methods
+        6.2 - Changed 'find_recordsets' method args
     """
-    RPC_API_VERSION = '6.1'
+    RPC_API_VERSION = '6.2'
 
     # This allows us to mark some methods as not logged.
     # This can be for a few reasons - some methods my not actually call over
@@ -74,7 +75,7 @@ class CentralAPI(object):
         topic = topic if topic else cfg.CONF.central_topic
 
         target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
-        self.client = rpc.get_client(target, version_cap='6.1')
+        self.client = rpc.get_client(target, version_cap='6.2')
 
     @classmethod
     def get_instance(cls):
@@ -205,11 +206,11 @@ class CentralAPI(object):
             recordset_id=recordset_id)
 
     def find_recordsets(self, context, criterion=None, marker=None, limit=None,
-                        sort_key=None, sort_dir=None):
+                        sort_key=None, sort_dir=None, force_index=False):
         return self.client.call(context, 'find_recordsets',
                                 criterion=criterion, marker=marker,
                                 limit=limit, sort_key=sort_key,
-                                sort_dir=sort_dir)
+                                sort_dir=sort_dir, force_index=force_index)
 
     def find_recordset(self, context, criterion=None):
         return self.client.call(context, 'find_recordset', criterion=criterion)
diff --git a/designate/central/service.py b/designate/central/service.py
index 3186f8865..e019ad9ad 100644
--- a/designate/central/service.py
+++ b/designate/central/service.py
@@ -185,7 +185,7 @@ def notification(notification_type):
 
 
 class Service(service.RPCService, service.Service):
-    RPC_API_VERSION = '6.1'
+    RPC_API_VERSION = '6.2'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -1337,12 +1337,13 @@ class Service(service.RPCService, service.Service):
         return recordset
 
     def find_recordsets(self, context, criterion=None, marker=None, limit=None,
-                        sort_key=None, sort_dir=None):
+                        sort_key=None, sort_dir=None, force_index=False):
         target = {'tenant_id': context.tenant}
         policy.check('find_recordsets', context, target)
 
         recordsets = self.storage.find_recordsets(context, criterion, marker,
-                                                  limit, sort_key, sort_dir)
+                                                  limit, sort_key, sort_dir,
+                                                  force_index)
 
         return recordsets
 
diff --git a/designate/common/config.py b/designate/common/config.py
index d32b6cb78..44c04d0a1 100644
--- a/designate/common/config.py
+++ b/designate/common/config.py
@@ -26,7 +26,8 @@ def set_defaults():
                        'X-Auth-Sudo-Tenant-ID',
                        'X-Auth-Sudo-Project-ID',
                        'X-Auth-All-Projects',
-                       'X-Designate-Edit-Managed-Records'],
+                       'X-Designate-Edit-Managed-Records',
+                       'OpenStack-DNS-Hide-Counts'],
         expose_headers=['X-OpenStack-Request-ID',
                         'Host'],
         allow_methods=['GET',
diff --git a/designate/context.py b/designate/context.py
index 0b63986a4..4c2a695b8 100644
--- a/designate/context.py
+++ b/designate/context.py
@@ -28,13 +28,14 @@ LOG = logging.getLogger(__name__)
 
 
 class DesignateContext(context.RequestContext):
     _all_tenants = False
+    _hide_counts = False
     _abandon = None
     original_tenant = None
     _edit_managed_records = False
 
     def __init__(self, service_catalog=None, all_tenants=False, abandon=None,
                  tsigkey_id=None, user_identity=None, original_tenant=None,
-                 edit_managed_records=False, **kwargs):
+                 edit_managed_records=False, hide_counts=False, **kwargs):
         # NOTE: user_identity may be passed in, but will be silently dropped as
         # it is a generated field based on several others.
@@ -48,6 +49,7 @@ class DesignateContext(context.RequestContext):
         self.all_tenants = all_tenants
         self.abandon = abandon
         self.edit_managed_records = edit_managed_records
+        self.hide_counts = hide_counts
 
     def deepcopy(self):
         d = self.to_dict()
@@ -80,7 +82,8 @@ class DesignateContext(context.RequestContext):
             'all_tenants': self.all_tenants,
             'abandon': self.abandon,
             'edit_managed_records': self.edit_managed_records,
-            'tsigkey_id': self.tsigkey_id
+            'tsigkey_id': self.tsigkey_id,
+            'hide_counts': self.hide_counts
         })
 
         return copy.deepcopy(d)
@@ -153,6 +156,14 @@ class DesignateContext(context.RequestContext):
             policy.check('all_tenants', self)
         self._all_tenants = value
 
+    @property
+    def hide_counts(self):
+        return self._hide_counts
+
+    @hide_counts.setter
+    def hide_counts(self, value):
+        self._hide_counts = value
+
     @property
     def abandon(self):
         return self._abandon
diff --git a/designate/sqlalchemy/base.py b/designate/sqlalchemy/base.py
index 8f012745d..747dca1ae 100644
--- a/designate/sqlalchemy/base.py
+++ b/designate/sqlalchemy/base.py
@@ -148,13 +148,18 @@ class SQLAlchemy(object):
 
         return query
 
-    def _apply_tenant_criteria(self, context, table, query):
+    def _apply_tenant_criteria(self, context, table, query,
+                               include_null_tenant=True):
         if hasattr(table.c, 'tenant_id'):
             if not context.all_tenants:
                 # NOTE: The query doesn't work with table.c.tenant_id is None,
                 # so I had to force flake8 to skip the check
-                query = query.where(or_(table.c.tenant_id == context.tenant,
-                                        table.c.tenant_id == None))  # NOQA
+                if include_null_tenant:
+                    query = query.where(or_(
+                        table.c.tenant_id == context.tenant,
+                        table.c.tenant_id == None))  # NOQA
+                else:
+                    query = query.where(table.c.tenant_id == context.tenant)
 
         return query
 
@@ -270,7 +275,8 @@ class SQLAlchemy(object):
                                       recordsets_table, records_table,
                                       one=False, marker=None, limit=None,
                                       sort_key=None, sort_dir=None, query=None,
-                                      apply_tenant_criteria=True):
+                                      apply_tenant_criteria=True,
+                                      force_index=False):
 
         sort_key = sort_key or 'created_at'
         sort_dir = sort_dir or 'asc'
@@ -278,6 +284,10 @@ class SQLAlchemy(object):
         status = criterion.pop('status', None)
         filtering_records = data or status
 
+        # sort key will be used for the ORDER BY key in query,
+        # needs to use the correct table index for different sort keys
+        index_hint = utils.get_rrset_index(sort_key) if force_index else None
+
         rzjoin = recordsets_table.join(
             zones_table,
             recordsets_table.c.zone_id == zones_table.c.id)
@@ -291,9 +301,13 @@ class SQLAlchemy(object):
                 zones_table.c.name]  # 1 - ZONE NAME
         ).select_from(rzjoin).\
             where(zones_table.c.deleted == '0')
+
         count_q = select([func.count(distinct(recordsets_table.c.id))]).\
             select_from(rzjoin).where(zones_table.c.deleted == '0')
 
+        if index_hint:
+            inner_q = inner_q.with_hint(recordsets_table, index_hint)
+
         if marker is not None:
             marker = utils.check_marker(recordsets_table, marker,
                                         self.session)
@@ -314,10 +328,12 @@ class SQLAlchemy(object):
                 raise exceptions.ValueError(six.text_type(value_error))
 
         if apply_tenant_criteria:
-            inner_q = self._apply_tenant_criteria(context, recordsets_table,
-                                                  inner_q)
+            inner_q = self._apply_tenant_criteria(
+                context, recordsets_table, inner_q,
+                include_null_tenant=False)
             count_q = self._apply_tenant_criteria(context, recordsets_table,
-                                                  count_q)
+                                                  count_q,
+                                                  include_null_tenant=False)
 
         inner_q = self._apply_criterion(recordsets_table, inner_q, criterion)
         count_q = self._apply_criterion(recordsets_table, count_q, criterion)
@@ -348,9 +364,14 @@ class SQLAlchemy(object):
             id_zname_map[r[0]] = r[1]
         formatted_ids = six.moves.map(operator.itemgetter(0), rows)
 
-        resultproxy = self.session.execute(count_q)
-        result = resultproxy.fetchone()
-        total_count = 0 if result is None else result[0]
+        # Count query does not scale well for large amount of recordsets,
+        # don't do it if the header 'OpenStack-DNS-Hide-Counts: True' exists
+        if context.hide_counts:
+            total_count = None
+        else:
+            resultproxy = self.session.execute(count_q)
+            result = resultproxy.fetchone()
+            total_count = 0 if result is None else result[0]
 
         # Join the 2 required tables
         rjoin = recordsets_table.outerjoin(
diff --git a/designate/sqlalchemy/utils.py b/designate/sqlalchemy/utils.py
index 01f88d8e6..d959eeca7 100644
--- a/designate/sqlalchemy/utils.py
+++ b/designate/sqlalchemy/utils.py
@@ -32,6 +32,16 @@ from designate import exceptions
 
 LOG = log.getLogger(__name__)
 
+RRSET_FILTERING_INDEX = {
+    'created_at': 'recordset_created_at',
+    'updated_at': 'rrset_updated_at',
+    'zone_id': 'rrset_zoneid',
+    'name': 'recordset_type_name',
+    'type': 'rrset_type',
+    'ttl': 'rrset_ttl',
+    'tenant_id': 'rrset_tenant_id',
+}
+
 
 def get_migration_manager(repo_path, url, init_version=None):
     migration_config = {
@@ -140,3 +150,13 @@ def check_marker(table, marker, session):
             raise
 
     return marker
+
+
+def get_rrset_index(sort_key):
+    rrset_index_hint = None
+    index = RRSET_FILTERING_INDEX.get(sort_key)
+
+    if index:
+        rrset_index_hint = 'USE INDEX (%s)' % index
+
+    return rrset_index_hint
diff --git a/designate/storage/base.py b/designate/storage/base.py
index b2856f6f6..8d3ba02e3 100644
--- a/designate/storage/base.py
+++ b/designate/storage/base.py
@@ -325,8 +325,8 @@ class Storage(DriverPlugin):
         """
 
     @abc.abstractmethod
-    def find_recordsets(self, context, criterion=None,
-                        marker=None, limit=None, sort_key=None, sort_dir=None):
+    def find_recordsets(self, context, criterion=None, marker=None, limit=None,
+                        sort_key=None, sort_dir=None, force_index=False):
         """
         Find RecordSets.
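
[Reviewer note, not part of the patch] To make the index-hint path above concrete, here is a minimal sketch of how the 'USE INDEX (...)' string returned by get_rrset_index() is expected to surface in the generated SQL once it is passed to Select.with_hint(), assuming a MySQL backend and one of the rrset_* indexes created by migration 099. The simplified table definition and the SQL shape shown in the comment are illustrative assumptions, not output captured from Designate.

# Hypothetical, self-contained sketch: a MySQL "USE INDEX" hint attached to a
# SQLAlchemy 1.x select(), mirroring inner_q.with_hint(recordsets_table,
# index_hint) in designate/sqlalchemy/base.py.
from sqlalchemy import Column, MetaData, String, Table, select
from sqlalchemy.dialects import mysql

metadata = MetaData()
# Simplified stand-in for Designate's recordsets table.
recordsets = Table(
    'recordsets', metadata,
    Column('id', String(32)),
    Column('type', String(10)),
)

query = select([recordsets.c.id]).where(recordsets.c.type == 'A')
# 'rrset_type' is one of the index names added by migration 099.
query = query.with_hint(recordsets, 'USE INDEX (rrset_type)')

# Rendered against the MySQL dialect, the hint lands in the FROM clause,
# roughly:
#   SELECT recordsets.id FROM recordsets USE INDEX (rrset_type)
#   WHERE recordsets.type = %s
print(query.compile(dialect=mysql.dialect()))
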
diff --git a/designate/storage/impl_sqlalchemy/__init__.py b/designate/storage/impl_sqlalchemy/__init__.py
index 61f4e243c..8490271b1 100644
--- a/designate/storage/impl_sqlalchemy/__init__.py
+++ b/designate/storage/impl_sqlalchemy/__init__.py
@@ -601,7 +601,8 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
 
     # RecordSet Methods
     def _find_recordsets(self, context, criterion, one=False, marker=None,
-                         limit=None, sort_key=None, sort_dir=None):
+                         limit=None, sort_key=None, sort_dir=None,
+                         force_index=False):
 
         # Check to see if the criterion can use the reverse_name column
         criterion = self._rname_check(criterion)
@@ -633,7 +634,8 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
             tc, recordsets = self._find_recordsets_with_records(
                 context, criterion, tables.zones, tables.recordsets,
                 tables.records, limit=limit, marker=marker,
-                sort_key=sort_key, sort_dir=sort_dir)
+                sort_key=sort_key, sort_dir=sort_dir,
+                force_index=force_index)
 
             recordsets.total_count = tc
@@ -710,10 +712,10 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
         return self._find_recordsets(context, {'id': recordset_id}, one=True)
 
     def find_recordsets(self, context, criterion=None, marker=None, limit=None,
-                        sort_key=None, sort_dir=None):
+                        sort_key=None, sort_dir=None, force_index=False):
         return self._find_recordsets(context, criterion, marker=marker,
-                                     limit=limit, sort_key=sort_key,
-                                     sort_dir=sort_dir)
+                                     sort_dir=sort_dir, sort_key=sort_key,
+                                     limit=limit, force_index=force_index)
 
     def find_recordset(self, context, criterion):
         return self._find_recordsets(context, criterion, one=True)
diff --git a/designate/storage/impl_sqlalchemy/migrate_repo/versions/099_add_rrset_indexes_for_filtering_perf.py b/designate/storage/impl_sqlalchemy/migrate_repo/versions/099_add_rrset_indexes_for_filtering_perf.py
new file mode 100644
index 000000000..c03b7ac86
--- /dev/null
+++ b/designate/storage/impl_sqlalchemy/migrate_repo/versions/099_add_rrset_indexes_for_filtering_perf.py
@@ -0,0 +1,36 @@
+# Copyright 2016 Rackspace
+#
+# Author: James Li
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_log import log as logging
+from sqlalchemy.schema import MetaData, Table, Index
+
+LOG = logging.getLogger(__name__)
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+    meta.bind = migrate_engine
+    recordsets_table = Table('recordsets', meta, autoload=True)
+
+    Index('rrset_updated_at', recordsets_table.c.updated_at
+          ).create(migrate_engine)
+    Index('rrset_zoneid', recordsets_table.c.zone_id
+          ).create(migrate_engine)
+    Index('rrset_type', recordsets_table.c.type).create(migrate_engine)
+    Index('rrset_ttl', recordsets_table.c.ttl).create(migrate_engine)
+    Index('rrset_tenant_id', recordsets_table.c.tenant_id
+          ).create(migrate_engine)
diff --git a/designate/tests/test_storage/test_sqlalchemy.py b/designate/tests/test_storage/test_sqlalchemy.py
index cfa00b4d1..61b969348 100644
--- a/designate/tests/test_storage/test_sqlalchemy.py
+++ b/designate/tests/test_storage/test_sqlalchemy.py
@@ -79,13 +79,18 @@ class SqlalchemyStorageTest(StorageTestCase, TestCase):
         "records": {
             "record_created_at": "CREATE INDEX record_created_at ON records (created_at)",  # noqa
             "records_tenant": "CREATE INDEX records_tenant ON records (tenant_id)",  # noqa
-            "update_status_index": "CREATE INDEX update_status_index ON records (status, zone_id, tenant_id, created_at, serial)"  # noqa
+            "update_status_index": "CREATE INDEX update_status_index ON records (status, zone_id, tenant_id, created_at, serial)",  # noqa
         },
         "recordsets": {
             "recordset_created_at": "CREATE INDEX recordset_created_at ON recordsets (created_at)",  # noqa
             "recordset_type_name": "CREATE INDEX recordset_type_name ON recordsets (type, name)",  # noqa
             "reverse_name_dom_id": "CREATE INDEX reverse_name_dom_id ON recordsets (reverse_name, zone_id)",  # noqa
-            "rrset_type_domainid": "CREATE INDEX rrset_type_domainid ON recordsets (type, zone_id)"  # noqa
+            "rrset_type_domainid": "CREATE INDEX rrset_type_domainid ON recordsets (type, zone_id)",  # noqa
+            "rrset_updated_at": "CREATE INDEX rrset_updated_at ON recordsets (updated_at)",  # noqa
+            "rrset_zoneid": "CREATE INDEX rrset_zoneid ON recordsets (zone_id)",  # noqa
+            "rrset_type": "CREATE INDEX rrset_type ON recordsets (type)",  # noqa
+            "rrset_ttl": "CREATE INDEX rrset_ttl ON recordsets (ttl)",  # noqa
+            "rrset_tenant_id": "CREATE INDEX rrset_tenant_id ON recordsets (tenant_id)",  # noqa
         },
         "zones": {
             "delayed_notify": "CREATE INDEX delayed_notify ON zones (delayed_notify)",  # noqa
diff --git a/designate/tests/test_utils.py b/designate/tests/test_utils.py
index d99e97b6c..b4f52ba0a 100644
--- a/designate/tests/test_utils.py
+++ b/designate/tests/test_utils.py
@@ -19,6 +19,7 @@ import tempfile
 import unittest
 
 import testtools
+from mock import Mock
 from jinja2 import Template
 
 from designate.tests import TestCase
@@ -114,22 +115,27 @@ class TestUtils(TestCase):
         self.assertEqual((host, port), ("abc", 25))
 
     def test_get_paging_params_invalid_limit(self):
+        context = Mock()
         for value in [9223372036854775809, -1]:
             with testtools.ExpectedException(exceptions.InvalidLimit):
-                utils.get_paging_params({'limit': value}, [])
+                utils.get_paging_params(context, {'limit': value}, [])
 
     def test_get_paging_params_max_limit(self):
+        context = Mock()
         self.config(max_limit_v2=1000, group='service:api')
-        result = utils.get_paging_params({'limit': "max"}, [])
+        result = utils.get_paging_params(context, {'limit': "max"}, [])
         self.assertEqual(result[1], 1000)
 
     def test_get_paging_params_invalid_sort_dir(self):
+        context = Mock()
         with testtools.ExpectedException(exceptions.InvalidSortDir):
-            utils.get_paging_params({'sort_dir': "dsc"}, [])
+            utils.get_paging_params(context, {'sort_dir': "dsc"}, [])
 
     def test_get_paging_params_invalid_sort_key(self):
+        context = Mock()
         with testtools.ExpectedException(exceptions.InvalidSortKey):
-            utils.get_paging_params({'sort_key': "dsc"}, ['asc', 'desc'])
+            utils.get_paging_params(context, {'sort_key': "dsc"},
+                                    ['asc', 'desc'])
 
 
 class SocketListenTest(unittest.TestCase):
diff --git a/designate/utils.py b/designate/utils.py
index 4433c5c19..57c4bc9c5 100644
--- a/designate/utils.py
+++ b/designate/utils.py
@@ -415,7 +415,7 @@ def split_host_port(string, default_port=53):
     return (host, port)
 
 
-def get_paging_params(params, sort_keys):
+def get_paging_params(context, params, sort_keys):
     """
     Extract any paging parameters
     """
@@ -459,6 +459,8 @@ def get_paging_params(params, sort_keys):
     elif sort_key and sort_key not in sort_keys:
         msg = 'sort key must be one of %(keys)s' % {'keys': sort_keys}
         raise exceptions.InvalidSortKey(msg)
+    elif sort_key == 'tenant_id' and not context.all_tenants:
+        sort_key = None
 
     return marker, limit, sort_key, sort_dir
diff --git a/etc/designate/designate.conf.sample b/etc/designate/designate.conf.sample
index d46216a1e..6870f8fc9 100644
--- a/etc/designate/designate.conf.sample
+++ b/etc/designate/designate.conf.sample
@@ -191,7 +191,7 @@ debug = False
 
 # Indicate which header field names may be used during the actual request.
 # (list value)
-#allow_headers = X-Auth-Token,X-Auth-Sudo-Tenant-ID,X-Auth-Sudo-Project-ID,X-Auth-All-Projects,X-Designate-Edit-Managed-Records
+#allow_headers = X-Auth-Token,X-Auth-Sudo-Tenant-ID,X-Auth-Sudo-Project-ID,X-Auth-All-Projects,X-Designate-Edit-Managed-Records,OpenStack-DNS-Hide-Counts
 
 
 [cors.subdomain]
@@ -214,7 +214,7 @@ debug = False
 
 # Indicate which header field names may be used during the actual request.
 # (list value)
-#allow_headers = X-Auth-Token,X-Auth-Sudo-Tenant-ID,X-Auth-Sudo-Project-ID,X-Auth-All-Projects,X-Designate-Edit-Managed-Records
+#allow_headers = X-Auth-Token,X-Auth-Sudo-Tenant-ID,X-Auth-Sudo-Project-ID,X-Auth-All-Projects,X-Designate-Edit-Managed-Records,OpenStack-DNS-Hide-Counts
 
 
 #-----------------------
 # Sink Service
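
[Reviewer note, not part of the patch] For anyone exercising this end to end, below is a rough python-requests sketch of the client side: a cross-zone recordset listing (the path that sets force_index in controllers/common.py) sent with the new OpenStack-DNS-Hide-Counts header so the count query is skipped. The endpoint URL, port, token, and filter values are placeholder assumptions; with counts hidden, total_count in the response metadata is expected to come back as null rather than a number.

# Hypothetical client-side sketch. The endpoint and token are placeholders;
# substitute real Keystone credentials and your Designate API URL.
import requests

DESIGNATE_ENDPOINT = 'http://designate.example.com:9001/v2'  # assumed URL
TOKEN = 'REPLACE_WITH_KEYSTONE_TOKEN'                        # assumed token

resp = requests.get(
    DESIGNATE_ENDPOINT + '/recordsets',     # cross-zone listing
    params={'limit': 100, 'type': 'A'},     # filters accepted by the API
    headers={
        'X-Auth-Token': TOKEN,
        # New header from this patch: skip the expensive COUNT(DISTINCT ...)
        'OpenStack-DNS-Hide-Counts': 'True',
    },
)
resp.raise_for_status()

# context.hide_counts short-circuits the count query in sqlalchemy/base.py,
# so 'total_count' in the metadata should be null for this request.
print(resp.json().get('metadata'))
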