diff --git a/designate/central/service.py b/designate/central/service.py
index e9831e802..a1abf8570 100644
--- a/designate/central/service.py
+++ b/designate/central/service.py
@@ -90,9 +90,7 @@ class Service(service.RPCService):
     @property
     def storage(self):
         if not self._storage:
-            # Get a storage connection
-            storage_driver = cfg.CONF['service:central'].storage_driver
-            self._storage = storage.get_storage(storage_driver)
+            self._storage = storage.get_storage()
         return self._storage
 
     @property
diff --git a/designate/cmd/status.py b/designate/cmd/status.py
index c6eb4f098..3a1be9b9a 100644
--- a/designate/cmd/status.py
+++ b/designate/cmd/status.py
@@ -18,10 +18,10 @@ from sqlalchemy import MetaData, Table, select, func
 
 import designate.conf
 from designate.i18n import _
-from designate.sqlalchemy import sql
+from designate.storage import sql
 
 # This import is not used, but is needed to register the storage:sqlalchemy
 # group.
-import designate.storage.impl_sqlalchemy  # noqa
+import designate.storage.sqlalchemy  # noqa
 from designate import utils
diff --git a/designate/conf/central.py b/designate/conf/central.py
index ec9dfe6a3..10b1fd18f 100644
--- a/designate/conf/central.py
+++ b/designate/conf/central.py
@@ -26,6 +26,9 @@ CENTRAL_OPTS = [
     cfg.IntOpt('threads', default=1000,
                help='Number of central greenthreads to spawn'),
     cfg.StrOpt('storage_driver', default='sqlalchemy',
+               deprecated_for_removal=True,
+               deprecated_reason='Alternative storage drivers are no longer '
+                                 'supported.',
                help='The storage driver to use'),
     cfg.IntOpt('max_zone_name_len', default=255,
                help="Maximum zone name length"),
diff --git a/designate/manage/database.py b/designate/manage/database.py
index c86946f15..500d0288d 100644
--- a/designate/manage/database.py
+++ b/designate/manage/database.py
@@ -31,11 +31,11 @@ LOG = logging.getLogger(__name__)
 class DatabaseCommands(base.Commands):
     def _get_alembic_config(self, db_url=None, stringio_buffer=sys.stdout):
         alembic_dir = os.path.join(os.path.dirname(__file__),
-                                   os.pardir, 'storage/impl_sqlalchemy')
+                                   os.pardir, 'storage/sqlalchemy')
         alembic_cfg = Config(os.path.join(alembic_dir, 'alembic.ini'),
                              stdout=stringio_buffer)
         alembic_cfg.set_main_option(
-            'script_location', 'designate.storage.impl_sqlalchemy:alembic')
+            'script_location', 'designate.storage.sqlalchemy:alembic')
         if db_url:
             alembic_cfg.set_main_option('sqlalchemy.url', db_url)
         else:
diff --git a/designate/mdns/service.py b/designate/mdns/service.py
index fa12e6e7c..f7cfeaf0f 100644
--- a/designate/mdns/service.py
+++ b/designate/mdns/service.py
@@ -54,9 +54,7 @@ class Service(service.Service):
     @property
     def storage(self):
         if not self._storage:
-            self._storage = storage.get_storage(
-                CONF['service:mdns'].storage_driver
-            )
+            self._storage = storage.get_storage()
         return self._storage
 
     @property
diff --git a/designate/producer/service.py b/designate/producer/service.py
index eb0e632d7..3fcec6a0b 100644
--- a/designate/producer/service.py
+++ b/designate/producer/service.py
@@ -54,8 +54,7 @@ class Service(service.RPCService):
     @property
     def storage(self):
         if not self._storage:
-            storage_driver = cfg.CONF['service:producer'].storage_driver
-            self._storage = storage.get_storage(storage_driver)
+            self._storage = storage.get_storage()
         return self._storage
 
     @property
diff --git a/designate/quota/impl_storage.py b/designate/quota/impl_storage.py
index ab03b70bf..11c49836b 100644
--- a/designate/quota/impl_storage.py
+++ b/designate/quota/impl_storage.py
@@ -13,7 +13,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from oslo_config import cfg from oslo_log import log as logging from designate import exceptions @@ -31,10 +30,7 @@ class StorageQuota(base.Quota): def __init__(self): super(StorageQuota, self).__init__() - - # TODO(kiall): Should this be tied to central's config? - storage_driver = cfg.CONF['service:central'].storage_driver - self.storage = storage.get_storage(storage_driver) + self.storage = storage.get_storage() def _get_quotas(self, context, tenant_id): quotas = self.storage.find_quotas(context, { diff --git a/designate/sqlalchemy/__init__.py b/designate/sqlalchemy/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/designate/storage/__init__.py b/designate/storage/__init__.py index 6b5d4df04..7e4799735 100644 --- a/designate/storage/__init__.py +++ b/designate/storage/__init__.py @@ -23,19 +23,17 @@ from oslo_db import exception as db_exception from oslo_log import log as logging from oslo_utils import excutils -from designate.sqlalchemy import sql -from designate.storage import base +from designate.storage import sql +from designate.storage import sqlalchemy LOG = logging.getLogger(__name__) RETRY_STATE = threading.local() -def get_storage(storage_driver): - """Return the engine class from the provided engine name""" - cls = base.Storage.get_driver(storage_driver) - - return cls() +def get_storage(): + """Return the engine class""" + return sqlalchemy.SQLAlchemyStorage() def _retry_on_deadlock(exc): diff --git a/designate/storage/base.py b/designate/storage/base.py deleted file mode 100644 index 5b9cd2210..000000000 --- a/designate/storage/base.py +++ /dev/null @@ -1,858 +0,0 @@ -# Copyright 2012 Managed I.T. -# -# Author: Kiall Mac Innes -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -from designate.plugin import DriverPlugin - - -class Storage(DriverPlugin, metaclass=abc.ABCMeta): - - """Base class for storage plugins""" - __plugin_ns__ = 'designate.storage' - __plugin_type__ = 'storage' - - @abc.abstractmethod - def create_quota(self, context, quota): - """ - Create a Quota. - - :param context: RPC Context. - :param quota: Quota object with the values to be created. - """ - - @abc.abstractmethod - def get_quota(self, context, quota_id): - """ - Get a Quota via ID. - - :param context: RPC Context. - :param quota_id: Quota ID to get. - """ - - @abc.abstractmethod - def find_quotas(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find Quotas - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. 
- """ - - @abc.abstractmethod - def find_quota(self, context, criterion): - """ - Find a single Quota. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_quota(self, context, quota): - """ - Update a Quota - - :param context: RPC Context. - :param quota: Quota to update. - """ - - @abc.abstractmethod - def delete_quota(self, context, quota_id): - """ - Delete a Quota via ID. - - :param context: RPC Context. - :param quota_id: Delete a Quota via ID - """ - - @abc.abstractmethod - def create_tld(self, context, tld): - """ - Create a TLD. - - :param context: RPC Context. - :param tld: Tld object with the values to be created. - """ - - @abc.abstractmethod - def get_tld(self, context, tld_id): - """ - Get a TLD via ID. - - :param context: RPC Context. - :param tld_id: TLD ID to get. - """ - - @abc.abstractmethod - def find_tlds(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find TLDs - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_tld(self, context, criterion): - """ - Find a single TLD. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_tld(self, context, tld): - """ - Update a TLD - - :param context: RPC Context. - :param tld: TLD to update. - """ - - @abc.abstractmethod - def delete_tld(self, context, tld_id): - """ - Delete a TLD via ID. - - :param context: RPC Context. - :param tld_id: Delete a TLD via ID - """ - - @abc.abstractmethod - def create_tsigkey(self, context, tsigkey): - """ - Create a TSIG Key. - - :param context: RPC Context. - :param tsigkey: TsigKey object with the values to be created. - """ - - @abc.abstractmethod - def find_tsigkeys(self, context, criterion=None, - marker=None, limit=None, sort_key=None, sort_dir=None): - """ - Find TSIG Keys. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def get_tsigkey(self, context, tsigkey_id): - """ - Get a TSIG Key via ID. - - :param context: RPC Context. - :param tsigkey_id: Server ID to get. - """ - - @abc.abstractmethod - def update_tsigkey(self, context, tsigkey): - """ - Update a TSIG Key - - :param context: RPC Context. - :param tsigkey: TSIG Keyto update. - """ - - @abc.abstractmethod - def delete_tsigkey(self, context, tsigkey_id): - """ - Delete a TSIG Key via ID. - - :param context: RPC Context. - :param tsigkey_id: Delete a TSIG Key via ID - """ - - @abc.abstractmethod - def find_tenants(self, context): - """ - Find all Tenants. - - :param context: RPC Context. - """ - - @abc.abstractmethod - def get_tenant(self, context, tenant_id): - """ - Get all Tenants. - - :param context: RPC Context. - :param tenant_id: ID of the Tenant. - """ - - @abc.abstractmethod - def count_tenants(self, context): - """ - Count tenants - - :param context: RPC Context. 
- """ - - @abc.abstractmethod - def create_zone(self, context, zone): - """ - Create a new Zone. - - :param context: RPC Context. - :param zone: Zone object with the values to be created. - """ - - @abc.abstractmethod - def get_zone(self, context, zone_id, apply_tenant_criteria=True): - """ - Get a Zone via its ID. - - :param context: RPC Context. - :param zone_id: ID of the Zone. - :param apply_tenant_criteria: Whether to filter results by project_id. - """ - - @abc.abstractmethod - def find_zones(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find zones - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_zone(self, context, criterion): - """ - Find a single Zone. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_zone(self, context, zone): - """ - Update a Zone - - :param context: RPC Context. - :param zone: Zone object. - """ - - @abc.abstractmethod - def delete_zone(self, context, zone_id): - """ - Delete a Zone - - :param context: RPC Context. - :param zone_id: Zone ID to delete. - """ - - @abc.abstractmethod - def purge_zone(self, context, zone): - """ - Purge a Zone - - :param context: RPC Context. - :param zone: Zone to delete. - """ - - @abc.abstractmethod - def count_zones(self, context, criterion=None): - """ - Count zones - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def share_zone(self, context, shared_zone): - """ - Share zone - - :param context: RPC Context. - :param shared_zone: Shared Zone dict - """ - - @abc.abstractmethod - def unshare_zone(self, context, zone_id, shared_zone_id): - """ - Unshare zone - - :param context: RPC Context. - :param shared_zone_id: Shared Zone Id - """ - - @abc.abstractmethod - def find_shared_zones(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find shared zones - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def get_shared_zone(self, context, zone_id, shared_zone_id): - """ - Get a shared zone via ID - - :param context: RPC Context. - :param shared_zone_id: Shared Zone Id - """ - - @abc.abstractmethod - def is_zone_shared_with_project(self, zone_id, project_id): - """ - Checks if a zone is shared with a project. - - :param zone_id: The zone ID to check. - :param project_id: The project ID to check. - :returns: Boolean True/False if the zone is shared with the project. - """ - - @abc.abstractmethod - def delete_zone_shares(self, zone_id): - """ - Delete all of the zone shares for a specific zone. - - :param zone_id: The zone ID to check. - """ - - @abc.abstractmethod - def create_recordset(self, context, zone_id, recordset): - """ - Create a recordset on a given Zone ID - - :param context: RPC Context. - :param zone_id: Zone ID to create the recordset in. 
- :param recordset: RecordSet object with the values to be created. - """ - - @abc.abstractmethod - def find_recordsets(self, context, criterion=None, marker=None, limit=None, - sort_key=None, sort_dir=None, force_index=False, - apply_tenant_criteria=True): - """ - Find RecordSets. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - :param apply_tenant_criteria: Whether to filter results by project_id. - """ - - @abc.abstractmethod - def find_recordsets_axfr(self, context, criterion=None): - """ - Find RecordSets. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def find_recordset(self, context, criterion, apply_tenant_criteria=True): - """ - Find a single RecordSet. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param apply_tenant_criteria: Whether to filter results by project_id. - """ - - @abc.abstractmethod - def update_recordset(self, context, recordset): - """ - Update a recordset - - :param context: RPC Context. - :param recordset: RecordSet to update - """ - - @abc.abstractmethod - def delete_recordset(self, context, recordset_id): - """ - Delete a recordset - - :param context: RPC Context. - :param recordset_id: RecordSet ID to delete - """ - - @abc.abstractmethod - def count_recordsets(self, context, criterion=None): - """ - Count recordsets - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def create_record(self, context, zone_id, recordset_id, record): - """ - Create a record on a given Zone ID - - :param context: RPC Context. - :param zone_id: Zone ID to create the record in. - :param recordset_id: RecordSet ID to create the record in. - :param record: Record object with the values to be created. - """ - - @abc.abstractmethod - def get_record(self, context, record_id): - """ - Get a record via ID - - :param context: RPC Context. - :param record_id: Record ID to get - """ - - @abc.abstractmethod - def find_records(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find Records. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_record(self, context, criterion): - """ - Find a single Record. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_record(self, context, record): - """ - Update a record - - :param context: RPC Context. - :param record: Record to update - """ - - @abc.abstractmethod - def delete_record(self, context, record_id): - """ - Delete a record - - :param context: RPC Context. - :param record_id: Record ID to delete - """ - - @abc.abstractmethod - def count_records(self, context, criterion=None): - """ - Count records - - :param context: RPC Context. - :param criterion: Criteria to filter by. 
- """ - - @abc.abstractmethod - def create_blacklist(self, context, blacklist): - """ - Create a Blacklist. - - :param context: RPC Context. - :param blacklist: Blacklist object with the values to be created. - """ - - @abc.abstractmethod - def get_blacklist(self, context, blacklist_id): - """ - Get a Blacklist via ID. - - :param context: RPC Context. - :param blacklist_id: Blacklist ID to get. - """ - - @abc.abstractmethod - def find_blacklists(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find Blacklists - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_blacklist(self, context, criterion): - """ - Find a single Blacklist. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_blacklist(self, context, blacklist): - """ - Update a Blacklist - - :param context: RPC Context. - :param blacklist: Blacklist to update. - """ - - @abc.abstractmethod - def delete_blacklist(self, context, blacklist_id): - """ - Delete a Blacklist via ID. - - :param context: RPC Context. - :param blacklist_id: Delete a Blacklist via ID - """ - - @abc.abstractmethod - def create_pool(self, context, pool): - """ - Create a Pool. - - :param context: RPC Context. - :param pool: Pool object with the values to be created. - """ - - @abc.abstractmethod - def find_pools(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find all Pools - - :param context: RPC Context. - :param criterion: Criteria by which to filter - :param marker: Resource ID used by paging. The next page will start - at the next resource after the marker - :param limit: Integer limit of objects on the page - :param sort_key: Key used to sort the returned list - :param sort_dir: Directions to sort after using sort_key - """ - - @abc.abstractmethod - def find_pool(self, context, criterion): - """ - Find a single Pool. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def get_pool(self, context, pool_id): - """ - Get a Pool via the id - - :param context: RPC Context. - :param pool_id: The ID of the pool to get - """ - - @abc.abstractmethod - def update_pool(self, context, pool): - """ - Update the specified pool - - :param context: RPC Context. - :param pool: Pool to update. - """ - - @abc.abstractmethod - def delete_pool(self, context, pool_id): - """ - Delete the pool with the matching id - - :param context: RPC Context. - :param pool_id: The ID of the pool to be deleted - """ - - @abc.abstractmethod - def create_pool_attribute(self, context, pool_id, pool_attribute): - """ - Create a PoolAttribute. - - :param context: RPC Context. - :param pool_id: The ID of the pool to which the attribute belongs. - :param pool_attribute: PoolAttribute object with the values created. - """ - - @abc.abstractmethod - def find_pool_attributes(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find all PoolAttributes - - :param context: RPC Context - :param criterion: Criteria by which to filer - :param marker: Resource ID used by paging. 
The next page will start - at the next resource after the marker - :param limit: Integer limit of objects on the page - :param sort_key: Key used to sort the returned list - :param sort_dir: Directions to sort after using sort_key - """ - - @abc.abstractmethod - def find_pool_attribute(self, context, criterion): - """ - Find a single PoolAttribute - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def get_pool_attribute(self, context, pool_attribute_id): - """ - Get a PoolAttribute via the ID - - :param context: RPC Context. - :param pool_attribute_id: The ID of the PoolAttribute to get - """ - - @abc.abstractmethod - def update_pool_attribute(self, context, pool_attribute): - """ - Update the specified pool - - :param context: RPC Context. - :param pool_attribute: PoolAttribute to update - """ - - @abc.abstractmethod - def delete_pool_attribute(self, context, pool_attribute_id): - """ - Delete the pool with the matching id - - :param context: RPC Context. - :param pool_attribute_id: The ID of the PoolAttribute to be deleted - """ - - @abc.abstractmethod - def create_zone_import(self, context, zone_import): - """ - Create a Zone Import. - - :param context: RPC Context. - :param zone_import: Zone Import object with the values to be created. - """ - - @abc.abstractmethod - def get_zone_import(self, context, zone_import_id): - """ - Get a Zone Import via ID. - - :param context: RPC Context. - :param zone_import_id: Zone Import ID to get. - """ - - @abc.abstractmethod - def find_zone_imports(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find Zone Imports - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_zone_import(self, context, criterion): - """ - Find a single Zone Import. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_zone_import(self, context, zone_import): - """ - Update a Zone Import - - :param context: RPC Context. - :param zone_import: Zone Import to update. - """ - - @abc.abstractmethod - def increment_serial(self, context, zone_id): - """ - Increment serial of a Zone - - :param context: RPC Context. - :param zone_id: ID of the Zone. - """ - - @abc.abstractmethod - def delete_zone_import(self, context, zone_import_id): - """ - Delete a Zone Import via ID. - - :param context: RPC Context. - :param zone_import_id: Delete a Zone Import via ID - """ - - @abc.abstractmethod - def create_zone_export(self, context, zone_export): - """ - Create a Zone Export. - - :param context: RPC Context. - :param zone_export: Zone Export object with the values to be created. - """ - - @abc.abstractmethod - def get_zone_export(self, context, zone_export_id): - """ - Get a Zone Export via ID. - - :param context: RPC Context. - :param zone_export_id: Zone Export ID to get. - """ - - @abc.abstractmethod - def find_zone_exports(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Find Zone Exports - - :param context: RPC Context. - :param criterion: Criteria to filter by. 
- :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_zone_export(self, context, criterion): - """ - Find a single Zone Export. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_zone_export(self, context, zone_export): - """ - Update a Zone Export - - :param context: RPC Context. - :param zone_export: Zone Export to update. - """ - - @abc.abstractmethod - def delete_zone_export(self, context, zone_export_id): - """ - Delete a Zone Export via ID. - - :param context: RPC Context. - :param zone_export_id: Delete a Zone Export via ID - """ - - @abc.abstractmethod - def find_service_statuses(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): - """ - Retrieve status for services - - :param context: RPC Context. - :param criterion: Criteria to filter by. - :param marker: Resource ID from which after the requested page will - start after - :param limit: Integer limit of objects of the page size after the - marker - :param sort_key: Key from which to sort after. - :param sort_dir: Direction to sort after using sort_key. - """ - - @abc.abstractmethod - def find_service_status(self, context, criterion): - """ - Find a single Service Status. - - :param context: RPC Context. - :param criterion: Criteria to filter by. - """ - - @abc.abstractmethod - def update_service_status(self, context, service_status): - """ - Update the Service status for a service. - - :param context: RPC Context. - :param service_status: Set the status for a service. - """ diff --git a/designate/sqlalchemy/sql.py b/designate/storage/sql.py similarity index 100% rename from designate/sqlalchemy/sql.py rename to designate/storage/sql.py diff --git a/designate/storage/impl_sqlalchemy/__init__.py b/designate/storage/sqlalchemy/__init__.py similarity index 81% rename from designate/storage/impl_sqlalchemy/__init__.py rename to designate/storage/sqlalchemy/__init__.py index 1a89709df..8fccc9576 100644 --- a/designate/storage/impl_sqlalchemy/__init__.py +++ b/designate/storage/sqlalchemy/__init__.py @@ -21,10 +21,9 @@ from sqlalchemy.sql.expression import or_, literal_column from designate import exceptions from designate import objects -from designate.sqlalchemy import base as sqlalchemy_base -from designate.sqlalchemy import sql -from designate.storage import base as storage_base -from designate.storage.impl_sqlalchemy import tables +from designate.storage import sql +from designate.storage.sqlalchemy import base +from designate.storage.sqlalchemy import tables LOG = logging.getLogger(__name__) @@ -32,7 +31,7 @@ LOG = logging.getLogger(__name__) MAXIMUM_SUBZONE_DEPTH = 128 -class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): +class SQLAlchemyStorage(base.SQLAlchemy): """SQLAlchemy connection""" __plugin_name__ = 'sqlalchemy' @@ -61,6 +60,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): sort_key, sort_dir) def create_quota(self, context, quota): + """ + Create a Quota. + + :param context: RPC Context. + :param quota: Quota object with the values to be created. 
+ """ if not isinstance(quota, objects.Quota): # TODO(kiall): Quotas should always use Objects quota = objects.Quota(**quota) @@ -69,23 +74,59 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): tables.quotas, quota, exceptions.DuplicateQuota) def get_quota(self, context, quota_id): + """ + Get a Quota via ID. + + :param context: RPC Context. + :param quota_id: Quota ID to get. + """ return self._find_quotas(context, {'id': quota_id}, one=True) def find_quotas(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find Quotas + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_quotas(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_quota(self, context, criterion): + """ + Find a single Quota. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_quotas(context, criterion, one=True) def update_quota(self, context, quota): + """ + Update a Quota + + :param context: RPC Context. + :param quota: Quota to update. + """ return self._update( context, tables.quotas, quota, exceptions.DuplicateQuota, exceptions.QuotaNotFound) def delete_quota(self, context, quota_id): + """ + Delete a Quota via ID. + + :param context: RPC Context. + :param quota_id: Delete a Quota via ID + """ # Fetch the existing quota, we'll need to return it. quota = self._find_quotas(context, {'id': quota_id}, one=True) return self._delete(context, tables.quotas, quota, @@ -100,26 +141,68 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): sort_key, sort_dir) def create_tld(self, context, tld): + """ + Create a TLD. + + :param context: RPC Context. + :param tld: Tld object with the values to be created. + """ return self._create( tables.tlds, tld, exceptions.DuplicateTld) def get_tld(self, context, tld_id): + """ + Get a TLD via ID. + + :param context: RPC Context. + :param tld_id: TLD ID to get. + """ return self._find_tlds(context, {'id': tld_id}, one=True) def find_tlds(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find TLDs + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_tlds(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_tld(self, context, criterion): + """ + Find a single TLD. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_tlds(context, criterion, one=True) def update_tld(self, context, tld): + """ + Update a TLD + + :param context: RPC Context. + :param tld: TLD to update. + """ return self._update( context, tables.tlds, tld, exceptions.DuplicateTld, exceptions.TldNotFound) def delete_tld(self, context, tld_id): + """ + Delete a TLD via ID. + + :param context: RPC Context. + :param tld_id: Delete a TLD via ID + """ # Fetch the existing tld, we'll need to return it. 
         tld = self._find_tlds(context, {'id': tld_id}, one=True)
         return self._delete(context, tables.tlds, tld, exceptions.TldNotFound)
@@ -133,27 +216,69 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
                                    sort_key, sort_dir)
 
     def create_tsigkey(self, context, tsigkey):
+        """
+        Create a TSIG Key.
+
+        :param context: RPC Context.
+        :param tsigkey: TsigKey object with the values to be created.
+        """
         return self._create(
             tables.tsigkeys, tsigkey, exceptions.DuplicateTsigKey)
 
     def get_tsigkey(self, context, tsigkey_id):
+        """
+        Get a TSIG Key via ID.
+
+        :param context: RPC Context.
+        :param tsigkey_id: TSIG Key ID to get.
+        """
         return self._find_tsigkeys(context, {'id': tsigkey_id}, one=True)
 
     def find_tsigkeys(self, context, criterion=None, marker=None,
                       limit=None, sort_key=None, sort_dir=None):
+        """
+        Find TSIG Keys.
+
+        :param context: RPC Context.
+        :param criterion: Criteria to filter by.
+        :param marker: Resource ID from which after the requested page will
+                       start after
+        :param limit: Integer limit of objects of the page size after the
+                      marker
+        :param sort_key: Key from which to sort after.
+        :param sort_dir: Direction to sort after using sort_key.
+        """
         return self._find_tsigkeys(context, criterion, marker=marker,
                                    limit=limit, sort_key=sort_key,
                                    sort_dir=sort_dir)
 
     def find_tsigkey(self, context, criterion):
+        """
+        Find a single TSIG Key.
+
+        :param context: RPC Context.
+        :param criterion: Criteria to filter by.
+        """
         return self._find_tsigkeys(context, criterion, one=True)
 
     def update_tsigkey(self, context, tsigkey):
+        """
+        Update a TSIG Key
+
+        :param context: RPC Context.
+        :param tsigkey: TSIG Key to update.
+        """
         return self._update(
             context, tables.tsigkeys, tsigkey,
             exceptions.DuplicateTsigKey, exceptions.TsigKeyNotFound)
 
     def delete_tsigkey(self, context, tsigkey_id):
+        """
+        Delete a TSIG Key via ID.
+
+        :param context: RPC Context.
+        :param tsigkey_id: Delete a TSIG Key via ID
+        """
         # Fetch the existing tsigkey, we'll need to return it.
         tsigkey = self._find_tsigkeys(context, {'id': tsigkey_id}, one=True)
         return self._delete(context, tables.tsigkeys, tsigkey,
@@ -163,6 +288,11 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
     # Tenant Methods
     ##
     def find_tenants(self, context):
+        """
+        Find all Tenants.
+
+        :param context: RPC Context.
+        """
         # returns an array of tenant_id & count of their zones
         query = select(tables.zones.c.tenant_id, func.count(tables.zones.c.id))
         query = self._apply_tenant_criteria(context, tables.zones, query)
@@ -182,6 +312,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
         return tenant_list
 
     def get_tenant(self, context, tenant_id):
+        """
+        Get Tenant.
+
+        :param context: RPC Context.
+        :param tenant_id: ID of the Tenant.
+        """
         # get list & count of all zones owned by given tenant_id
         query = select(tables.zones.c.name)
         query = self._apply_tenant_criteria(context, tables.zones, query)
@@ -198,6 +334,11 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
                               zones=[r[0] for r in results])
 
     def count_tenants(self, context):
+        """
+        Count tenants
+
+        :param context: RPC Context.
+        """
         # tenants are the owner of zones, count the number of unique tenants
         # select count(distinct tenant_id) from zones
         query = select(func.count(distinct(tables.zones.c.tenant_id)))
@@ -264,6 +405,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
         return zones
 
     def create_zone(self, context, zone):
+        """
+        Create a new Zone.
+
+        :param context: RPC Context.
+        :param zone: Zone object with the values to be created.
+        """
         # Patch in the reverse_name column
         extra_values = {'reverse_name': zone.name[::-1]}
@@ -288,22 +435,53 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
         return zone
 
     def get_zone(self, context, zone_id, apply_tenant_criteria=True):
+        """
+        Get a Zone via its ID.
+
+        :param context: RPC Context.
+        :param zone_id: ID of the Zone.
+        :param apply_tenant_criteria: Whether to filter results by project_id.
+        """
         zone = self._find_zones(context, {'id': zone_id}, one=True,
                                 apply_tenant_criteria=apply_tenant_criteria)
         return zone
 
     def find_zones(self, context, criterion=None, marker=None,
                    limit=None, sort_key=None, sort_dir=None):
+        """
+        Find zones
+
+        :param context: RPC Context.
+        :param criterion: Criteria to filter by.
+        :param marker: Resource ID from which after the requested page will
+                       start after
+        :param limit: Integer limit of objects of the page size after the
+                      marker
+        :param sort_key: Key from which to sort after.
+        :param sort_dir: Direction to sort after using sort_key.
+        """
         zones = self._find_zones(context, criterion, marker=marker,
                                  limit=limit, sort_key=sort_key,
                                  sort_dir=sort_dir)
         return zones
 
     def find_zone(self, context, criterion):
+        """
+        Find a single Zone.
+
+        :param context: RPC Context.
+        :param criterion: Criteria to filter by.
+        """
         zone = self._find_zones(context, criterion, one=True)
         return zone
 
     def update_zone(self, context, zone):
+        """
+        Update a Zone
+
+        :param context: RPC Context.
+        :param zone: Zone object.
+        """
         tenant_id_changed = False
         if 'tenant_id' in zone.obj_what_changed():
             tenant_id_changed = True
@@ -451,6 +629,10 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
 
     def delete_zone(self, context, zone_id):
         """
+        Delete a Zone
+
+        :param context: RPC Context.
+        :param zone_id: Zone ID to delete.
         """
         # Fetch the existing zone, we'll need to return it.
         zone = self._find_zones(context, {'id': zone_id}, one=True)
@@ -458,7 +640,11 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
                             exceptions.ZoneNotFound)
 
     def purge_zone(self, context, zone):
-        """Effectively remove a zone database record.
+        """
+        Purge a Zone, effectively removing the zone database record.
+
+        :param context: RPC Context.
+        :param zone: Zone to delete.
         """
         return self._delete(context, tables.zones, zone,
                             exceptions.ZoneNotFound, hard_delete=True)
@@ -479,10 +665,16 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
 
         return current.parent_zone_id
 
     def purge_zones(self, context, criterion, limit):
-        """Purge deleted zones.
+        """
+        Purge Zones, effectively removing the zones database records.
+
+        Reparent orphan children, if any.
+        Transactions/locks are not needed.
-        :returns: number of purged zones
+
+        :param context: RPC Context.
+        :param criterion: Criteria to filter by.
+        :param limit: Integer limit of objects of the page size after the
+            marker
         """
         if 'deleted' in criterion:
             context.show_deleted = True
@@ -520,6 +712,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
         return len(zones)
 
     def count_zones(self, context, criterion=None):
+        """
+        Count zones
+
+        :param context: RPC Context.
+        :param criterion: Criteria to filter by.
+ """ query = select(func.count(tables.zones.c.id)) query = self._apply_criterion(tables.zones, query, criterion) query = self._apply_tenant_criteria(context, tables.zones, query) @@ -570,10 +768,22 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return None def share_zone(self, context, shared_zone): + """ + Share zone + + :param context: RPC Context. + :param shared_zone: Shared Zone dict + """ return self._create(tables.shared_zones, shared_zone, exceptions.DuplicateSharedZone) def unshare_zone(self, context, zone_id, shared_zone_id): + """ + Unshare zone + + :param context: RPC Context. + :param shared_zone_id: Shared Zone Id + """ shared_zone = self._find_shared_zones( context, {'id': shared_zone_id, 'zone_id': zone_id}, one=True ) @@ -582,17 +792,42 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): def find_shared_zones(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find shared zones + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_shared_zones( context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir ) def get_shared_zone(self, context, zone_id, shared_zone_id): + """ + Get a shared zone via ID + + :param context: RPC Context. + :param shared_zone_id: Shared Zone Id + """ return self._find_shared_zones( context, {'id': shared_zone_id, 'zone_id': zone_id}, one=True ) def is_zone_shared_with_project(self, zone_id, project_id): + """ + Checks if a zone is shared with a project. + + :param zone_id: The zone ID to check. + :param project_id: The project ID to check. + :returns: Boolean True/False if the zone is shared with the project. + """ query = select(literal_column('true')) query = query.where(tables.shared_zones.c.zone_id == zone_id) query = query.where( @@ -601,6 +836,11 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return session.scalar(query) is not None def delete_zone_shares(self, zone_id): + """ + Delete all of the zone shares for a specific zone. + + :param zone_id: The zone ID to check. + """ query = tables.shared_zones.delete().where( tables.shared_zones.c.zone_id == zone_id) with sql.get_write_session() as session: @@ -713,8 +953,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): else: tc, recordsets = self._find_recordsets_with_records( - context, criterion, tables.zones, tables.recordsets, - tables.records, limit=limit, marker=marker, + context, criterion, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, force_index=force_index, apply_tenant_criteria=apply_tenant_criteria, @@ -725,8 +964,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return recordsets def find_recordsets_axfr(self, context, criterion=None): - query = None + """ + Find RecordSets. + :param context: RPC Context. + :param criterion: Criteria to filter by. 
+ """ # Check to see if the criterion can use the reverse_name column criterion = self._rname_check(criterion) @@ -749,6 +992,13 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return raw_rows def create_recordset(self, context, zone_id, recordset): + """ + Create a recordset on a given Zone ID + + :param context: RPC Context. + :param zone_id: Zone ID to create the recordset in. + :param recordset: RecordSet object with the values to be created. + """ recordset.tenant_id = context.project_id recordset.zone_id = zone_id @@ -795,17 +1045,43 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): def find_recordsets(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None, force_index=False, apply_tenant_criteria=True): + """ + Find RecordSets. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + :param apply_tenant_criteria: Whether to filter results by project_id. + """ return self._find_recordsets( context, criterion, marker=marker, sort_dir=sort_dir, sort_key=sort_key, limit=limit, force_index=force_index, apply_tenant_criteria=apply_tenant_criteria) def find_recordset(self, context, criterion, apply_tenant_criteria=True): + """ + Find a single RecordSet. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param apply_tenant_criteria: Whether to filter results by project_id. + """ return self._find_recordsets( context, criterion, one=True, apply_tenant_criteria=apply_tenant_criteria) def update_recordset(self, context, recordset): + """ + Update a recordset + + :param context: RPC Context. + :param recordset: RecordSet to update + """ recordset = self._update( context, tables.recordsets, recordset, exceptions.DuplicateRecordSet, exceptions.RecordSetNotFound, @@ -852,6 +1128,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return recordset def delete_recordset(self, context, recordset_id): + """ + Delete a recordset + + :param context: RPC Context. + :param recordset_id: RecordSet ID to delete + """ # Fetch the existing recordset, we'll need to return it. recordset = self._find_recordsets( context, {'id': recordset_id}, one=True) @@ -860,6 +1142,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): exceptions.RecordSetNotFound) def count_recordsets(self, context, criterion=None): + """ + Count recordsets + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ # Ensure that we return only active recordsets rjoin = tables.recordsets.join( tables.zones, @@ -906,6 +1194,14 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return md5sum.hexdigest() def create_record(self, context, zone_id, recordset_id, record): + """ + Create a record on a given Zone ID + + :param context: RPC Context. + :param zone_id: Zone ID to create the record in. + :param recordset_id: RecordSet ID to create the record in. + :param record: Record object with the values to be created. 
+ """ record.tenant_id = context.project_id record.zone_id = zone_id record.recordset_id = recordset_id @@ -915,18 +1211,48 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): tables.records, record, exceptions.DuplicateRecord) def get_record(self, context, record_id): + """ + Get a record via ID + + :param context: RPC Context. + :param record_id: Record ID to get + """ return self._find_records(context, {'id': record_id}, one=True) def find_records(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find Records. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_records(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_record(self, context, criterion): + """ + Find a single Record. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_records(context, criterion, one=True) def update_record(self, context, record): + """ + Update a record + + :param context: RPC Context. + :param record: Record to update + """ if record.obj_what_changed(): record.hash = self._recalculate_record_hash(record) @@ -935,12 +1261,24 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): exceptions.RecordNotFound) def delete_record(self, context, record_id): + """ + Delete a record + + :param context: RPC Context. + :param record_id: Record ID to delete + """ # Fetch the existing record, we'll need to return it. record = self._find_records(context, {'id': record_id}, one=True) return self._delete(context, tables.records, record, exceptions.RecordNotFound) def count_records(self, context, criterion=None): + """ + Count records + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ # Ensure that we return only active records rjoin = tables.records.join( tables.zones, @@ -974,27 +1312,69 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): one, marker, limit, sort_key, sort_dir) def create_blacklist(self, context, blacklist): + """ + Create a Blacklist. + + :param context: RPC Context. + :param blacklist: Blacklist object with the values to be created. + """ return self._create( tables.blacklists, blacklist, exceptions.DuplicateBlacklist) def get_blacklist(self, context, blacklist_id): + """ + Get a Blacklist via ID. + + :param context: RPC Context. + :param blacklist_id: Blacklist ID to get. + """ return self._find_blacklists(context, {'id': blacklist_id}, one=True) def find_blacklists(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find Blacklists + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_blacklists(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_blacklist(self, context, criterion): + """ + Find a single Blacklist. + + :param context: RPC Context. 
+ :param criterion: Criteria to filter by. + """ return self._find_blacklists(context, criterion, one=True) def update_blacklist(self, context, blacklist): + """ + Update a Blacklist + + :param context: RPC Context. + :param blacklist: Blacklist to update. + """ return self._update( context, tables.blacklists, blacklist, exceptions.DuplicateBlacklist, exceptions.BlacklistNotFound) def delete_blacklist(self, context, blacklist_id): + """ + Delete a Blacklist via ID. + + :param context: RPC Context. + :param blacklist_id: Delete a Blacklist via ID + """ # Fetch the existing blacklist, we'll need to return it. blacklist = self._find_blacklists( context, {'id': blacklist_id}, one=True) @@ -1039,6 +1419,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return pools def create_pool(self, context, pool): + """ + Create a Pool. + + :param context: RPC Context. + :param pool: Pool object with the values to be created. + """ pool = self._create( tables.pools, pool, exceptions.DuplicatePool, ['attributes', 'ns_records', 'nameservers', 'targets', @@ -1080,18 +1466,47 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return pool def get_pool(self, context, pool_id): + """ + Get a Pool via the id + + :param context: RPC Context. + :param pool_id: The ID of the pool to get + """ return self._find_pools(context, {'id': pool_id}, one=True) def find_pools(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find all Pools + + :param context: RPC Context. + :param criterion: Criteria by which to filter + :param marker: Resource ID used by paging. The next page will start + at the next resource after the marker + :param limit: Integer limit of objects on the page + :param sort_key: Key used to sort the returned list + :param sort_dir: Directions to sort after using sort_key + """ return self._find_pools(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_pool(self, context, criterion): + """ + Find a single Pool. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_pools(context, criterion, one=True) def update_pool(self, context, pool): + """ + Update the specified pool + + :param context: RPC Context. + :param pool: Pool to update. + """ pool = self._update(context, tables.pools, pool, exceptions.DuplicatePool, exceptions.PoolNotFound, ['attributes', 'ns_records', 'nameservers', @@ -1109,6 +1524,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return updated_pool def delete_pool(self, context, pool_id): + """ + Delete the pool with the matching id + + :param context: RPC Context. + :param pool_id: The ID of the pool to be deleted + """ pool = self._find_pools(context, {'id': pool_id}, one=True) return self._delete(context, tables.pools, pool, @@ -1123,30 +1544,72 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): marker, limit, sort_key, sort_dir) def create_pool_attribute(self, context, pool_id, pool_attribute): + """ + Create a PoolAttribute. + + :param context: RPC Context. + :param pool_id: The ID of the pool to which the attribute belongs. + :param pool_attribute: PoolAttribute object with the values created. 
+ """ pool_attribute.pool_id = pool_id return self._create(tables.pool_attributes, pool_attribute, exceptions.DuplicatePoolAttribute) def get_pool_attribute(self, context, pool_attribute_id): + """ + Get a PoolAttribute via the ID + + :param context: RPC Context. + :param pool_attribute_id: The ID of the PoolAttribute to get + """ return self._find_pool_attributes( context, {'id': pool_attribute_id}, one=True) def find_pool_attributes(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find all PoolAttributes + + :param context: RPC Context + :param criterion: Criteria by which to filer + :param marker: Resource ID used by paging. The next page will start + at the next resource after the marker + :param limit: Integer limit of objects on the page + :param sort_key: Key used to sort the returned list + :param sort_dir: Directions to sort after using sort_key + """ return self._find_pool_attributes(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_pool_attribute(self, context, criterion): + """ + Find a single PoolAttribute + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_pool_attributes(context, criterion, one=True) def update_pool_attribute(self, context, pool_attribute): + """ + Update the specified pool + + :param context: RPC Context. + :param pool_attribute: PoolAttribute to update + """ return self._update(context, tables.pool_attributes, pool_attribute, exceptions.DuplicatePoolAttribute, exceptions.PoolAttributeNotFound) def delete_pool_attribute(self, context, pool_attribute_id): + """ + Delete the pool with the matching id + + :param context: RPC Context. + :param pool_attribute_id: The ID of the PoolAttribute to be deleted + """ pool_attribute = self._find_pool_attributes( context, {'id': pool_attribute_id}, one=True) deleted_pool_attribute = self._delete( @@ -1733,28 +2196,70 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return zone_imports def create_zone_import(self, context, zone_import): + """ + Create a Zone Import. + + :param context: RPC Context. + :param zone_import: Zone Import object with the values to be created. + """ return self._create( tables.zone_tasks, zone_import, exceptions.DuplicateZoneImport) def get_zone_import(self, context, zone_import_id): + """ + Get a Zone Import via ID. + + :param context: RPC Context. + :param zone_import_id: Zone Import ID to get. + """ return self._find_zone_imports(context, {'id': zone_import_id}, one=True) def find_zone_imports(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find Zone Imports + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_zone_imports(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_zone_import(self, context, criterion): + """ + Find a single Zone Import. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_zone_imports(context, criterion, one=True) def update_zone_import(self, context, zone_import): + """ + Update a Zone Import + + :param context: RPC Context. 
+ :param zone_import: Zone Import to update. + """ return self._update( context, tables.zone_tasks, zone_import, exceptions.DuplicateZoneImport, exceptions.ZoneImportNotFound) def delete_zone_import(self, context, zone_import_id): + """ + Delete a Zone Import via ID. + + :param context: RPC Context. + :param zone_import_id: Delete a Zone Import via ID + """ # Fetch the existing zone_import, we'll need to return it. zone_import = self._find_zone_imports(context, {'id': zone_import_id}, one=True) @@ -1778,28 +2283,70 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return zone_exports def create_zone_export(self, context, zone_export): + """ + Create a Zone Export. + + :param context: RPC Context. + :param zone_export: Zone Export object with the values to be created. + """ return self._create( tables.zone_tasks, zone_export, exceptions.DuplicateZoneExport) def get_zone_export(self, context, zone_export_id): + """ + Get a Zone Export via ID. + + :param context: RPC Context. + :param zone_export_id: Zone Export ID to get. + """ return self._find_zone_exports(context, {'id': zone_export_id}, one=True) def find_zone_exports(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Find Zone Exports + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. + """ return self._find_zone_exports(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def find_zone_export(self, context, criterion): + """ + Find a single Zone Export. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_zone_exports(context, criterion, one=True) def update_zone_export(self, context, zone_export): + """ + Update a Zone Export + + :param context: RPC Context. + :param zone_export: Zone Export to update. + """ return self._update( context, tables.zone_tasks, zone_export, exceptions.DuplicateZoneExport, exceptions.ZoneExportNotFound) def delete_zone_export(self, context, zone_export_id): + """ + Delete a Zone Export via ID. + + :param context: RPC Context. + :param zone_export_id: Delete a Zone Export via ID + """ # Fetch the existing zone_export, we'll need to return it. zone_export = self._find_zone_exports(context, {'id': zone_export_id}, one=True) @@ -1831,20 +2378,50 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): criterion, one, marker, limit, sort_key, sort_dir) def find_service_status(self, context, criterion): + """ + Find a single Service Status. + + :param context: RPC Context. + :param criterion: Criteria to filter by. + """ return self._find_service_statuses(context, criterion, one=True) def find_service_statuses(self, context, criterion=None, marker=None, limit=None, sort_key=None, sort_dir=None): + """ + Retrieve status for services + + :param context: RPC Context. + :param criterion: Criteria to filter by. + :param marker: Resource ID from which after the requested page will + start after + :param limit: Integer limit of objects of the page size after the + marker + :param sort_key: Key from which to sort after. + :param sort_dir: Direction to sort after using sort_key. 
+ """ return self._find_service_statuses(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) def create_service_status(self, context, service_status): + """ + Create a Service status for a service. + + :param context: RPC Context. + :param service_status: The status of a service. + """ return self._create( tables.service_status, service_status, exceptions.DuplicateServiceStatus) def update_service_status(self, context, service_status): + """ + Update the Service status for a service. + + :param context: RPC Context. + :param service_status: Set the status for a service. + """ return self._update( context, tables.service_status, service_status, exceptions.DuplicateServiceStatus, diff --git a/designate/storage/impl_sqlalchemy/alembic.ini b/designate/storage/sqlalchemy/alembic.ini similarity index 100% rename from designate/storage/impl_sqlalchemy/alembic.ini rename to designate/storage/sqlalchemy/alembic.ini diff --git a/designate/storage/impl_sqlalchemy/alembic/README b/designate/storage/sqlalchemy/alembic/README similarity index 67% rename from designate/storage/impl_sqlalchemy/alembic/README rename to designate/storage/sqlalchemy/alembic/README index 81b6a350a..f104a2244 100644 --- a/designate/storage/impl_sqlalchemy/alembic/README +++ b/designate/storage/sqlalchemy/alembic/README @@ -1,5 +1,5 @@ Please use the "designate-manage database" command for database management. Developers adding new migrations can run 'alembic -m ""' from -the designate/storage/impl_sqlalchemy directory where the alembic.ini file is +the designate/storage/sqlalchemy directory where the alembic.ini file is located. diff --git a/designate/storage/impl_sqlalchemy/alembic/env.py b/designate/storage/sqlalchemy/alembic/env.py similarity index 100% rename from designate/storage/impl_sqlalchemy/alembic/env.py rename to designate/storage/sqlalchemy/alembic/env.py diff --git a/designate/storage/impl_sqlalchemy/alembic/legacy_utils.py b/designate/storage/sqlalchemy/alembic/legacy_utils.py similarity index 100% rename from designate/storage/impl_sqlalchemy/alembic/legacy_utils.py rename to designate/storage/sqlalchemy/alembic/legacy_utils.py diff --git a/designate/storage/impl_sqlalchemy/alembic/script.py.mako b/designate/storage/sqlalchemy/alembic/script.py.mako similarity index 100% rename from designate/storage/impl_sqlalchemy/alembic/script.py.mako rename to designate/storage/sqlalchemy/alembic/script.py.mako diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/0bcf910ea823_add_zone_attributes.py b/designate/storage/sqlalchemy/alembic/versions/0bcf910ea823_add_zone_attributes.py similarity index 96% rename from designate/storage/impl_sqlalchemy/alembic/versions/0bcf910ea823_add_zone_attributes.py rename to designate/storage/sqlalchemy/alembic/versions/0bcf910ea823_add_zone_attributes.py index 531d11b1d..4d4a3117e 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/0bcf910ea823_add_zone_attributes.py +++ b/designate/storage/sqlalchemy/alembic/versions/0bcf910ea823_add_zone_attributes.py @@ -24,8 +24,8 @@ from alembic import op from oslo_utils import timeutils import sqlalchemy as sa -from designate.sqlalchemy.types import UUID -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.types import UUID from designate import utils # revision identifiers, used by Alembic. 
diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/15b34ff3ecb8_fix_service_charset.py b/designate/storage/sqlalchemy/alembic/versions/15b34ff3ecb8_fix_service_charset.py similarity index 95% rename from designate/storage/impl_sqlalchemy/alembic/versions/15b34ff3ecb8_fix_service_charset.py rename to designate/storage/sqlalchemy/alembic/versions/15b34ff3ecb8_fix_service_charset.py index a385ef722..07212b0c2 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/15b34ff3ecb8_fix_service_charset.py +++ b/designate/storage/sqlalchemy/alembic/versions/15b34ff3ecb8_fix_service_charset.py @@ -22,7 +22,7 @@ Create Date: 2022-08-01 16:53:34.612019 """ from alembic import op -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. revision = '15b34ff3ecb8' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/304d41c3847a_add_services.py b/designate/storage/sqlalchemy/alembic/versions/304d41c3847a_add_services.py similarity index 94% rename from designate/storage/impl_sqlalchemy/alembic/versions/304d41c3847a_add_services.py rename to designate/storage/sqlalchemy/alembic/versions/304d41c3847a_add_services.py index 02e6a09a9..dab9d0f59 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/304d41c3847a_add_services.py +++ b/designate/storage/sqlalchemy/alembic/versions/304d41c3847a_add_services.py @@ -23,8 +23,8 @@ Create Date: 2022-08-01 16:41:55.139558 from alembic import op import sqlalchemy as sa -from designate.sqlalchemy.types import UUID -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.types import UUID from designate import utils # revision identifiers, used by Alembic. diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/7977deaa5167_add_rrset_indexes_for_filtering_perf.py b/designate/storage/sqlalchemy/alembic/versions/7977deaa5167_add_rrset_indexes_for_filtering_perf.py similarity index 95% rename from designate/storage/impl_sqlalchemy/alembic/versions/7977deaa5167_add_rrset_indexes_for_filtering_perf.py rename to designate/storage/sqlalchemy/alembic/versions/7977deaa5167_add_rrset_indexes_for_filtering_perf.py index 12abecbdc..1ac83c71f 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/7977deaa5167_add_rrset_indexes_for_filtering_perf.py +++ b/designate/storage/sqlalchemy/alembic/versions/7977deaa5167_add_rrset_indexes_for_filtering_perf.py @@ -22,7 +22,7 @@ Create Date: 2022-08-01 17:13:01.429689 """ from alembic import op -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. 
revision = '7977deaa5167' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/867a331ce1fc_domain_to_zone_rename.py b/designate/storage/sqlalchemy/alembic/versions/867a331ce1fc_domain_to_zone_rename.py similarity index 98% rename from designate/storage/impl_sqlalchemy/alembic/versions/867a331ce1fc_domain_to_zone_rename.py rename to designate/storage/sqlalchemy/alembic/versions/867a331ce1fc_domain_to_zone_rename.py index b2c984196..2c7b2ef85 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/867a331ce1fc_domain_to_zone_rename.py +++ b/designate/storage/sqlalchemy/alembic/versions/867a331ce1fc_domain_to_zone_rename.py @@ -23,8 +23,8 @@ Create Date: 2022-07-29 18:41:19.427853 from alembic import op import sqlalchemy as sa -from designate.sqlalchemy.types import UUID -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.types import UUID # revision identifiers, used by Alembic. revision = '867a331ce1fc' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/91eb1eb7c882_support_caa_records.py b/designate/storage/sqlalchemy/alembic/versions/91eb1eb7c882_support_caa_records.py similarity index 95% rename from designate/storage/impl_sqlalchemy/alembic/versions/91eb1eb7c882_support_caa_records.py rename to designate/storage/sqlalchemy/alembic/versions/91eb1eb7c882_support_caa_records.py index 668569ada..a0cc37391 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/91eb1eb7c882_support_caa_records.py +++ b/designate/storage/sqlalchemy/alembic/versions/91eb1eb7c882_support_caa_records.py @@ -23,7 +23,7 @@ Create Date: 2022-08-01 17:32:21.386556 from alembic import op import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. revision = '91eb1eb7c882' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/93a00a815f07_unique_service_status.py b/designate/storage/sqlalchemy/alembic/versions/93a00a815f07_unique_service_status.py similarity index 96% rename from designate/storage/impl_sqlalchemy/alembic/versions/93a00a815f07_unique_service_status.py rename to designate/storage/sqlalchemy/alembic/versions/93a00a815f07_unique_service_status.py index 6071a7baf..41e9b7f5d 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/93a00a815f07_unique_service_status.py +++ b/designate/storage/sqlalchemy/alembic/versions/93a00a815f07_unique_service_status.py @@ -25,7 +25,7 @@ from alembic import op from oslo_log import log as logging import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. 
revision = '93a00a815f07' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/a005af3aa38e_add_increment_serial.py b/designate/storage/sqlalchemy/alembic/versions/a005af3aa38e_add_increment_serial.py similarity index 100% rename from designate/storage/impl_sqlalchemy/alembic/versions/a005af3aa38e_add_increment_serial.py rename to designate/storage/sqlalchemy/alembic/versions/a005af3aa38e_add_increment_serial.py diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/a69b45715cc1_add_delayed_notify_column.py b/designate/storage/sqlalchemy/alembic/versions/a69b45715cc1_add_delayed_notify_column.py similarity index 95% rename from designate/storage/impl_sqlalchemy/alembic/versions/a69b45715cc1_add_delayed_notify_column.py rename to designate/storage/sqlalchemy/alembic/versions/a69b45715cc1_add_delayed_notify_column.py index 93bd6c5f4..96deab4f2 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/a69b45715cc1_add_delayed_notify_column.py +++ b/designate/storage/sqlalchemy/alembic/versions/a69b45715cc1_add_delayed_notify_column.py @@ -23,7 +23,7 @@ Create Date: 2022-07-29 21:30:12.127816 from alembic import op import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. revision = 'a69b45715cc1' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/b20189fd288e_shared_zone.py b/designate/storage/sqlalchemy/alembic/versions/b20189fd288e_shared_zone.py similarity index 96% rename from designate/storage/impl_sqlalchemy/alembic/versions/b20189fd288e_shared_zone.py rename to designate/storage/sqlalchemy/alembic/versions/b20189fd288e_shared_zone.py index b068e218c..ac333f740 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/b20189fd288e_shared_zone.py +++ b/designate/storage/sqlalchemy/alembic/versions/b20189fd288e_shared_zone.py @@ -20,7 +20,7 @@ Create Date: 2022-09-22 20:50:03.056609 from alembic import op import sqlalchemy as sa -from designate.sqlalchemy.types import UUID +from designate.storage.sqlalchemy.types import UUID from designate import utils # revision identifiers, used by Alembic. diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/b8999fd10721_support_naptr_records.py b/designate/storage/sqlalchemy/alembic/versions/b8999fd10721_support_naptr_records.py similarity index 95% rename from designate/storage/impl_sqlalchemy/alembic/versions/b8999fd10721_support_naptr_records.py rename to designate/storage/sqlalchemy/alembic/versions/b8999fd10721_support_naptr_records.py index ec456d4c1..32ba7b7bb 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/b8999fd10721_support_naptr_records.py +++ b/designate/storage/sqlalchemy/alembic/versions/b8999fd10721_support_naptr_records.py @@ -23,7 +23,7 @@ Create Date: 2022-08-01 17:25:33.058845 from alembic import op import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. 
revision = 'b8999fd10721' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/bfcfc4a07487_unique_ns_record.py b/designate/storage/sqlalchemy/alembic/versions/bfcfc4a07487_unique_ns_record.py similarity index 96% rename from designate/storage/impl_sqlalchemy/alembic/versions/bfcfc4a07487_unique_ns_record.py rename to designate/storage/sqlalchemy/alembic/versions/bfcfc4a07487_unique_ns_record.py index 6f355a605..1a99f12f8 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/bfcfc4a07487_unique_ns_record.py +++ b/designate/storage/sqlalchemy/alembic/versions/bfcfc4a07487_unique_ns_record.py @@ -26,7 +26,7 @@ from alembic import op from oslo_log import log as logging import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. revision = 'bfcfc4a07487' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/c9f427f7180a_liberty.py b/designate/storage/sqlalchemy/alembic/versions/c9f427f7180a_liberty.py similarity index 99% rename from designate/storage/impl_sqlalchemy/alembic/versions/c9f427f7180a_liberty.py rename to designate/storage/sqlalchemy/alembic/versions/c9f427f7180a_liberty.py index 62b7135fc..1047cdec9 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/c9f427f7180a_liberty.py +++ b/designate/storage/sqlalchemy/alembic/versions/c9f427f7180a_liberty.py @@ -26,8 +26,8 @@ from oslo_utils import timeutils import sqlalchemy as sa from designate.conf import central -from designate.sqlalchemy.types import UUID -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.types import UUID # revision identifiers, used by Alembic. revision = 'c9f427f7180a' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/d04819112169_new_pools_tables.py b/designate/storage/sqlalchemy/alembic/versions/d04819112169_new_pools_tables.py similarity index 97% rename from designate/storage/impl_sqlalchemy/alembic/versions/d04819112169_new_pools_tables.py rename to designate/storage/sqlalchemy/alembic/versions/d04819112169_new_pools_tables.py index d321fb512..7af13e41c 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/d04819112169_new_pools_tables.py +++ b/designate/storage/sqlalchemy/alembic/versions/d04819112169_new_pools_tables.py @@ -24,8 +24,8 @@ from alembic import op from oslo_utils import timeutils import sqlalchemy as sa -from designate.sqlalchemy.types import UUID -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.types import UUID from designate import utils # revision identifiers, used by Alembic. 
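
[Reviewer note, not part of the patch: the migration edits above are path-only; the alembic workflow itself is unchanged. A minimal sketch of a new migration written against the renamed package follows; the revision identifiers, table, and column are placeholders, not anything this change introduces.]

"""Placeholder migration illustrating the renamed import paths."""
from alembic import op
import sqlalchemy as sa

# The UUID type now lives under designate.storage.sqlalchemy, per this patch.
from designate.storage.sqlalchemy.types import UUID

# revision identifiers, used by Alembic. Placeholder values only.
revision = 'ffffffffffff'
down_revision = 'b20189fd288e'  # example parent; use the actual head revision


def upgrade():
    # Hypothetical column, purely to exercise the relocated UUID type.
    op.add_column('zone_attributes',
                  sa.Column('example_ref', UUID, nullable=True))
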
diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/d9a1883e93e9_add_fks.py b/designate/storage/sqlalchemy/alembic/versions/d9a1883e93e9_add_fks.py similarity index 97% rename from designate/storage/impl_sqlalchemy/alembic/versions/d9a1883e93e9_add_fks.py rename to designate/storage/sqlalchemy/alembic/versions/d9a1883e93e9_add_fks.py index 81d4a557d..7a46557de 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/d9a1883e93e9_add_fks.py +++ b/designate/storage/sqlalchemy/alembic/versions/d9a1883e93e9_add_fks.py @@ -22,7 +22,7 @@ Create Date: 2022-07-29 20:41:51.855014 """ from alembic import op -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. revision = 'd9a1883e93e9' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/e5e2199ed76e_support_cert_records.py b/designate/storage/sqlalchemy/alembic/versions/e5e2199ed76e_support_cert_records.py similarity index 95% rename from designate/storage/impl_sqlalchemy/alembic/versions/e5e2199ed76e_support_cert_records.py rename to designate/storage/sqlalchemy/alembic/versions/e5e2199ed76e_support_cert_records.py index f759ef6c5..11c64bb7c 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/e5e2199ed76e_support_cert_records.py +++ b/designate/storage/sqlalchemy/alembic/versions/e5e2199ed76e_support_cert_records.py @@ -23,7 +23,7 @@ Create Date: 2022-08-01 17:34:45.569101 from alembic import op import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. revision = 'e5e2199ed76e' diff --git a/designate/storage/impl_sqlalchemy/alembic/versions/f9f969f9d85e_change_managed_column_types.py b/designate/storage/sqlalchemy/alembic/versions/f9f969f9d85e_change_managed_column_types.py similarity index 97% rename from designate/storage/impl_sqlalchemy/alembic/versions/f9f969f9d85e_change_managed_column_types.py rename to designate/storage/sqlalchemy/alembic/versions/f9f969f9d85e_change_managed_column_types.py index 3cec49405..c7c5c369b 100644 --- a/designate/storage/impl_sqlalchemy/alembic/versions/f9f969f9d85e_change_managed_column_types.py +++ b/designate/storage/sqlalchemy/alembic/versions/f9f969f9d85e_change_managed_column_types.py @@ -23,7 +23,7 @@ Create Date: 2022-07-29 21:18:35.403634 from alembic import op import sqlalchemy as sa -from designate.storage.impl_sqlalchemy.alembic import legacy_utils +from designate.storage.sqlalchemy.alembic import legacy_utils # revision identifiers, used by Alembic. 
revision = 'f9f969f9d85e' diff --git a/designate/sqlalchemy/base.py b/designate/storage/sqlalchemy/base.py similarity index 76% rename from designate/sqlalchemy/base.py rename to designate/storage/sqlalchemy/base.py index 5512a7bec..79954ce3e 100644 --- a/designate/sqlalchemy/base.py +++ b/designate/storage/sqlalchemy/base.py @@ -25,12 +25,80 @@ from sqlalchemy import select, or_, between, func, distinct from designate import exceptions from designate import objects -from designate.sqlalchemy import sql -from designate.sqlalchemy import utils +from designate.storage import sql +from designate.storage.sqlalchemy import tables +from designate.storage.sqlalchemy import utils LOG = logging.getLogger(__name__) +RECORDSET_QUERY_TABLES = ( + # RS Info + tables.recordsets.c.id, # 0 - RS ID + tables.recordsets.c.version, # 1 - RS Version + tables.recordsets.c.created_at, # 2 - RS Created + tables.recordsets.c.updated_at, # 3 - RS Updated + tables.recordsets.c.tenant_id, # 4 - RS Tenant + tables.recordsets.c.zone_id, # 5 - RS Zone + tables.recordsets.c.name, # 6 - RS Name + tables.recordsets.c.type, # 7 - RS Type + tables.recordsets.c.ttl, # 8 - RS TTL + tables.recordsets.c.description, # 9 - RS Desc + # R Info + tables.records.c.id, # 10 - R ID + tables.records.c.version, # 11 - R Version + tables.records.c.created_at, # 12 - R Created + tables.records.c.updated_at, # 13 - R Updated + tables.records.c.tenant_id, # 14 - R Tenant + tables.records.c.zone_id, # 15 - R Zone + tables.records.c.recordset_id, # 16 - R RSet + tables.records.c.data, # 17 - R Data + tables.records.c.description, # 18 - R Desc + tables.records.c.hash, # 19 - R Hash + tables.records.c.managed, # 20 - R Mngd Flg + tables.records.c.managed_plugin_name, # 21 - R Mngd Plg + tables.records.c.managed_resource_type, # 22 - R Mngd Type + tables.records.c.managed_resource_region, # 23 - R Mngd Rgn + tables.records.c.managed_resource_id, # 24 - R Mngd ID + tables.records.c.managed_tenant_id, # 25 - R Mngd T ID + tables.records.c.status, # 26 - R Status + tables.records.c.action, # 27 - R Action + tables.records.c.serial # 28 - R Serial +) +RECORDSET_MAP = { + 'id': 0, + 'version': 1, + 'created_at': 2, + 'updated_at': 3, + 'tenant_id': 4, + 'zone_id': 5, + 'name': 6, + 'type': 7, + 'ttl': 8, + 'description': 9, +} +RECORD_MAP = { + 'id': 10, + 'version': 11, + 'created_at': 12, + 'updated_at': 13, + 'tenant_id': 14, + 'zone_id': 15, + 'recordset_id': 16, + 'data': 17, + 'description': 18, + 'hash': 19, + 'managed': 20, + 'managed_plugin_name': 21, + 'managed_resource_type': 22, + 'managed_resource_region': 23, + 'managed_resource_id': 24, + 'managed_tenant_id': 25, + 'status': 26, + 'action': 27, + 'serial': 28, +} + def _set_object_from_model(obj, model, **extra): """Update a DesignateObject with the values from a SQLA Model""" @@ -230,13 +298,11 @@ class SQLAlchemy(object, metaclass=abc.ABCMeta): except ValueError as value_error: raise exceptions.ValueError(str(value_error)) - def _find_recordsets_with_records(self, context, criterion, zones_table, - recordsets_table, records_table, - one=False, marker=None, limit=None, - sort_key=None, sort_dir=None, query=None, + def _find_recordsets_with_records(self, context, criterion, + marker=None, limit=None, + sort_key=None, sort_dir=None, apply_tenant_criteria=True, force_index=False): - sort_key = sort_key or 'created_at' sort_dir = sort_dir or 'asc' data = criterion.pop('data', None) @@ -247,37 +313,39 @@ class SQLAlchemy(object, metaclass=abc.ABCMeta): # needs to use the correct table 
index for different sort keys index_hint = utils.get_rrset_index(sort_key) if force_index else None - rzjoin = recordsets_table.join( - zones_table, - recordsets_table.c.zone_id == zones_table.c.id) + rzjoin = tables.recordsets.join( + tables.zones, + tables.recordsets.c.zone_id == tables.zones.c.id + ) if filtering_records: rzjoin = rzjoin.join( - records_table, - recordsets_table.c.id == records_table.c.recordset_id) + tables.records, + tables.recordsets.c.id == tables.records.c.recordset_id + ) inner_q = ( - select(recordsets_table.c.id, # 0 - RS ID - zones_table.c.name). # 1 - ZONE NAME + select(tables.recordsets.c.id, # 0 - RS ID + tables.zones.c.name). # 1 - ZONE NAME select_from(rzjoin). - where(zones_table.c.deleted == '0') + where(tables.zones.c.deleted == '0') ) count_q = ( - select(func.count(distinct(recordsets_table.c.id))). - select_from(rzjoin).where(zones_table.c.deleted == '0') + select(func.count(distinct(tables.recordsets.c.id))). + select_from(rzjoin).where(tables.zones.c.deleted == '0') ) if index_hint: - inner_q = inner_q.with_hint(recordsets_table, index_hint, + inner_q = inner_q.with_hint(tables.recordsets, index_hint, dialect_name='mysql') if marker is not None: - marker = utils.check_marker(recordsets_table, marker) + marker = utils.check_marker(tables.recordsets, marker) try: inner_q = utils.paginate_query( - inner_q, recordsets_table, limit, + inner_q, tables.recordsets, limit, [sort_key, 'id'], marker=marker, sort_dir=sort_dir) @@ -292,26 +360,26 @@ class SQLAlchemy(object, metaclass=abc.ABCMeta): if apply_tenant_criteria: inner_q = self._apply_tenant_criteria( - context, recordsets_table, inner_q, + context, tables.recordsets, inner_q, include_null_tenant=False) - count_q = self._apply_tenant_criteria(context, recordsets_table, + count_q = self._apply_tenant_criteria(context, tables.recordsets, count_q, include_null_tenant=False) - inner_q = self._apply_criterion(recordsets_table, inner_q, criterion) - count_q = self._apply_criterion(recordsets_table, count_q, criterion) + inner_q = self._apply_criterion(tables.recordsets, inner_q, criterion) + count_q = self._apply_criterion(tables.recordsets, count_q, criterion) if filtering_records: records_criterion = dict((k, v) for k, v in ( ('data', data), ('status', status)) if v is not None) - inner_q = self._apply_criterion(records_table, inner_q, + inner_q = self._apply_criterion(tables.records, inner_q, records_criterion) - count_q = self._apply_criterion(records_table, count_q, + count_q = self._apply_criterion(tables.records, count_q, records_criterion) - inner_q = self._apply_deleted_criteria(context, recordsets_table, + inner_q = self._apply_deleted_criteria(context, tables.recordsets, inner_q) - count_q = self._apply_deleted_criteria(context, recordsets_table, + count_q = self._apply_deleted_criteria(context, tables.recordsets, count_q) # Get the list of IDs needed. 
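
[Reviewer note, not part of the patch: with the tables module now imported at module level, callers of _find_recordsets_with_records no longer thread the zones/recordsets/records table objects through each call. A hypothetical invocation under the narrowed signature, from inside a SQLAlchemyStorage method with an assumed zone_id, would look like:]

# Sketch of a caller under the new signature; criterion values are assumed.
results = self._find_recordsets_with_records(
    context,
    {'zone_id': zone_id, 'type': 'A'},  # criterion
    limit=100,
    sort_key='created_at',
    sort_dir='asc',
    force_index=True,  # apply the sort-key-specific MySQL index hint
)
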
@@ -339,87 +407,18 @@ class SQLAlchemy(object, metaclass=abc.ABCMeta): total_count = 0 if result is None else result[0] # Join the 2 required tables - rjoin = recordsets_table.outerjoin( - records_table, - records_table.c.recordset_id == recordsets_table.c.id) - - query = select( - # RS Info - recordsets_table.c.id, # 0 - RS ID - recordsets_table.c.version, # 1 - RS Version - recordsets_table.c.created_at, # 2 - RS Created - recordsets_table.c.updated_at, # 3 - RS Updated - recordsets_table.c.tenant_id, # 4 - RS Tenant - recordsets_table.c.zone_id, # 5 - RS Zone - recordsets_table.c.name, # 6 - RS Name - recordsets_table.c.type, # 7 - RS Type - recordsets_table.c.ttl, # 8 - RS TTL - recordsets_table.c.description, # 9 - RS Desc - # R Info - records_table.c.id, # 10 - R ID - records_table.c.version, # 11 - R Version - records_table.c.created_at, # 12 - R Created - records_table.c.updated_at, # 13 - R Updated - records_table.c.tenant_id, # 14 - R Tenant - records_table.c.zone_id, # 15 - R Zone - records_table.c.recordset_id, # 16 - R RSet - records_table.c.data, # 17 - R Data - records_table.c.description, # 18 - R Desc - records_table.c.hash, # 19 - R Hash - records_table.c.managed, # 20 - R Mngd Flg - records_table.c.managed_plugin_name, # 21 - R Mngd Plg - records_table.c.managed_resource_type, # 22 - R Mngd Type - records_table.c.managed_resource_region, # 23 - R Mngd Rgn - records_table.c.managed_resource_id, # 24 - R Mngd ID - records_table.c.managed_tenant_id, # 25 - R Mngd T ID - records_table.c.status, # 26 - R Status - records_table.c.action, # 27 - R Action - records_table.c.serial # 28 - R Serial - ).select_from(rjoin) - - query = query.where( - recordsets_table.c.id.in_(formatted_ids) + rjoin = tables.recordsets.outerjoin( + tables.records, + tables.records.c.recordset_id == tables.recordsets.c.id ) - # These make looking up indexes for the Raw Rows much easier, - # and maintainable + query = select(RECORDSET_QUERY_TABLES).select_from(rjoin) - rs_map = { - "id": 0, - "version": 1, - "created_at": 2, - "updated_at": 3, - "tenant_id": 4, - "zone_id": 5, - "name": 6, - "type": 7, - "ttl": 8, - "description": 9, - } + query = query.where( + tables.recordsets.c.id.in_(formatted_ids) + ) - r_map = { - "id": 10, - "version": 11, - "created_at": 12, - "updated_at": 13, - "tenant_id": 14, - "zone_id": 15, - "recordset_id": 16, - "data": 17, - "description": 18, - "hash": 19, - "managed": 20, - "managed_plugin_name": 21, - "managed_resource_type": 22, - "managed_resource_region": 23, - "managed_resource_id": 24, - "managed_tenant_id": 25, - "status": 26, - "action": 27, - "serial": 28, - } - - query, sort_dirs = utils.sort_query(query, recordsets_table, + query, sort_dirs = utils.sort_query(query, tables.recordsets, [sort_key, 'id'], sort_dir=sort_dir) @@ -447,11 +446,11 @@ class SQLAlchemy(object, metaclass=abc.ABCMeta): # Set up a new rrset current_rrset = objects.RecordSet() - rrset_id = record[rs_map['id']] + rrset_id = record[RECORDSET_MAP['id']] # Add all the loaded vars into RecordSet object - for key, value in rs_map.items(): + for key, value in RECORDSET_MAP.items(): setattr(current_rrset, key, record[value]) current_rrset.zone_name = id_zname_map[current_rrset.id] @@ -459,20 +458,20 @@ class SQLAlchemy(object, metaclass=abc.ABCMeta): current_rrset.records = objects.RecordList() - if record[r_map['id']] is not None: + if record[RECORD_MAP['id']] is not None: rrdata = objects.Record() - for key, value in r_map.items(): + for key, value in RECORD_MAP.items(): setattr(rrdata, key, 
record[value]) current_rrset.records.append(rrdata) else: # We've already got a rrset, add the rdata - if record[r_map['id']] is not None: + if record[RECORD_MAP['id']] is not None: rrdata = objects.Record() - for key, value in r_map.items(): + for key, value in RECORD_MAP.items(): setattr(rrdata, key, record[value]) current_rrset.records.append(rrdata) diff --git a/designate/storage/impl_sqlalchemy/tables.py b/designate/storage/sqlalchemy/tables.py similarity index 99% rename from designate/storage/impl_sqlalchemy/tables.py rename to designate/storage/sqlalchemy/tables.py index aa815ed9f..04b27be38 100644 --- a/designate/storage/impl_sqlalchemy/tables.py +++ b/designate/storage/sqlalchemy/tables.py @@ -21,7 +21,7 @@ from oslo_config import cfg from oslo_db.sqlalchemy import types from oslo_utils import timeutils -from designate.sqlalchemy.types import UUID +from designate.storage.sqlalchemy.types import UUID from designate import utils diff --git a/designate/sqlalchemy/types.py b/designate/storage/sqlalchemy/types.py similarity index 100% rename from designate/sqlalchemy/types.py rename to designate/storage/sqlalchemy/types.py diff --git a/designate/sqlalchemy/utils.py b/designate/storage/sqlalchemy/utils.py similarity index 99% rename from designate/sqlalchemy/utils.py rename to designate/storage/sqlalchemy/utils.py index 3b9b70bfd..a118ed97a 100644 --- a/designate/sqlalchemy/utils.py +++ b/designate/storage/sqlalchemy/utils.py @@ -26,7 +26,7 @@ from sqlalchemy import select from designate import exceptions from designate.i18n import _ -from designate.sqlalchemy import sql +from designate.storage import sql LOG = log.getLogger(__name__) diff --git a/designate/tests/__init__.py b/designate/tests/__init__.py index ac371ab7d..82920a1ad 100644 --- a/designate/tests/__init__.py +++ b/designate/tests/__init__.py @@ -49,7 +49,7 @@ CONF.import_opt('storage_driver', 'designate.central', group='service:central') CONF.import_opt('auth_strategy', 'designate.api', group='service:api') -CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy', +CONF.import_opt('connection', 'designate.storage.sqlalchemy', group='storage:sqlalchemy') CONF.import_opt('emitter_type', 'designate.heartbeat_emitter', group="heartbeat_emitter") @@ -397,8 +397,7 @@ class TestCase(base.BaseTestCase): self.admin_context = self.get_admin_context() self.admin_context_all_tenants = self.get_admin_context( all_tenants=True) - storage_driver = CONF['service:central'].storage_driver - self.storage = storage.get_storage(storage_driver) + self.storage = storage.get_storage() # Setup the Default Pool with some useful settings self._setup_default_pool() diff --git a/designate/tests/test_central/test_service.py b/designate/tests/test_central/test_service.py index 21b29aec9..12e760b51 100644 --- a/designate/tests/test_central/test_service.py +++ b/designate/tests/test_central/test_service.py @@ -37,8 +37,8 @@ import testtools from designate import exceptions from designate import objects -from designate.storage.impl_sqlalchemy import tables from designate.storage import sql +from designate.storage.sqlalchemy import tables import designate.tests from designate.tests import fixtures from designate import utils diff --git a/designate/tests/test_producer/test_tasks.py b/designate/tests/test_producer/test_tasks.py index 0aaebb3c4..07bbc0c61 100644 --- a/designate/tests/test_producer/test_tasks.py +++ b/designate/tests/test_producer/test_tasks.py @@ -21,8 +21,8 @@ from oslo_log import log as logging from oslo_utils import timeutils from 
designate.producer import tasks -from designate.storage.impl_sqlalchemy import tables from designate.storage import sql +from designate.storage.sqlalchemy import tables from designate.tests import fixtures from designate.tests import TestCase from designate.worker import rpcapi as worker_api diff --git a/designate/tests/test_sqlalchemy.py b/designate/tests/test_sqlalchemy.py index b411d5798..11a120245 100644 --- a/designate/tests/test_sqlalchemy.py +++ b/designate/tests/test_sqlalchemy.py @@ -19,7 +19,7 @@ from unittest import mock import sqlalchemy as sa from sqlalchemy.sql import operators -from designate.sqlalchemy import base +from designate.storage.sqlalchemy import base from designate.tests import TestCase metadata = sa.MetaData() diff --git a/designate/tests/test_storage/__init__.py b/designate/tests/test_storage/__init__.py index 6f2dc7f0c..e69de29bb 100644 --- a/designate/tests/test_storage/__init__.py +++ b/designate/tests/test_storage/__init__.py @@ -1,3195 +0,0 @@ -# Copyright 2012 Managed I.T. -# -# Author: Kiall Mac Innes -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import math -from unittest import mock - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_messaging.rpc import dispatcher as rpc_dispatcher -import testtools - -from designate.conf.mdns import DEFAULT_MDNS_PORT -from designate import exceptions -from designate import objects -from designate.storage.base import Storage as StorageBase -from designate.utils import generate_uuid - - -LOG = logging.getLogger(__name__) - - -class StorageTestCase(object): - # TODO(kiall): Someone, Somewhere, could probably make use of a - # assertNestedDictContainsSubset(), cleanup and put somewhere - # better. - def assertNestedDictContainsSubset(self, expected, actual): - for key, value in expected.items(): - if isinstance(value, dict): - self.assertNestedDictContainsSubset(value, actual.get(key, {})) - - elif isinstance(value, list): - self.assertEqual(len(value), len(actual[key])) - - for index, item in enumerate(value): - self.assertNestedDictContainsSubset( - item, actual[key][index]) - - else: - self.assertEqual(value, actual[key]) - - def create_quota(self, **kwargs): - """ - This create method has been kept in the StorageTestCase class as quotas - are treated differently to other resources in Central. - """ - - context = kwargs.pop('context', self.admin_context) - fixture = kwargs.pop('fixture', 0) - - values = self.get_quota_fixture(fixture=fixture, values=kwargs) - - if 'tenant_id' not in values: - values['tenant_id'] = context.project_id - - return self.storage.create_quota(context, values) - - def create_pool_nameserver(self, pool, **kwargs): - # NOTE(kiall): We add this method here, rather than in the base test - # case, as the base methods expect to make a central API - # call. If a central API method is exposed for this, we - # should remove this and add to the base. 
- context = kwargs.pop('context', self.admin_context) - fixture = kwargs.pop('fixture', 0) - - values = self.get_pool_nameserver_fixture( - fixture=fixture, values=kwargs) - - if 'pool_id' not in values: - values['pool_id'] = pool.id - - return self.storage.create_pool_nameserver( - context, pool.id, objects.PoolNameserver.from_dict(values)) - - def create_pool_target(self, pool, **kwargs): - # NOTE(kiall): We add this method here, rather than in the base test - # case, as the base methods expect to make a central API - # call. If a central API method is exposed for this, we - # should remove this and add to the base. - context = kwargs.pop('context', self.admin_context) - fixture = kwargs.pop('fixture', 0) - - values = self.get_pool_target_fixture( - fixture=fixture, values=kwargs) - - if 'pool_id' not in values: - values['pool_id'] = pool.id - - return self.storage.create_pool_target( - context, pool.id, objects.PoolTarget.from_dict(values)) - - def create_pool_also_notify(self, pool, **kwargs): - # NOTE(kiall): We add this method here, rather than in the base test - # case, as the base methods expect to make a central API - # call. If a central API method is exposed for this, we - # should remove this and add to the base. - context = kwargs.pop('context', self.admin_context) - fixture = kwargs.pop('fixture', 0) - - values = self.get_pool_also_notify_fixture( - fixture=fixture, values=kwargs) - - if 'pool_id' not in values: - values['pool_id'] = pool.id - - return self.storage.create_pool_also_notify( - context, pool.id, objects.PoolAlsoNotify.from_dict(values)) - - # Paging Tests - def _ensure_paging(self, data, method, criterion=None): - """ - Given an array of created items we iterate through them making sure - they match up to things returned by paged results. 
- """ - results = None - item_number = 0 - - criterion = criterion or {} - - for current_page in range(0, int(math.ceil(float(len(data)) / 2))): - LOG.critical('Validating results on page %d', current_page) - - if results is not None: - results = method( - self.admin_context, - limit=2, - marker=results[-1]['id'], - criterion=criterion - ) - else: - results = method(self.admin_context, limit=2, - criterion=criterion) - - LOG.critical('Results: %d', len(results)) - - for result_number, result in enumerate(results): - LOG.critical('Validating result %d on page %d', result_number, - current_page) - self.assertEqual( - data[item_number]['id'], results[result_number]['id']) - - item_number += 1 - - def test_paging_marker_not_found(self): - with testtools.ExpectedException(exceptions.MarkerNotFound): - self.storage.find_pool_attributes( - self.admin_context, marker=generate_uuid(), limit=5) - - def test_paging_marker_invalid(self): - with testtools.ExpectedException(exceptions.InvalidMarker): - self.storage.find_pool_attributes( - self.admin_context, marker='4') - - def test_paging_limit_invalid(self): - with testtools.ExpectedException(exceptions.ValueError): - self.storage.find_pool_attributes( - self.admin_context, limit='z') - - def test_paging_sort_dir_invalid(self): - with testtools.ExpectedException(exceptions.ValueError): - self.storage.find_pool_attributes( - self.admin_context, sort_dir='invalid_sort_dir') - - def test_paging_sort_key_invalid(self): - with testtools.ExpectedException(exceptions.InvalidSortKey): - self.storage.find_pool_attributes( - self.admin_context, sort_key='invalid_sort_key') - - # Interface Tests - def test_interface(self): - self._ensure_interface(StorageBase, self.storage.__class__) - - # Quota Tests - def test_create_quota(self): - values = self.get_quota_fixture() - values['tenant_id'] = self.admin_context.project_id - - result = self.storage.create_quota(self.admin_context, values) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(self.admin_context.project_id, result['tenant_id']) - self.assertEqual(values['resource'], result['resource']) - self.assertEqual(values['hard_limit'], result['hard_limit']) - - def test_create_quota_duplicate(self): - # Create the initial quota - self.create_quota() - - with testtools.ExpectedException(exceptions.DuplicateQuota): - self.create_quota() - - def test_find_quotas(self): - actual = self.storage.find_quotas(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a single quota - quota_one = self.create_quota() - - actual = self.storage.find_quotas(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(quota_one['tenant_id'], actual[0]['tenant_id']) - self.assertEqual(quota_one['resource'], actual[0]['resource']) - self.assertEqual(quota_one['hard_limit'], actual[0]['hard_limit']) - - # Create a second quota - quota_two = self.create_quota(fixture=1) - - actual = self.storage.find_quotas(self.admin_context) - self.assertEqual(2, len(actual)) - - self.assertEqual(quota_two['tenant_id'], actual[1]['tenant_id']) - self.assertEqual(quota_two['resource'], actual[1]['resource']) - self.assertEqual(quota_two['hard_limit'], actual[1]['hard_limit']) - - def test_find_quotas_criterion(self): - quota_one = self.create_quota() - quota_two = self.create_quota(fixture=1) - - criterion = dict( - tenant_id=quota_one['tenant_id'], - resource=quota_one['resource'] - ) - - results = 
self.storage.find_quotas(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(quota_one['tenant_id'], results[0]['tenant_id']) - self.assertEqual(quota_one['resource'], results[0]['resource']) - self.assertEqual(quota_one['hard_limit'], results[0]['hard_limit']) - - criterion = dict( - tenant_id=quota_two['tenant_id'], - resource=quota_two['resource'] - ) - - results = self.storage.find_quotas(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(quota_two['tenant_id'], results[0]['tenant_id']) - self.assertEqual(quota_two['resource'], results[0]['resource']) - self.assertEqual(quota_two['hard_limit'], results[0]['hard_limit']) - - def test_get_quota(self): - # Create a quota - expected = self.create_quota() - actual = self.storage.get_quota(self.admin_context, expected['id']) - - self.assertEqual(expected['tenant_id'], actual['tenant_id']) - self.assertEqual(expected['resource'], actual['resource']) - self.assertEqual(expected['hard_limit'], actual['hard_limit']) - - def test_get_quota_missing(self): - with testtools.ExpectedException(exceptions.QuotaNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.get_quota(self.admin_context, uuid) - - def test_find_quota_criterion(self): - quota_one = self.create_quota() - quota_two = self.create_quota(fixture=1) - - criterion = dict( - tenant_id=quota_one['tenant_id'], - resource=quota_one['resource'] - ) - - result = self.storage.find_quota(self.admin_context, criterion) - - self.assertEqual(quota_one['tenant_id'], result['tenant_id']) - self.assertEqual(quota_one['resource'], result['resource']) - self.assertEqual(quota_one['hard_limit'], result['hard_limit']) - - criterion = dict( - tenant_id=quota_two['tenant_id'], - resource=quota_two['resource'] - ) - - result = self.storage.find_quota(self.admin_context, criterion) - - self.assertEqual(quota_two['tenant_id'], result['tenant_id']) - self.assertEqual(quota_two['resource'], result['resource']) - self.assertEqual(quota_two['hard_limit'], result['hard_limit']) - - def test_find_quota_criterion_missing(self): - expected = self.create_quota() - - criterion = dict( - tenant_id=expected['tenant_id'] + "NOT FOUND" - ) - - with testtools.ExpectedException(exceptions.QuotaNotFound): - self.storage.find_quota(self.admin_context, criterion) - - def test_update_quota(self): - # Create a quota - quota = self.create_quota(fixture=1) - - # Update the Object - quota.hard_limit = 5000 - - # Perform the update - quota = self.storage.update_quota(self.admin_context, quota) - - # Ensure the new value took - self.assertEqual(5000, quota.hard_limit) - - # Ensure the version column was incremented - self.assertEqual(2, quota.version) - - def test_update_quota_duplicate(self): - # Create two quotas - quota_one = self.create_quota(fixture=0) - quota_two = self.create_quota(fixture=1) - - # Update the Q2 object to be a duplicate of Q1 - quota_two.resource = quota_one.resource - - with testtools.ExpectedException(exceptions.DuplicateQuota): - self.storage.update_quota(self.admin_context, quota_two) - - def test_update_quota_missing(self): - quota = objects.Quota(id='caf771fc-6b05-4891-bee1-c2a48621f57b') - - with testtools.ExpectedException(exceptions.QuotaNotFound): - self.storage.update_quota(self.admin_context, quota) - - def test_delete_quota(self): - quota = self.create_quota() - - self.storage.delete_quota(self.admin_context, quota['id']) - - with testtools.ExpectedException(exceptions.QuotaNotFound): - 
self.storage.get_quota(self.admin_context, quota['id']) - - def test_delete_quota_missing(self): - with testtools.ExpectedException(exceptions.QuotaNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.delete_quota(self.admin_context, uuid) - - # TSIG Key Tests - def test_create_tsigkey(self): - values = self.get_tsigkey_fixture() - - result = self.storage.create_tsigkey( - self.admin_context, tsigkey=objects.TsigKey.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['name'], result['name']) - self.assertEqual(values['algorithm'], result['algorithm']) - self.assertEqual(values['secret'], result['secret']) - self.assertEqual(values['scope'], result['scope']) - - def test_create_tsigkey_duplicate(self): - # Create the Initial TsigKey - tsigkey_one = self.create_tsigkey() - - values = self.get_tsigkey_fixture(1) - values['name'] = tsigkey_one['name'] - - exc = self.assertRaises(rpc_dispatcher.ExpectedException, - self.create_tsigkey, - **values) - - self.assertEqual(exceptions.DuplicateTsigKey, exc.exc_info[0]) - - def test_find_tsigkeys(self): - actual = self.storage.find_tsigkeys(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a single tsigkey - tsig = self.create_tsigkey() - - actual = self.storage.find_tsigkeys(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(tsig['name'], actual[0]['name']) - self.assertEqual(tsig['algorithm'], actual[0]['algorithm']) - self.assertEqual(tsig['secret'], actual[0]['secret']) - self.assertEqual(tsig['scope'], actual[0]['scope']) - - def test_find_tsigkey(self): - # Create a single tsigkey - tsig = self.create_tsigkey() - - actual = self.storage.find_tsigkeys(self.admin_context) - self.assertEqual(1, len(actual)) - name = actual[0].name - - actual = self.storage.find_tsigkey(self.admin_context, - {'name': name}) - self.assertEqual(tsig['name'], actual['name']) - self.assertEqual(tsig['algorithm'], actual['algorithm']) - self.assertEqual(tsig['secret'], actual['secret']) - self.assertEqual(tsig['scope'], actual['scope']) - - def test_find_tsigkeys_paging(self): - # Create 10 TSIG Keys - created = [self.create_tsigkey(name='tsig-%s' % i) - for i in range(10)] - - # Ensure we can page through the results. 
- self._ensure_paging(created, self.storage.find_tsigkeys) - - def test_find_tsigkeys_criterion(self): - tsigkey_one = self.create_tsigkey(fixture=0) - tsigkey_two = self.create_tsigkey(fixture=1) - - criterion = dict( - name=tsigkey_one['name'] - ) - - results = self.storage.find_tsigkeys(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(tsigkey_one['name'], results[0]['name']) - - criterion = dict( - name=tsigkey_two['name'] - ) - - results = self.storage.find_tsigkeys(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(tsigkey_two['name'], results[0]['name']) - - def test_get_tsigkey(self): - # Create a tsigkey - expected = self.create_tsigkey() - - actual = self.storage.get_tsigkey(self.admin_context, expected['id']) - - self.assertEqual(expected['name'], actual['name']) - self.assertEqual(expected['algorithm'], actual['algorithm']) - self.assertEqual(expected['secret'], actual['secret']) - self.assertEqual(expected['scope'], actual['scope']) - - def test_get_tsigkey_missing(self): - with testtools.ExpectedException(exceptions.TsigKeyNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.get_tsigkey(self.admin_context, uuid) - - def test_update_tsigkey(self): - # Create a tsigkey - tsigkey = self.create_tsigkey(name='test-key') - - # Update the Object - tsigkey.name = 'test-key-updated' - - # Perform the update - tsigkey = self.storage.update_tsigkey(self.admin_context, tsigkey) - - # Ensure the new value took - self.assertEqual('test-key-updated', tsigkey.name) - - # Ensure the version column was incremented - self.assertEqual(2, tsigkey.version) - - def test_update_tsigkey_duplicate(self): - # Create two tsigkeys - tsigkey_one = self.create_tsigkey(fixture=0) - tsigkey_two = self.create_tsigkey(fixture=1) - - # Update the T2 object to be a duplicate of T1 - tsigkey_two.name = tsigkey_one.name - - with testtools.ExpectedException(exceptions.DuplicateTsigKey): - self.storage.update_tsigkey(self.admin_context, tsigkey_two) - - def test_update_tsigkey_missing(self): - tsigkey = objects.TsigKey(id='caf771fc-6b05-4891-bee1-c2a48621f57b') - - with testtools.ExpectedException(exceptions.TsigKeyNotFound): - self.storage.update_tsigkey(self.admin_context, tsigkey) - - def test_delete_tsigkey(self): - tsigkey = self.create_tsigkey() - - self.storage.delete_tsigkey(self.admin_context, tsigkey['id']) - - with testtools.ExpectedException(exceptions.TsigKeyNotFound): - self.storage.get_tsigkey(self.admin_context, tsigkey['id']) - - def test_delete_tsigkey_missing(self): - with testtools.ExpectedException(exceptions.TsigKeyNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.delete_tsigkey(self.admin_context, uuid) - - # Tenant Tests - def test_find_tenants(self): - context = self.get_admin_context() - one_context = context - one_context.project_id = 'One' - two_context = context - two_context.project_id = 'Two' - context.all_tenants = True - - # create 3 zones in 2 tenants - self.create_zone(fixture=0, context=one_context, tenant_id='One') - zone = self.create_zone(fixture=1, context=one_context, - tenant_id='One') - self.create_zone(fixture=2, context=two_context, tenant_id='Two') - - # Delete one of the zones. 
- self.storage.delete_zone(context, zone['id']) - - # Ensure we get accurate results - result = self.storage.find_tenants(context) - result_dict = [dict(t) for t in result] - - expected = [{ - 'id': 'One', - 'zone_count': 1, - }, { - 'id': 'Two', - 'zone_count': 1, - }] - - self.assertEqual(expected, result_dict) - - def test_get_tenant(self): - context = self.get_admin_context() - one_context = context - one_context.project_id = 1 - context.all_tenants = True - - # create 2 zones in a tenant - zone_1 = self.create_zone(fixture=0, context=one_context) - zone_2 = self.create_zone(fixture=1, context=one_context) - zone_3 = self.create_zone(fixture=2, context=one_context) - - # Delete one of the zones. - self.storage.delete_zone(context, zone_3['id']) - - result = self.storage.get_tenant(context, 1) - - self.assertEqual(1, result['id']) - self.assertEqual(2, result['zone_count']) - self.assertEqual([zone_1['name'], zone_2['name']], - sorted(result['zones'])) - - def test_count_tenants(self): - context = self.get_admin_context() - one_context = context - one_context.project_id = 1 - two_context = context - two_context.project_id = 2 - context.all_tenants = True - - # in the beginning, there should be nothing - tenants = self.storage.count_tenants(context) - self.assertEqual(0, tenants) - - # create 2 zones with 2 tenants - self.create_zone(fixture=0, context=one_context, tenant_id=1) - self.create_zone(fixture=1, context=two_context, tenant_id=2) - zone = self.create_zone(fixture=2, - context=two_context, tenant_id=2) - - # Delete one of the zones. - self.storage.delete_zone(context, zone['id']) - - tenants = self.storage.count_tenants(context) - self.assertEqual(2, tenants) - - def test_count_tenants_none_result(self): - rp = mock.Mock() - rp.fetchone.return_value = None - with mock.patch('designate.storage.sql.get_write_session', - return_value=rp): - tenants = self.storage.count_tenants(self.admin_context) - self.assertEqual(0, tenants) - - # Zone Tests - def test_create_zone(self): - pool_id = cfg.CONF['service:central'].default_pool_id - values = { - 'tenant_id': self.admin_context.project_id, - 'name': 'example.net.', - 'email': 'example@example.net', - 'pool_id': pool_id - } - - result = self.storage.create_zone( - self.admin_context, zone=objects.Zone.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(self.admin_context.project_id, result['tenant_id']) - self.assertEqual(values['name'], result['name']) - self.assertEqual(values['email'], result['email']) - self.assertEqual(pool_id, result['pool_id']) - self.assertIn('status', result) - - def test_create_zone_duplicate(self): - # Create the Initial Zone - self.create_zone() - - exc = self.assertRaises(rpc_dispatcher.ExpectedException, - self.create_zone) - - self.assertEqual(exceptions.DuplicateZone, exc.exc_info[0]) - - def test_find_zones(self): - self.config(quota_zones=20) - - actual = self.storage.find_zones(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a single zone - zone = self.create_zone() - - actual = self.storage.find_zones(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(zone['name'], actual[0]['name']) - self.assertEqual(zone['email'], actual[0]['email']) - - def test_find_zones_paging(self): - # Create 10 zones - created = [self.create_zone(name='example-%d.org.' % i) - for i in range(10)] - - # Ensure we can page through the results. 
- self._ensure_paging(created, self.storage.find_zones) - - def test_find_zones_criterion(self): - zone_one = self.create_zone() - zone_two = self.create_zone(fixture=1) - - criterion = dict( - name=zone_one['name'] - ) - - results = self.storage.find_zones(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(zone_one['name'], results[0]['name']) - self.assertEqual(zone_one['email'], results[0]['email']) - self.assertIn('status', zone_one) - - criterion = dict( - name=zone_two['name'] - ) - - results = self.storage.find_zones(self.admin_context, criterion) - - self.assertEqual(len(results), 1) - - self.assertEqual(zone_two['name'], results[0]['name']) - self.assertEqual(zone_two['email'], results[0]['email']) - self.assertIn('status', zone_two) - - def test_find_zones_all_tenants(self): - # Create two contexts with different tenant_id's - one_context = self.get_admin_context() - one_context.project_id = 1 - two_context = self.get_admin_context() - two_context.project_id = 2 - - # Create normal and all_tenants context objects - nm_context = self.get_admin_context() - at_context = self.get_admin_context() - at_context.all_tenants = True - - # Create two zones in different tenants - self.create_zone(fixture=0, context=one_context) - self.create_zone(fixture=1, context=two_context) - - # Ensure the all_tenants context see's two zones - results = self.storage.find_zones(at_context) - self.assertEqual(2, len(results)) - - # Ensure the normal context see's no zones - results = self.storage.find_zones(nm_context) - self.assertEqual(0, len(results)) - - # Ensure the tenant 1 context see's 1 zone - results = self.storage.find_zones(one_context) - self.assertEqual(1, len(results)) - - # Ensure the tenant 2 context see's 1 zone - results = self.storage.find_zones(two_context) - self.assertEqual(1, len(results)) - - def test_get_zone(self): - # Create a zone - expected = self.create_zone() - actual = self.storage.get_zone(self.admin_context, expected['id']) - - self.assertEqual(expected['name'], actual['name']) - self.assertEqual(expected['email'], actual['email']) - self.assertIn('status', actual) - - def test_get_zone_missing(self): - with testtools.ExpectedException(exceptions.ZoneNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.get_zone(self.admin_context, uuid) - - def test_get_deleted_zone(self): - context = self.get_admin_context() - context.show_deleted = True - - zone = self.create_zone(context=context) - - self.storage.delete_zone(context, zone['id']) - self.storage.get_zone(context, zone['id']) - - def test_find_zone_criterion(self): - zone_one = self.create_zone() - zone_two = self.create_zone(fixture=1) - - criterion = dict( - name=zone_one['name'] - ) - - result = self.storage.find_zone(self.admin_context, criterion) - - self.assertEqual(zone_one['name'], result['name']) - self.assertEqual(zone_one['email'], result['email']) - self.assertIn('status', zone_one) - - criterion = dict( - name=zone_two['name'] - ) - - result = self.storage.find_zone(self.admin_context, criterion) - - self.assertEqual(zone_two['name'], result['name']) - self.assertEqual(zone_two['email'], result['email']) - self.assertIn('status', zone_one) - self.assertIn('status', zone_two) - - def test_find_zone_criterion_missing(self): - expected = self.create_zone() - - criterion = dict( - name=expected['name'] + "NOT FOUND" - ) - - with testtools.ExpectedException(exceptions.ZoneNotFound): - self.storage.find_zone(self.admin_context, criterion) - - def 
test_find_zone_criterion_lessthan(self): - zone = self.create_zone() - - # Test Finding No Results (serial is not < serial) - criterion = dict( - name=zone['name'], - serial='<%s' % zone['serial'], - ) - - with testtools.ExpectedException(exceptions.ZoneNotFound): - self.storage.find_zone(self.admin_context, criterion) - - # Test Finding 1 Result (serial is < serial + 1) - criterion = dict( - name=zone['name'], - serial='<%s' % (zone['serial'] + 1), - ) - - result = self.storage.find_zone(self.admin_context, criterion) - - self.assertEqual(zone['name'], result['name']) - - def test_find_zone_criterion_greaterthan(self): - zone = self.create_zone() - - # Test Finding No Results (serial is not > serial) - criterion = dict( - name=zone['name'], - serial='>%s' % zone['serial'], - ) - - with testtools.ExpectedException(exceptions.ZoneNotFound): - self.storage.find_zone(self.admin_context, criterion) - - # Test Finding 1 Result (serial is > serial - 1) - criterion = dict( - name=zone['name'], - serial='>%s' % (zone['serial'] - 1), - ) - - result = self.storage.find_zone(self.admin_context, criterion) - - self.assertEqual(zone['name'], result['name']) - - def test_update_zone(self): - # Create a zone - zone = self.create_zone(name='example.org.') - - # Update the Object - zone.name = 'example.net.' - - # Perform the update - zone = self.storage.update_zone(self.admin_context, zone) - - # Ensure the new valie took - self.assertEqual('example.net.', zone.name) - - # Ensure the version column was incremented - self.assertEqual(2, zone.version) - - def test_update_zone_duplicate(self): - # Create two zones - zone_one = self.create_zone(fixture=0) - zone_two = self.create_zone(fixture=1) - - # Update the D2 object to be a duplicate of D1 - zone_two.name = zone_one.name - - with testtools.ExpectedException(exceptions.DuplicateZone): - self.storage.update_zone(self.admin_context, zone_two) - - def test_update_zone_missing(self): - zone = objects.Zone(id='caf771fc-6b05-4891-bee1-c2a48621f57b') - with testtools.ExpectedException(exceptions.ZoneNotFound): - self.storage.update_zone(self.admin_context, zone) - - def test_delete_zone(self): - zone = self.create_zone() - - self.storage.delete_zone(self.admin_context, zone['id']) - - with testtools.ExpectedException(exceptions.ZoneNotFound): - self.storage.get_zone(self.admin_context, zone['id']) - - def test_delete_zone_missing(self): - with testtools.ExpectedException(exceptions.ZoneNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.delete_zone(self.admin_context, uuid) - - def test_count_zones(self): - # in the beginning, there should be nothing - zones = self.storage.count_zones(self.admin_context) - self.assertEqual(0, zones) - - # Create a single zone - self.create_zone() - - # count 'em up - zones = self.storage.count_zones(self.admin_context) - - # well, did we get 1? 
- self.assertEqual(1, zones) - - def test_count_zones_none_result(self): - rp = mock.Mock() - rp.fetchone.return_value = None - - with mock.patch('designate.storage.sql.get_write_session', - return_value=rp): - zones = self.storage.count_zones(self.admin_context) - self.assertEqual(0, zones) - - def test_create_recordset(self): - zone = self.create_zone() - - values = { - 'name': 'www.%s' % zone['name'], - 'type': 'A' - } - - result = self.storage.create_recordset( - self.admin_context, - zone['id'], - recordset=objects.RecordSet.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['name'], result['name']) - self.assertEqual(values['type'], result['type']) - - def test_create_recordset_duplicate(self): - zone = self.create_zone() - - # Create the First RecordSet - self.create_recordset(zone) - - exc = self.assertRaises(rpc_dispatcher.ExpectedException, - self.create_recordset, - zone) - - self.assertEqual(exceptions.DuplicateRecordSet, exc.exc_info[0]) - - def test_create_recordset_with_records(self): - zone = self.create_zone() - - recordset = objects.RecordSet( - name='www.%s' % zone['name'], - type='A', - records=objects.RecordList(objects=[ - objects.Record(data='192.0.2.1'), - objects.Record(data='192.0.2.2'), - ]) - ) - - recordset = self.storage.create_recordset( - self.admin_context, zone['id'], recordset) - - # Ensure recordset.records is a RecordList instance - self.assertIsInstance(recordset.records, objects.RecordList) - - # Ensure two Records are attached to the RecordSet correctly - self.assertEqual(2, len(recordset.records)) - self.assertIsInstance(recordset.records[0], objects.Record) - self.assertIsInstance(recordset.records[1], objects.Record) - - # Ensure the Records have been saved by checking they have an ID - self.assertIsNotNone(recordset.records[0].id) - self.assertIsNotNone(recordset.records[1].id) - - def test_find_recordsets(self): - zone = self.create_zone() - - criterion = {'zone_id': zone['id']} - - actual = self.storage.find_recordsets(self.admin_context, criterion) - self.assertEqual(2, len(actual)) - - # Create a single recordset - recordset_one = self.create_recordset(zone) - - actual = self.storage.find_recordsets(self.admin_context, criterion) - self.assertEqual(3, len(actual)) - - self.assertEqual(recordset_one['name'], actual[2]['name']) - self.assertEqual(recordset_one['type'], actual[2]['type']) - - def test_find_recordsets_paging(self): - zone = self.create_zone(name='example.org.') - - # Create 10 RecordSets - created = [self.create_recordset(zone, name='r-%d.example.org.' % i) - for i in range(10)] - - # Add in the SOA and NS recordsets that are automatically created - soa = self.storage.find_recordset(self.admin_context, - criterion={'zone_id': zone['id'], - 'type': "SOA"}) - ns = self.storage.find_recordset(self.admin_context, - criterion={'zone_id': zone['id'], - 'type': "NS"}) - created.insert(0, ns) - created.insert(0, soa) - - # Ensure we can page through the results. 
- self._ensure_paging(created, self.storage.find_recordsets) - - def test_find_recordsets_criterion(self): - zone = self.create_zone() - - recordset_one = self.create_recordset(zone, type='A', fixture=0) - self.create_recordset(zone, fixture=1) - - criterion = dict( - zone_id=zone['id'], - name=recordset_one['name'], - ) - - results = self.storage.find_recordsets(self.admin_context, - criterion) - - self.assertEqual(1, len(results)) - - criterion = dict( - zone_id=zone['id'], - type='A', - ) - - results = self.storage.find_recordsets(self.admin_context, - criterion) - - self.assertEqual(2, len(results)) - - def test_find_recordsets_criterion_wildcard(self): - zone = self.create_zone() - - values = {'name': 'one.%s' % zone['name']} - - self.create_recordset(zone, **values) - - criterion = dict( - zone_id=zone['id'], - name="%%%(name)s" % {"name": zone['name']}, - ) - - results = self.storage.find_recordsets(self.admin_context, criterion) - - # Should be 3, as SOA and NS recordsets are automatically created - self.assertEqual(3, len(results)) - - def test_find_recordsets_with_records(self): - zone = self.create_zone() - - records = [ - objects.Record.from_dict({"data": "10.0.0.1"}), - objects.Record.from_dict({"data": "10.0.0.2"}), - objects.Record.from_dict({"data": "10.0.0.3"}) - ] - - recordset = self.create_recordset(zone, records=records) - - criterion = dict( - id=recordset.id, - ) - - # Find the RecordSet - results = self.storage.find_recordsets(self.admin_context, criterion) - - # Ensure we only have one result - self.assertEqual(1, len(results)) - - recordset = results[0] - - # Ensure recordset.records is a RecordList instance - self.assertIsInstance(recordset.records, objects.RecordList) - - # Ensure three Records are attached to the RecordSet correctly - self.assertEqual(3, len(recordset.records)) - - records = [] - for record in recordset.records: - self.assertIsInstance(record, objects.Record) - self.assertNotIn(record, records) - records.append(record) - - def test_find_recordset_criterion(self): - zone = self.create_zone() - expected = self.create_recordset(zone) - - criterion = dict( - zone_id=zone['id'], - name=expected['name'], - ) - - actual = self.storage.find_recordset(self.admin_context, criterion) - - self.assertEqual(expected['name'], actual['name']) - self.assertEqual(expected['type'], actual['type']) - - def test_find_recordset_criterion_missing(self): - zone = self.create_zone() - expected = self.create_recordset(zone) - - criterion = dict( - name=expected['name'] + "NOT FOUND" - ) - - with testtools.ExpectedException(exceptions.RecordSetNotFound): - self.storage.find_recordset(self.admin_context, criterion) - - def test_find_recordset_criterion_with_records(self): - zone = self.create_zone() - - records = [ - objects.Record.from_dict(self.get_record_fixture('A', fixture=0)), - objects.Record.from_dict(self.get_record_fixture('A', fixture=1)) - ] - recordset = self.create_recordset(zone, records=records) - - criterion = dict( - id=recordset.id, - ) - - # Fetch the RecordSet again - recordset = self.storage.find_recordset(self.admin_context, criterion) - - # Ensure recordset.records is a RecordList instance - self.assertIsInstance(recordset.records, objects.RecordList) - - # Ensure two Records are attached to the RecordSet correctly - self.assertEqual(2, len(recordset.records)) - self.assertIsInstance(recordset.records[0], objects.Record) - self.assertIsInstance(recordset.records[1], objects.Record) - - def test_update_recordset(self): - zone = self.create_zone() - - 
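# update_recordset should persist the field change and the appended record, and bump the version column. -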
# Create a recordset - recordset = self.create_recordset(zone) - - # Update the Object - recordset.ttl = 1800 - - # Change records as well - recordset.records.append(objects.Record(data="10.0.0.1")) - - # Perform the update - recordset = self.storage.update_recordset(self.admin_context, - recordset) - - # Ensure the new value took - self.assertEqual(1800, recordset.ttl) - - # Ensure the version column was incremented - self.assertEqual(2, recordset.version) - - def test_update_recordset_duplicate(self): - zone = self.create_zone() - - # Create two recordsets - recordset_one = self.create_recordset(zone, type='A') - recordset_two = self.create_recordset(zone, type='A', fixture=1) - - # Update the R2 object to be a duplicate of R1 - recordset_two.name = recordset_one.name - - with testtools.ExpectedException(exceptions.DuplicateRecordSet): - self.storage.update_recordset(self.admin_context, recordset_two) - - def test_update_recordset_missing(self): - recordset = objects.RecordSet( - id='caf771fc-6b05-4891-bee1-c2a48621f57b') - - with testtools.ExpectedException(exceptions.RecordSetNotFound): - self.storage.update_recordset(self.admin_context, recordset) - - def test_update_recordset_with_record_create(self): - zone = self.create_zone() - - # Create a RecordSet - recordset = self.create_recordset(zone, 'A', records=[]) - - # Append two new Records - recordset.records.append(objects.Record(data='192.0.2.1')) - recordset.records.append(objects.Record(data='192.0.2.2')) - - # Perform the update - self.storage.update_recordset(self.admin_context, recordset) - - # Fetch the RecordSet again - recordset = self.storage.find_recordset(self.admin_context, - {'id': recordset.id}) - - # Ensure two Records are attached to the RecordSet correctly - self.assertEqual(2, len(recordset.records)) - self.assertIsInstance(recordset.records[0], objects.Record) - self.assertIsInstance(recordset.records[1], objects.Record) - - # Ensure the Records have been saved by checking they have an ID - self.assertIsNotNone(recordset.records[0].id) - self.assertIsNotNone(recordset.records[1].id) - - def test_update_recordset_with_record_delete(self): - zone = self.create_zone() - - # Create a RecordSet and two Records - records = [ - objects.Record.from_dict(self.get_record_fixture('A', fixture=0)), - objects.Record.from_dict(self.get_record_fixture('A', fixture=1)) - ] - recordset = self.create_recordset(zone, records=records) - - # Fetch the RecordSet again - recordset = self.storage.find_recordset(self.admin_context, - {'id': recordset.id}) - - # Remove one of the Records - recordset.records.pop(0) - - # Ensure only one Record is attached to the RecordSet - self.assertEqual(1, len(recordset.records)) - - # Perform the update - self.storage.update_recordset(self.admin_context, recordset) - - # Fetch the RecordSet again - recordset = self.storage.find_recordset(self.admin_context, - {'id': recordset.id}) - - # Ensure only one Record is attached to the RecordSet - self.assertEqual(1, len(recordset.records)) - self.assertIsInstance(recordset.records[0], objects.Record) - - def test_update_recordset_with_record_update(self): - zone = self.create_zone() - - # Create a RecordSet and two Records - records = [ - objects.Record.from_dict(self.get_record_fixture('A', fixture=0)), - objects.Record.from_dict(self.get_record_fixture('A', fixture=1)) - ] - recordset = self.create_recordset(zone, records=records) - - # Fetch the RecordSet again - recordset = self.storage.find_recordset(self.admin_context, - {'id': recordset.id}) - - # 
Update one of the Records - updated_record_id = recordset.records[0].id - recordset.records[0].data = '192.0.2.255' - - # Perform the update - self.storage.update_recordset(self.admin_context, recordset) - - # Fetch the RecordSet again - recordset = self.storage.find_recordset(self.admin_context, - {'id': recordset.id}) - - # Ensure the Record has been updated - for record in recordset.records: - if record.id != updated_record_id: - continue - - self.assertEqual('192.0.2.255', record.data) - return # Exits this test early as we succeeded - - raise Exception('Updated record not found') - - def test_delete_recordset(self): - zone = self.create_zone() - - # Create a recordset - recordset = self.create_recordset(zone) - - self.storage.delete_recordset(self.admin_context, recordset['id']) - - with testtools.ExpectedException(exceptions.RecordSetNotFound): - self.storage.find_recordset(self.admin_context, - criterion={'id': recordset['id']}) - - def test_delete_recordset_missing(self): - with testtools.ExpectedException(exceptions.RecordSetNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.delete_recordset(self.admin_context, uuid) - - def test_count_recordsets(self): - # in the beginning, there should be nothing - recordsets = self.storage.count_recordsets(self.admin_context) - self.assertEqual(0, recordsets) - - # Create a single zone & recordset - zone = self.create_zone() - self.create_recordset(zone) - - # we should have 3 recordsets now, including SOA & NS - recordsets = self.storage.count_recordsets(self.admin_context) - self.assertEqual(3, recordsets) - - # Delete the zone, we should be back to 0 recordsets - self.storage.delete_zone(self.admin_context, zone.id) - recordsets = self.storage.count_recordsets(self.admin_context) - self.assertEqual(0, recordsets) - - def test_count_recordsets_none_result(self): - rp = mock.Mock() - rp.fetchone.return_value = None - with mock.patch('designate.storage.sql.get_write_session', - return_value=rp): - recordsets = self.storage.count_recordsets(self.admin_context) - self.assertEqual(0, recordsets) - - def test_find_records(self): - zone = self.create_zone() - recordset = self.create_recordset(zone, records=[]) - - criterion = { - 'zone_id': zone['id'], - 'recordset_id': recordset['id'] - } - - actual = self.storage.find_records(self.admin_context, criterion) - self.assertEqual(0, len(actual)) - - # Create a single record - records = [ - objects.Record.from_dict(self.get_record_fixture('A', fixture=0)), - ] - recordset.records = records - - self.central_service.update_recordset(self.admin_context, recordset) - - recordset = self.central_service.get_recordset( - self.admin_context, zone['id'], recordset['id'] - ) - record = recordset.records[0] - - actual = self.storage.find_records(self.admin_context, criterion) - self.assertEqual(1, len(actual)) - - self.assertEqual(record['data'], actual[0]['data']) - - def test_find_records_paging(self): - zone = self.create_zone() - - records = [] - for i in range(10): - records.append( - objects.Record.from_dict(({'data': '192.0.2.%d' % i})) - ) - - self.create_recordset(zone, type='A', records=records) - - # Add in the SOA and NS records that are automatically created - soa = self.storage.find_recordset(self.admin_context, - criterion={'zone_id': zone['id'], - 'type': "SOA"}) - ns = self.storage.find_recordset(self.admin_context, - criterion={'zone_id': zone['id'], - 'type': "NS"}) - for r in ns['records']: - records.insert(0, r) - records.insert(0, soa['records'][0]) - - # Ensure we can 
page through the results. - self._ensure_paging(records, self.storage.find_records) - - def test_find_records_criterion(self): - zone = self.create_zone() - record_one = objects.Record.from_dict( - self.get_record_fixture('A', fixture=0) - ) - records = [ - record_one, - objects.Record.from_dict(self.get_record_fixture('A', fixture=1)) - ] - recordset = self.create_recordset(zone, records=records) - - criterion = dict( - data=record_one['data'], - zone_id=zone['id'], - recordset_id=recordset['id'], - ) - - results = self.storage.find_records(self.admin_context, criterion) - self.assertEqual(1, len(results)) - - criterion = dict( - zone_id=zone['id'], - recordset_id=recordset['id'], - ) - - results = self.storage.find_records(self.admin_context, criterion) - - self.assertEqual(2, len(results)) - - def test_find_records_criterion_wildcard(self): - zone = self.create_zone() - - records = [objects.Record.from_dict({'data': '127.0.0.1'})] - recordset = self.create_recordset(zone, type='A', records=records) - - criterion = dict( - zone_id=zone['id'], - recordset_id=recordset['id'], - data="%.0.0.1", - ) - - results = self.storage.find_records(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - def test_get_record(self): - zone = self.create_zone() - recordset = self.create_recordset(zone) - expected = recordset.records[0] - - actual = self.storage.get_record(self.admin_context, expected['id']) - - self.assertEqual(expected['data'], actual['data']) - self.assertIn('status', actual) - - def test_get_record_missing(self): - with testtools.ExpectedException(exceptions.RecordNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.get_record(self.admin_context, uuid) - - def test_find_record_criterion(self): - zone = self.create_zone() - recordset = self.create_recordset(zone) - expected = recordset.records[0] - - criterion = dict( - zone_id=zone['id'], - recordset_id=recordset['id'], - data=expected['data'], - ) - - actual = self.storage.find_record(self.admin_context, criterion) - - self.assertEqual(expected['data'], actual['data']) - self.assertIn('status', actual) - - def test_find_record_criterion_missing(self): - zone = self.create_zone() - recordset = self.create_recordset(zone) - expected = recordset.records[0] - - criterion = dict( - zone_id=zone['id'], - data=expected['data'] + "NOT FOUND", - ) - - with testtools.ExpectedException(exceptions.RecordNotFound): - self.storage.find_record(self.admin_context, criterion) - - def test_update_record(self): - zone = self.create_zone() - recordset = self.create_recordset(zone, type='A') - record = recordset.records[0] - - # Update the Object - record.data = '192.0.2.255' - - # Perform the update - record = self.storage.update_record(self.admin_context, record) - - # Ensure the new value took - self.assertEqual('192.0.2.255', record.data) - - # Ensure the version column was incremented - self.assertEqual(2, record.version) - - def test_update_record_duplicate(self): - zone = self.create_zone() - - record_one = objects.Record.from_dict( - self.get_record_fixture('A', fixture=0) - ) - record_two = objects.Record.from_dict( - self.get_record_fixture('A', fixture=1) - ) - - records = [ - record_one, - record_two - ] - - self.create_recordset(zone, records=records) - - # Update the R2 object to be a duplicate of R1 - record_two.data = record_one.data - - with testtools.ExpectedException(exceptions.DuplicateRecord): - self.storage.update_record(self.admin_context, record_two) - - def test_update_record_missing(self): - 
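# Updating a record that was never stored should raise RecordNotFound. -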
record = objects.Record(id='caf771fc-6b05-4891-bee1-c2a48621f57b') - - with testtools.ExpectedException(exceptions.RecordNotFound): - self.storage.update_record(self.admin_context, record) - - def test_delete_record(self): - zone = self.create_zone() - recordset = self.create_recordset(zone) - record = recordset.records[0] - - self.storage.delete_record(self.admin_context, record['id']) - - with testtools.ExpectedException(exceptions.RecordNotFound): - self.storage.get_record(self.admin_context, record['id']) - - def test_delete_record_missing(self): - with testtools.ExpectedException(exceptions.RecordNotFound): - uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' - self.storage.delete_record(self.admin_context, uuid) - - def test_count_records(self): - # in the beginning, there should be nothing - records = self.storage.count_records(self.admin_context) - self.assertEqual(0, records) - - # Create a single zone & record - zone = self.create_zone() - self.create_recordset(zone) - - # we should have 3 records now, including NS and SOA - records = self.storage.count_records(self.admin_context) - self.assertEqual(3, records) - - # Delete the zone, we should be back to 0 records - self.storage.delete_zone(self.admin_context, zone.id) - records = self.storage.count_records(self.admin_context) - self.assertEqual(0, records) - - def test_count_records_none_result(self): - rp = mock.Mock() - rp.fetchone.return_value = None - with mock.patch('designate.storage.sql.get_write_session', - return_value=rp): - records = self.storage.count_records(self.admin_context) - self.assertEqual(0, records) - - # TLD Tests - def test_create_tld(self): - values = { - 'name': 'com', - 'description': 'This is a comment.' - } - - result = self.storage.create_tld( - self.admin_context, objects.Tld.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - self.assertIsNotNone(result['version']) - self.assertEqual(values['name'], result['name']) - self.assertEqual(values['description'], result['description']) - - def test_create_tld_with_duplicate(self): - # Create the First Tld - self.create_tld(fixture=0) - - exc = self.assertRaises(rpc_dispatcher.ExpectedException, - self.create_tld, - fixture=0) - - self.assertEqual(exceptions.DuplicateTld, exc.exc_info[0]) - - def test_find_tlds(self): - - actual = self.storage.find_tlds(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a single Tld - tld = self.create_tld(fixture=0) - - actual = self.storage.find_tlds(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(tld['name'], actual[0]['name']) - self.assertEqual(tld['description'], actual[0]['description']) - - def test_find_tlds_paging(self): - # Create 10 Tlds - created = [self.create_tld(name='org%d' % i) - for i in range(10)] - - # Ensure we can page through the results. 
- self._ensure_paging(created, self.storage.find_tlds) - - def test_find_tlds_with_criterion(self): - tld_one = self.create_tld(fixture=0) - tld_two = self.create_tld(fixture=1) - - criterion_one = dict(name=tld_one['name']) - - results = self.storage.find_tlds(self.admin_context, - criterion_one) - self.assertEqual(1, len(results)) - - self.assertEqual(tld_one['name'], results[0]['name']) - - criterion_two = dict(name=tld_two['name']) - - results = self.storage.find_tlds(self.admin_context, - criterion_two) - self.assertEqual(1, len(results)) - - self.assertEqual(tld_two['name'], results[0]['name']) - - def test_get_tld(self): - # Create a tld - expected = self.create_tld() - actual = self.storage.get_tld(self.admin_context, expected['id']) - - self.assertEqual(expected['name'], actual['name']) - - def test_get_tld_missing(self): - with testtools.ExpectedException(exceptions.TldNotFound): - uuid = '4c8e7f82-3519-4bf7-8940-a66a4480f223' - self.storage.get_tld(self.admin_context, uuid) - - def test_find_tld_criterion(self): - # Create two tlds - tld_one = self.create_tld(fixture=0) - tld_two = self.create_tld(fixture=1) - - criterion = dict(name=tld_one['name']) - - # Find tld_one using its name as criterion - result = self.storage.find_tld(self.admin_context, criterion) - - # Assert names match - self.assertEqual(tld_one['name'], result['name']) - - # Repeat with tld_two - criterion = dict(name=tld_two['name']) - - result = self.storage.find_tld(self.admin_context, criterion) - - self.assertEqual(tld_two['name'], result['name']) - - def test_find_tld_criterion_missing(self): - expected = self.create_tld() - - criterion = dict(name=expected['name'] + "NOT FOUND") - - with testtools.ExpectedException(exceptions.TldNotFound): - self.storage.find_tld(self.admin_context, criterion) - - def test_update_tld(self): - # Create a tld - tld = self.create_tld(name='net') - - # Update the tld - tld.name = 'org' - - # Update storage - tld = self.storage.update_tld(self.admin_context, tld) - - # Verify the new value - self.assertEqual('org', tld.name) - - # Ensure the version column was incremented - self.assertEqual(2, tld.version) - - def test_update_tld_duplicate(self): - # Create two tlds - tld_one = self.create_tld(fixture=0) - tld_two = self.create_tld(fixture=1) - - # Update tld_two to be a duplicate of tld_one - tld_two.name = tld_one.name - - with testtools.ExpectedException(exceptions.DuplicateTld): - self.storage.update_tld(self.admin_context, tld_two) - - def test_update_tld_missing(self): - tld = objects.Tld(id='486f9cbe-b8b6-4d8c-8275-1a6e47b13e00') - with testtools.ExpectedException(exceptions.TldNotFound): - self.storage.update_tld(self.admin_context, tld) - - def test_delete_tld(self): - # Create a tld - tld = self.create_tld() - - # Delete the tld - self.storage.delete_tld(self.admin_context, tld['id']) - - # Verify that it's deleted - with testtools.ExpectedException(exceptions.TldNotFound): - self.storage.get_tld(self.admin_context, tld['id']) - - def test_delete_tld_missing(self): - with testtools.ExpectedException(exceptions.TldNotFound): - uuid = 'cac1fc02-79b2-4e62-a1a4-427b6790bbe6' - self.storage.delete_tld(self.admin_context, uuid) - - # Blacklist tests - def test_create_blacklist(self): - values = { - 'pattern': "^([A-Za-z0-9_\\-]+\\.)*example\\.com\\.$", - 'description': 'This is a comment.' 
- } - - result = self.storage.create_blacklist( - self.admin_context, objects.Blacklist.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNotNone(result['version']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['pattern'], result['pattern']) - self.assertEqual(values['description'], result['description']) - - def test_create_blacklist_duplicate(self): - # Create the initial Blacklist - self.create_blacklist(fixture=0) - - exc = self.assertRaises(rpc_dispatcher.ExpectedException, - self.create_blacklist, - fixture=0) - - self.assertEqual(exceptions.DuplicateBlacklist, exc.exc_info[0]) - - def test_find_blacklists(self): - # Verify that there are no blacklists created - actual = self.storage.find_blacklists(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a Blacklist - blacklist = self.create_blacklist(fixture=0) - - actual = self.storage.find_blacklists(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(blacklist['pattern'], actual[0]['pattern']) - - def test_find_blacklists_paging(self): - # Create 10 Blacklists - created = [self.create_blacklist(pattern='^example-%d.org.' % i) - for i in range(10)] - - # Ensure we can page through the results. - self._ensure_paging(created, self.storage.find_blacklists) - - def test_find_blacklists_with_criterion(self): - # Create two blacklists - blacklist_one = self.create_blacklist(fixture=0) - blacklist_two = self.create_blacklist(fixture=1) - - # Verify blacklist_one - criterion = dict(pattern=blacklist_one['pattern']) - - results = self.storage.find_blacklists(self.admin_context, - criterion) - self.assertEqual(1, len(results)) - self.assertEqual(blacklist_one['pattern'], results[0]['pattern']) - - # Verify blacklist_two - criterion = dict(pattern=blacklist_two['pattern']) - - results = self.storage.find_blacklists(self.admin_context, - criterion) - self.assertEqual(1, len(results)) - self.assertEqual(blacklist_two['pattern'], results[0]['pattern']) - - def test_get_blacklist(self): - expected = self.create_blacklist(fixture=0) - actual = self.storage.get_blacklist(self.admin_context, expected['id']) - - self.assertEqual(expected['pattern'], actual['pattern']) - - def test_get_blacklist_missing(self): - with testtools.ExpectedException(exceptions.BlacklistNotFound): - uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' - self.storage.get_blacklist(self.admin_context, uuid) - - def test_find_blacklist_criterion(self): - blacklist_one = self.create_blacklist(fixture=0) - blacklist_two = self.create_blacklist(fixture=1) - - criterion = dict(pattern=blacklist_one['pattern']) - - result = self.storage.find_blacklist(self.admin_context, criterion) - - self.assertEqual(blacklist_one['pattern'], result['pattern']) - - criterion = dict(pattern=blacklist_two['pattern']) - - result = self.storage.find_blacklist(self.admin_context, criterion) - - self.assertEqual(blacklist_two['pattern'], result['pattern']) - - def test_find_blacklist_criterion_missing(self): - expected = self.create_blacklist(fixture=0) - - criterion = dict(pattern=expected['pattern'] + "NOT FOUND") - - with testtools.ExpectedException(exceptions.BlacklistNotFound): - self.storage.find_blacklist(self.admin_context, criterion) - - def test_update_blacklist(self): - blacklist = self.create_blacklist(pattern='^example.uk.') - - # Update the blacklist - blacklist.pattern = '^example.uk.co.' 
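- # Persisting the change should update the pattern and bump the version column.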
- - blacklist = self.storage.update_blacklist(self.admin_context, - blacklist) - # Verify the new values - self.assertEqual('^example.uk.co.', blacklist.pattern) - - # Ensure the version column was incremented - self.assertEqual(2, blacklist.version) - - def test_update_blacklist_duplicate(self): - # Create two blacklists - blacklist_one = self.create_blacklist(fixture=0) - blacklist_two = self.create_blacklist(fixture=1) - - # Update the second one to be a duplicate of the first - blacklist_two.pattern = blacklist_one.pattern - - with testtools.ExpectedException(exceptions.DuplicateBlacklist): - self.storage.update_blacklist(self.admin_context, - blacklist_two) - - def test_update_blacklist_missing(self): - blacklist = objects.Blacklist( - id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') - - with testtools.ExpectedException(exceptions.BlacklistNotFound): - self.storage.update_blacklist(self.admin_context, blacklist) - - def test_delete_blacklist(self): - blacklist = self.create_blacklist(fixture=0) - - self.storage.delete_blacklist(self.admin_context, blacklist['id']) - - with testtools.ExpectedException(exceptions.BlacklistNotFound): - self.storage.get_blacklist(self.admin_context, blacklist['id']) - - def test_delete_blacklist_missing(self): - with testtools.ExpectedException(exceptions.BlacklistNotFound): - uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' - self.storage.delete_blacklist(self.admin_context, uuid) - - # Pool Tests - def test_create_pool(self): - values = { - 'name': 'test1', - 'tenant_id': self.admin_context.project_id, - 'provisioner': 'UNMANAGED' - } - - result = self.storage.create_pool( - self.admin_context, objects.Pool.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['name'], result['name']) - self.assertEqual(values['tenant_id'], result['tenant_id']) - self.assertEqual(values['provisioner'], result['provisioner']) - - def test_create_pool_with_all_relations(self): - values = { - 'name': 'Pool', - 'description': 'Pool description', - 'attributes': [{'key': 'scope', 'value': 'public'}], - 'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}], - 'nameservers': [{'host': "192.0.2.1", 'port': 53}], - 'targets': [{ - 'type': "fake", - 'description': 'FooBar', - 'masters': [{'host': "192.0.2.2", - 'port': DEFAULT_MDNS_PORT}], - 'options': [{'key': 'fake_option', 'value': 'fake_value'}], - }], - 'also_notifies': [{'host': "192.0.2.3", 'port': 53}] - } - - # Create the Pool, and check all values are OK - result = self.storage.create_pool( - self.admin_context, objects.Pool.from_dict(values)) - self.assertNestedDictContainsSubset(values, result.to_dict()) - - # Re-Fetch the pool, and check everything is still OK - result = self.storage.get_pool(self.admin_context, result.id) - self.assertNestedDictContainsSubset(values, result.to_dict()) - - def test_create_pool_duplicate(self): - # Create the first pool - self.create_pool(fixture=0) - - # Creating a second, duplicate pool should raise an exception - exc = self.assertRaises(rpc_dispatcher.ExpectedException, - self.create_pool, - fixture=0) - - self.assertEqual(exceptions.DuplicatePool, exc.exc_info[0]) - - def test_find_pools(self): - # Verify that there are no pools, except for the default pool - actual = self.storage.find_pools(self.admin_context) - self.assertEqual(1, len(actual)) - - # Create a Pool - pool = self.create_pool(fixture=0) - - actual = self.storage.find_pools(self.admin_context) - self.assertEqual(2, 
len(actual)) - - # Test against the second pool, since the first is the default pool - self.assertEqual(pool['name'], actual[1]['name']) - - def test_find_pools_paging(self): - # Get any pools that are already created, including default - pools = self.storage.find_pools(self.admin_context) - - # Create 10 Pools - created = [self.create_pool(name='test%d' % i) - for i in range(10)] - - # Add in the existing pools - - for p in pools: - created.insert(0, p) - - # Ensure we can page through the results - self._ensure_paging(created, self.storage.find_pools) - - def test_find_pools_criterion(self): - # Create two pools - pool_one = self.create_pool(fixture=0) - pool_two = self.create_pool(fixture=1) - - # Verify pool_one - criterion = dict(name=pool_one['name']) - - results = self.storage.find_pools(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(pool_one['name'], results[0]['name']) - self.assertEqual(pool_one['provisioner'], results[0]['provisioner']) - - criterion = dict(name=pool_two['name']) - - results = self.storage.find_pools(self.admin_context, criterion) - - self.assertEqual(1, len(results)) - - self.assertEqual(pool_two['name'], results[0]['name']) - self.assertEqual(pool_two['provisioner'], results[0]['provisioner']) - - def test_get_pool(self): - # Create a pool - expected = self.create_pool() - actual = self.storage.get_pool(self.admin_context, expected['id']) - - self.assertEqual(expected['name'], actual['name']) - self.assertEqual(expected['provisioner'], actual['provisioner']) - - def test_get_pool_missing(self): - with testtools.ExpectedException(exceptions.PoolNotFound): - uuid = 'c28893e3-eb87-4562-aa29-1f0e835d749b' - self.storage.get_pool(self.admin_context, uuid) - - def test_find_pool_criterion(self): - pool_one = self.create_pool(fixture=0) - pool_two = self.create_pool(fixture=1) - - criterion = dict(name=pool_one['name']) - - result = self.storage.find_pool(self.admin_context, criterion) - - self.assertEqual(pool_one['name'], result['name']) - self.assertEqual(pool_one['provisioner'], result['provisioner']) - - criterion = dict(name=pool_two['name']) - - result = self.storage.find_pool(self.admin_context, criterion) - - self.assertEqual(pool_two['name'], result['name']) - self.assertEqual(pool_two['provisioner'], result['provisioner']) - - def test_find_pool_criterion_missing(self): - expected = self.create_pool() - - criterion = dict(name=expected['name'] + "NOT FOUND") - - with testtools.ExpectedException(exceptions.PoolNotFound): - self.storage.find_pool(self.admin_context, criterion) - - def test_update_pool(self): - # Create a pool - pool = self.create_pool(name='test1') - - # Update the Pool - pool.name = 'test3' - - # Perform the update - pool = self.storage.update_pool(self.admin_context, pool) - - # Verify the new value is there - self.assertEqual('test3', pool.name) - - def test_update_pool_duplicate(self): - # Create two pools - pool_one = self.create_pool(fixture=0) - pool_two = self.create_pool(fixture=1) - - # Update pool_two to be a duplicate of pool_one - pool_two.name = pool_one.name - - with testtools.ExpectedException(exceptions.DuplicatePool): - self.storage.update_pool(self.admin_context, pool_two) - - def test_update_pool_missing(self): - pool = objects.Pool(id='8806f871-5140-43f4-badd-2bbc5715b013') - - with testtools.ExpectedException(exceptions.PoolNotFound): - self.storage.update_pool(self.admin_context, pool) - - def test_update_pool_with_all_relations(self): - values = { - 'name': 'Pool-A', - 
'description': 'Pool-A description', - 'attributes': [{'key': 'scope', 'value': 'public'}], - 'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}], - 'nameservers': [{'host': "192.0.2.1", 'port': 53}], - 'targets': [{ - 'type': "fake", - 'description': 'FooBar', - 'masters': [{'host': "192.0.2.2", - 'port': DEFAULT_MDNS_PORT}], - 'options': [{'key': 'fake_option', 'value': 'fake_value'}], - }], - 'also_notifies': [{'host': "192.0.2.3", 'port': 53}] - } - - # Create the Pool - result = self.storage.create_pool( - self.admin_context, objects.Pool.from_dict(values)) - - created_pool_id = result.id - - # Prepare a new set of data for the Pool, copying over the ID so - # we trigger an update rather than a create. - values = { - 'id': created_pool_id, - 'name': 'Pool-B', - 'description': 'Pool-B description', - 'attributes': [{'key': 'scope', 'value': 'private'}], - 'ns_records': [{'priority': 1, 'hostname': 'ns2.example.org.'}], - 'nameservers': [{'host': "192.0.2.5", 'port': 53}], - 'targets': [{ - 'type': "fake", - 'description': 'NewFooBar', - 'masters': [{'host': "192.0.2.2", - 'port': DEFAULT_MDNS_PORT}], - 'options': [{'key': 'fake_option', 'value': 'fake_value'}], - }, { - 'type': "fake", - 'description': 'FooBar2', - 'masters': [{'host': "192.0.2.7", 'port': 5355}], - 'options': [{'key': 'fake_option', 'value': 'new_fake_value'}], - }], - 'also_notifies': [] - } - - # Update the pool, and check everything is OK - result = self.storage.update_pool( - self.admin_context, objects.Pool.from_dict(values)) - self.assertNestedDictContainsSubset(values, result.to_dict()) - - # Re-Fetch the pool, and check everything is still OK - result = self.storage.get_pool(self.admin_context, created_pool_id) - self.assertNestedDictContainsSubset(values, result.to_dict()) - - def test_delete_pool(self): - pool = self.create_pool() - - self.storage.delete_pool(self.admin_context, pool['id']) - - with testtools.ExpectedException(exceptions.PoolNotFound): - self.storage.get_pool(self.admin_context, pool['id']) - - def test_delete_pool_missing(self): - with testtools.ExpectedException(exceptions.PoolNotFound): - uuid = '203ca44f-c7e7-4337-9a02-0d735833e6aa' - self.storage.delete_pool(self.admin_context, uuid) - - def test_create_pool_ns_record_duplicate(self): - # Create a pool - pool = self.create_pool(name='test1') - - ns = objects.PoolNsRecord(priority=1, hostname="ns.example.io.") - self.storage.create_pool_ns_record( - self.admin_context, pool.id, ns) - - ns2 = objects.PoolNsRecord(priority=2, hostname="ns.example.io.") - with testtools.ExpectedException(exceptions.DuplicatePoolNsRecord): - self.storage.create_pool_ns_record( - self.admin_context, pool.id, ns2) - - def test_update_pool_ns_record_duplicate(self): - # Create a pool - pool = self.create_pool(name='test1') - - ns1 = objects.PoolNsRecord(priority=1, hostname="ns1.example.io.") - self.storage.create_pool_ns_record( - self.admin_context, pool.id, ns1) - - ns2 = objects.PoolNsRecord(priority=2, hostname="ns2.example.io.") - self.storage.create_pool_ns_record( - self.admin_context, pool.id, ns2) - - with testtools.ExpectedException(exceptions.DuplicatePoolNsRecord): - ns2.hostname = ns1.hostname - self.storage.update_pool_ns_record( - self.admin_context, ns2) - - # PoolAttribute tests - def test_create_pool_attribute(self): - values = { - 'pool_id': "d5d10661-0312-4ae1-8664-31188a4310b7", - 'key': "test-attribute", - 'value': 'test-value' - } - - result = self.storage.create_pool_attribute( - self.admin_context, values['pool_id'], - 
objects.PoolAttribute.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNotNone(result['version']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['pool_id'], result['pool_id']) - self.assertEqual(values['key'], result['key']) - self.assertEqual(values['value'], result['value']) - - def test_find_pool_attribute(self): - # Verify that there are no Pool Attributes created - actual = self.storage.find_pool_attributes(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a Pool Attribute - pool_attribute = self.create_pool_attribute(fixture=0) - - actual = self.storage.find_pool_attributes(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(pool_attribute['pool_id'], actual[0]['pool_id']) - self.assertEqual(pool_attribute['key'], actual[0]['key']) - self.assertEqual(pool_attribute['value'], actual[0]['value']) - - def test_find_pool_attributes_paging(self): - # Create 10 Pool Attributes - created = [self.create_pool_attribute(value='^ns%d.example.com.' % i) - for i in range(10)] - - # Ensure we can page through the results. - self._ensure_paging(created, self.storage.find_pool_attributes) - - def test_find_pool_attributes_with_criterion(self): - # Create two pool attributes - pool_attribute_one = self.create_pool_attribute(fixture=0) - pool_attribute_two = self.create_pool_attribute(fixture=1) - - # Verify pool_attribute_one - criterion = dict(key=pool_attribute_one['key']) - - results = self.storage.find_pool_attributes(self.admin_context, - criterion) - self.assertEqual(1, len(results)) - self.assertEqual(pool_attribute_one['pool_id'], results[0]['pool_id']) - self.assertEqual(pool_attribute_one['key'], results[0]['key']) - self.assertEqual(pool_attribute_one['value'], results[0]['value']) - - # Verify pool_attribute_two - criterion = dict(key=pool_attribute_two['key']) - LOG.debug("Criterion is %r", criterion) - - results = self.storage.find_pool_attributes(self.admin_context, - criterion) - self.assertEqual(1, len(results)) - self.assertEqual(pool_attribute_two['pool_id'], results[0]['pool_id']) - self.assertEqual(pool_attribute_two['key'], results[0]['key']) - self.assertEqual(pool_attribute_two['value'], results[0]['value']) - - def test_get_pool_attribute(self): - expected = self.create_pool_attribute(fixture=0) - actual = self.storage.get_pool_attribute(self.admin_context, - expected['id']) - - self.assertEqual(expected['pool_id'], actual['pool_id']) - self.assertEqual(expected['key'], actual['key']) - self.assertEqual(expected['value'], actual['value']) - - def test_get_pool_attribute_missing(self): - with testtools.ExpectedException(exceptions.PoolAttributeNotFound): - uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' - self.storage.get_pool_attribute(self.admin_context, uuid) - - def test_find_pool_attribute_criterion(self): - pool_attribute_one = self.create_pool_attribute(fixture=0) - pool_attribute_two = self.create_pool_attribute(fixture=1) - - criterion = dict(key=pool_attribute_one['key']) - - result = self.storage.find_pool_attribute(self.admin_context, - criterion) - - self.assertEqual(pool_attribute_one['pool_id'], result['pool_id']) - self.assertEqual(pool_attribute_one['key'], result['key']) - self.assertEqual(pool_attribute_one['value'], result['value']) - - criterion = dict(key=pool_attribute_two['key']) - - result = self.storage.find_pool_attribute(self.admin_context, - criterion) - - self.assertEqual(pool_attribute_two['pool_id'], 
result['pool_id']) - self.assertEqual(pool_attribute_two['key'], result['key']) - self.assertEqual(pool_attribute_two['value'], result['value']) - - def test_find_pool_attribute_criterion_missing(self): - expected = self.create_pool_attribute(fixture=0) - - criterion = dict(key=expected['key'] + "NOT FOUND") - - with testtools.ExpectedException(exceptions.PoolAttributeNotFound): - self.storage.find_pool_attribute(self.admin_context, criterion) - - def test_update_pool_attribute(self): - pool_attribute = self.create_pool_attribute(value='ns1.example.org') - - # Update the Pool Attribute - pool_attribute.value = 'ns5.example.org' - - pool_attribute = self.storage.update_pool_attribute(self.admin_context, - pool_attribute) - # Verify the new values - self.assertEqual('ns5.example.org', pool_attribute.value) - - # Ensure the version column was incremented - self.assertEqual(2, pool_attribute.version) - - def test_update_pool_attribute_missing(self): - pool_attribute = objects.PoolAttribute( - id='728a329a-83b1-4573-82dc-45dceab435d4') - - with testtools.ExpectedException(exceptions.PoolAttributeNotFound): - self.storage.update_pool_attribute(self.admin_context, - pool_attribute) - - def test_update_pool_attribute_duplicate(self): - # Create two PoolAttributes - pool_attribute_one = self.create_pool_attribute(fixture=0) - pool_attribute_two = self.create_pool_attribute(fixture=1) - - # Update the second one to be a duplicate of the first - pool_attribute_two.pool_id = pool_attribute_one.pool_id - pool_attribute_two.key = pool_attribute_one.key - pool_attribute_two.value = pool_attribute_one.value - - with testtools.ExpectedException(exceptions.DuplicatePoolAttribute): - self.storage.update_pool_attribute(self.admin_context, - pool_attribute_two) - - def test_delete_pool_attribute(self): - pool_attribute = self.create_pool_attribute(fixture=0) - - self.storage.delete_pool_attribute(self.admin_context, - pool_attribute['id']) - - with testtools.ExpectedException(exceptions.PoolAttributeNotFound): - self.storage.get_pool_attribute(self.admin_context, - pool_attribute['id']) - - def test_delete_pool_attribute_missing(self): - with testtools.ExpectedException(exceptions.PoolAttributeNotFound): - uuid = '464e9250-4fe0-4267-9993-da639390bb04' - self.storage.delete_pool_attribute(self.admin_context, uuid) - - def test_create_pool_attribute_duplicate(self): - # Create the initial PoolAttribute - self.create_pool_attribute(fixture=0) - - with testtools.ExpectedException(exceptions.DuplicatePoolAttribute): - self.create_pool_attribute(fixture=0) - - # PoolNameserver tests - def test_create_pool_nameserver(self): - pool = self.create_pool(fixture=0) - - values = { - 'pool_id': pool.id, - 'host': "192.0.2.1", - 'port': 53 - } - - result = self.storage.create_pool_nameserver( - self.admin_context, - pool.id, - objects.PoolNameserver.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNotNone(result['version']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['pool_id'], result['pool_id']) - self.assertEqual(values['host'], result['host']) - self.assertEqual(values['port'], result['port']) - - def test_create_pool_nameserver_duplicate(self): - pool = self.create_pool(fixture=0) - - # Create the initial PoolNameserver - self.create_pool_nameserver(pool, fixture=0) - - with testtools.ExpectedException(exceptions.DuplicatePoolNameserver): - self.create_pool_nameserver(pool, fixture=0) - - def test_find_pool_nameservers(self): 
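- # The listing should start empty and contain exactly one entry after a nameserver is created.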
- pool = self.create_pool(fixture=0) - - # Verify that there are no pool_nameservers created - actual = self.storage.find_pool_nameservers(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a PoolNameserver - pool_nameserver = self.create_pool_nameserver(pool, fixture=0) - - # Fetch the PoolNameservers and ensure only 1 exists - actual = self.storage.find_pool_nameservers(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(pool_nameserver['pool_id'], actual[0]['pool_id']) - self.assertEqual(pool_nameserver['host'], actual[0]['host']) - self.assertEqual(pool_nameserver['port'], actual[0]['port']) - - def test_find_pool_nameservers_paging(self): - pool = self.create_pool(fixture=0) - - # Create 10 PoolNameservers - created = [self.create_pool_nameserver(pool, host='192.0.2.%d' % i) - for i in range(10)] - - # Ensure we can page through the results. - self._ensure_paging(created, self.storage.find_pool_nameservers) - - def test_find_pool_nameservers_with_criterion(self): - pool = self.create_pool(fixture=0) - - # Create two pool_nameservers - pool_nameserver_one = self.create_pool_nameserver(pool, fixture=0) - pool_nameserver_two = self.create_pool_nameserver(pool, fixture=1) - - # Verify pool_nameserver_one - criterion = dict(host=pool_nameserver_one['host']) - - results = self.storage.find_pool_nameservers( - self.admin_context, criterion) - - self.assertEqual(1, len(results)) - self.assertEqual(pool_nameserver_one['host'], results[0]['host']) - - # Verify pool_nameserver_two - criterion = dict(host=pool_nameserver_two['host']) - - results = self.storage.find_pool_nameservers(self.admin_context, - criterion) - self.assertEqual(1, len(results)) - self.assertEqual(pool_nameserver_two['host'], results[0]['host']) - - def test_get_pool_nameserver(self): - pool = self.create_pool(fixture=0) - - expected = self.create_pool_nameserver(pool, fixture=0) - actual = self.storage.get_pool_nameserver( - self.admin_context, expected['id']) - - self.assertEqual(expected['host'], actual['host']) - - def test_get_pool_nameserver_missing(self): - with testtools.ExpectedException(exceptions.PoolNameserverNotFound): - uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' - self.storage.get_pool_nameserver(self.admin_context, uuid) - - def test_find_pool_nameserver_criterion(self): - pool = self.create_pool(fixture=0) - - # Create two pool_nameservers - pool_nameserver_one = self.create_pool_nameserver(pool, fixture=0) - pool_nameserver_two = self.create_pool_nameserver(pool, fixture=1) - - # Verify pool_nameserver_one - criterion = dict(host=pool_nameserver_one['host']) - - result = self.storage.find_pool_nameserver( - self.admin_context, criterion) - - self.assertEqual(pool_nameserver_one['host'], result['host']) - - # Verify pool_nameserver_two - criterion = dict(host=pool_nameserver_two['host']) - - result = self.storage.find_pool_nameserver( - self.admin_context, criterion) - - self.assertEqual(pool_nameserver_two['host'], result['host']) - - def test_find_pool_nameserver_criterion_missing(self): - pool = self.create_pool(fixture=0) - - expected = self.create_pool_nameserver(pool, fixture=0) - - criterion = dict(host=expected['host'] + "NOT FOUND") - - with testtools.ExpectedException(exceptions.PoolNameserverNotFound): - self.storage.find_pool_nameserver(self.admin_context, criterion) - - def test_update_pool_nameserver(self): - pool = self.create_pool(fixture=0) - - pool_nameserver = self.create_pool_nameserver(pool, host='192.0.2.1') - - # Update the pool_nameserver - 
pool_nameserver.host = '192.0.2.2' - - pool_nameserver = self.storage.update_pool_nameserver( - self.admin_context, pool_nameserver) - - # Verify the new values - self.assertEqual('192.0.2.2', pool_nameserver.host) - - # Ensure the version column was incremented - self.assertEqual(2, pool_nameserver.version) - - def test_update_pool_nameserver_duplicate(self): - pool = self.create_pool(fixture=0) - - # Create two pool_nameservers - pool_nameserver_one = self.create_pool_nameserver( - pool, fixture=0, host='192.0.2.1') - pool_nameserver_two = self.create_pool_nameserver( - pool, fixture=0, host='192.0.2.2') - - # Update the second one to be a duplicate of the first - pool_nameserver_two.host = pool_nameserver_one.host - - with testtools.ExpectedException(exceptions.DuplicatePoolNameserver): - self.storage.update_pool_nameserver( - self.admin_context, pool_nameserver_two) - - def test_update_pool_nameserver_missing(self): - pool_nameserver = objects.PoolNameserver( - id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') - - with testtools.ExpectedException(exceptions.PoolNameserverNotFound): - self.storage.update_pool_nameserver( - self.admin_context, pool_nameserver) - - def test_delete_pool_nameserver(self): - pool = self.create_pool(fixture=0) - pool_nameserver = self.create_pool_nameserver(pool, fixture=0) - - self.storage.delete_pool_nameserver( - self.admin_context, pool_nameserver['id']) - - with testtools.ExpectedException(exceptions.PoolNameserverNotFound): - self.storage.get_pool_nameserver( - self.admin_context, pool_nameserver['id']) - - def test_delete_pool_nameserver_missing(self): - with testtools.ExpectedException(exceptions.PoolNameserverNotFound): - uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' - self.storage.delete_pool_nameserver(self.admin_context, uuid) - - # PoolTarget tests - def test_create_pool_target(self): - pool = self.create_pool(fixture=0) - - values = { - 'pool_id': pool.id, - 'type': "fake" - } - - result = self.storage.create_pool_target( - self.admin_context, - pool.id, - objects.PoolTarget.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNotNone(result['version']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['pool_id'], result['pool_id']) - self.assertEqual(values['type'], result['type']) - - def test_find_pool_targets(self): - pool = self.create_pool(fixture=0) - - # Verify that there are no new pool_targets created - actual = self.storage.find_pool_targets( - self.admin_context, - criterion={'pool_id': pool.id}) - self.assertEqual(0, len(actual)) - - # Create a PoolTarget - pool_target = self.create_pool_target(pool, fixture=0) - - # Fetch the PoolTargets and ensure only 1 exists - actual = self.storage.find_pool_targets( - self.admin_context, - criterion={'pool_id': pool.id}) - self.assertEqual(1, len(actual)) - - self.assertEqual(pool_target['pool_id'], actual[0]['pool_id']) - self.assertEqual(pool_target['type'], actual[0]['type']) - - def test_find_pool_targets_paging(self): - pool = self.create_pool(fixture=0) - - # Create 10 PoolTargets - created = [self.create_pool_target(pool, description='Target %d' % i) - for i in range(10)] - - # Ensure we can page through the results. 
- self._ensure_paging(created, self.storage.find_pool_targets, - criterion={'pool_id': pool.id}) - - def test_find_pool_targets_with_criterion(self): - pool = self.create_pool(fixture=0) - - # Create two pool_targets - pool_target_one = self.create_pool_target( - pool, fixture=0, description='One') - pool_target_two = self.create_pool_target( - pool, fixture=1, description='Two') - - # Verify pool_target_one - criterion = dict(description=pool_target_one['description']) - - results = self.storage.find_pool_targets( - self.admin_context, criterion) - - self.assertEqual(1, len(results)) - self.assertEqual( - pool_target_one['description'], results[0]['description']) - - # Verify pool_target_two - criterion = dict(description=pool_target_two['description']) - - results = self.storage.find_pool_targets(self.admin_context, - criterion) - self.assertEqual(1, len(results)) - self.assertEqual( - pool_target_two['description'], results[0]['description']) - - def test_get_pool_target(self): - pool = self.create_pool(fixture=0) - - expected = self.create_pool_target(pool, fixture=0) - actual = self.storage.get_pool_target( - self.admin_context, expected['id']) - - self.assertEqual(expected['type'], actual['type']) - - def test_get_pool_target_missing(self): - with testtools.ExpectedException(exceptions.PoolTargetNotFound): - uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' - self.storage.get_pool_target(self.admin_context, uuid) - - def test_find_pool_target_criterion(self): - pool = self.create_pool(fixture=0) - - # Create two pool_targets - pool_target_one = self.create_pool_target( - pool, fixture=0, description='One') - pool_target_two = self.create_pool_target( - pool, fixture=1, description='Two') - - # Verify pool_target_one - criterion = dict(description=pool_target_one['description']) - - result = self.storage.find_pool_target( - self.admin_context, criterion) - - self.assertEqual(pool_target_one['description'], result['description']) - - # Verify pool_target_two - criterion = dict(description=pool_target_two['description']) - - result = self.storage.find_pool_target( - self.admin_context, criterion) - - self.assertEqual(pool_target_two['description'], result['description']) - - def test_find_pool_target_criterion_missing(self): - pool = self.create_pool(fixture=0) - - expected = self.create_pool_target(pool, fixture=0) - - criterion = dict(description=expected['description'] + 'NOT FOUND') - - with testtools.ExpectedException(exceptions.PoolTargetNotFound): - self.storage.find_pool_target(self.admin_context, criterion) - - def test_update_pool_target(self): - pool = self.create_pool(fixture=0) - - pool_target = self.create_pool_target(pool, description='One') - - # Update the pool_target - pool_target.description = 'Two' - - pool_target = self.storage.update_pool_target( - self.admin_context, pool_target) - - # Verify the new values - self.assertEqual('Two', pool_target.description) - - # Ensure the version column was incremented - self.assertEqual(2, pool_target.version) - - def test_update_pool_target_missing(self): - pool_target = objects.PoolTarget( - id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') - - with testtools.ExpectedException(exceptions.PoolTargetNotFound): - self.storage.update_pool_target( - self.admin_context, pool_target) - - def test_delete_pool_target(self): - pool = self.create_pool(fixture=0) - pool_target = self.create_pool_target(pool, fixture=0) - - self.storage.delete_pool_target( - self.admin_context, pool_target['id']) - - with 
testtools.ExpectedException(exceptions.PoolTargetNotFound): - self.storage.get_pool_target( - self.admin_context, pool_target['id']) - - def test_delete_pool_target_missing(self): - with testtools.ExpectedException(exceptions.PoolTargetNotFound): - uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' - self.storage.delete_pool_target(self.admin_context, uuid) - - # PoolAlsoNotify tests - def test_create_pool_also_notify(self): - pool = self.create_pool(fixture=0) - - values = { - 'pool_id': pool.id, - 'host': "192.0.2.1", - 'port': 53 - } - - result = self.storage.create_pool_also_notify( - self.admin_context, - pool.id, - objects.PoolAlsoNotify.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNotNone(result['version']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(values['pool_id'], result['pool_id']) - self.assertEqual(values['host'], result['host']) - self.assertEqual(values['port'], result['port']) - - def test_create_pool_also_notify_duplicate(self): - pool = self.create_pool(fixture=0) - - # Create the initial PoolAlsoNotify - self.create_pool_also_notify(pool, fixture=0) - - with testtools.ExpectedException(exceptions.DuplicatePoolAlsoNotify): - self.create_pool_also_notify(pool, fixture=0) - - def test_find_pool_also_notifies(self): - pool = self.create_pool(fixture=0) - - # Verify that there are no pool_also_notifies created - actual = self.storage.find_pool_also_notifies(self.admin_context) - self.assertEqual(0, len(actual)) - - # Create a PoolAlsoNotify - pool_also_notify = self.create_pool_also_notify(pool, fixture=0) - - # Fetch the PoolAlsoNotifies and ensure only 1 exists - actual = self.storage.find_pool_also_notifies(self.admin_context) - self.assertEqual(1, len(actual)) - - self.assertEqual(pool_also_notify['pool_id'], actual[0]['pool_id']) - self.assertEqual(pool_also_notify['host'], actual[0]['host']) - self.assertEqual(pool_also_notify['port'], actual[0]['port']) - - def test_find_pool_also_notifies_paging(self): - pool = self.create_pool(fixture=0) - - # Create 10 PoolAlsoNotifies - created = [self.create_pool_also_notify(pool, host='192.0.2.%d' % i) - for i in range(10)] - - # Ensure we can page through the results. 
-        self._ensure_paging(created, self.storage.find_pool_also_notifies)
-
-    def test_find_pool_also_notifies_with_criterion(self):
-        pool = self.create_pool(fixture=0)
-
-        # Create two pool_also_notifies
-        pool_also_notify_one = self.create_pool_also_notify(pool, fixture=0)
-        pool_also_notify_two = self.create_pool_also_notify(pool, fixture=1)
-
-        # Verify pool_also_notify_one
-        criterion = dict(host=pool_also_notify_one['host'])
-
-        results = self.storage.find_pool_also_notifies(
-            self.admin_context, criterion)
-
-        self.assertEqual(1, len(results))
-        self.assertEqual(pool_also_notify_one['host'], results[0]['host'])
-
-        # Verify pool_also_notify_two
-        criterion = dict(host=pool_also_notify_two['host'])
-
-        results = self.storage.find_pool_also_notifies(self.admin_context,
-                                                       criterion)
-        self.assertEqual(1, len(results))
-        self.assertEqual(pool_also_notify_two['host'], results[0]['host'])
-
-    def test_get_pool_also_notify(self):
-        pool = self.create_pool(fixture=0)
-
-        expected = self.create_pool_also_notify(pool, fixture=0)
-        actual = self.storage.get_pool_also_notify(
-            self.admin_context, expected['id'])
-
-        self.assertEqual(expected['host'], actual['host'])
-
-    def test_get_pool_also_notify_missing(self):
-        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
-            uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb'
-            self.storage.get_pool_also_notify(self.admin_context, uuid)
-
-    def test_find_pool_also_notify_criterion(self):
-        pool = self.create_pool(fixture=0)
-
-        # Create two pool_also_notifies
-        pool_also_notify_one = self.create_pool_also_notify(pool, fixture=0)
-        pool_also_notify_two = self.create_pool_also_notify(pool, fixture=1)
-
-        # Verify pool_also_notify_one
-        criterion = dict(host=pool_also_notify_one['host'])
-
-        result = self.storage.find_pool_also_notify(
-            self.admin_context, criterion)
-
-        self.assertEqual(pool_also_notify_one['host'], result['host'])
-
-        # Verify pool_also_notify_two
-        criterion = dict(host=pool_also_notify_two['host'])
-
-        result = self.storage.find_pool_also_notify(
-            self.admin_context, criterion)
-
-        self.assertEqual(pool_also_notify_two['host'], result['host'])
-
-    def test_find_pool_also_notify_criterion_missing(self):
-        pool = self.create_pool(fixture=0)
-
-        expected = self.create_pool_also_notify(pool, fixture=0)
-
-        criterion = dict(host=expected['host'] + "NOT FOUND")
-
-        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
-            self.storage.find_pool_also_notify(self.admin_context, criterion)
-
-    def test_update_pool_also_notify(self):
-        pool = self.create_pool(fixture=0)
-
-        pool_also_notify = self.create_pool_also_notify(pool, host='192.0.2.1')
-
-        # Update the pool_also_notify
-        pool_also_notify.host = '192.0.2.2'
-
-        pool_also_notify = self.storage.update_pool_also_notify(
-            self.admin_context, pool_also_notify)
-
-        # Verify the new values
-        self.assertEqual('192.0.2.2', pool_also_notify.host)
-
-        # Ensure the version column was incremented
-        self.assertEqual(2, pool_also_notify.version)
-
-    def test_update_pool_also_notify_duplicate(self):
-        pool = self.create_pool(fixture=0)
-
-        # Create two pool_also_notifies
-        pool_also_notify_one = self.create_pool_also_notify(
-            pool, fixture=0, host='192.0.2.1')
-        pool_also_notify_two = self.create_pool_also_notify(
-            pool, fixture=0, host='192.0.2.2')
-
-        # Update the second one to be a duplicate of the first
-        pool_also_notify_two.host = pool_also_notify_one.host
-
-        with testtools.ExpectedException(exceptions.DuplicatePoolAlsoNotify):
-            self.storage.update_pool_also_notify(
-                self.admin_context, pool_also_notify_two)
-
-    def test_update_pool_also_notify_missing(self):
-        pool_also_notify = objects.PoolAlsoNotify(
-            id='e8cee063-3a26-42d6-b181-bdbdc2c99d08')
-
-        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
-            self.storage.update_pool_also_notify(
-                self.admin_context, pool_also_notify)
-
-    def test_delete_pool_also_notify(self):
-        pool = self.create_pool(fixture=0)
-        pool_also_notify = self.create_pool_also_notify(pool, fixture=0)
-
-        self.storage.delete_pool_also_notify(
-            self.admin_context, pool_also_notify['id'])
-
-        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
-            self.storage.get_pool_also_notify(
-                self.admin_context, pool_also_notify['id'])
-
-    def test_delete_pool_also_notify_missing(self):
-        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
-            uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183'
-            self.storage.delete_pool_also_notify(self.admin_context, uuid)
-
-    def test_create_service_status_duplicate(self):
-        values = self.get_service_status_fixture(fixture=0)
-
-        self.storage.create_service_status(
-            self.admin_context, objects.ServiceStatus.from_dict(values))
-
-        with testtools.ExpectedException(exceptions.DuplicateServiceStatus):
-            self.storage.create_service_status(
-                self.admin_context, objects.ServiceStatus.from_dict(values))
-
-    # Zone Transfer Accept tests
-    def test_create_zone_transfer_request(self):
-        zone = self.create_zone()
-
-        values = {
-            'tenant_id': self.admin_context.project_id,
-            'zone_id': zone.id,
-            'key': 'qwertyuiop'
-        }
-
-        result = self.storage.create_zone_transfer_request(
-            self.admin_context, objects.ZoneTransferRequest.from_dict(values))
-
-        self.assertEqual(self.admin_context.project_id, result['tenant_id'])
-        self.assertIn('status', result)
-
-    def test_create_zone_transfer_request_scoped(self):
-        zone = self.create_zone()
-        tenant_2_context = self.get_context(project_id='2')
-        tenant_3_context = self.get_context(project_id='3')
-
-        values = {
-            'tenant_id': self.admin_context.project_id,
-            'zone_id': zone.id,
-            'key': 'qwertyuiop',
-            'target_tenant_id': tenant_2_context.project_id,
-        }
-
-        result = self.storage.create_zone_transfer_request(
-            self.admin_context, objects.ZoneTransferRequest.from_dict(values))
-
-        self.assertIsNotNone(result['id'])
-        self.assertIsNotNone(result['created_at'])
-        self.assertIsNone(result['updated_at'])
-
-        self.assertEqual(self.admin_context.project_id, result['tenant_id'])
-        self.assertEqual(
-            tenant_2_context.project_id, result['target_tenant_id']
-        )
-        self.assertIn('status', result)
-
-        stored_ztr = self.storage.get_zone_transfer_request(
-            tenant_2_context, result.id)
-
-        self.assertEqual(
-            self.admin_context.project_id, stored_ztr['tenant_id']
-        )
-        self.assertEqual(stored_ztr['id'], result['id'])
-
-        with testtools.ExpectedException(
-                exceptions.ZoneTransferRequestNotFound):
-            self.storage.get_zone_transfer_request(
-                tenant_3_context, result.id)
-
-    def test_find_zone_transfer_requests(self):
-        zone = self.create_zone()
-
-        values = {
-            'tenant_id': self.admin_context.project_id,
-            'zone_id': zone.id,
-            'key': 'qwertyuiop'
-        }
-
-        self.storage.create_zone_transfer_request(
-            self.admin_context, objects.ZoneTransferRequest.from_dict(values))
-
-        requests = self.storage.find_zone_transfer_requests(
-            self.admin_context, {"tenant_id": self.admin_context.project_id})
-        self.assertEqual(1, len(requests))
-
-    def test_delete_zone_transfer_request(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-
-        self.storage.delete_zone_transfer_request(
-            self.admin_context, zt_request.id)
-
-        with testtools.ExpectedException(
-                exceptions.ZoneTransferRequestNotFound):
-            self.storage.get_zone_transfer_request(
-                self.admin_context, zt_request.id)
-
-    def test_update_zone_transfer_request(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-
-        zt_request.description = 'New description'
-        result = self.storage.update_zone_transfer_request(
-            self.admin_context, zt_request)
-        self.assertEqual('New description', result.description)
-
-    def test_get_zone_transfer_request(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-
-        result = self.storage.get_zone_transfer_request(
-            self.admin_context, zt_request.id)
-        self.assertEqual(zt_request.id, result.id)
-        self.assertEqual(zt_request.zone_id, result.zone_id)
-
-    # Zone Transfer Accept tests
-    def test_create_zone_transfer_accept(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-        values = {
-            'tenant_id': self.admin_context.project_id,
-            'zone_transfer_request_id': zt_request.id,
-            'zone_id': zone.id,
-            'key': zt_request.key
-        }
-
-        result = self.storage.create_zone_transfer_accept(
-            self.admin_context, objects.ZoneTransferAccept.from_dict(values))
-
-        self.assertIsNotNone(result['id'])
-        self.assertIsNotNone(result['created_at'])
-        self.assertIsNone(result['updated_at'])
-
-        self.assertEqual(self.admin_context.project_id, result['tenant_id'])
-        self.assertIn('status', result)
-
-    def test_find_zone_transfer_accepts(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-        values = {
-            'tenant_id': self.admin_context.project_id,
-            'zone_transfer_request_id': zt_request.id,
-            'zone_id': zone.id,
-            'key': zt_request.key
-        }
-
-        self.storage.create_zone_transfer_accept(
-            self.admin_context, objects.ZoneTransferAccept.from_dict(values))
-
-        accepts = self.storage.find_zone_transfer_accepts(
-            self.admin_context, {"tenant_id": self.admin_context.project_id})
-        self.assertEqual(1, len(accepts))
-
-    def test_find_zone_transfer_accept(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-        values = {
-            'tenant_id': self.admin_context.project_id,
-            'zone_transfer_request_id': zt_request.id,
-            'zone_id': zone.id,
-            'key': zt_request.key
-        }
-
-        result = self.storage.create_zone_transfer_accept(
-            self.admin_context, objects.ZoneTransferAccept.from_dict(values))
-
-        accept = self.storage.find_zone_transfer_accept(
-            self.admin_context, {"id": result.id})
-        self.assertEqual(result.id, accept.id)
-
-    def test_transfer_zone_ownership(self):
-        tenant_1_context = self.get_context(project_id='1',
-                                            roles=['member', 'reader'])
-        tenant_2_context = self.get_context(project_id='2',
-                                            roles=['member', 'reader'])
-        admin_context = self.get_admin_context()
-        admin_context.all_tenants = True
-
-        zone = self.create_zone(context=tenant_1_context)
-        recordset = self.create_recordset(zone, context=tenant_1_context)
-        record = recordset.records[0]
-
-        updated_zone = zone
-
-        updated_zone.tenant_id = tenant_2_context.project_id
-
-        self.storage.update_zone(
-            admin_context, updated_zone)
-
-        saved_zone = self.storage.get_zone(
-            admin_context, zone.id)
-        saved_recordset = self.storage.find_recordset(
-            admin_context, criterion={'id': recordset.id})
-        saved_record = self.storage.get_record(
-            admin_context, record.id)
-
-        self.assertEqual(tenant_2_context.project_id, saved_zone.tenant_id)
-        self.assertEqual(
-            tenant_2_context.project_id, saved_recordset.tenant_id
-        )
-        self.assertEqual(tenant_2_context.project_id, saved_record.tenant_id)
-
-    def test_delete_zone_transfer_accept(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-        zt_accept = self.create_zone_transfer_accept(zt_request)
-
-        self.storage.delete_zone_transfer_accept(
-            self.admin_context, zt_accept.id)
-
-        with testtools.ExpectedException(
-                exceptions.ZoneTransferAcceptNotFound):
-            self.storage.get_zone_transfer_accept(
-                self.admin_context, zt_accept.id)
-
-    def test_update_zone_transfer_accept(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-        zt_accept = self.create_zone_transfer_accept(zt_request)
-
-        zt_accept.status = 'COMPLETE'
-        result = self.storage.update_zone_transfer_accept(
-            self.admin_context, zt_accept)
-        self.assertEqual('COMPLETE', result.status)
-
-    def test_get_zone_transfer_accept(self):
-        zone = self.create_zone()
-        zt_request = self.create_zone_transfer_request(zone)
-        zt_accept = self.create_zone_transfer_accept(zt_request)
-
-        result = self.storage.get_zone_transfer_accept(
-            self.admin_context, zt_accept.id)
-        self.assertEqual(zt_accept.id, result.id)
-        self.assertEqual(zt_accept.zone_id, result.zone_id)
-
-    def test_count_zone_tasks(self):
-        # in the beginning, there should be nothing
-        zones = self.storage.count_zone_tasks(self.admin_context)
-        self.assertEqual(0, zones)
-
-        values = {
-            'status': 'PENDING',
-            'task_type': 'IMPORT'
-        }
-
-        self.storage.create_zone_import(
-            self.admin_context, objects.ZoneImport.from_dict(values))
-
-        # count imported zones
-        zones = self.storage.count_zone_tasks(self.admin_context)
-
-        # well, did we get 1?
-        self.assertEqual(1, zones)
-
-    def test_count_zone_tasks_none_result(self):
-        rp = mock.Mock()
-        rp.fetchone.return_value = None
-        with mock.patch('designate.storage.sql.get_write_session',
-                        return_value=rp):
-            zones = self.storage.count_zone_tasks(self.admin_context)
-            self.assertEqual(0, zones)
-
-    # Zone Import Tests
-    def test_create_zone_import(self):
-        values = {
-            'status': 'PENDING',
-            'task_type': 'IMPORT'
-        }
-
-        result = self.storage.create_zone_import(
-            self.admin_context, objects.ZoneImport.from_dict(values))
-
-        self.assertIsNotNone(result['id'])
-        self.assertIsNotNone(result['created_at'])
-        self.assertIsNone(result['updated_at'])
-        self.assertIsNotNone(result['version'])
-        self.assertEqual(values['status'], result['status'])
-        self.assertIsNone(result['zone_id'])
-        self.assertIsNone(result['message'])
-
-    def test_find_zone_imports(self):
-
-        actual = self.storage.find_zone_imports(self.admin_context)
-        self.assertEqual(0, len(actual))
-
-        # Create a single ZoneImport
-        zone_import = self.create_zone_import(fixture=0)
-
-        actual = self.storage.find_zone_imports(self.admin_context)
-        self.assertEqual(1, len(actual))
-
-        self.assertEqual(zone_import['status'], actual[0]['status'])
-        self.assertEqual(zone_import['message'], actual[0]['message'])
-        self.assertEqual(zone_import['zone_id'], actual[0]['zone_id'])
-
-    def test_find_zone_imports_paging(self):
-        # Create 10 ZoneImports
-        created = [self.create_zone_import() for i in range(10)]
-
-        # Ensure we can page through the results.
-        self._ensure_paging(created, self.storage.find_zone_imports)
-
-    def test_find_zone_imports_with_criterion(self):
-        zone_import_one = self.create_zone_import(fixture=0)
-        zone_import_two = self.create_zone_import(fixture=1)
-
-        criterion_one = dict(status=zone_import_one['status'])
-
-        results = self.storage.find_zone_imports(self.admin_context,
-                                                 criterion_one)
-        self.assertEqual(1, len(results))
-
-        self.assertEqual(zone_import_one['status'], results[0]['status'])
-
-        criterion_two = dict(status=zone_import_two['status'])
-
-        results = self.storage.find_zone_imports(self.admin_context,
-                                                 criterion_two)
-        self.assertEqual(1, len(results))
-
-        self.assertEqual(zone_import_two['status'], results[0]['status'])
-
-    def test_get_zone_import(self):
-        # Create a zone_import
-        expected = self.create_zone_import()
-        actual = self.storage.get_zone_import(self.admin_context,
-                                              expected['id'])
-
-        self.assertEqual(expected['status'], actual['status'])
-
-    def test_get_zone_import_missing(self):
-        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
-            uuid = '4c8e7f82-3519-4bf7-8940-a66a4480f223'
-            self.storage.get_zone_import(self.admin_context, uuid)
-
-    def test_find_zone_import_criterion_missing(self):
-        expected = self.create_zone_import()
-
-        criterion = dict(status=expected['status'] + "NOT FOUND")
-
-        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
-            self.storage.find_zone_import(self.admin_context, criterion)
-
-    def test_update_zone_import(self):
-        # Create a zone_import
-        zone_import = self.create_zone_import(status='PENDING',
-                                              task_type='IMPORT')
-
-        # Update the zone_import
-        zone_import.status = 'COMPLETE'
-
-        # Update storage
-        zone_import = self.storage.update_zone_import(self.admin_context,
-                                                      zone_import)
-
-        # Verify the new value
-        self.assertEqual('COMPLETE', zone_import.status)
-
-        # Ensure the version column was incremented
-        self.assertEqual(2, zone_import.version)
-
-    def test_update_zone_import_missing(self):
-        zone_import = objects.ZoneImport(
-            id='486f9cbe-b8b6-4d8c-8275-1a6e47b13e00')
-        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
-            self.storage.update_zone_import(self.admin_context, zone_import)
-
-    def test_delete_zone_import(self):
-        # Create a zone_import
-        zone_import = self.create_zone_import()
-
-        # Delete the zone_import
-        self.storage.delete_zone_import(self.admin_context, zone_import['id'])
-
-        # Verify that it's deleted
-        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
-            self.storage.get_zone_import(self.admin_context, zone_import['id'])
-
-    def test_delete_zone_import_missing(self):
-        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
-            uuid = 'cac1fc02-79b2-4e62-a1a4-427b6790bbe6'
-            self.storage.delete_zone_import(self.admin_context, uuid)
diff --git a/designate/tests/test_storage/test_sqlalchemy.py b/designate/tests/test_storage/test_sqlalchemy.py
deleted file mode 100644
index 9cd05b045..000000000
--- a/designate/tests/test_storage/test_sqlalchemy.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2012 Managed I.T.
-#
-# Author: Kiall Mac Innes
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo_log import log as logging
-from sqlalchemy import text
-
-from designate import storage
-from designate.storage import sql
-from designate.tests.test_storage import StorageTestCase
-from designate.tests import TestCase
-
-LOG = logging.getLogger(__name__)
-
-
-class SqlalchemyStorageTest(StorageTestCase, TestCase):
-    def setUp(self):
-        super(SqlalchemyStorageTest, self).setUp()
-
-        self.storage = storage.get_storage('sqlalchemy')
-
-    def test_schema_table_names(self):
-        table_names = [
-            'blacklists',
-            'pool_also_notifies',
-            'pool_attributes',
-            'pool_nameservers',
-            'pool_ns_records',
-            'pool_target_masters',
-            'pool_target_options',
-            'pool_targets',
-            'pools',
-            'quotas',
-            'records',
-            'recordsets',
-            'service_statuses',
-            'shared_zones',
-            'tlds',
-            'tsigkeys',
-            'zone_attributes',
-            'zone_masters',
-            'zone_tasks',
-            'zone_transfer_accepts',
-            'zone_transfer_requests',
-            'zones'
-        ]
-
-        inspector = self.storage.get_inspector()
-
-        actual_table_names = inspector.get_table_names()
-
-        # We have transitioned database schema migration tools.
-        # They use different tracking tables. Accomidate that one or both
-        # may exist in this test.
-        migration_table_found = False
-        if ('migrate_version' in actual_table_names or
-                'alembic_version' in actual_table_names):
-            migration_table_found = True
-        self.assertTrue(
-            migration_table_found, 'A DB migration table was not found.'
-        )
-        try:
-            actual_table_names.remove('migrate_version')
-        except ValueError:
-            pass
-        try:
-            actual_table_names.remove('alembic_version')
-        except ValueError:
-            pass
-
-        self.assertEqual(table_names, actual_table_names)
-
-    def test_schema_table_indexes(self):
-        with sql.get_read_session() as session:
-            indexes_t = session.execute(
-                text("SELECT * FROM sqlite_master WHERE type = 'index';"))
-
-        indexes = {}  # table name -> index names -> cmd
-        for _, index_name, table_name, num, cmd in indexes_t:
-            if index_name.startswith("sqlite_"):
-                continue  # ignore sqlite-specific indexes
-            if table_name not in indexes:
-                indexes[table_name] = {}
-            indexes[table_name][index_name] = cmd
-
-        expected = {
-            "records": {
-                "record_created_at": "CREATE INDEX record_created_at ON records (created_at)",  # noqa
-                "records_tenant": "CREATE INDEX records_tenant ON records (tenant_id)",  # noqa
-                "update_status_index": "CREATE INDEX update_status_index ON records (status, zone_id, tenant_id, created_at, serial)",  # noqa
-            },
-            "recordsets": {
-                "recordset_created_at": "CREATE INDEX recordset_created_at ON recordsets (created_at)",  # noqa
-                "recordset_type_name": "CREATE INDEX recordset_type_name ON recordsets (type, name)",  # noqa
-                "reverse_name_dom_id": "CREATE INDEX reverse_name_dom_id ON recordsets (reverse_name, zone_id)",  # noqa
-                "rrset_type_domainid": "CREATE INDEX rrset_type_domainid ON recordsets (type, zone_id)",  # noqa
-                "rrset_updated_at": "CREATE INDEX rrset_updated_at ON recordsets (updated_at)",  # noqa
-                "rrset_zoneid": "CREATE INDEX rrset_zoneid ON recordsets (zone_id)",  # noqa
-                "rrset_type": "CREATE INDEX rrset_type ON recordsets (type)",  # noqa
-                "rrset_ttl": "CREATE INDEX rrset_ttl ON recordsets (ttl)",  # noqa
-                "rrset_tenant_id": "CREATE INDEX rrset_tenant_id ON recordsets (tenant_id)",  # noqa
-            },
-            "zones": {
-                "delayed_notify": "CREATE INDEX delayed_notify ON zones (delayed_notify)",  # noqa
-                "increment_serial": "CREATE INDEX increment_serial ON zones (increment_serial)",  # noqa
-                "reverse_name_deleted": "CREATE INDEX reverse_name_deleted ON zones (reverse_name, deleted)",  # noqa
-                "zone_created_at": "CREATE INDEX zone_created_at ON zones (created_at)",  # noqa
-                "zone_deleted": "CREATE INDEX zone_deleted ON zones (deleted)",
-                "zone_tenant_deleted": "CREATE INDEX zone_tenant_deleted ON zones (tenant_id, deleted)",  # noqa
-            }
-        }
-        self.assertDictEqual(expected, indexes)
diff --git a/designate/tests/test_storage/test_storage.py b/designate/tests/test_storage/test_storage.py
new file mode 100644
index 000000000..c42495a93
--- /dev/null
+++ b/designate/tests/test_storage/test_storage.py
@@ -0,0 +1,3298 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
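For orientation before the new file's body: with the driver plugin gone, every caller obtains the single remaining backend through the no-argument accessor. A minimal sketch of that calling convention; the `engine` and `ctx` names are illustrative, not part of the patch:

from designate import storage

# The accessor no longer takes a driver name; it always returns the
# SQLAlchemy-backed implementation.
engine = storage.get_storage()

# Storage calls then take a RequestContext as their first argument,
# e.g. engine.find_zones(ctx), where `ctx` stands in for the test's
# admin context.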
+import math
+from sqlalchemy import text
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_messaging.rpc import dispatcher as rpc_dispatcher
+import testtools
+
+from designate.conf.mdns import DEFAULT_MDNS_PORT
+from designate import exceptions
+from designate import objects
+from designate import storage
+from designate.storage import sql
+from designate.tests import TestCase
+from designate.utils import generate_uuid
+
+LOG = logging.getLogger(__name__)
+
+
+class SqlalchemyStorageTest(TestCase):
+    def setUp(self):
+        super(SqlalchemyStorageTest, self).setUp()
+        self.storage = storage.get_storage()
+
+    # TODO(kiall): Someone, Somewhere, could probably make use of an
+    #              assertNestedDictContainsSubset(), clean up and put
+    #              somewhere better.
+    def assertNestedDictContainsSubset(self, expected, actual):
+        for key, value in expected.items():
+            if isinstance(value, dict):
+                self.assertNestedDictContainsSubset(value, actual.get(key, {}))
+
+            elif isinstance(value, list):
+                self.assertEqual(len(value), len(actual[key]))
+
+                for index, item in enumerate(value):
+                    self.assertNestedDictContainsSubset(
+                        item, actual[key][index])
+
+            else:
+                self.assertEqual(value, actual[key])
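To make the recursive assertion above concrete, here is a pair of values it would accept; the dictionaries are invented for this example. Keys missing from `expected` are ignored, while lists must match in length and are compared element by element:

expected = {'zone': {'name': 'example.org.',
                     'records': [{'data': '192.0.2.1'}]}}
actual = {'zone': {'name': 'example.org.',
                   'status': 'ACTIVE',
                   'records': [{'data': '192.0.2.1', 'ttl': 300}]}}

# Passes: every leaf in `expected` is present and equal in `actual`;
# the extra 'status' and 'ttl' keys are never compared.
self.assertNestedDictContainsSubset(expected, actual)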
+
+    def create_quota(self, **kwargs):
+        """
+        This create method has been kept in the StorageTestCase class as quotas
+        are treated differently to other resources in Central.
+        """
+
+        context = kwargs.pop('context', self.admin_context)
+        fixture = kwargs.pop('fixture', 0)
+
+        values = self.get_quota_fixture(fixture=fixture, values=kwargs)
+
+        if 'tenant_id' not in values:
+            values['tenant_id'] = context.project_id
+
+        return self.storage.create_quota(context, values)
+
+    def create_pool_nameserver(self, pool, **kwargs):
+        # NOTE(kiall): We add this method here, rather than in the base test
+        #              case, as the base methods expect to make a central API
+        #              call. If a central API method is exposed for this, we
+        #              should remove this and add to the base.
+        context = kwargs.pop('context', self.admin_context)
+        fixture = kwargs.pop('fixture', 0)
+
+        values = self.get_pool_nameserver_fixture(
+            fixture=fixture, values=kwargs)
+
+        if 'pool_id' not in values:
+            values['pool_id'] = pool.id
+
+        return self.storage.create_pool_nameserver(
+            context, pool.id, objects.PoolNameserver.from_dict(values))
+
+    def create_pool_target(self, pool, **kwargs):
+        # NOTE(kiall): We add this method here, rather than in the base test
+        #              case, as the base methods expect to make a central API
+        #              call. If a central API method is exposed for this, we
+        #              should remove this and add to the base.
+        context = kwargs.pop('context', self.admin_context)
+        fixture = kwargs.pop('fixture', 0)
+
+        values = self.get_pool_target_fixture(
+            fixture=fixture, values=kwargs)
+
+        if 'pool_id' not in values:
+            values['pool_id'] = pool.id
+
+        return self.storage.create_pool_target(
+            context, pool.id, objects.PoolTarget.from_dict(values))
+
+    def create_pool_also_notify(self, pool, **kwargs):
+        # NOTE(kiall): We add this method here, rather than in the base test
+        #              case, as the base methods expect to make a central API
+        #              call. If a central API method is exposed for this, we
+        #              should remove this and add to the base.
+        context = kwargs.pop('context', self.admin_context)
+        fixture = kwargs.pop('fixture', 0)
+
+        values = self.get_pool_also_notify_fixture(
+            fixture=fixture, values=kwargs)
+
+        if 'pool_id' not in values:
+            values['pool_id'] = pool.id
+
+        return self.storage.create_pool_also_notify(
+            context, pool.id, objects.PoolAlsoNotify.from_dict(values))
+
+    # Paging Tests
+    def _ensure_paging(self, data, method, criterion=None):
+        """
+        Given an array of created items we iterate through them making sure
+        they match up to things returned by paged results.
+        """
+        results = None
+        item_number = 0
+
+        criterion = criterion or {}
+
+        for current_page in range(0, int(math.ceil(float(len(data)) / 2))):
+            LOG.critical('Validating results on page %d', current_page)
+
+            if results is not None:
+                results = method(
+                    self.admin_context,
+                    limit=2,
+                    marker=results[-1]['id'],
+                    criterion=criterion
+                )
+            else:
+                results = method(self.admin_context, limit=2,
+                                 criterion=criterion)
+
+            LOG.critical('Results: %d', len(results))
+
+            for result_number, result in enumerate(results):
+                LOG.critical('Validating result %d on page %d', result_number,
+                             current_page)
+                self.assertEqual(
+                    data[item_number]['id'], results[result_number]['id'])
+
+                item_number += 1
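The `_ensure_paging` helper above pins down the paging contract the `find_*()` methods implement: `limit` sets the page size, and `marker` (the id of the last item seen) resumes the walk. The same loop written free-standing; the `list_all` name is invented for illustration, and it assumes an empty page signals the end of the result set:

def list_all(method, context, page_size=2, criterion=None):
    """Collect every row from a paged find_*() method."""
    items = []
    marker = None
    while True:
        page = method(context, limit=page_size, marker=marker,
                      criterion=criterion or {})
        if not page:
            break  # nothing after the current marker; we are done
        items.extend(page)
        marker = page[-1]['id']  # resume after the last item we saw
    return items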
+
+    def test_paging_marker_not_found(self):
+        with testtools.ExpectedException(exceptions.MarkerNotFound):
+            self.storage.find_pool_attributes(
+                self.admin_context, marker=generate_uuid(), limit=5)
+
+    def test_paging_marker_invalid(self):
+        with testtools.ExpectedException(exceptions.InvalidMarker):
+            self.storage.find_pool_attributes(
+                self.admin_context, marker='4')
+
+    def test_paging_limit_invalid(self):
+        with testtools.ExpectedException(exceptions.ValueError):
+            self.storage.find_pool_attributes(
+                self.admin_context, limit='z')
+
+    def test_paging_sort_dir_invalid(self):
+        with testtools.ExpectedException(exceptions.ValueError):
+            self.storage.find_pool_attributes(
+                self.admin_context, sort_dir='invalid_sort_dir')
+
+    def test_paging_sort_key_invalid(self):
+        with testtools.ExpectedException(exceptions.InvalidSortKey):
+            self.storage.find_pool_attributes(
+                self.admin_context, sort_key='invalid_sort_key')
+
+    # Quota Tests
+    def test_create_quota(self):
+        values = self.get_quota_fixture()
+        values['tenant_id'] = self.admin_context.project_id
+
+        result = self.storage.create_quota(self.admin_context, values)
+
+        self.assertIsNotNone(result['id'])
+        self.assertIsNotNone(result['created_at'])
+        self.assertIsNone(result['updated_at'])
+
+        self.assertEqual(self.admin_context.project_id, result['tenant_id'])
+        self.assertEqual(values['resource'], result['resource'])
+        self.assertEqual(values['hard_limit'], result['hard_limit'])
+
+    def test_create_quota_duplicate(self):
+        # Create the initial quota
+        self.create_quota()
+
+        with testtools.ExpectedException(exceptions.DuplicateQuota):
+            self.create_quota()
+
+    def test_find_quotas(self):
+        actual = self.storage.find_quotas(self.admin_context)
+        self.assertEqual(0, len(actual))
+
+        # Create a single quota
+        quota_one = self.create_quota()
+
+        actual = self.storage.find_quotas(self.admin_context)
+        self.assertEqual(1, len(actual))
+
+        self.assertEqual(quota_one['tenant_id'], actual[0]['tenant_id'])
+        self.assertEqual(quota_one['resource'], actual[0]['resource'])
+        self.assertEqual(quota_one['hard_limit'], actual[0]['hard_limit'])
+
+        # Create a second quota
+        quota_two = self.create_quota(fixture=1)
+
+        actual = self.storage.find_quotas(self.admin_context)
+        self.assertEqual(2, len(actual))
+
+        self.assertEqual(quota_two['tenant_id'], actual[1]['tenant_id'])
+        self.assertEqual(quota_two['resource'], actual[1]['resource'])
+        self.assertEqual(quota_two['hard_limit'], actual[1]['hard_limit'])
+
+    def test_find_quotas_criterion(self):
+        quota_one = self.create_quota()
+        quota_two = self.create_quota(fixture=1)
+
+        criterion = dict(
+            tenant_id=quota_one['tenant_id'],
+            resource=quota_one['resource']
+        )
+
+        results = self.storage.find_quotas(self.admin_context, criterion)
+
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(quota_one['tenant_id'], results[0]['tenant_id'])
+        self.assertEqual(quota_one['resource'], results[0]['resource'])
+        self.assertEqual(quota_one['hard_limit'], results[0]['hard_limit'])
+
+        criterion = dict(
+            tenant_id=quota_two['tenant_id'],
+            resource=quota_two['resource']
+        )
+
+        results = self.storage.find_quotas(self.admin_context, criterion)
+
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(quota_two['tenant_id'], results[0]['tenant_id'])
+        self.assertEqual(quota_two['resource'], results[0]['resource'])
+        self.assertEqual(quota_two['hard_limit'], results[0]['hard_limit'])
+
+    def test_get_quota(self):
+        # Create a quota
+        expected = self.create_quota()
+        actual = self.storage.get_quota(self.admin_context, expected['id'])
+
+        self.assertEqual(expected['tenant_id'], actual['tenant_id'])
+        self.assertEqual(expected['resource'], actual['resource'])
+        self.assertEqual(expected['hard_limit'], actual['hard_limit'])
+
+    def test_get_quota_missing(self):
+        with testtools.ExpectedException(exceptions.QuotaNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.get_quota(self.admin_context, uuid)
+
+    def test_find_quota_criterion(self):
+        quota_one = self.create_quota()
+        quota_two = self.create_quota(fixture=1)
+
+        criterion = dict(
+            tenant_id=quota_one['tenant_id'],
+            resource=quota_one['resource']
+        )
+
+        result = self.storage.find_quota(self.admin_context, criterion)
+
+        self.assertEqual(quota_one['tenant_id'], result['tenant_id'])
+        self.assertEqual(quota_one['resource'], result['resource'])
+        self.assertEqual(quota_one['hard_limit'], result['hard_limit'])
+
+        criterion = dict(
+            tenant_id=quota_two['tenant_id'],
+            resource=quota_two['resource']
+        )
+
+        result = self.storage.find_quota(self.admin_context, criterion)
+
+        self.assertEqual(quota_two['tenant_id'], result['tenant_id'])
+        self.assertEqual(quota_two['resource'], result['resource'])
+        self.assertEqual(quota_two['hard_limit'], result['hard_limit'])
+
+    def test_find_quota_criterion_missing(self):
+        expected = self.create_quota()
+
+        criterion = dict(
+            tenant_id=expected['tenant_id'] + "NOT FOUND"
+        )
+
+        with testtools.ExpectedException(exceptions.QuotaNotFound):
+            self.storage.find_quota(self.admin_context, criterion)
+
+    def test_update_quota(self):
+        # Create a quota
+        quota = self.create_quota(fixture=1)
+
+        # Update the Object
+        quota.hard_limit = 5000
+
+        # Perform the update
+        quota = self.storage.update_quota(self.admin_context, quota)
+
+        # Ensure the new value took
+        self.assertEqual(5000, quota.hard_limit)
+
+        # Ensure the version column was incremented
+        self.assertEqual(2, quota.version)
+
+    def test_update_quota_duplicate(self):
+        # Create two quotas
+        quota_one = self.create_quota(fixture=0)
+        quota_two = self.create_quota(fixture=1)
+
+        # Update the Q2 object to be a duplicate of Q1
+        quota_two.resource = quota_one.resource
+
+        with testtools.ExpectedException(exceptions.DuplicateQuota):
+            self.storage.update_quota(self.admin_context, quota_two)
+
+    def test_update_quota_missing(self):
+        quota = objects.Quota(id='caf771fc-6b05-4891-bee1-c2a48621f57b')
+
+        with testtools.ExpectedException(exceptions.QuotaNotFound):
+            self.storage.update_quota(self.admin_context, quota)
+
+    def test_delete_quota(self):
+        quota = self.create_quota()
+
+        self.storage.delete_quota(self.admin_context, quota['id'])
+
+        with testtools.ExpectedException(exceptions.QuotaNotFound):
+            self.storage.get_quota(self.admin_context, quota['id'])
+
+    def test_delete_quota_missing(self):
+        with testtools.ExpectedException(exceptions.QuotaNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.delete_quota(self.admin_context, uuid)
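One pattern from the quota tests above is worth spelling out before the TSIG section: uniqueness is enforced at the storage layer, so a conflicting insert surfaces as a typed Duplicate* exception rather than a raw database error. A hedged usage sketch, where `engine`, `context`, and `values` stand in for the fixtures these tests use:

from designate import exceptions

try:
    quota = engine.create_quota(context, values)
except exceptions.DuplicateQuota:
    # A row for this tenant/resource pair already exists, so fall
    # back to looking it up instead of inserting.
    quota = engine.find_quota(context, {
        'tenant_id': values['tenant_id'],
        'resource': values['resource'],
    })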
+
+    # TSIG Key Tests
+    def test_create_tsigkey(self):
+        values = self.get_tsigkey_fixture()
+
+        result = self.storage.create_tsigkey(
+            self.admin_context, tsigkey=objects.TsigKey.from_dict(values))
+
+        self.assertIsNotNone(result['id'])
+        self.assertIsNotNone(result['created_at'])
+        self.assertIsNone(result['updated_at'])
+
+        self.assertEqual(values['name'], result['name'])
+        self.assertEqual(values['algorithm'], result['algorithm'])
+        self.assertEqual(values['secret'], result['secret'])
+        self.assertEqual(values['scope'], result['scope'])
+
+    def test_create_tsigkey_duplicate(self):
+        # Create the Initial TsigKey
+        tsigkey_one = self.create_tsigkey()
+
+        values = self.get_tsigkey_fixture(1)
+        values['name'] = tsigkey_one['name']
+
+        exc = self.assertRaises(rpc_dispatcher.ExpectedException,
+                                self.create_tsigkey,
+                                **values)
+
+        self.assertEqual(exceptions.DuplicateTsigKey, exc.exc_info[0])
+
+    def test_find_tsigkeys(self):
+        actual = self.storage.find_tsigkeys(self.admin_context)
+        self.assertEqual(0, len(actual))
+
+        # Create a single tsigkey
+        tsig = self.create_tsigkey()
+
+        actual = self.storage.find_tsigkeys(self.admin_context)
+        self.assertEqual(1, len(actual))
+
+        self.assertEqual(tsig['name'], actual[0]['name'])
+        self.assertEqual(tsig['algorithm'], actual[0]['algorithm'])
+        self.assertEqual(tsig['secret'], actual[0]['secret'])
+        self.assertEqual(tsig['scope'], actual[0]['scope'])
+
+    def test_find_tsigkey(self):
+        # Create a single tsigkey
+        tsig = self.create_tsigkey()
+
+        actual = self.storage.find_tsigkeys(self.admin_context)
+        self.assertEqual(1, len(actual))
+        name = actual[0].name
+
+        actual = self.storage.find_tsigkey(self.admin_context,
+                                           {'name': name})
+        self.assertEqual(tsig['name'], actual['name'])
+        self.assertEqual(tsig['algorithm'], actual['algorithm'])
+        self.assertEqual(tsig['secret'], actual['secret'])
+        self.assertEqual(tsig['scope'], actual['scope'])
+
+    def test_find_tsigkeys_paging(self):
+        # Create 10 TSIG Keys
+        created = [self.create_tsigkey(name='tsig-%s' % i)
+                   for i in range(10)]
+
+        # Ensure we can page through the results.
+        self._ensure_paging(created, self.storage.find_tsigkeys)
+
+    def test_find_tsigkeys_criterion(self):
+        tsigkey_one = self.create_tsigkey(fixture=0)
+        tsigkey_two = self.create_tsigkey(fixture=1)
+
+        criterion = dict(
+            name=tsigkey_one['name']
+        )
+
+        results = self.storage.find_tsigkeys(self.admin_context, criterion)
+
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(tsigkey_one['name'], results[0]['name'])
+
+        criterion = dict(
+            name=tsigkey_two['name']
+        )
+
+        results = self.storage.find_tsigkeys(self.admin_context, criterion)
+
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(tsigkey_two['name'], results[0]['name'])
+
+    def test_get_tsigkey(self):
+        # Create a tsigkey
+        expected = self.create_tsigkey()
+
+        actual = self.storage.get_tsigkey(self.admin_context, expected['id'])
+
+        self.assertEqual(expected['name'], actual['name'])
+        self.assertEqual(expected['algorithm'], actual['algorithm'])
+        self.assertEqual(expected['secret'], actual['secret'])
+        self.assertEqual(expected['scope'], actual['scope'])
+
+    def test_get_tsigkey_missing(self):
+        with testtools.ExpectedException(exceptions.TsigKeyNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.get_tsigkey(self.admin_context, uuid)
+
+    def test_update_tsigkey(self):
+        # Create a tsigkey
+        tsigkey = self.create_tsigkey(name='test-key')
+
+        # Update the Object
+        tsigkey.name = 'test-key-updated'
+
+        # Perform the update
+        tsigkey = self.storage.update_tsigkey(self.admin_context, tsigkey)
+
+        # Ensure the new value took
+        self.assertEqual('test-key-updated', tsigkey.name)
+
+        # Ensure the version column was incremented
+        self.assertEqual(2, tsigkey.version)
+
+    def test_update_tsigkey_duplicate(self):
+        # Create two tsigkeys
+        tsigkey_one = self.create_tsigkey(fixture=0)
+        tsigkey_two = self.create_tsigkey(fixture=1)
+
+        # Update the T2 object to be a duplicate of T1
+        tsigkey_two.name = tsigkey_one.name
+
+        with testtools.ExpectedException(exceptions.DuplicateTsigKey):
+            self.storage.update_tsigkey(self.admin_context, tsigkey_two)
+
+    def test_update_tsigkey_missing(self):
+        tsigkey = objects.TsigKey(id='caf771fc-6b05-4891-bee1-c2a48621f57b')
+
+        with testtools.ExpectedException(exceptions.TsigKeyNotFound):
+            self.storage.update_tsigkey(self.admin_context, tsigkey)
+
+    def test_delete_tsigkey(self):
+        tsigkey = self.create_tsigkey()
+
+        self.storage.delete_tsigkey(self.admin_context, tsigkey['id'])
+
+        with testtools.ExpectedException(exceptions.TsigKeyNotFound):
+            self.storage.get_tsigkey(self.admin_context, tsigkey['id'])
+
+    def test_delete_tsigkey_missing(self):
+        with testtools.ExpectedException(exceptions.TsigKeyNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.delete_tsigkey(self.admin_context, uuid)
+
+    # Tenant Tests
+    def test_find_tenants(self):
+        context = self.get_admin_context()
+        one_context = context
+        one_context.project_id = 'One'
+        two_context = context
+        two_context.project_id = 'Two'
+        context.all_tenants = True
+
+        # create 3 zones in 2 tenants
+        self.create_zone(fixture=0, context=one_context, tenant_id='One')
+        zone = self.create_zone(fixture=1, context=one_context,
+                                tenant_id='One')
+        self.create_zone(fixture=2, context=two_context, tenant_id='Two')
+
+        # Delete one of the zones.
+        self.storage.delete_zone(context, zone['id'])
+
+        # Ensure we get accurate results
+        result = self.storage.find_tenants(context)
+        result_dict = [dict(t) for t in result]
+
+        expected = [{
+            'id': 'One',
+            'zone_count': 1,
+        }, {
+            'id': 'Two',
+            'zone_count': 1,
+        }]
+
+        self.assertEqual(expected, result_dict)
+
+    def test_get_tenant(self):
+        context = self.get_admin_context()
+        one_context = context
+        one_context.project_id = 1
+        context.all_tenants = True
+
+        # create 3 zones in a tenant
+        zone_1 = self.create_zone(fixture=0, context=one_context)
+        zone_2 = self.create_zone(fixture=1, context=one_context)
+        zone_3 = self.create_zone(fixture=2, context=one_context)
+
+        # Delete one of the zones.
+        self.storage.delete_zone(context, zone_3['id'])
+
+        result = self.storage.get_tenant(context, 1)
+
+        self.assertEqual(1, result['id'])
+        self.assertEqual(2, result['zone_count'])
+        self.assertEqual([zone_1['name'], zone_2['name']],
+                         sorted(result['zones']))
+
+    def test_count_tenants(self):
+        context = self.get_admin_context()
+        one_context = context
+        one_context.project_id = 1
+        two_context = context
+        two_context.project_id = 2
+        context.all_tenants = True
+
+        # in the beginning, there should be nothing
+        tenants = self.storage.count_tenants(context)
+        self.assertEqual(0, tenants)
+
+        # create 3 zones with 2 tenants
+        self.create_zone(fixture=0, context=one_context, tenant_id=1)
+        self.create_zone(fixture=1, context=two_context, tenant_id=2)
+        zone = self.create_zone(fixture=2,
+                                context=two_context, tenant_id=2)
+
+        # Delete one of the zones.
+        self.storage.delete_zone(context, zone['id'])
+
+        tenants = self.storage.count_tenants(context)
+        self.assertEqual(2, tenants)
+
+    def test_count_tenants_none_result(self):
+        rp = mock.Mock()
+        rp.fetchone.return_value = None
+        with mock.patch('designate.storage.sql.get_write_session',
+                        return_value=rp):
+            tenants = self.storage.count_tenants(self.admin_context)
+            self.assertEqual(0, tenants)
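The `*_none_result` tests above pin down a defensive detail: the raw COUNT query can yield no row at all, and the storage layer must report zero instead of crashing. A hypothetical sketch of the guard these tests imply; this is not the patch's actual implementation:

def _result_to_count(resultproxy):
    # fetchone() returns None for an empty result set; treat that
    # as a count of zero rather than indexing into None.
    row = resultproxy.fetchone()
    if row is None:
        return 0
    return row[0]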
+
+    # Zone Tests
+    def test_create_zone(self):
+        pool_id = cfg.CONF['service:central'].default_pool_id
+        values = {
+            'tenant_id': self.admin_context.project_id,
+            'name': 'example.net.',
+            'email': 'example@example.net',
+            'pool_id': pool_id
+        }
+
+        result = self.storage.create_zone(
+            self.admin_context, zone=objects.Zone.from_dict(values))
+
+        self.assertIsNotNone(result['id'])
+        self.assertIsNotNone(result['created_at'])
+        self.assertIsNone(result['updated_at'])
+
+        self.assertEqual(self.admin_context.project_id, result['tenant_id'])
+        self.assertEqual(values['name'], result['name'])
+        self.assertEqual(values['email'], result['email'])
+        self.assertEqual(pool_id, result['pool_id'])
+        self.assertIn('status', result)
+
+    def test_create_zone_duplicate(self):
+        # Create the Initial Zone
+        self.create_zone()
+
+        exc = self.assertRaises(rpc_dispatcher.ExpectedException,
+                                self.create_zone)
+
+        self.assertEqual(exceptions.DuplicateZone, exc.exc_info[0])
+
+    def test_find_zones(self):
+        self.config(quota_zones=20)
+
+        actual = self.storage.find_zones(self.admin_context)
+        self.assertEqual(0, len(actual))
+
+        # Create a single zone
+        zone = self.create_zone()
+
+        actual = self.storage.find_zones(self.admin_context)
+        self.assertEqual(1, len(actual))
+
+        self.assertEqual(zone['name'], actual[0]['name'])
+        self.assertEqual(zone['email'], actual[0]['email'])
+
+    def test_find_zones_paging(self):
+        # Create 10 zones
+        created = [self.create_zone(name='example-%d.org.' % i)
+                   for i in range(10)]
+
+        # Ensure we can page through the results.
+        self._ensure_paging(created, self.storage.find_zones)
+
+    def test_find_zones_criterion(self):
+        zone_one = self.create_zone()
+        zone_two = self.create_zone(fixture=1)
+
+        criterion = dict(
+            name=zone_one['name']
+        )
+
+        results = self.storage.find_zones(self.admin_context, criterion)
+
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(zone_one['name'], results[0]['name'])
+        self.assertEqual(zone_one['email'], results[0]['email'])
+        self.assertIn('status', zone_one)
+
+        criterion = dict(
+            name=zone_two['name']
+        )
+
+        results = self.storage.find_zones(self.admin_context, criterion)
+
+        self.assertEqual(len(results), 1)
+
+        self.assertEqual(zone_two['name'], results[0]['name'])
+        self.assertEqual(zone_two['email'], results[0]['email'])
+        self.assertIn('status', zone_two)
+
+    def test_find_zones_all_tenants(self):
+        # Create two contexts with different tenant_id's
+        one_context = self.get_admin_context()
+        one_context.project_id = 1
+        two_context = self.get_admin_context()
+        two_context.project_id = 2
+
+        # Create normal and all_tenants context objects
+        nm_context = self.get_admin_context()
+        at_context = self.get_admin_context()
+        at_context.all_tenants = True
+
+        # Create two zones in different tenants
+        self.create_zone(fixture=0, context=one_context)
+        self.create_zone(fixture=1, context=two_context)
+
+        # Ensure the all_tenants context sees two zones
+        results = self.storage.find_zones(at_context)
+        self.assertEqual(2, len(results))
+
+        # Ensure the normal context sees no zones
+        results = self.storage.find_zones(nm_context)
+        self.assertEqual(0, len(results))
+
+        # Ensure the tenant 1 context sees 1 zone
+        results = self.storage.find_zones(one_context)
+        self.assertEqual(1, len(results))
+
+        # Ensure the tenant 2 context sees 1 zone
+        results = self.storage.find_zones(two_context)
+        self.assertEqual(1, len(results))
+
+    def test_get_zone(self):
+        # Create a zone
+        expected = self.create_zone()
+        actual = self.storage.get_zone(self.admin_context, expected['id'])
+
+        self.assertEqual(expected['name'], actual['name'])
+        self.assertEqual(expected['email'], actual['email'])
+        self.assertIn('status', actual)
+
+    def test_get_zone_missing(self):
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.get_zone(self.admin_context, uuid)
+
+    def test_get_deleted_zone(self):
+        context = self.get_admin_context()
+        context.show_deleted = True
+
+        zone = self.create_zone(context=context)
+
+        self.storage.delete_zone(context, zone['id'])
+        self.storage.get_zone(context, zone['id'])
+
+    def test_find_zone_criterion(self):
+        zone_one = self.create_zone()
+        zone_two = self.create_zone(fixture=1)
+
+        criterion = dict(
+            name=zone_one['name']
+        )
+
+        result = self.storage.find_zone(self.admin_context, criterion)
+
+        self.assertEqual(zone_one['name'], result['name'])
+        self.assertEqual(zone_one['email'], result['email'])
+        self.assertIn('status', zone_one)
+
+        criterion = dict(
+            name=zone_two['name']
+        )
+
+        result = self.storage.find_zone(self.admin_context, criterion)
+
+        self.assertEqual(zone_two['name'], result['name'])
+        self.assertEqual(zone_two['email'], result['email'])
+        self.assertIn('status', zone_one)
+        self.assertIn('status', zone_two)
+
+    def test_find_zone_criterion_missing(self):
+        expected = self.create_zone()
+
+        criterion = dict(
+            name=expected['name'] + "NOT FOUND"
+        )
+
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            self.storage.find_zone(self.admin_context, criterion)
+
+    def test_find_zone_criterion_lessthan(self):
+        zone = self.create_zone()
+
+        # Test Finding No Results (serial is not < serial)
+        criterion = dict(
+            name=zone['name'],
+            serial='<%s' % zone['serial'],
+        )
+
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            self.storage.find_zone(self.admin_context, criterion)
+
+        # Test Finding 1 Result (serial is < serial + 1)
+        criterion = dict(
+            name=zone['name'],
+            serial='<%s' % (zone['serial'] + 1),
+        )
+
+        result = self.storage.find_zone(self.admin_context, criterion)
+
+        self.assertEqual(zone['name'], result['name'])
+
+    def test_find_zone_criterion_greaterthan(self):
+        zone = self.create_zone()
+
+        # Test Finding No Results (serial is not > serial)
+        criterion = dict(
+            name=zone['name'],
+            serial='>%s' % zone['serial'],
+        )
+
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            self.storage.find_zone(self.admin_context, criterion)
+
+        # Test Finding 1 Result (serial is > serial - 1)
+        criterion = dict(
+            name=zone['name'],
+            serial='>%s' % (zone['serial'] - 1),
+        )
+
+        result = self.storage.find_zone(self.admin_context, criterion)
+
+        self.assertEqual(zone['name'], result['name'])
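The two serial tests above document a small criterion convention: a value may carry a comparison operator as a string prefix, which the storage layer parses and applies to the column ('<' and '>' are the forms these tests exercise). Illustrative usage with invented values, reusing the `engine`/`context` names from the earlier sketch:

# Match the zone only if its serial is strictly greater than 1000.
criterion = {
    'name': 'example.com.',
    'serial': '>1000',
}
zone = engine.find_zone(context, criterion)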
+
+    def test_update_zone(self):
+        # Create a zone
+        zone = self.create_zone(name='example.org.')
+
+        # Update the Object
+        zone.name = 'example.net.'
+
+        # Perform the update
+        zone = self.storage.update_zone(self.admin_context, zone)
+
+        # Ensure the new value took
+        self.assertEqual('example.net.', zone.name)
+
+        # Ensure the version column was incremented
+        self.assertEqual(2, zone.version)
+
+    def test_update_zone_duplicate(self):
+        # Create two zones
+        zone_one = self.create_zone(fixture=0)
+        zone_two = self.create_zone(fixture=1)
+
+        # Update the D2 object to be a duplicate of D1
+        zone_two.name = zone_one.name
+
+        with testtools.ExpectedException(exceptions.DuplicateZone):
+            self.storage.update_zone(self.admin_context, zone_two)
+
+    def test_update_zone_missing(self):
+        zone = objects.Zone(id='caf771fc-6b05-4891-bee1-c2a48621f57b')
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            self.storage.update_zone(self.admin_context, zone)
+
+    def test_delete_zone(self):
+        zone = self.create_zone()
+
+        self.storage.delete_zone(self.admin_context, zone['id'])
+
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            self.storage.get_zone(self.admin_context, zone['id'])
+
+    def test_delete_zone_missing(self):
+        with testtools.ExpectedException(exceptions.ZoneNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.delete_zone(self.admin_context, uuid)
+
+    def test_count_zones(self):
+        # in the beginning, there should be nothing
+        zones = self.storage.count_zones(self.admin_context)
+        self.assertEqual(0, zones)
+
+        # Create a single zone
+        self.create_zone()
+
+        # count 'em up
+        zones = self.storage.count_zones(self.admin_context)
+
+        # well, did we get 1?
+        self.assertEqual(1, zones)
+
+    def test_count_zones_none_result(self):
+        rp = mock.Mock()
+        rp.fetchone.return_value = None
+
+        with mock.patch('designate.storage.sql.get_write_session',
+                        return_value=rp):
+            zones = self.storage.count_zones(self.admin_context)
+            self.assertEqual(0, zones)
+
+    def test_create_recordset(self):
+        zone = self.create_zone()
+
+        values = {
+            'name': 'www.%s' % zone['name'],
+            'type': 'A'
+        }
+
+        result = self.storage.create_recordset(
+            self.admin_context,
+            zone['id'],
+            recordset=objects.RecordSet.from_dict(values))
+
+        self.assertIsNotNone(result['id'])
+        self.assertIsNotNone(result['created_at'])
+        self.assertIsNone(result['updated_at'])
+
+        self.assertEqual(values['name'], result['name'])
+        self.assertEqual(values['type'], result['type'])
+
+    def test_create_recordset_duplicate(self):
+        zone = self.create_zone()
+
+        # Create the First RecordSet
+        self.create_recordset(zone)
+
+        exc = self.assertRaises(rpc_dispatcher.ExpectedException,
+                                self.create_recordset,
+                                zone)
+
+        self.assertEqual(exceptions.DuplicateRecordSet, exc.exc_info[0])
+
+    def test_create_recordset_with_records(self):
+        zone = self.create_zone()
+
+        recordset = objects.RecordSet(
+            name='www.%s' % zone['name'],
+            type='A',
+            records=objects.RecordList(objects=[
+                objects.Record(data='192.0.2.1'),
+                objects.Record(data='192.0.2.2'),
+            ])
+        )
+
+        recordset = self.storage.create_recordset(
+            self.admin_context, zone['id'], recordset)
+
+        # Ensure recordset.records is a RecordList instance
+        self.assertIsInstance(recordset.records, objects.RecordList)
+
+        # Ensure two Records are attached to the RecordSet correctly
+        self.assertEqual(2, len(recordset.records))
+        self.assertIsInstance(recordset.records[0], objects.Record)
+        self.assertIsInstance(recordset.records[1], objects.Record)
+
+        # Ensure the Records have been saved by checking they have an ID
+        self.assertIsNotNone(recordset.records[0].id)
+        self.assertIsNotNone(recordset.records[1].id)
+
+    def test_find_recordsets_axfr(self):
+        zone = self.create_zone()
+        self.create_recordset(zone)
+
+        result = self.storage.find_recordsets_axfr(
+            self.admin_context, {'zone_id': zone['id']}
+        )
+        self.assertEqual(3, len(result))
+
+    def test_find_recordsets(self):
+        zone = self.create_zone()
+
+        criterion = {'zone_id': zone['id']}
+
+        actual = self.storage.find_recordsets(self.admin_context, criterion)
+        self.assertEqual(2, len(actual))
+
+        # Create a single recordset
+        recordset_one = self.create_recordset(zone)
+
+        actual = self.storage.find_recordsets(self.admin_context, criterion)
+        self.assertEqual(3, len(actual))
+
+        self.assertEqual(recordset_one['name'], actual[2]['name'])
+        self.assertEqual(recordset_one['type'], actual[2]['type'])
+
+    def test_find_recordsets_paging(self):
+        zone = self.create_zone(name='example.org.')
+
+        # Create 10 RecordSets
+        created = [self.create_recordset(zone, name='r-%d.example.org.' % i)
+                   for i in range(10)]
+
+        # Add in the SOA and NS recordsets that are automatically created
+        soa = self.storage.find_recordset(self.admin_context,
+                                          criterion={'zone_id': zone['id'],
+                                                     'type': "SOA"})
+        ns = self.storage.find_recordset(self.admin_context,
+                                         criterion={'zone_id': zone['id'],
+                                                    'type': "NS"})
+        created.insert(0, ns)
+        created.insert(0, soa)
+
+        # Ensure we can page through the results.
+        self._ensure_paging(created, self.storage.find_recordsets)
+
+    def test_find_recordsets_criterion(self):
+        zone = self.create_zone()
+
+        recordset_one = self.create_recordset(zone, type='A', fixture=0)
+        self.create_recordset(zone, fixture=1)
+
+        criterion = dict(
+            zone_id=zone['id'],
+            name=recordset_one['name'],
+        )
+
+        results = self.storage.find_recordsets(self.admin_context,
+                                               criterion)
+
+        self.assertEqual(1, len(results))
+
+        criterion = dict(
+            zone_id=zone['id'],
+            type='A',
+        )
+
+        results = self.storage.find_recordsets(self.admin_context,
+                                               criterion)
+
+        self.assertEqual(2, len(results))
+
+    def test_find_recordsets_criterion_wildcard(self):
+        zone = self.create_zone()
+
+        values = {'name': 'one.%s' % zone['name']}
+
+        self.create_recordset(zone, **values)
+
+        criterion = dict(
+            zone_id=zone['id'],
+            name="%%%(name)s" % {"name": zone['name']},
+        )
+
+        results = self.storage.find_recordsets(self.admin_context, criterion)
+
+        # Should be 3, as SOA and NS recordsets are automatically created
+        self.assertEqual(3, len(results))
+
+    def test_find_recordsets_with_records(self):
+        zone = self.create_zone()
+
+        records = [
+            objects.Record.from_dict({"data": "10.0.0.1"}),
+            objects.Record.from_dict({"data": "10.0.0.2"}),
+            objects.Record.from_dict({"data": "10.0.0.3"})
+        ]
+
+        recordset = self.create_recordset(zone, records=records)
+
+        criterion = dict(
+            id=recordset.id,
+        )
+
+        # Find the RecordSet
+        results = self.storage.find_recordsets(self.admin_context, criterion)
+
+        # Ensure we only have one result
+        self.assertEqual(1, len(results))
+
+        recordset = results[0]
+
+        # Ensure recordset.records is a RecordList instance
+        self.assertIsInstance(recordset.records, objects.RecordList)
+
+        # Ensure two Records are attached to the RecordSet correctly
+        self.assertEqual(3, len(recordset.records))
+
+        records = []
+        for record in recordset.records:
+            self.assertIsInstance(record, objects.Record)
+            self.assertNotIn(record, records)
+            records.append(record)
+
+    def test_find_recordset_criterion(self):
+        zone = self.create_zone()
+        expected = self.create_recordset(zone)
+
+        criterion = dict(
+            zone_id=zone['id'],
+            name=expected['name'],
+        )
+
+        actual = self.storage.find_recordset(self.admin_context, criterion)
+
+        self.assertEqual(expected['name'], actual['name'])
+        self.assertEqual(expected['type'], actual['type'])
+
+    def test_find_recordset_criterion_missing(self):
+        zone = self.create_zone()
+        expected = self.create_recordset(zone)
+
+        criterion = dict(
+            name=expected['name'] + "NOT FOUND"
+        )
+
+        with testtools.ExpectedException(exceptions.RecordSetNotFound):
+            self.storage.find_recordset(self.admin_context, criterion)
+
+    def test_find_recordset_criterion_with_records(self):
+        zone = self.create_zone()
+
+        records = [
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=0)),
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=1))
+        ]
+        recordset = self.create_recordset(zone, records=records)
+
+        criterion = dict(
+            id=recordset.id,
+        )
+
+        # Fetch the RecordSet again
+        recordset = self.storage.find_recordset(self.admin_context, criterion)
+
+        # Ensure recordset.records is a RecordList instance
+        self.assertIsInstance(recordset.records, objects.RecordList)
+
+        # Ensure two Records are attached to the RecordSet correctly
+        self.assertEqual(2, len(recordset.records))
+        self.assertIsInstance(recordset.records[0], objects.Record)
+        self.assertIsInstance(recordset.records[1], objects.Record)
+
+    def test_update_recordset(self):
+        zone = self.create_zone()
+
+        # Create a recordset
+        recordset = self.create_recordset(zone)
+
+        # Update the Object
+        recordset.ttl = 1800
+
+        # Change records as well
+        recordset.records.append(objects.Record(data="10.0.0.1"))
+
+        # Perform the update
+        recordset = self.storage.update_recordset(self.admin_context,
+                                                  recordset)
+
+        # Ensure the new value took
+        self.assertEqual(1800, recordset.ttl)
+
+        # Ensure the version column was incremented
+        self.assertEqual(2, recordset.version)
+
+    def test_update_recordset_duplicate(self):
+        zone = self.create_zone()
+
+        # Create two recordsets
+        recordset_one = self.create_recordset(zone, type='A')
+        recordset_two = self.create_recordset(zone, type='A', fixture=1)
+
+        # Update the R2 object to be a duplicate of R1
+        recordset_two.name = recordset_one.name
+
+        with testtools.ExpectedException(exceptions.DuplicateRecordSet):
+            self.storage.update_recordset(self.admin_context, recordset_two)
+
+    def test_update_recordset_missing(self):
+        recordset = objects.RecordSet(
+            id='caf771fc-6b05-4891-bee1-c2a48621f57b')
+
+        with testtools.ExpectedException(exceptions.RecordSetNotFound):
+            self.storage.update_recordset(self.admin_context, recordset)
+
+    def test_update_recordset_with_record_create(self):
+        zone = self.create_zone()
+
+        # Create a RecordSet
+        recordset = self.create_recordset(zone, 'A', records=[])
+
+        # Append two new Records
+        recordset.records.append(objects.Record(data='192.0.2.1'))
+        recordset.records.append(objects.Record(data='192.0.2.2'))
+
+        # Perform the update
+        self.storage.update_recordset(self.admin_context, recordset)
+
+        # Fetch the RecordSet again
+        recordset = self.storage.find_recordset(self.admin_context,
+                                                {'id': recordset.id})
+
+        # Ensure two Records are attached to the RecordSet correctly
+        self.assertEqual(2, len(recordset.records))
+        self.assertIsInstance(recordset.records[0], objects.Record)
+        self.assertIsInstance(recordset.records[1], objects.Record)
+
+        # Ensure the Records have been saved by checking they have an ID
+        self.assertIsNotNone(recordset.records[0].id)
+        self.assertIsNotNone(recordset.records[1].id)
+
+    def test_update_recordset_with_record_delete(self):
+        zone = self.create_zone()
+
+        # Create a RecordSet and two Records
+        records = [
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=0)),
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=1))
+        ]
+        recordset = self.create_recordset(zone, records=records)
+
+        # Fetch the RecordSet again
+        recordset = self.storage.find_recordset(self.admin_context,
+                                                {'id': recordset.id})
+
+        # Remove one of the Records
+        recordset.records.pop(0)
+
+        # Ensure only one Record is attached to the RecordSet
+        self.assertEqual(1, len(recordset.records))
+
+        # Perform the update
+        self.storage.update_recordset(self.admin_context, recordset)
+
+        # Fetch the RecordSet again
+        recordset = self.storage.find_recordset(self.admin_context,
+                                                {'id': recordset.id})
+
+        # Ensure only one Record is attached to the RecordSet
+        self.assertEqual(1, len(recordset.records))
+        self.assertIsInstance(recordset.records[0], objects.Record)
+
+    def test_update_recordset_with_record_update(self):
+        zone = self.create_zone()
+
+        # Create a RecordSet and two Records
+        records = [
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=0)),
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=1))
+        ]
+        recordset = self.create_recordset(zone, records=records)
+
+        # Fetch the RecordSet again
+        recordset = self.storage.find_recordset(self.admin_context,
+                                                {'id': recordset.id})
+
+        # Update one of the Records
+        updated_record_id = recordset.records[0].id
+        recordset.records[0].data = '192.0.2.255'
+
+        # Perform the update
+        self.storage.update_recordset(self.admin_context, recordset)
+
+        # Fetch the RecordSet again
+        recordset = self.storage.find_recordset(self.admin_context,
+                                                {'id': recordset.id})
+
+        # Ensure the Record has been updated
+        for record in recordset.records:
+            if record.id != updated_record_id:
+                continue
+
+            self.assertEqual('192.0.2.255', record.data)
+            return  # Exits this test early as we succeeded
+
+        raise Exception('Updated record not found')
+
+    def test_delete_recordset(self):
+        zone = self.create_zone()
+
+        # Create a recordset
+        recordset = self.create_recordset(zone)
+
+        self.storage.delete_recordset(self.admin_context, recordset['id'])
+
+        with testtools.ExpectedException(exceptions.RecordSetNotFound):
+            self.storage.find_recordset(self.admin_context,
+                                        criterion={'id': recordset['id']})
+
+    def test_delete_recordset_missing(self):
+        with testtools.ExpectedException(exceptions.RecordSetNotFound):
+            uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
+            self.storage.delete_recordset(self.admin_context, uuid)
+
+    def test_count_recordsets(self):
+        # in the beginning, there should be nothing
+        recordsets = self.storage.count_recordsets(self.admin_context)
+        self.assertEqual(0, recordsets)
+
+        # Create a single zone & recordset
+        zone = self.create_zone()
+        self.create_recordset(zone)
+
+        # we should have 3 recordsets now, including SOA & NS
+        recordsets = self.storage.count_recordsets(self.admin_context)
+        self.assertEqual(3, recordsets)
+
+        # Delete the zone, we should be back to 0 recordsets
+        self.storage.delete_zone(self.admin_context, zone.id)
+        recordsets = self.storage.count_recordsets(self.admin_context)
+        self.assertEqual(0, recordsets)
+
+    def test_count_recordsets_none_result(self):
+        rp = mock.Mock()
+        rp.fetchone.return_value = None
+        with mock.patch('designate.storage.sql.get_write_session',
+                        return_value=rp):
+            recordsets = self.storage.count_recordsets(self.admin_context)
+            self.assertEqual(0, recordsets)
+
+    def test_find_records(self):
+        zone = self.create_zone()
+        recordset = self.create_recordset(zone, records=[])
+
+        criterion = {
+            'zone_id': zone['id'],
+            'recordset_id': recordset['id']
+        }
+
+        actual = self.storage.find_records(self.admin_context, criterion)
+        self.assertEqual(0, len(actual))
+
+        # Create a single record
+        records = [
+            objects.Record.from_dict(self.get_record_fixture('A', fixture=0)),
+        ]
+        recordset.records = records
+
+        self.central_service.update_recordset(self.admin_context, recordset)
+
+        recordset = self.central_service.get_recordset(
+            self.admin_context, zone['id'], recordset['id']
+        )
+        record = recordset.records[0]
+
+        actual = self.storage.find_records(self.admin_context, criterion)
+        self.assertEqual(1, len(actual))
+
+        self.assertEqual(record['data'], actual[0]['data'])
+
+    def test_find_records_paging(self):
+        zone = self.create_zone()
+
+        records = []
+        for i in range(10):
+            records.append(
+                objects.Record.from_dict(({'data': '192.0.2.%d' % i}))
+            )
+
+        self.create_recordset(zone, type='A', records=records)
+
+        # Add in the SOA and NS records that are automatically created
+        soa = self.storage.find_recordset(self.admin_context,
+                                          criterion={'zone_id': zone['id'],
+                                                     'type': "SOA"})
+        ns = self.storage.find_recordset(self.admin_context,
+                                         criterion={'zone_id': zone['id'],
+                                                    'type': "NS"})
+        for r in ns['records']:
+            records.insert(0, r)
+        records.insert(0, soa['records'][0])
+
+        # Ensure we can
page through the results. + self._ensure_paging(records, self.storage.find_records) + + def test_find_records_criterion(self): + zone = self.create_zone() + record_one = objects.Record.from_dict( + self.get_record_fixture('A', fixture=0) + ) + records = [ + record_one, + objects.Record.from_dict(self.get_record_fixture('A', fixture=1)) + ] + recordset = self.create_recordset(zone, records=records) + + criterion = dict( + data=record_one['data'], + zone_id=zone['id'], + recordset_id=recordset['id'], + ) + + results = self.storage.find_records(self.admin_context, criterion) + self.assertEqual(1, len(results)) + + criterion = dict( + zone_id=zone['id'], + recordset_id=recordset['id'], + ) + + results = self.storage.find_records(self.admin_context, criterion) + + self.assertEqual(2, len(results)) + + def test_find_records_criterion_wildcard(self): + zone = self.create_zone() + + records = [objects.Record.from_dict({'data': '127.0.0.1'})] + recordset = self.create_recordset(zone, type='A', records=records) + + criterion = dict( + zone_id=zone['id'], + recordset_id=recordset['id'], + data="%.0.0.1", + ) + + results = self.storage.find_records(self.admin_context, criterion) + + self.assertEqual(1, len(results)) + + def test_get_record(self): + zone = self.create_zone() + recordset = self.create_recordset(zone) + expected = recordset.records[0] + + actual = self.storage.get_record(self.admin_context, expected['id']) + + self.assertEqual(expected['data'], actual['data']) + self.assertIn('status', actual) + + def test_get_record_missing(self): + with testtools.ExpectedException(exceptions.RecordNotFound): + uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' + self.storage.get_record(self.admin_context, uuid) + + def test_find_record_criterion(self): + zone = self.create_zone() + recordset = self.create_recordset(zone) + expected = recordset.records[0] + + criterion = dict( + zone_id=zone['id'], + recordset_id=recordset['id'], + data=expected['data'], + ) + + actual = self.storage.find_record(self.admin_context, criterion) + + self.assertEqual(expected['data'], actual['data']) + self.assertIn('status', actual) + + def test_find_record_criterion_missing(self): + zone = self.create_zone() + recordset = self.create_recordset(zone) + expected = recordset.records[0] + + criterion = dict( + zone_id=zone['id'], + data=expected['data'] + "NOT FOUND", + ) + + with testtools.ExpectedException(exceptions.RecordNotFound): + self.storage.find_record(self.admin_context, criterion) + + def test_update_record(self): + zone = self.create_zone() + recordset = self.create_recordset(zone, type='A') + record = recordset.records[0] + + # Update the Object + record.data = '192.0.2.255' + + # Perform the update + record = self.storage.update_record(self.admin_context, record) + + # Ensure the new value took + self.assertEqual('192.0.2.255', record.data) + + # Ensure the version column was incremented + self.assertEqual(2, record.version) + + def test_update_record_duplicate(self): + zone = self.create_zone() + + record_one = objects.Record.from_dict( + self.get_record_fixture('A', fixture=0) + ) + record_two = objects.Record.from_dict( + self.get_record_fixture('A', fixture=1) + ) + + records = [ + record_one, + record_two + ] + + self.create_recordset(zone, records=records) + + # Update the R2 object to be a duplicate of R1 + record_two.data = record_one.data + + with testtools.ExpectedException(exceptions.DuplicateRecord): + self.storage.update_record(self.admin_context, record_two) + + def test_update_record_missing(self): + 
record = objects.Record(id='caf771fc-6b05-4891-bee1-c2a48621f57b') + + with testtools.ExpectedException(exceptions.RecordNotFound): + self.storage.update_record(self.admin_context, record) + + def test_delete_record(self): + zone = self.create_zone() + recordset = self.create_recordset(zone) + record = recordset.records[0] + + self.storage.delete_record(self.admin_context, record['id']) + + with testtools.ExpectedException(exceptions.RecordNotFound): + self.storage.get_record(self.admin_context, record['id']) + + def test_delete_record_missing(self): + with testtools.ExpectedException(exceptions.RecordNotFound): + uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b' + self.storage.delete_record(self.admin_context, uuid) + + def test_count_records(self): + # in the beginning, there should be nothing + records = self.storage.count_records(self.admin_context) + self.assertEqual(0, records) + + # Create a single zone & record + zone = self.create_zone() + self.create_recordset(zone) + + # we should have 3 records now, including NS and SOA + records = self.storage.count_records(self.admin_context) + self.assertEqual(3, records) + + # Delete the zone, we should be back to 0 records + self.storage.delete_zone(self.admin_context, zone.id) + records = self.storage.count_records(self.admin_context) + self.assertEqual(0, records) + + def test_count_records_none_result(self): + rp = mock.Mock() + rp.fetchone.return_value = None + with mock.patch('designate.storage.sql.get_write_session', + return_value=rp): + records = self.storage.count_records(self.admin_context) + self.assertEqual(0, records) + + # TLD Tests + def test_create_tld(self): + values = { + 'name': 'com', + 'description': 'This is a comment.' + } + + result = self.storage.create_tld( + self.admin_context, objects.Tld.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNone(result['updated_at']) + self.assertIsNotNone(result['version']) + self.assertEqual(values['name'], result['name']) + self.assertEqual(values['description'], result['description']) + + def test_create_tld_with_duplicate(self): + # Create the First Tld + self.create_tld(fixture=0) + + exc = self.assertRaises(rpc_dispatcher.ExpectedException, + self.create_tld, + fixture=0) + + self.assertEqual(exceptions.DuplicateTld, exc.exc_info[0]) + + def test_find_tlds(self): + + actual = self.storage.find_tlds(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a single Tld + tld = self.create_tld(fixture=0) + + actual = self.storage.find_tlds(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(tld['name'], actual[0]['name']) + self.assertEqual(tld['description'], actual[0]['description']) + + def test_find_tlds_paging(self): + # Create 10 Tlds + created = [self.create_tld(name='org%d' % i) + for i in range(10)] + + # Ensure we can page through the results. 
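+ # (A rough sketch of what the shared _ensure_paging helper is assumed
+ # to do, based on the marker/limit arguments the storage find_*
+ # methods accept:
+ #     seen, marker = [], None
+ #     while True:
+ #         page = method(self.admin_context, marker=marker, limit=2)
+ #         if not page:
+ #             break
+ #         seen.extend(page)
+ #         marker = page[-1]['id']
+ # after which 'seen' should line up with 'created'.)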
+ self._ensure_paging(created, self.storage.find_tlds) + + def test_find_tlds_with_criterion(self): + tld_one = self.create_tld(fixture=0) + tld_two = self.create_tld(fixture=1) + + criterion_one = dict(name=tld_one['name']) + + results = self.storage.find_tlds(self.admin_context, + criterion_one) + self.assertEqual(1, len(results)) + + self.assertEqual(tld_one['name'], results[0]['name']) + + criterion_two = dict(name=tld_two['name']) + + results = self.storage.find_tlds(self.admin_context, + criterion_two) + self.assertEqual(1, len(results)) + + self.assertEqual(tld_two['name'], results[0]['name']) + + def test_get_tld(self): + # Create a tld + expected = self.create_tld() + actual = self.storage.get_tld(self.admin_context, expected['id']) + + self.assertEqual(expected['name'], actual['name']) + + def test_get_tld_missing(self): + with testtools.ExpectedException(exceptions.TldNotFound): + uuid = '4c8e7f82-3519-4bf7-8940-a66a4480f223' + self.storage.get_tld(self.admin_context, uuid) + + def test_find_tld_criterion(self): + # Create two tlds + tld_one = self.create_tld(fixture=0) + tld_two = self.create_tld(fixture=1) + + criterion = dict(name=tld_one['name']) + + # Find tld_one using its name as criterion + result = self.storage.find_tld(self.admin_context, criterion) + + # Assert names match + self.assertEqual(tld_one['name'], result['name']) + + # Repeat with tld_two + criterion = dict(name=tld_two['name']) + + result = self.storage.find_tld(self.admin_context, criterion) + + self.assertEqual(tld_two['name'], result['name']) + + def test_find_tld_criterion_missing(self): + expected = self.create_tld() + + criterion = dict(name=expected['name'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.TldNotFound): + self.storage.find_tld(self.admin_context, criterion) + + def test_update_tld(self): + # Create a tld + tld = self.create_tld(name='net') + + # Update the tld + tld.name = 'org' + + # Update storage + tld = self.storage.update_tld(self.admin_context, tld) + + # Verify the new value + self.assertEqual('org', tld.name) + + # Ensure the version column was incremented + self.assertEqual(2, tld.version) + + def test_update_tld_duplicate(self): + # Create two tlds + tld_one = self.create_tld(fixture=0) + tld_two = self.create_tld(fixture=1) + + # Update tld_two to be a duplicate of tld_one + tld_two.name = tld_one.name + + with testtools.ExpectedException(exceptions.DuplicateTld): + self.storage.update_tld(self.admin_context, tld_two) + + def test_update_tld_missing(self): + tld = objects.Tld(id='486f9cbe-b8b6-4d8c-8275-1a6e47b13e00') + with testtools.ExpectedException(exceptions.TldNotFound): + self.storage.update_tld(self.admin_context, tld) + + def test_delete_tld(self): + # Create a tld + tld = self.create_tld() + + # Delete the tld + self.storage.delete_tld(self.admin_context, tld['id']) + + # Verify that it's deleted + with testtools.ExpectedException(exceptions.TldNotFound): + self.storage.get_tld(self.admin_context, tld['id']) + + def test_delete_tld_missing(self): + with testtools.ExpectedException(exceptions.TldNotFound): + uuid = 'cac1fc02-79b2-4e62-a1a4-427b6790bbe6' + self.storage.delete_tld(self.admin_context, uuid) + + # Blacklist tests + def test_create_blacklist(self): + values = { + 'pattern': "^([A-Za-z0-9_\\-]+\\.)*example\\.com\\.$", + 'description': 'This is a comment.' 
+ } + + result = self.storage.create_blacklist( + self.admin_context, objects.Blacklist.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pattern'], result['pattern']) + self.assertEqual(values['description'], result['description']) + + def test_create_blacklist_duplicate(self): + # Create the initial Blacklist + self.create_blacklist(fixture=0) + + exc = self.assertRaises(rpc_dispatcher.ExpectedException, + self.create_blacklist, + fixture=0) + + self.assertEqual(exceptions.DuplicateBlacklist, exc.exc_info[0]) + + def test_find_blacklists(self): + # Verify that there are no blacklists created + actual = self.storage.find_blacklists(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a Blacklist + blacklist = self.create_blacklist(fixture=0) + + actual = self.storage.find_blacklists(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(blacklist['pattern'], actual[0]['pattern']) + + def test_find_blacklists_paging(self): + # Create 10 Blacklists + created = [self.create_blacklist(pattern='^example-%d.org.' % i) + for i in range(10)] + + # Ensure we can page through the results. + self._ensure_paging(created, self.storage.find_blacklists) + + def test_find_blacklists_with_criterion(self): + # Create two blacklists + blacklist_one = self.create_blacklist(fixture=0) + blacklist_two = self.create_blacklist(fixture=1) + + # Verify blacklist_one + criterion = dict(pattern=blacklist_one['pattern']) + + results = self.storage.find_blacklists(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(blacklist_one['pattern'], results[0]['pattern']) + + # Verify blacklist_two + criterion = dict(pattern=blacklist_two['pattern']) + + results = self.storage.find_blacklists(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(blacklist_two['pattern'], results[0]['pattern']) + + def test_get_blacklist(self): + expected = self.create_blacklist(fixture=0) + actual = self.storage.get_blacklist(self.admin_context, expected['id']) + + self.assertEqual(expected['pattern'], actual['pattern']) + + def test_get_blacklist_missing(self): + with testtools.ExpectedException(exceptions.BlacklistNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_blacklist(self.admin_context, uuid) + + def test_find_blacklist_criterion(self): + blacklist_one = self.create_blacklist(fixture=0) + blacklist_two = self.create_blacklist(fixture=1) + + criterion = dict(pattern=blacklist_one['pattern']) + + result = self.storage.find_blacklist(self.admin_context, criterion) + + self.assertEqual(blacklist_one['pattern'], result['pattern']) + + criterion = dict(pattern=blacklist_two['pattern']) + + result = self.storage.find_blacklist(self.admin_context, criterion) + + self.assertEqual(blacklist_two['pattern'], result['pattern']) + + def test_find_blacklist_criterion_missing(self): + expected = self.create_blacklist(fixture=0) + + criterion = dict(pattern=expected['pattern'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.BlacklistNotFound): + self.storage.find_blacklist(self.admin_context, criterion) + + def test_update_blacklist(self): + blacklist = self.create_blacklist(pattern='^example.uk.') + + # Update the blacklist + blacklist.pattern = '^example.uk.co.' 
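+ # Persist the change; update_blacklist is expected to return the
+ # stored object with the new pattern and an incremented version.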
+ + blacklist = self.storage.update_blacklist(self.admin_context, + blacklist) + # Verify the new values + self.assertEqual('^example.uk.co.', blacklist.pattern) + + # Ensure the version column was incremented + self.assertEqual(2, blacklist.version) + + def test_update_blacklist_duplicate(self): + # Create two blacklists + blacklist_one = self.create_blacklist(fixture=0) + blacklist_two = self.create_blacklist(fixture=1) + + # Update the second one to be a duplicate of the first + blacklist_two.pattern = blacklist_one.pattern + + with testtools.ExpectedException(exceptions.DuplicateBlacklist): + self.storage.update_blacklist(self.admin_context, + blacklist_two) + + def test_update_blacklist_missing(self): + blacklist = objects.Blacklist( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with testtools.ExpectedException(exceptions.BlacklistNotFound): + self.storage.update_blacklist(self.admin_context, blacklist) + + def test_delete_blacklist(self): + blacklist = self.create_blacklist(fixture=0) + + self.storage.delete_blacklist(self.admin_context, blacklist['id']) + + with testtools.ExpectedException(exceptions.BlacklistNotFound): + self.storage.get_blacklist(self.admin_context, blacklist['id']) + + def test_delete_blacklist_missing(self): + with testtools.ExpectedException(exceptions.BlacklistNotFound): + uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' + self.storage.delete_blacklist(self.admin_context, uuid) + + # Pool Tests + def test_create_pool(self): + values = { + 'name': 'test1', + 'tenant_id': self.admin_context.project_id, + 'provisioner': 'UNMANAGED' + } + + result = self.storage.create_pool( + self.admin_context, objects.Pool.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['name'], result['name']) + self.assertEqual(values['tenant_id'], result['tenant_id']) + self.assertEqual(values['provisioner'], result['provisioner']) + + def test_create_pool_with_all_relations(self): + values = { + 'name': 'Pool', + 'description': 'Pool description', + 'attributes': [{'key': 'scope', 'value': 'public'}], + 'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}], + 'nameservers': [{'host': "192.0.2.1", 'port': 53}], + 'targets': [{ + 'type': "fake", + 'description': 'FooBar', + 'masters': [{'host': "192.0.2.2", + 'port': DEFAULT_MDNS_PORT}], + 'options': [{'key': 'fake_option', 'value': 'fake_value'}], + }], + 'also_notifies': [{'host': "192.0.2.3", 'port': 53}] + } + + # Create the Pool, and check all values are OK + result = self.storage.create_pool( + self.admin_context, objects.Pool.from_dict(values)) + self.assertNestedDictContainsSubset(values, result.to_dict()) + + # Re-Fetch the pool, and check everything is still OK + result = self.storage.get_pool(self.admin_context, result.id) + self.assertNestedDictContainsSubset(values, result.to_dict()) + + def test_create_pool_duplicate(self): + # Create the first pool + self.create_pool(fixture=0) + + # Creating a second pool from the same fixture should raise an exception + exc = self.assertRaises(rpc_dispatcher.ExpectedException, + self.create_pool, + fixture=0) + + self.assertEqual(exceptions.DuplicatePool, exc.exc_info[0]) + + def test_find_pools(self): + # Verify that there are no pools, except for the default pool + actual = self.storage.find_pools(self.admin_context) + self.assertEqual(1, len(actual)) + + # Create a Pool + pool = self.create_pool(fixture=0) + + actual = self.storage.find_pools(self.admin_context) + self.assertEqual(2, 
len(actual)) + + # Test against the second pool, since the first is the default pool + self.assertEqual(pool['name'], actual[1]['name']) + + def test_find_pools_paging(self): + # Get any pools that are already created, including default + pools = self.storage.find_pools(self.admin_context) + + # Create 10 Pools + created = [self.create_pool(name='test%d' % i) + for i in range(10)] + + # Add in the existing pools + + for p in pools: + created.insert(0, p) + + # Ensure we can page through the results + self._ensure_paging(created, self.storage.find_pools) + + def test_find_pools_criterion(self): + # Create two pools + pool_one = self.create_pool(fixture=0) + pool_two = self.create_pool(fixture=1) + + # Verify pool_one + criterion = dict(name=pool_one['name']) + + results = self.storage.find_pools(self.admin_context, criterion) + + self.assertEqual(1, len(results)) + + self.assertEqual(pool_one['name'], results[0]['name']) + self.assertEqual(pool_one['provisioner'], results[0]['provisioner']) + + criterion = dict(name=pool_two['name']) + + results = self.storage.find_pools(self.admin_context, criterion) + + self.assertEqual(1, len(results)) + + self.assertEqual(pool_two['name'], results[0]['name']) + self.assertEqual(pool_two['provisioner'], results[0]['provisioner']) + + def test_get_pool(self): + # Create a pool + expected = self.create_pool() + actual = self.storage.get_pool(self.admin_context, expected['id']) + + self.assertEqual(expected['name'], actual['name']) + self.assertEqual(expected['provisioner'], actual['provisioner']) + + def test_get_pool_missing(self): + with testtools.ExpectedException(exceptions.PoolNotFound): + uuid = 'c28893e3-eb87-4562-aa29-1f0e835d749b' + self.storage.get_pool(self.admin_context, uuid) + + def test_find_pool_criterion(self): + pool_one = self.create_pool(fixture=0) + pool_two = self.create_pool(fixture=1) + + criterion = dict(name=pool_one['name']) + + result = self.storage.find_pool(self.admin_context, criterion) + + self.assertEqual(pool_one['name'], result['name']) + self.assertEqual(pool_one['provisioner'], result['provisioner']) + + criterion = dict(name=pool_two['name']) + + result = self.storage.find_pool(self.admin_context, criterion) + + self.assertEqual(pool_two['name'], result['name']) + self.assertEqual(pool_two['provisioner'], result['provisioner']) + + def test_find_pool_criterion_missing(self): + expected = self.create_pool() + + criterion = dict(name=expected['name'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolNotFound): + self.storage.find_pool(self.admin_context, criterion) + + def test_update_pool(self): + # Create a pool + pool = self.create_pool(name='test1') + + # Update the Pool + pool.name = 'test3' + + # Perform the update + pool = self.storage.update_pool(self.admin_context, pool) + + # Verify the new value is there + self.assertEqual('test3', pool.name) + + def test_update_pool_duplicate(self): + # Create two pools + pool_one = self.create_pool(fixture=0) + pool_two = self.create_pool(fixture=1) + + # Update pool_two to be a duplicate of pool_one + pool_two.name = pool_one.name + + with testtools.ExpectedException(exceptions.DuplicatePool): + self.storage.update_pool(self.admin_context, pool_two) + + def test_update_pool_missing(self): + pool = objects.Pool(id='8806f871-5140-43f4-badd-2bbc5715b013') + + with testtools.ExpectedException(exceptions.PoolNotFound): + self.storage.update_pool(self.admin_context, pool) + + def test_update_pool_with_all_relations(self): + values = { + 'name': 'Pool-A', + 
'description': 'Pool-A description', + 'attributes': [{'key': 'scope', 'value': 'public'}], + 'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}], + 'nameservers': [{'host': "192.0.2.1", 'port': 53}], + 'targets': [{ + 'type': "fake", + 'description': 'FooBar', + 'masters': [{'host': "192.0.2.2", + 'port': DEFAULT_MDNS_PORT}], + 'options': [{'key': 'fake_option', 'value': 'fake_value'}], + }], + 'also_notifies': [{'host': "192.0.2.3", 'port': 53}] + } + + # Create the Pool + result = self.storage.create_pool( + self.admin_context, objects.Pool.from_dict(values)) + + created_pool_id = result.id + + # Prepare a new set of data for the Pool, copying over the ID so + # we trigger an update rather than a create. + values = { + 'id': created_pool_id, + 'name': 'Pool-B', + 'description': 'Pool-B description', + 'attributes': [{'key': 'scope', 'value': 'private'}], + 'ns_records': [{'priority': 1, 'hostname': 'ns2.example.org.'}], + 'nameservers': [{'host': "192.0.2.5", 'port': 53}], + 'targets': [{ + 'type': "fake", + 'description': 'NewFooBar', + 'masters': [{'host': "192.0.2.2", + 'port': DEFAULT_MDNS_PORT}], + 'options': [{'key': 'fake_option', 'value': 'fake_value'}], + }, { + 'type': "fake", + 'description': 'FooBar2', + 'masters': [{'host': "192.0.2.7", 'port': 5355}], + 'options': [{'key': 'fake_option', 'value': 'new_fake_value'}], + }], + 'also_notifies': [] + } + + # Update the pool, and check everything is OK + result = self.storage.update_pool( + self.admin_context, objects.Pool.from_dict(values)) + self.assertNestedDictContainsSubset(values, result.to_dict()) + + # Re-Fetch the pool, and check everything is still OK + result = self.storage.get_pool(self.admin_context, created_pool_id) + self.assertNestedDictContainsSubset(values, result.to_dict()) + + def test_delete_pool(self): + pool = self.create_pool() + + self.storage.delete_pool(self.admin_context, pool['id']) + + with testtools.ExpectedException(exceptions.PoolNotFound): + self.storage.get_pool(self.admin_context, pool['id']) + + def test_delete_pool_missing(self): + with testtools.ExpectedException(exceptions.PoolNotFound): + uuid = '203ca44f-c7e7-4337-9a02-0d735833e6aa' + self.storage.delete_pool(self.admin_context, uuid) + + def test_create_pool_ns_record_duplicate(self): + # Create a pool + pool = self.create_pool(name='test1') + + ns = objects.PoolNsRecord(priority=1, hostname="ns.example.io.") + self.storage.create_pool_ns_record( + self.admin_context, pool.id, ns) + + ns2 = objects.PoolNsRecord(priority=2, hostname="ns.example.io.") + with testtools.ExpectedException(exceptions.DuplicatePoolNsRecord): + self.storage.create_pool_ns_record( + self.admin_context, pool.id, ns2) + + def test_update_pool_ns_record_duplicate(self): + # Create a pool + pool = self.create_pool(name='test1') + + ns1 = objects.PoolNsRecord(priority=1, hostname="ns1.example.io.") + self.storage.create_pool_ns_record( + self.admin_context, pool.id, ns1) + + ns2 = objects.PoolNsRecord(priority=2, hostname="ns2.example.io.") + self.storage.create_pool_ns_record( + self.admin_context, pool.id, ns2) + + with testtools.ExpectedException(exceptions.DuplicatePoolNsRecord): + ns2.hostname = ns1.hostname + self.storage.update_pool_ns_record( + self.admin_context, ns2) + + # PoolAttribute tests + def test_create_pool_attribute(self): + values = { + 'pool_id': "d5d10661-0312-4ae1-8664-31188a4310b7", + 'key': "test-attribute", + 'value': 'test-value' + } + + result = self.storage.create_pool_attribute( + self.admin_context, values['pool_id'], + 
objects.PoolAttribute.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['key'], result['key']) + self.assertEqual(values['value'], result['value']) + + def test_find_pool_attributes(self): + # Verify that there are no Pool Attributes created + actual = self.storage.find_pool_attributes(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a Pool Attribute + pool_attribute = self.create_pool_attribute(fixture=0) + + actual = self.storage.find_pool_attributes(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_attribute['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_attribute['key'], actual[0]['key']) + self.assertEqual(pool_attribute['value'], actual[0]['value']) + + def test_find_pool_attributes_paging(self): + # Create 10 Pool Attributes + created = [self.create_pool_attribute(value='^ns%d.example.com.' % i) + for i in range(10)] + + # Ensure we can page through the results. + self._ensure_paging(created, self.storage.find_pool_attributes) + + def test_find_pool_attributes_with_criterion(self): + # Create two pool attributes + pool_attribute_one = self.create_pool_attribute(fixture=0) + pool_attribute_two = self.create_pool_attribute(fixture=1) + + # Verify pool_attribute_one + criterion = dict(key=pool_attribute_one['key']) + + results = self.storage.find_pool_attributes(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(pool_attribute_one['pool_id'], results[0]['pool_id']) + self.assertEqual(pool_attribute_one['key'], results[0]['key']) + self.assertEqual(pool_attribute_one['value'], results[0]['value']) + + # Verify pool_attribute_two + criterion = dict(key=pool_attribute_two['key']) + LOG.debug("Criterion is %r", criterion) + + results = self.storage.find_pool_attributes(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(pool_attribute_two['pool_id'], results[0]['pool_id']) + self.assertEqual(pool_attribute_two['key'], results[0]['key']) + self.assertEqual(pool_attribute_two['value'], results[0]['value']) + + def test_get_pool_attribute(self): + expected = self.create_pool_attribute(fixture=0) + actual = self.storage.get_pool_attribute(self.admin_context, + expected['id']) + + self.assertEqual(expected['pool_id'], actual['pool_id']) + self.assertEqual(expected['key'], actual['key']) + self.assertEqual(expected['value'], actual['value']) + + def test_get_pool_attribute_missing(self): + with testtools.ExpectedException(exceptions.PoolAttributeNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_attribute(self.admin_context, uuid) + + def test_find_pool_attribute_criterion(self): + pool_attribute_one = self.create_pool_attribute(fixture=0) + pool_attribute_two = self.create_pool_attribute(fixture=1) + + criterion = dict(key=pool_attribute_one['key']) + + result = self.storage.find_pool_attribute(self.admin_context, + criterion) + + self.assertEqual(pool_attribute_one['pool_id'], result['pool_id']) + self.assertEqual(pool_attribute_one['key'], result['key']) + self.assertEqual(pool_attribute_one['value'], result['value']) + + criterion = dict(key=pool_attribute_two['key']) + + result = self.storage.find_pool_attribute(self.admin_context, + criterion) + + self.assertEqual(pool_attribute_two['pool_id'], 
result['pool_id']) + self.assertEqual(pool_attribute_two['key'], result['key']) + self.assertEqual(pool_attribute_two['value'], result['value']) + + def test_find_pool_attribute_criterion_missing(self): + expected = self.create_pool_attribute(fixture=0) + + criterion = dict(key=expected['key'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolAttributeNotFound): + self.storage.find_pool_attribute(self.admin_context, criterion) + + def test_update_pool_attribute(self): + pool_attribute = self.create_pool_attribute(value='ns1.example.org') + + # Update the Pool Attribute + pool_attribute.value = 'ns5.example.org' + + pool_attribute = self.storage.update_pool_attribute(self.admin_context, + pool_attribute) + # Verify the new values + self.assertEqual('ns5.example.org', pool_attribute.value) + + # Ensure the version column was incremented + self.assertEqual(2, pool_attribute.version) + + def test_update_pool_attribute_missing(self): + pool_attribute = objects.PoolAttribute( + id='728a329a-83b1-4573-82dc-45dceab435d4') + + with testtools.ExpectedException(exceptions.PoolAttributeNotFound): + self.storage.update_pool_attribute(self.admin_context, + pool_attribute) + + def test_update_pool_attribute_duplicate(self): + # Create two PoolAttributes + pool_attribute_one = self.create_pool_attribute(fixture=0) + pool_attribute_two = self.create_pool_attribute(fixture=1) + + # Update the second one to be a duplicate of the first + pool_attribute_two.pool_id = pool_attribute_one.pool_id + pool_attribute_two.key = pool_attribute_one.key + pool_attribute_two.value = pool_attribute_one.value + + with testtools.ExpectedException(exceptions.DuplicatePoolAttribute): + self.storage.update_pool_attribute(self.admin_context, + pool_attribute_two) + + def test_delete_pool_attribute(self): + pool_attribute = self.create_pool_attribute(fixture=0) + + self.storage.delete_pool_attribute(self.admin_context, + pool_attribute['id']) + + with testtools.ExpectedException(exceptions.PoolAttributeNotFound): + self.storage.get_pool_attribute(self.admin_context, + pool_attribute['id']) + + def test_delete_pool_attribute_missing(self): + with testtools.ExpectedException(exceptions.PoolAttributeNotFound): + uuid = '464e9250-4fe0-4267-9993-da639390bb04' + self.storage.delete_pool_attribute(self.admin_context, uuid) + + def test_create_pool_attribute_duplicate(self): + # Create the initial PoolAttribute + self.create_pool_attribute(fixture=0) + + with testtools.ExpectedException(exceptions.DuplicatePoolAttribute): + self.create_pool_attribute(fixture=0) + + # PoolNameserver tests + def test_create_pool_nameserver(self): + pool = self.create_pool(fixture=0) + + values = { + 'pool_id': pool.id, + 'host': "192.0.2.1", + 'port': 53 + } + + result = self.storage.create_pool_nameserver( + self.admin_context, + pool.id, + objects.PoolNameserver.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['host'], result['host']) + self.assertEqual(values['port'], result['port']) + + def test_create_pool_nameserver_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create the initial PoolNameserver + self.create_pool_nameserver(pool, fixture=0) + + with testtools.ExpectedException(exceptions.DuplicatePoolNameserver): + self.create_pool_nameserver(pool, fixture=0) + + def test_find_pool_nameservers(self): 
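+ # NOTE: unlike pools, no nameservers are seeded by default, so the
+ # initial find call below should return an empty list.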
+ pool = self.create_pool(fixture=0) + + # Verify that there are no pool_nameservers created + actual = self.storage.find_pool_nameservers(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a PoolNameserver + pool_nameserver = self.create_pool_nameserver(pool, fixture=0) + + # Fetch the PoolNameservers and ensure only 1 exists + actual = self.storage.find_pool_nameservers(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_nameserver['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_nameserver['host'], actual[0]['host']) + self.assertEqual(pool_nameserver['port'], actual[0]['port']) + + def test_find_pool_nameservers_paging(self): + pool = self.create_pool(fixture=0) + + # Create 10 PoolNameservers + created = [self.create_pool_nameserver(pool, host='192.0.2.%d' % i) + for i in range(10)] + + # Ensure we can page through the results. + self._ensure_paging(created, self.storage.find_pool_nameservers) + + def test_find_pool_nameservers_with_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_nameservers + pool_nameserver_one = self.create_pool_nameserver(pool, fixture=0) + pool_nameserver_two = self.create_pool_nameserver(pool, fixture=1) + + # Verify pool_nameserver_one + criterion = dict(host=pool_nameserver_one['host']) + + results = self.storage.find_pool_nameservers( + self.admin_context, criterion) + + self.assertEqual(1, len(results)) + self.assertEqual(pool_nameserver_one['host'], results[0]['host']) + + # Verify pool_nameserver_two + criterion = dict(host=pool_nameserver_two['host']) + + results = self.storage.find_pool_nameservers(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(pool_nameserver_two['host'], results[0]['host']) + + def test_get_pool_nameserver(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_nameserver(pool, fixture=0) + actual = self.storage.get_pool_nameserver( + self.admin_context, expected['id']) + + self.assertEqual(expected['host'], actual['host']) + + def test_get_pool_nameserver_missing(self): + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_nameserver(self.admin_context, uuid) + + def test_find_pool_nameserver_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_nameservers + pool_nameserver_one = self.create_pool_nameserver(pool, fixture=0) + pool_nameserver_two = self.create_pool_nameserver(pool, fixture=1) + + # Verify pool_nameserver_one + criterion = dict(host=pool_nameserver_one['host']) + + result = self.storage.find_pool_nameserver( + self.admin_context, criterion) + + self.assertEqual(pool_nameserver_one['host'], result['host']) + + # Verify pool_nameserver_two + criterion = dict(host=pool_nameserver_two['host']) + + result = self.storage.find_pool_nameserver( + self.admin_context, criterion) + + self.assertEqual(pool_nameserver_two['host'], result['host']) + + def test_find_pool_nameserver_criterion_missing(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_nameserver(pool, fixture=0) + + criterion = dict(host=expected['host'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + self.storage.find_pool_nameserver(self.admin_context, criterion) + + def test_update_pool_nameserver(self): + pool = self.create_pool(fixture=0) + + pool_nameserver = self.create_pool_nameserver(pool, host='192.0.2.1') + + # Update the pool_nameserver + 
pool_nameserver.host = '192.0.2.2' + + pool_nameserver = self.storage.update_pool_nameserver( + self.admin_context, pool_nameserver) + + # Verify the new values + self.assertEqual('192.0.2.2', pool_nameserver.host) + + # Ensure the version column was incremented + self.assertEqual(2, pool_nameserver.version) + + def test_update_pool_nameserver_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create two pool_nameservers + pool_nameserver_one = self.create_pool_nameserver( + pool, fixture=0, host='192.0.2.1') + pool_nameserver_two = self.create_pool_nameserver( + pool, fixture=0, host='192.0.2.2') + + # Update the second one to be a duplicate of the first + pool_nameserver_two.host = pool_nameserver_one.host + + with testtools.ExpectedException(exceptions.DuplicatePoolNameserver): + self.storage.update_pool_nameserver( + self.admin_context, pool_nameserver_two) + + def test_update_pool_nameserver_missing(self): + pool_nameserver = objects.PoolNameserver( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + self.storage.update_pool_nameserver( + self.admin_context, pool_nameserver) + + def test_delete_pool_nameserver(self): + pool = self.create_pool(fixture=0) + pool_nameserver = self.create_pool_nameserver(pool, fixture=0) + + self.storage.delete_pool_nameserver( + self.admin_context, pool_nameserver['id']) + + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + self.storage.get_pool_nameserver( + self.admin_context, pool_nameserver['id']) + + def test_delete_pool_nameserver_missing(self): + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' + self.storage.delete_pool_nameserver(self.admin_context, uuid) + + # PoolTarget tests + def test_create_pool_target(self): + pool = self.create_pool(fixture=0) + + values = { + 'pool_id': pool.id, + 'type': "fake" + } + + result = self.storage.create_pool_target( + self.admin_context, + pool.id, + objects.PoolTarget.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['type'], result['type']) + + def test_find_pool_targets(self): + pool = self.create_pool(fixture=0) + + # Verify that there are no new pool_targets created + actual = self.storage.find_pool_targets( + self.admin_context, + criterion={'pool_id': pool.id}) + self.assertEqual(0, len(actual)) + + # Create a PoolTarget + pool_target = self.create_pool_target(pool, fixture=0) + + # Fetch the PoolTargets and ensure only 1 exists + actual = self.storage.find_pool_targets( + self.admin_context, + criterion={'pool_id': pool.id}) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_target['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_target['type'], actual[0]['type']) + + def test_find_pool_targets_paging(self): + pool = self.create_pool(fixture=0) + + # Create 10 PoolTargets + created = [self.create_pool_target(pool, description='Target %d' % i) + for i in range(10)] + + # Ensure we can page through the results. 
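+ # (Filtering on pool_id below presumably keeps any default pool's
+ # targets out of the paged results, mirroring test_find_pool_targets
+ # above.)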
+ self._ensure_paging(created, self.storage.find_pool_targets, + criterion={'pool_id': pool.id}) + + def test_find_pool_targets_with_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_targets + pool_target_one = self.create_pool_target( + pool, fixture=0, description='One') + pool_target_two = self.create_pool_target( + pool, fixture=1, description='Two') + + # Verify pool_target_one + criterion = dict(description=pool_target_one['description']) + + results = self.storage.find_pool_targets( + self.admin_context, criterion) + + self.assertEqual(1, len(results)) + self.assertEqual( + pool_target_one['description'], results[0]['description']) + + # Verify pool_target_two + criterion = dict(description=pool_target_two['description']) + + results = self.storage.find_pool_targets(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual( + pool_target_two['description'], results[0]['description']) + + def test_get_pool_target(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_target(pool, fixture=0) + actual = self.storage.get_pool_target( + self.admin_context, expected['id']) + + self.assertEqual(expected['type'], actual['type']) + + def test_get_pool_target_missing(self): + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_target(self.admin_context, uuid) + + def test_find_pool_target_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_targets + pool_target_one = self.create_pool_target( + pool, fixture=0, description='One') + pool_target_two = self.create_pool_target( + pool, fixture=1, description='Two') + + # Verify pool_target_one + criterion = dict(description=pool_target_one['description']) + + result = self.storage.find_pool_target( + self.admin_context, criterion) + + self.assertEqual(pool_target_one['description'], result['description']) + + # Verify pool_target_two + criterion = dict(description=pool_target_two['description']) + + result = self.storage.find_pool_target( + self.admin_context, criterion) + + self.assertEqual(pool_target_two['description'], result['description']) + + def test_find_pool_target_criterion_missing(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_target(pool, fixture=0) + + criterion = dict(description=expected['description'] + 'NOT FOUND') + + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + self.storage.find_pool_target(self.admin_context, criterion) + + def test_update_pool_target(self): + pool = self.create_pool(fixture=0) + + pool_target = self.create_pool_target(pool, description='One') + + # Update the pool_target + pool_target.description = 'Two' + + pool_target = self.storage.update_pool_target( + self.admin_context, pool_target) + + # Verify the new values + self.assertEqual('Two', pool_target.description) + + # Ensure the version column was incremented + self.assertEqual(2, pool_target.version) + + def test_update_pool_target_missing(self): + pool_target = objects.PoolTarget( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + self.storage.update_pool_target( + self.admin_context, pool_target) + + def test_delete_pool_target(self): + pool = self.create_pool(fixture=0) + pool_target = self.create_pool_target(pool, fixture=0) + + self.storage.delete_pool_target( + self.admin_context, pool_target['id']) + + with 
testtools.ExpectedException(exceptions.PoolTargetNotFound): + self.storage.get_pool_target( + self.admin_context, pool_target['id']) + + def test_delete_pool_target_missing(self): + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' + self.storage.delete_pool_target(self.admin_context, uuid) + + # PoolAlsoNotify tests + def test_create_pool_also_notify(self): + pool = self.create_pool(fixture=0) + + values = { + 'pool_id': pool.id, + 'host': "192.0.2.1", + 'port': 53 + } + + result = self.storage.create_pool_also_notify( + self.admin_context, + pool.id, + objects.PoolAlsoNotify.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['host'], result['host']) + self.assertEqual(values['port'], result['port']) + + def test_create_pool_also_notify_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create the initial PoolAlsoNotify + self.create_pool_also_notify(pool, fixture=0) + + with testtools.ExpectedException(exceptions.DuplicatePoolAlsoNotify): + self.create_pool_also_notify(pool, fixture=0) + + def test_find_pool_also_notifies(self): + pool = self.create_pool(fixture=0) + + # Verify that there are no pool_also_notifies created + actual = self.storage.find_pool_also_notifies(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a PoolAlsoNotify + pool_also_notify = self.create_pool_also_notify(pool, fixture=0) + + # Fetch the PoolAlsoNotifies and ensure only 1 exists + actual = self.storage.find_pool_also_notifies(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_also_notify['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_also_notify['host'], actual[0]['host']) + self.assertEqual(pool_also_notify['port'], actual[0]['port']) + + def test_find_pool_also_notifies_paging(self): + pool = self.create_pool(fixture=0) + + # Create 10 PoolAlsoNotifies + created = [self.create_pool_also_notify(pool, host='192.0.2.%d' % i) + for i in range(10)] + + # Ensure we can page through the results. 
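+ # (No criterion is needed here: test_find_pool_also_notifies above
+ # verified that nothing exists beyond what this test creates.)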
+ self._ensure_paging(created, self.storage.find_pool_also_notifies) + + def test_find_pool_also_notifies_with_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_also_notifies + pool_also_notify_one = self.create_pool_also_notify(pool, fixture=0) + pool_also_notify_two = self.create_pool_also_notify(pool, fixture=1) + + # Verify pool_also_notify_one + criterion = dict(host=pool_also_notify_one['host']) + + results = self.storage.find_pool_also_notifies( + self.admin_context, criterion) + + self.assertEqual(1, len(results)) + self.assertEqual(pool_also_notify_one['host'], results[0]['host']) + + # Verify pool_also_notify_two + criterion = dict(host=pool_also_notify_two['host']) + + results = self.storage.find_pool_also_notifies(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(pool_also_notify_two['host'], results[0]['host']) + + def test_get_pool_also_notify(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_also_notify(pool, fixture=0) + actual = self.storage.get_pool_also_notify( + self.admin_context, expected['id']) + + self.assertEqual(expected['host'], actual['host']) + + def test_get_pool_also_notify_missing(self): + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_also_notify(self.admin_context, uuid) + + def test_find_pool_also_notify_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_also_notifies + pool_also_notify_one = self.create_pool_also_notify(pool, fixture=0) + pool_also_notify_two = self.create_pool_also_notify(pool, fixture=1) + + # Verify pool_also_notify_one + criterion = dict(host=pool_also_notify_one['host']) + + result = self.storage.find_pool_also_notify( + self.admin_context, criterion) + + self.assertEqual(pool_also_notify_one['host'], result['host']) + + # Verify pool_also_notify_two + criterion = dict(host=pool_also_notify_two['host']) + + result = self.storage.find_pool_also_notify( + self.admin_context, criterion) + + self.assertEqual(pool_also_notify_two['host'], result['host']) + + def test_find_pool_also_notify_criterion_missing(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_also_notify(pool, fixture=0) + + criterion = dict(host=expected['host'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + self.storage.find_pool_also_notify(self.admin_context, criterion) + + def test_update_pool_also_notify(self): + pool = self.create_pool(fixture=0) + + pool_also_notify = self.create_pool_also_notify(pool, host='192.0.2.1') + + # Update the pool_also_notify + pool_also_notify.host = '192.0.2.2' + + pool_also_notify = self.storage.update_pool_also_notify( + self.admin_context, pool_also_notify) + + # Verify the new values + self.assertEqual('192.0.2.2', pool_also_notify.host) + + # Ensure the version column was incremented + self.assertEqual(2, pool_also_notify.version) + + def test_update_pool_also_notify_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create two pool_also_notifies + pool_also_notify_one = self.create_pool_also_notify( + pool, fixture=0, host='192.0.2.1') + pool_also_notify_two = self.create_pool_also_notify( + pool, fixture=0, host='192.0.2.2') + + # Update the second one to be a duplicate of the first + pool_also_notify_two.host = pool_also_notify_one.host + + with testtools.ExpectedException(exceptions.DuplicatePoolAlsoNotify): + self.storage.update_pool_also_notify( 
+ self.admin_context, pool_also_notify_two) + + def test_update_pool_also_notify_missing(self): + pool_also_notify = objects.PoolAlsoNotify( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + self.storage.update_pool_also_notify( + self.admin_context, pool_also_notify) + + def test_delete_pool_also_notify(self): + pool = self.create_pool(fixture=0) + pool_also_notify = self.create_pool_also_notify(pool, fixture=0) + + self.storage.delete_pool_also_notify( + self.admin_context, pool_also_notify['id']) + + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + self.storage.get_pool_also_notify( + self.admin_context, pool_also_notify['id']) + + def test_delete_pool_also_notify_missing(self): + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' + self.storage.delete_pool_also_notify(self.admin_context, uuid) + + # Service Status tests + def test_create_service_status_duplicate(self): + values = self.get_service_status_fixture(fixture=0) + + self.storage.create_service_status( + self.admin_context, objects.ServiceStatus.from_dict(values)) + + with testtools.ExpectedException(exceptions.DuplicateServiceStatus): + self.storage.create_service_status( + self.admin_context, objects.ServiceStatus.from_dict(values)) + + # Zone Transfer Request tests + def test_create_zone_transfer_request(self): + zone = self.create_zone() + + values = { + 'tenant_id': self.admin_context.project_id, + 'zone_id': zone.id, + 'key': 'qwertyuiop' + } + + result = self.storage.create_zone_transfer_request( + self.admin_context, objects.ZoneTransferRequest.from_dict(values)) + + self.assertEqual(self.admin_context.project_id, result['tenant_id']) + self.assertIn('status', result) + + def test_create_zone_transfer_request_scoped(self): + zone = self.create_zone() + tenant_2_context = self.get_context(project_id='2') + tenant_3_context = self.get_context(project_id='3') + + values = { + 'tenant_id': self.admin_context.project_id, + 'zone_id': zone.id, + 'key': 'qwertyuiop', + 'target_tenant_id': tenant_2_context.project_id, + } + + result = self.storage.create_zone_transfer_request( + self.admin_context, objects.ZoneTransferRequest.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(self.admin_context.project_id, result['tenant_id']) + self.assertEqual( + tenant_2_context.project_id, result['target_tenant_id'] + ) + self.assertIn('status', result) + + stored_ztr = self.storage.get_zone_transfer_request( + tenant_2_context, result.id) + + self.assertEqual( + self.admin_context.project_id, stored_ztr['tenant_id'] + ) + self.assertEqual(stored_ztr['id'], result['id']) + + with testtools.ExpectedException( + exceptions.ZoneTransferRequestNotFound): + self.storage.get_zone_transfer_request( + tenant_3_context, result.id) + + def test_find_zone_transfer_requests(self): + zone = self.create_zone() + + values = { + 'tenant_id': self.admin_context.project_id, + 'zone_id': zone.id, + 'key': 'qwertyuiop' + } + + self.storage.create_zone_transfer_request( + self.admin_context, objects.ZoneTransferRequest.from_dict(values)) + + requests = self.storage.find_zone_transfer_requests( + self.admin_context, {"tenant_id": self.admin_context.project_id}) + self.assertEqual(1, len(requests)) + + def test_delete_zone_transfer_request(self): + zone = self.create_zone() + zt_request = 
self.create_zone_transfer_request(zone) + + self.storage.delete_zone_transfer_request( + self.admin_context, zt_request.id) + + with testtools.ExpectedException( + exceptions.ZoneTransferRequestNotFound): + self.storage.get_zone_transfer_request( + self.admin_context, zt_request.id) + + def test_update_zone_transfer_request(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + + zt_request.description = 'New description' + result = self.storage.update_zone_transfer_request( + self.admin_context, zt_request) + self.assertEqual('New description', result.description) + + def test_get_zone_transfer_request(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + + result = self.storage.get_zone_transfer_request( + self.admin_context, zt_request.id) + self.assertEqual(zt_request.id, result.id) + self.assertEqual(zt_request.zone_id, result.zone_id) + + # Zone Transfer Accept tests + def test_create_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + values = { + 'tenant_id': self.admin_context.project_id, + 'zone_transfer_request_id': zt_request.id, + 'zone_id': zone.id, + 'key': zt_request.key + } + + result = self.storage.create_zone_transfer_accept( + self.admin_context, objects.ZoneTransferAccept.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(self.admin_context.project_id, result['tenant_id']) + self.assertIn('status', result) + + def test_find_zone_transfer_accepts(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + values = { + 'tenant_id': self.admin_context.project_id, + 'zone_transfer_request_id': zt_request.id, + 'zone_id': zone.id, + 'key': zt_request.key + } + + self.storage.create_zone_transfer_accept( + self.admin_context, objects.ZoneTransferAccept.from_dict(values)) + + accepts = self.storage.find_zone_transfer_accepts( + self.admin_context, {"tenant_id": self.admin_context.project_id}) + self.assertEqual(1, len(accepts)) + + def test_find_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + values = { + 'tenant_id': self.admin_context.project_id, + 'zone_transfer_request_id': zt_request.id, + 'zone_id': zone.id, + 'key': zt_request.key + } + + result = self.storage.create_zone_transfer_accept( + self.admin_context, objects.ZoneTransferAccept.from_dict(values)) + + accept = self.storage.find_zone_transfer_accept( + self.admin_context, {"id": result.id}) + self.assertEqual(result.id, accept.id) + + def test_transfer_zone_ownership(self): + tenant_1_context = self.get_context(project_id='1', + roles=['member', 'reader']) + tenant_2_context = self.get_context(project_id='2', + roles=['member', 'reader']) + admin_context = self.get_admin_context() + admin_context.all_tenants = True + + zone = self.create_zone(context=tenant_1_context) + recordset = self.create_recordset(zone, context=tenant_1_context) + record = recordset.records[0] + + updated_zone = zone + + updated_zone.tenant_id = tenant_2_context.project_id + + self.storage.update_zone( + admin_context, updated_zone) + + saved_zone = self.storage.get_zone( + admin_context, zone.id) + saved_recordset = self.storage.find_recordset( + admin_context, criterion={'id': recordset.id}) + saved_record = self.storage.get_record( + admin_context, record.id) + + 
+        self.assertEqual(tenant_2_context.project_id, saved_zone.tenant_id)
+        self.assertEqual(
+            tenant_2_context.project_id, saved_recordset.tenant_id
+        )
+        self.assertEqual(tenant_2_context.project_id, saved_record.tenant_id)
+
+    def test_delete_zone_transfer_accept(self):
+        zone = self.create_zone()
+        zt_request = self.create_zone_transfer_request(zone)
+        zt_accept = self.create_zone_transfer_accept(zt_request)
+
+        self.storage.delete_zone_transfer_accept(
+            self.admin_context, zt_accept.id)
+
+        with testtools.ExpectedException(
+                exceptions.ZoneTransferAcceptNotFound):
+            self.storage.get_zone_transfer_accept(
+                self.admin_context, zt_accept.id)
+
+    def test_update_zone_transfer_accept(self):
+        zone = self.create_zone()
+        zt_request = self.create_zone_transfer_request(zone)
+        zt_accept = self.create_zone_transfer_accept(zt_request)
+
+        zt_accept.status = 'COMPLETE'
+        result = self.storage.update_zone_transfer_accept(
+            self.admin_context, zt_accept)
+        self.assertEqual('COMPLETE', result.status)
+
+    def test_get_zone_transfer_accept(self):
+        zone = self.create_zone()
+        zt_request = self.create_zone_transfer_request(zone)
+        zt_accept = self.create_zone_transfer_accept(zt_request)
+
+        result = self.storage.get_zone_transfer_accept(
+            self.admin_context, zt_accept.id)
+        self.assertEqual(zt_accept.id, result.id)
+        self.assertEqual(zt_accept.zone_id, result.zone_id)
+
+    def test_count_zone_tasks(self):
+        # in the beginning, there should be nothing
+        zones = self.storage.count_zone_tasks(self.admin_context)
+        self.assertEqual(0, zones)
+
+        values = {
+            'status': 'PENDING',
+            'task_type': 'IMPORT'
+        }
+
+        self.storage.create_zone_import(
+            self.admin_context, objects.ZoneImport.from_dict(values))
+
+        # count imported zones
+        zones = self.storage.count_zone_tasks(self.admin_context)
+
+        # well, did we get 1?
+        self.assertEqual(1, zones)
+
+    def test_count_zone_tasks_none_result(self):
+        rp = mock.Mock()
+        rp.fetchone.return_value = None
+        with mock.patch('designate.storage.sql.get_write_session',
+                        return_value=rp):
+            zones = self.storage.count_zone_tasks(self.admin_context)
+            self.assertEqual(0, zones)
+
+    # Zone Import Tests
+    def test_create_zone_import(self):
+        values = {
+            'status': 'PENDING',
+            'task_type': 'IMPORT'
+        }
+
+        result = self.storage.create_zone_import(
+            self.admin_context, objects.ZoneImport.from_dict(values))
+
+        self.assertIsNotNone(result['id'])
+        self.assertIsNotNone(result['created_at'])
+        self.assertIsNone(result['updated_at'])
+        self.assertIsNotNone(result['version'])
+        self.assertEqual(values['status'], result['status'])
+        self.assertIsNone(result['zone_id'])
+        self.assertIsNone(result['message'])
+
+    def test_find_zone_imports(self):
+        actual = self.storage.find_zone_imports(self.admin_context)
+        self.assertEqual(0, len(actual))
+
+        # Create a single ZoneImport
+        zone_import = self.create_zone_import(fixture=0)
+
+        actual = self.storage.find_zone_imports(self.admin_context)
+        self.assertEqual(1, len(actual))
+
+        self.assertEqual(zone_import['status'], actual[0]['status'])
+        self.assertEqual(zone_import['message'], actual[0]['message'])
+        self.assertEqual(zone_import['zone_id'], actual[0]['zone_id'])
+
+    def test_find_zone_imports_paging(self):
+        # Create 10 ZoneImports
+        created = [self.create_zone_import() for i in range(10)]
+
+        # Ensure we can page through the results.
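+        # (_ensure_paging is a shared test helper that pages through the
+        # created items via the given find_* method and checks they all
+        # appear.)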
+        self._ensure_paging(created, self.storage.find_zone_imports)
+
+    def test_find_zone_imports_with_criterion(self):
+        zone_import_one = self.create_zone_import(fixture=0)
+        zone_import_two = self.create_zone_import(fixture=1)
+
+        criterion_one = dict(status=zone_import_one['status'])
+
+        results = self.storage.find_zone_imports(self.admin_context,
+                                                 criterion_one)
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(zone_import_one['status'], results[0]['status'])
+
+        criterion_two = dict(status=zone_import_two['status'])
+
+        results = self.storage.find_zone_imports(self.admin_context,
+                                                 criterion_two)
+        self.assertEqual(1, len(results))
+
+        self.assertEqual(zone_import_two['status'], results[0]['status'])
+
+    def test_get_zone_import(self):
+        # Create a zone_import
+        expected = self.create_zone_import()
+        actual = self.storage.get_zone_import(self.admin_context,
+                                              expected['id'])
+
+        self.assertEqual(expected['status'], actual['status'])
+
+    def test_get_zone_import_missing(self):
+        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
+            uuid = '4c8e7f82-3519-4bf7-8940-a66a4480f223'
+            self.storage.get_zone_import(self.admin_context, uuid)
+
+    def test_find_zone_import_criterion_missing(self):
+        expected = self.create_zone_import()
+
+        criterion = dict(status=expected['status'] + "NOT FOUND")
+
+        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
+            self.storage.find_zone_import(self.admin_context, criterion)
+
+    def test_update_zone_import(self):
+        # Create a zone_import
+        zone_import = self.create_zone_import(status='PENDING',
+                                              task_type='IMPORT')
+
+        # Update the zone_import
+        zone_import.status = 'COMPLETE'
+
+        # Update storage
+        zone_import = self.storage.update_zone_import(self.admin_context,
+                                                      zone_import)
+
+        # Verify the new value
+        self.assertEqual('COMPLETE', zone_import.status)
+
+        # Ensure the version column was incremented
+        self.assertEqual(2, zone_import.version)
+
+    def test_update_zone_import_missing(self):
+        zone_import = objects.ZoneImport(
+            id='486f9cbe-b8b6-4d8c-8275-1a6e47b13e00')
+        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
+            self.storage.update_zone_import(self.admin_context, zone_import)
+
+    def test_delete_zone_import(self):
+        # Create a zone_import
+        zone_import = self.create_zone_import()
+
+        # Delete the zone_import
+        self.storage.delete_zone_import(self.admin_context, zone_import['id'])
+
+        # Verify that it's deleted
+        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
+            self.storage.get_zone_import(self.admin_context, zone_import['id'])
+
+    def test_delete_zone_import_missing(self):
+        with testtools.ExpectedException(exceptions.ZoneImportNotFound):
+            uuid = 'cac1fc02-79b2-4e62-a1a4-427b6790bbe6'
+            self.storage.delete_zone_import(self.admin_context, uuid)
+
+    def test_schema_table_names(self):
+        table_names = [
+            'blacklists',
+            'pool_also_notifies',
+            'pool_attributes',
+            'pool_nameservers',
+            'pool_ns_records',
+            'pool_target_masters',
+            'pool_target_options',
+            'pool_targets',
+            'pools',
+            'quotas',
+            'records',
+            'recordsets',
+            'service_statuses',
+            'shared_zones',
+            'tlds',
+            'tsigkeys',
+            'zone_attributes',
+            'zone_masters',
+            'zone_tasks',
+            'zone_transfer_accepts',
+            'zone_transfer_requests',
+            'zones'
+        ]
+
+        inspector = self.storage.get_inspector()
+
+        actual_table_names = inspector.get_table_names()
+
+        # We have transitioned database schema migration tools.
+        # They use different tracking tables. Accommodate that one or both
+        # may exist in this test.
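+        # (sqlalchemy-migrate tracked its state in 'migrate_version';
+        # alembic uses 'alembic_version'.)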
+        migration_table_found = False
+        if ('migrate_version' in actual_table_names or
+                'alembic_version' in actual_table_names):
+            migration_table_found = True
+        self.assertTrue(
+            migration_table_found, 'A DB migration table was not found.'
+        )
+        try:
+            actual_table_names.remove('migrate_version')
+        except ValueError:
+            pass
+        try:
+            actual_table_names.remove('alembic_version')
+        except ValueError:
+            pass
+
+        self.assertEqual(table_names, actual_table_names)
+
+    def test_schema_table_indexes(self):
+        with sql.get_read_session() as session:
+            indexes_t = session.execute(
+                text("SELECT * FROM sqlite_master WHERE type = 'index';"))
+
+        indexes = {}  # table name -> index names -> cmd
+        for _, index_name, table_name, num, cmd in indexes_t:
+            if index_name.startswith("sqlite_"):
+                continue  # ignore sqlite-specific indexes
+            if table_name not in indexes:
+                indexes[table_name] = {}
+            indexes[table_name][index_name] = cmd
+
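+        # The expected values below are the literal CREATE INDEX statements
+        # that SQLite stores in sqlite_master.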
+        expected = {
+            "records": {
+                "record_created_at": "CREATE INDEX record_created_at ON records (created_at)",  # noqa
+                "records_tenant": "CREATE INDEX records_tenant ON records (tenant_id)",  # noqa
+                "update_status_index": "CREATE INDEX update_status_index ON records (status, zone_id, tenant_id, created_at, serial)",  # noqa
+            },
+            "recordsets": {
+                "recordset_created_at": "CREATE INDEX recordset_created_at ON recordsets (created_at)",  # noqa
+                "recordset_type_name": "CREATE INDEX recordset_type_name ON recordsets (type, name)",  # noqa
+                "reverse_name_dom_id": "CREATE INDEX reverse_name_dom_id ON recordsets (reverse_name, zone_id)",  # noqa
+                "rrset_type_domainid": "CREATE INDEX rrset_type_domainid ON recordsets (type, zone_id)",  # noqa
+                "rrset_updated_at": "CREATE INDEX rrset_updated_at ON recordsets (updated_at)",  # noqa
+                "rrset_zoneid": "CREATE INDEX rrset_zoneid ON recordsets (zone_id)",  # noqa
+                "rrset_type": "CREATE INDEX rrset_type ON recordsets (type)",  # noqa
+                "rrset_ttl": "CREATE INDEX rrset_ttl ON recordsets (ttl)",  # noqa
+                "rrset_tenant_id": "CREATE INDEX rrset_tenant_id ON recordsets (tenant_id)",  # noqa
+            },
+            "zones": {
+                "delayed_notify": "CREATE INDEX delayed_notify ON zones (delayed_notify)",  # noqa
+                "increment_serial": "CREATE INDEX increment_serial ON zones (increment_serial)",  # noqa
+                "reverse_name_deleted": "CREATE INDEX reverse_name_deleted ON zones (reverse_name, deleted)",  # noqa
+                "zone_created_at": "CREATE INDEX zone_created_at ON zones (created_at)",  # noqa
+                "zone_deleted": "CREATE INDEX zone_deleted ON zones (deleted)",
+                "zone_tenant_deleted": "CREATE INDEX zone_tenant_deleted ON zones (tenant_id, deleted)",  # noqa
+            }
+        }
+        self.assertDictEqual(expected, indexes)
diff --git a/designate/tests/unit/storage/impl_sqlalchemy/alembic/test_legacy_utils.py b/designate/tests/unit/storage/impl_sqlalchemy/alembic/test_legacy_utils.py
index 5b8702cee..f61198ead 100644
--- a/designate/tests/unit/storage/impl_sqlalchemy/alembic/test_legacy_utils.py
+++ b/designate/tests/unit/storage/impl_sqlalchemy/alembic/test_legacy_utils.py
@@ -15,7 +15,7 @@ from unittest import mock
 
 import oslotest.base
 
-from designate.storage.impl_sqlalchemy.alembic import legacy_utils
+from designate.storage.sqlalchemy.alembic import legacy_utils
 
 
 class TestLegacyUtils(oslotest.base.BaseTestCase):
diff --git a/designate/tests/unit/test_central/test_basic.py b/designate/tests/unit/test_central/test_basic.py
index 5e3fba499..d5f81d56d 100644
--- a/designate/tests/unit/test_central/test_basic.py
+++ b/designate/tests/unit/test_central/test_basic.py
@@ -29,6 +29,7 @@ import designate.central.service
 from designate.central.service import Service
 from designate import exceptions
 from designate import objects
+from designate.storage import sqlalchemy
 from designate.tests.fixtures import random_seed
 from designate.tests import TestCase
 
@@ -223,7 +224,7 @@ class CentralBasic(TestCase):
         super(CentralBasic, self).setUp()
         self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
         self.CONF([], project='designate')
-        mock_storage = mock.Mock(spec=designate.storage.base.Storage)
+        mock_storage = mock.Mock(spec=sqlalchemy.SQLAlchemyStorage)
 
         pool_list = objects.PoolList.from_list(
             [
diff --git a/designate/tests/unit/test_upgrade_checks.py b/designate/tests/unit/test_upgrade_checks.py
index 93045ed15..af04148f4 100644
--- a/designate/tests/unit/test_upgrade_checks.py
+++ b/designate/tests/unit/test_upgrade_checks.py
@@ -18,7 +18,7 @@ from sqlalchemy.schema import MetaData
 from sqlalchemy.schema import Table
 
 from designate.cmd import status
-from designate.sqlalchemy import sql
+from designate.storage import sql
 from designate import tests
 
 
@@ -69,7 +69,7 @@ class TestDuplicateServiceStatus(tests.TestCase):
         self.assertEqual(upgradecheck.Code.SUCCESS,
                          checks._duplicate_service_status().code)
 
-    @mock.patch('designate.sqlalchemy.sql.get_read_session')
+    @mock.patch('designate.storage.sql.get_read_session')
     @mock.patch('designate.storage.sql.get_read_engine')
     def test_failure(self, mock_get_engine, mock_get_read):
         mock_sql_execute = mock.Mock()
diff --git a/designate/worker/service.py b/designate/worker/service.py
index 8eae9069a..29c352d59 100644
--- a/designate/worker/service.py
+++ b/designate/worker/service.py
@@ -111,8 +111,7 @@ class Service(service.RPCService):
     @property
     def storage(self):
         if not self._storage:
-            storage_driver = cfg.CONF['service:worker'].storage_driver
-            self._storage = storage.get_storage(storage_driver)
+            self._storage = storage.get_storage()
         return self._storage
 
     @property
diff --git a/designate/worker/tasks/base.py b/designate/worker/tasks/base.py
index b335980ac..91ad45be8 100644
--- a/designate/worker/tasks/base.py
+++ b/designate/worker/tasks/base.py
@@ -123,9 +123,7 @@ class Task(TaskConfig):
     @property
     def storage(self):
         if not self._storage:
-            # Get a storage connection
-            storage_driver = cfg.CONF['service:central'].storage_driver
-            self._storage = storage.get_storage(storage_driver)
+            self._storage = storage.get_storage()
         return self._storage
 
     @property
diff --git a/doc/source/contributor/sourcedoc/storage.rst b/doc/source/contributor/sourcedoc/storage.rst
index 52108913b..5e8976a3f 100644
--- a/doc/source/contributor/sourcedoc/storage.rst
+++ b/doc/source/contributor/sourcedoc/storage.rst
@@ -9,7 +9,7 @@ Storage
 Storage Base
 =============
 
-.. automodule:: designate.storage.base
+.. automodule:: designate.storage.sqlalchemy
    :members:
    :undoc-members:
    :show-inheritance:
diff --git a/setup.cfg b/setup.cfg
index 65df4666d..a370b94b6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -63,9 +63,6 @@ designate.api.admin.extensions =
     quotas = designate.api.admin.controllers.extensions.quotas:QuotasController
     zones = designate.api.admin.controllers.extensions.zones:ZonesController
 
-designate.storage =
-    sqlalchemy = designate.storage.impl_sqlalchemy:SQLAlchemyStorage
-
 designate.notification.handler =
     fake = designate.notification_handler.fake:FakeHandler
     nova_fixed = designate.notification_handler.nova:NovaFixedHandler