From 1375a45a71a946108e277a3787fdfbc63729ec5c Mon Sep 17 00:00:00 2001 From: Kiall Mac Innes Date: Tue, 23 Feb 2016 13:47:28 +0000 Subject: [PATCH] Add New Pools DB Tables and Code This change adds the new pools DB tables and access code, but does not make use of or populate the new tables. Change-Id: I71232bc0087b53b6678295d7900916ec51e0cb2b --- designate/exceptions.py | 56 +- designate/storage/impl_sqlalchemy/__init__.py | 526 +++++++- .../versions/086_new_pools_tables.py | 126 ++ designate/storage/impl_sqlalchemy/tables.py | 88 ++ designate/tests/__init__.py | 55 +- designate/tests/test_storage/__init__.py | 1117 +++++++++++++---- .../tests/test_storage/test_sqlalchemy.py | 24 +- 7 files changed, 1747 insertions(+), 245 deletions(-) create mode 100644 designate/storage/impl_sqlalchemy/migrate_repo/versions/086_new_pools_tables.py diff --git a/designate/exceptions.py b/designate/exceptions.py index 339457c11..beb7dea68 100644 --- a/designate/exceptions.py +++ b/designate/exceptions.py @@ -291,18 +291,30 @@ class DuplicatePoolAttribute(Duplicate): error_type = 'duplicate_pool_attribute' -class DuplicateZoneAttribute(Duplicate): - error_type = 'duplicate_zone_attribute' - - -class DuplicateZoneMaster(Duplicate): - error_type = 'duplicate_zone_attribute' - - class DuplicatePoolNsRecord(Duplicate): error_type = 'duplicate_pool_ns_record' +class DuplicatePoolNameserver(Duplicate): + error_type = 'duplicate_pool_nameserver' + + +class DuplicatePoolTarget(Duplicate): + error_type = 'duplicate_pool_target' + + +class DuplicatePoolTargetOption(Duplicate): + error_type = 'duplicate_pool_target_option' + + +class DuplicatePoolTargetMaster(Duplicate): + error_type = 'duplicate_pool_target_master' + + +class DuplicatePoolAlsoNotify(Duplicate): + error_type = 'duplicate_pool_also_notify' + + class DuplicateZoneImport(Duplicate): error_type = 'duplicate_zone_import' @@ -325,6 +337,14 @@ class DuplicateZoneTransferAccept(Duplicate): error_type = 'duplicate_zone_transfer_accept' 
+class DuplicateZoneAttribute(Duplicate): + error_type = 'duplicate_zone_attribute' + + +class DuplicateZoneMaster(Duplicate): + error_type = 'duplicate_zone_attribute' + + class NotFound(Base): expected = True error_code = 404 @@ -395,6 +415,26 @@ class PoolNsRecordNotFound(NotFound): error_type = 'pool_ns_record_not_found' +class PoolNameserverNotFound(NotFound): + error_type = 'pool_nameserver_not_found' + + +class PoolTargetNotFound(NotFound): + error_type = 'pool_target_not_found' + + +class PoolTargetOptionNotFound(NotFound): + error_type = 'pool_target_option_not_found' + + +class PoolTargetMasterNotFound(NotFound): + error_type = 'pool_target_master_not_found' + + +class PoolAlsoNotifyNotFound(NotFound): + error_type = 'pool_also_notify_not_found' + + class ZoneTransferRequestNotFound(NotFound): error_type = 'zone_transfer_request_not_found' diff --git a/designate/storage/impl_sqlalchemy/__init__.py b/designate/storage/impl_sqlalchemy/__init__.py index 8b07b2adf..8af07ec76 100644 --- a/designate/storage/impl_sqlalchemy/__init__.py +++ b/designate/storage/impl_sqlalchemy/__init__.py @@ -922,7 +922,17 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): pool.ns_records = self._find_pool_ns_records( context, {'pool_id': pool.id}) - pool.obj_reset_changes(['attributes', 'ns_records']) + pool.nameservers = self._find_pool_nameservers( + context, {'pool_id': pool.id}) + + pool.targets = self._find_pool_targets( + context, {'pool_id': pool.id}) + + pool.also_notifies = self._find_pool_also_notifies( + context, {'pool_id': pool.id}) + + pool.obj_reset_changes(['attributes', 'ns_records', 'nameservers', + 'targets', 'also_notifies']) if one: _load_relations(pools) @@ -935,7 +945,8 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): def create_pool(self, context, pool): pool = self._create( tables.pools, pool, exceptions.DuplicatePool, - ['attributes', 'ns_records']) + ['attributes', 'ns_records', 'nameservers', 
'targets', + 'also_notifies']) if pool.obj_attr_is_set('attributes'): for pool_attribute in pool.attributes: @@ -949,7 +960,26 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): else: pool.ns_records = objects.PoolNsRecordList() - pool.obj_reset_changes(['attributes', 'ns_records']) + if pool.obj_attr_is_set('nameservers'): + for nameserver in pool.nameservers: + self.create_pool_nameserver(context, pool.id, nameserver) + else: + pool.nameservers = objects.PoolNameserverList() + + if pool.obj_attr_is_set('targets'): + for target in pool.targets: + self.create_pool_target(context, pool.id, target) + else: + pool.targets = objects.PoolTargetList() + + if pool.obj_attr_is_set('also_notifies'): + for also_notify in pool.also_notifies: + self.create_pool_also_notify(context, pool.id, also_notify) + else: + pool.also_notifies = objects.PoolAlsoNotifyList() + + pool.obj_reset_changes(['attributes', 'ns_records', 'nameservers', + 'targets', 'also_notifies']) return pool @@ -968,9 +998,10 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): def update_pool(self, context, pool): pool = self._update(context, tables.pools, pool, exceptions.DuplicatePool, exceptions.PoolNotFound, - ['attributes', 'ns_records']) + ['attributes', 'ns_records', 'nameservers', + 'targets', 'also_notifies']) - # TODO(kiall): These two sections below are near identical, we should + # TODO(kiall): The sections below are near identical, we should # refactor into a single reusable method. 
if pool.obj_attr_is_set('attributes'): # Gather the pool ID's we have @@ -1058,6 +1089,136 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): self.create_pool_ns_record( context, pool.id, ns_record) + if pool.obj_attr_is_set('nameservers'): + # Gather the pool ID's we have + have_nameservers = set([r.id for r in self._find_pool_nameservers( + context, {'pool_id': pool.id})]) + + # Prep some lists of changes + keep_nameservers = set([]) + create_nameservers = [] + update_nameservers = [] + + nameservers = [] + if pool.obj_attr_is_set('nameservers'): + for r in pool.nameservers.objects: + nameservers.append(r) + + # Determine what to change + for nameserver in nameservers: + keep_nameservers.add(nameserver.id) + try: + nameserver.obj_get_original_value('id') + except KeyError: + create_nameservers.append(nameserver) + else: + update_nameservers.append(nameserver) + + # NOTE: Since we're dealing with mutable objects, the return value + # of create/update/delete nameserver is not needed. The + # original item will be mutated in place on the input + # "pool.nameservers" list. 
+ + # Delete nameservers + for nameserver_id in have_nameservers - keep_nameservers: + self.delete_pool_nameserver(context, nameserver_id) + + # Update nameservers + for nameserver in update_nameservers: + self.update_pool_nameserver(context, nameserver) + + # Create nameservers + for nameserver in create_nameservers: + self.create_pool_nameserver( + context, pool.id, nameserver) + + if pool.obj_attr_is_set('targets'): + # Gather the pool ID's we have + have_targets = set([r.id for r in self._find_pool_targets( + context, {'pool_id': pool.id})]) + + # Prep some lists of changes + keep_targets = set([]) + create_targets = [] + update_targets = [] + + targets = [] + if pool.obj_attr_is_set('targets'): + for r in pool.targets.objects: + targets.append(r) + + # Determine what to change + for target in targets: + keep_targets.add(target.id) + try: + target.obj_get_original_value('id') + except KeyError: + create_targets.append(target) + else: + update_targets.append(target) + + # NOTE: Since we're dealing with mutable objects, the return value + # of create/update/delete target is not needed. The + # original item will be mutated in place on the input + # "pool.targets" list. 
+ + # Delete targets + for target_id in have_targets - keep_targets: + self.delete_pool_target(context, target_id) + + # Update targets + for target in update_targets: + self.update_pool_target(context, target) + + # Create targets + for target in create_targets: + self.create_pool_target( + context, pool.id, target) + + if pool.obj_attr_is_set('also_notifies'): + # Gather the pool ID's we have + have_also_notifies = set( + [r.id for r in self._find_pool_also_notifies( + context, {'pool_id': pool.id})]) + + # Prep some lists of changes + keep_also_notifies = set([]) + create_also_notifies = [] + update_also_notifies = [] + + also_notifies = [] + if pool.obj_attr_is_set('also_notifies'): + for r in pool.also_notifies.objects: + also_notifies.append(r) + + # Determine what to change + for also_notify in also_notifies: + keep_also_notifies.add(also_notify.id) + try: + also_notify.obj_get_original_value('id') + except KeyError: + create_also_notifies.append(also_notify) + else: + update_also_notifies.append(also_notify) + + # NOTE: Since we're dealing with mutable objects, the return value + # of create/update/delete also_notify is not needed. The + # original item will be mutated in place on the input + # "pool.also_notifies" list. 
+ + # Delete also_notifies + for also_notify_id in have_also_notifies - keep_also_notifies: + self.delete_pool_also_notify(context, also_notify_id) + + # Update also_notifies + for also_notify in update_also_notifies: + self.update_pool_also_notify(context, also_notify) + + # Create also_notifies + for also_notify in create_also_notifies: + self.create_pool_also_notify( + context, pool.id, also_notify) + # Call get_pool to get the ids of all the attributes/ns_records # refreshed in the pool object updated_pool = self.get_pool(context, pool.id) @@ -1072,7 +1233,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): # Pool attribute methods def _find_pool_attributes(self, context, criterion, one=False, marker=None, - limit=None, sort_key=None, sort_dir=None): + limit=None, sort_key=None, sort_dir=None): return self._find(context, tables.pool_attributes, objects.PoolAttribute, objects.PoolAttributeList, exceptions.PoolAttributeNotFound, criterion, one, @@ -1089,7 +1250,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): context, {'id': pool_attribute_id}, one=True) def find_pool_attributes(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): + limit=None, sort_key=None, sort_dir=None): return self._find_pool_attributes(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) @@ -1113,7 +1274,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): # Pool ns_record methods def _find_pool_ns_records(self, context, criterion, one=False, marker=None, - limit=None, sort_key=None, sort_dir=None): + limit=None, sort_key=None, sort_dir=None): return self._find(context, tables.pool_ns_records, objects.PoolNsRecord, objects.PoolNsRecordList, exceptions.PoolNsRecordNotFound, criterion, one, @@ -1130,7 +1291,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): context, {'id': pool_ns_record_id}, one=True) def 
find_pool_ns_records(self, context, criterion=None, marker=None, - limit=None, sort_key=None, sort_dir=None): + limit=None, sort_key=None, sort_dir=None): return self._find_pool_ns_records(context, criterion, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) @@ -1152,6 +1313,353 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage): return deleted_pool_ns_record + # PoolNameserver methods + def _find_pool_nameservers(self, context, criterion, one=False, + marker=None, limit=None, sort_key=None, + sort_dir=None): + return self._find(context, tables.pool_nameservers, + objects.PoolNameserver, objects.PoolNameserverList, + exceptions.PoolNameserverNotFound, criterion, one, + marker, limit, sort_key, sort_dir) + + def create_pool_nameserver(self, context, pool_id, pool_nameserver): + pool_nameserver.pool_id = pool_id + + return self._create(tables.pool_nameservers, pool_nameserver, + exceptions.DuplicatePoolNameserver) + + def get_pool_nameserver(self, context, pool_nameserver_id): + return self._find_pool_nameservers( + context, {'id': pool_nameserver_id}, one=True) + + def find_pool_nameservers(self, context, criterion=None, marker=None, + limit=None, sort_key=None, sort_dir=None): + return self._find_pool_nameservers(context, criterion, marker=marker, + limit=limit, sort_key=sort_key, + sort_dir=sort_dir) + + def find_pool_nameserver(self, context, criterion): + return self._find_pool_nameservers(context, criterion, one=True) + + def update_pool_nameserver(self, context, pool_nameserver): + return self._update(context, tables.pool_nameservers, pool_nameserver, + exceptions.DuplicatePoolNameserver, + exceptions.PoolNameserverNotFound) + + def delete_pool_nameserver(self, context, pool_nameserver_id): + pool_nameserver = self._find_pool_nameservers( + context, {'id': pool_nameserver_id}, one=True) + deleted_pool_nameserver = self._delete( + context, tables.pool_nameservers, pool_nameserver, + exceptions.PoolNameserverNotFound) + 
+ return deleted_pool_nameserver + + # PoolTarget methods + def _find_pool_targets(self, context, criterion, one=False, marker=None, + limit=None, sort_key=None, sort_dir=None): + pool_targets = self._find( + context, tables.pool_targets, objects.PoolTarget, + objects.PoolTargetList, exceptions.PoolTargetNotFound, + criterion, one, marker, limit, sort_key, + sort_dir) + + # Load Relations + def _load_relations(pool_target): + pool_target.options = self._find_pool_target_options( + context, {'pool_target_id': pool_target.id}) + + pool_target.masters = self._find_pool_target_masters( + context, {'pool_target_id': pool_target.id}) + + pool_target.obj_reset_changes(['options', 'masters']) + + if one: + _load_relations(pool_targets) + else: + for pool_target in pool_targets: + _load_relations(pool_target) + + return pool_targets + + def create_pool_target(self, context, pool_id, pool_target): + pool_target.pool_id = pool_id + + pool_target = self._create( + tables.pool_targets, pool_target, exceptions.DuplicatePoolTarget, + ['options', 'masters']) + + if pool_target.obj_attr_is_set('options'): + for pool_target_option in pool_target.options: + self.create_pool_target_option( + context, pool_target.id, pool_target_option) + else: + pool_target.options = objects.PoolTargetOptionList() + + if pool_target.obj_attr_is_set('masters'): + for pool_target_master in pool_target.masters: + self.create_pool_target_master( + context, pool_target.id, pool_target_master) + else: + pool_target.masters = objects.PoolTargetMasterList() + + pool_target.obj_reset_changes(['options', 'masters']) + + return pool_target + + def get_pool_target(self, context, pool_target_id): + return self._find_pool_targets( + context, {'id': pool_target_id}, one=True) + + def find_pool_targets(self, context, criterion=None, marker=None, + limit=None, sort_key=None, sort_dir=None): + return self._find_pool_targets(context, criterion, marker=marker, + limit=limit, sort_key=sort_key, + sort_dir=sort_dir) + + 
def find_pool_target(self, context, criterion): + return self._find_pool_targets(context, criterion, one=True) + + def update_pool_target(self, context, pool_target): + pool_target = self._update( + context, tables.pool_targets, pool_target, + exceptions.DuplicatePoolTarget, exceptions.PoolTargetNotFound, + ['options', 'masters']) + + # TODO(kiall): The sections below are near identical, we should + # refactor into a single reusable method. + if pool_target.obj_attr_is_set('options'): + # Gather the pool ID's we have + have_options = set([r.id for r in self._find_pool_target_options( + context, {'pool_target_id': pool_target.id})]) + + # Prep some lists of changes + keep_options = set([]) + create_options = [] + update_options = [] + + options = [] + if pool_target.obj_attr_is_set('options'): + for r in pool_target.options.objects: + options.append(r) + + # Determine what to change + for option in options: + keep_options.add(option.id) + try: + option.obj_get_original_value('id') + except KeyError: + create_options.append(option) + else: + update_options.append(option) + + # NOTE: Since we're dealing with mutable objects, the return value + # of create/update/delete option is not needed. The + # original item will be mutated in place on the input + # "pool.options" list. 
+ + # Delete options + for option_id in have_options - keep_options: + self.delete_pool_target_option(context, option_id) + + # Update options + for option in update_options: + self.update_pool_target_option(context, option) + + # Create options + for option in create_options: + self.create_pool_target_option( + context, pool_target.id, option) + + if pool_target.obj_attr_is_set('masters'): + # Gather the pool ID's we have + have_masters = set([r.id for r in self._find_pool_target_masters( + context, {'pool_target_id': pool_target.id})]) + + # Prep some lists of changes + keep_masters = set([]) + create_masters = [] + update_masters = [] + + masters = [] + if pool_target.obj_attr_is_set('masters'): + for r in pool_target.masters.objects: + masters.append(r) + + # Determine what to change + for master in masters: + keep_masters.add(master.id) + try: + master.obj_get_original_value('id') + except KeyError: + create_masters.append(master) + else: + update_masters.append(master) + + # NOTE: Since we're dealing with mutable objects, the return value + # of create/update/delete master is not needed. The + # original item will be mutated in place on the input + # "pool.masters" list. 
+ + # Delete masters + for master_id in have_masters - keep_masters: + self.delete_pool_target_master(context, master_id) + + # Update masters + for master in update_masters: + self.update_pool_target_master(context, master) + + # Create masters + for master in create_masters: + self.create_pool_target_master( + context, pool_target.id, master) + + # Call get_pool to get the ids of all the attributes/ns_records + # refreshed in the pool object + updated_pool_target = self.get_pool_target(context, pool_target.id) + + return updated_pool_target + + def delete_pool_target(self, context, pool_target_id): + pool_target = self._find_pool_targets( + context, {'id': pool_target_id}, one=True) + + return self._delete(context, tables.pool_targets, pool_target, + exceptions.PoolTargetNotFound) + + # PoolTargetOption methods + def _find_pool_target_options(self, context, criterion, one=False, + marker=None, limit=None, sort_key=None, + sort_dir=None): + return self._find( + context, tables.pool_target_options, + objects.PoolTargetOption, objects.PoolTargetOptionList, + exceptions.PoolTargetOptionNotFound, criterion, one, + marker, limit, sort_key, sort_dir) + + def create_pool_target_option(self, context, pool_target_id, + pool_target_option): + pool_target_option.pool_target_id = pool_target_id + + return self._create(tables.pool_target_options, pool_target_option, + exceptions.DuplicatePoolTargetOption) + + def get_pool_target_option(self, context, pool_target_option_id): + return self._find_pool_target_options( + context, {'id': pool_target_option_id}, one=True) + + def find_pool_target_options(self, context, criterion=None, marker=None, + limit=None, sort_key=None, sort_dir=None): + return self._find_pool_target_options( + context, criterion, marker=marker, limit=limit, sort_key=sort_key, + sort_dir=sort_dir) + + def find_pool_target_option(self, context, criterion): + return self._find_pool_target_options(context, criterion, one=True) + + def 
update_pool_target_option(self, context, pool_target_option): + return self._update( + context, tables.pool_target_options, pool_target_option, + exceptions.DuplicatePoolTargetOption, + exceptions.PoolTargetOptionNotFound) + + def delete_pool_target_option(self, context, pool_target_option_id): + pool_target_option = self._find_pool_target_options( + context, {'id': pool_target_option_id}, one=True) + deleted_pool_target_option = self._delete( + context, tables.pool_target_options, pool_target_option, + exceptions.PoolTargetOptionNotFound) + + return deleted_pool_target_option + + # PoolTargetMaster methods + def _find_pool_target_masters(self, context, criterion, one=False, + marker=None, limit=None, sort_key=None, + sort_dir=None): + return self._find( + context, tables.pool_target_masters, + objects.PoolTargetMaster, objects.PoolTargetMasterList, + exceptions.PoolTargetMasterNotFound, criterion, one, + marker, limit, sort_key, sort_dir) + + def create_pool_target_master(self, context, pool_target_id, + pool_target_master): + pool_target_master.pool_target_id = pool_target_id + + return self._create(tables.pool_target_masters, pool_target_master, + exceptions.DuplicatePoolTargetMaster) + + def get_pool_target_master(self, context, pool_target_master_id): + return self._find_pool_target_masters( + context, {'id': pool_target_master_id}, one=True) + + def find_pool_target_masters(self, context, criterion=None, marker=None, + limit=None, sort_key=None, sort_dir=None): + return self._find_pool_target_masters( + context, criterion, marker=marker, limit=limit, sort_key=sort_key, + sort_dir=sort_dir) + + def find_pool_target_master(self, context, criterion): + return self._find_pool_target_masters(context, criterion, one=True) + + def update_pool_target_master(self, context, pool_target_master): + return self._update( + context, tables.pool_target_masters, pool_target_master, + exceptions.DuplicatePoolTargetMaster, + exceptions.PoolTargetMasterNotFound) + + def 
delete_pool_target_master(self, context, pool_target_master_id): + pool_target_master = self._find_pool_target_masters( + context, {'id': pool_target_master_id}, one=True) + deleted_pool_target_master = self._delete( + context, tables.pool_target_masters, pool_target_master, + exceptions.PoolTargetMasterNotFound) + + return deleted_pool_target_master + + # PoolAlsoNotify methods + def _find_pool_also_notifies(self, context, criterion, one=False, + marker=None, limit=None, sort_key=None, + sort_dir=None): + return self._find(context, tables.pool_also_notifies, + objects.PoolAlsoNotify, objects.PoolAlsoNotifyList, + exceptions.PoolAlsoNotifyNotFound, criterion, one, + marker, limit, sort_key, sort_dir) + + def create_pool_also_notify(self, context, pool_id, pool_also_notify): + pool_also_notify.pool_id = pool_id + + return self._create(tables.pool_also_notifies, pool_also_notify, + exceptions.DuplicatePoolAlsoNotify) + + def get_pool_also_notify(self, context, pool_also_notify_id): + return self._find_pool_also_notifies( + context, {'id': pool_also_notify_id}, one=True) + + def find_pool_also_notifies(self, context, criterion=None, marker=None, + limit=None, sort_key=None, sort_dir=None): + return self._find_pool_also_notifies(context, criterion, marker=marker, + limit=limit, sort_key=sort_key, + sort_dir=sort_dir) + + def find_pool_also_notify(self, context, criterion): + return self._find_pool_also_notifies(context, criterion, one=True) + + def update_pool_also_notify(self, context, pool_also_notify): + return self._update( + context, tables.pool_also_notifies, pool_also_notify, + exceptions.DuplicatePoolAlsoNotify, + exceptions.PoolAlsoNotifyNotFound) + + def delete_pool_also_notify(self, context, pool_also_notify_id): + pool_also_notify = self._find_pool_also_notifies( + context, {'id': pool_also_notify_id}, one=True) + deleted_pool_also_notify = self._delete( + context, tables.pool_also_notifies, pool_also_notify, + exceptions.PoolAlsoNotifyNotFound) + + return 
deleted_pool_also_notify + # Zone Transfer Methods def _find_zone_transfer_requests(self, context, criterion, one=False, marker=None, limit=None, sort_key=None, diff --git a/designate/storage/impl_sqlalchemy/migrate_repo/versions/086_new_pools_tables.py b/designate/storage/impl_sqlalchemy/migrate_repo/versions/086_new_pools_tables.py new file mode 100644 index 000000000..602a11430 --- /dev/null +++ b/designate/storage/impl_sqlalchemy/migrate_repo/versions/086_new_pools_tables.py @@ -0,0 +1,126 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Author: Kiall Mac Innes +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_utils import timeutils +from sqlalchemy import (Integer, String, Unicode, DateTime, + ForeignKeyConstraint, UniqueConstraint) +from sqlalchemy.schema import Table, Column, MetaData + +from designate import utils +from designate.sqlalchemy.types import UUID + +meta = MetaData() + +pool_nameservers = Table('pool_nameservers', meta, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_id', UUID(), nullable=False), + Column('host', String(255), nullable=False), + Column('port', Integer(), nullable=False), + + ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), + UniqueConstraint('pool_id', 'host', 'port', name='unique_pool_host_port'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_targets = Table('pool_targets', meta, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_id', UUID(), nullable=False), + Column('type', String(50), nullable=False), + Column('tsigkey_id', UUID(), nullable=True), + Column('description', Unicode(160), nullable=True), + + ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_target_masters = Table('pool_target_masters', meta, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_target_id', UUID(), nullable=False), + Column('host', 
String(255), nullable=False), + Column('port', Integer(), nullable=False), + + ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'], + ondelete='CASCADE'), + UniqueConstraint('pool_target_id', 'host', 'port', + name='unique_pool_target_host_port'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_target_options = Table('pool_target_options', meta, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_target_id', UUID(), nullable=False), + Column('key', String(255), nullable=False), + Column('value', String(255), nullable=False), + + ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'], + ondelete='CASCADE'), + UniqueConstraint('pool_target_id', 'key', name='unique_pool_target_key'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_also_notifies = Table('pool_also_notifies', meta, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_id', UUID(), nullable=False), + Column('host', String(255), nullable=False), + Column('port', Integer(), nullable=False), + + ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), + UniqueConstraint('pool_id', 'host', 'port', name='unique_pool_host_port'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + # Load the pool_attributes_table table schema for relations + Table('pools', meta, autoload=True) + + pool_nameservers.create() + pool_targets.create() + pool_target_options.create() + pool_target_masters.create() + pool_also_notifies.create() diff --git 
a/designate/storage/impl_sqlalchemy/tables.py b/designate/storage/impl_sqlalchemy/tables.py index 0b76b5349..5ab1672c7 100644 --- a/designate/storage/impl_sqlalchemy/tables.py +++ b/designate/storage/impl_sqlalchemy/tables.py @@ -298,6 +298,94 @@ pool_ns_records = Table('pool_ns_records', metadata, mysql_engine='InnoDB', mysql_charset='utf8') +pool_nameservers = Table('pool_nameservers', metadata, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_id', UUID(), nullable=False), + Column('host', String(255), nullable=False), + Column('port', Integer(), nullable=False), + + ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), + UniqueConstraint('pool_id', 'host', 'port', name='unique_pool_host_port'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_targets = Table('pool_targets', metadata, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_id', UUID(), nullable=False), + Column('type', String(50), nullable=False), + Column('tsigkey_id', UUID(), nullable=True), + Column('description', Unicode(160), nullable=True), + + ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_target_masters = Table('pool_target_masters', metadata, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + 
Column('pool_target_id', UUID(), nullable=False), + Column('host', String(255), nullable=False), + Column('port', Integer(), nullable=False), + + ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'], + ondelete='CASCADE'), + UniqueConstraint('pool_target_id', 'host', 'port', + name='unique_pool_target_host_port'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_target_options = Table('pool_target_options', metadata, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_target_id', UUID(), nullable=False), + Column('key', String(255), nullable=False), + Column('value', String(255), nullable=False), + + ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'], + ondelete='CASCADE'), + UniqueConstraint('pool_target_id', 'key', name='unique_pool_target_key'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + +pool_also_notifies = Table('pool_also_notifies', metadata, + Column('id', UUID, default=utils.generate_uuid, primary_key=True), + Column('version', Integer(), default=1, nullable=False), + Column('created_at', DateTime, default=lambda: timeutils.utcnow()), + Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()), + + Column('pool_id', UUID(), nullable=False), + Column('host', String(255), nullable=False), + Column('port', Integer(), nullable=False), + + ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), + UniqueConstraint('pool_id', 'host', 'port', name='unique_pool_host_port'), + + mysql_engine='InnoDB', + mysql_charset='utf8', +) + zone_transfer_requests = Table('zone_transfer_requests', metadata, Column('id', UUID, default=utils.generate_uuid, primary_key=True), Column('version', Integer, default=1, nullable=False), diff --git a/designate/tests/__init__.py 
b/designate/tests/__init__.py index e4178c8ce..d61f25e8b 100644 --- a/designate/tests/__init__.py +++ b/designate/tests/__init__.py @@ -233,17 +233,32 @@ class TestCase(base.BaseTestCase): 'value': 'public'} ] - pool_manager_status_fixtures = [{ - 'server_id': '1d7a26e6-e604-4aa0-bbc5-d01081bf1f45', - 'status': 'SUCCESS', - 'serial_number': 1, - 'action': 'CREATE', - }, { - 'server_id': '1d7a26e6-e604-4aa0-bbc5-d01081bf1f45', - 'status': 'ERROR', - 'serial_number': 2, - 'action': 'DELETE' - }] + pool_nameserver_fixtures = [ + {'pool_id': default_pool_id, + 'host': "192.0.2.1", + 'port': 53}, + {'pool_id': default_pool_id, + 'host': "192.0.2.2", + 'port': 53}, + ] + + pool_target_fixtures = [ + {'pool_id': default_pool_id, + 'type': "fake", + 'description': u"FooBar"}, + {'pool_id': default_pool_id, + 'type': "fake", + 'description': u"BarFoo"}, + ] + + pool_also_notify_fixtures = [ + {'pool_id': default_pool_id, + 'host': "192.0.2.1", + 'port': 53}, + {'pool_id': default_pool_id, + 'host': "192.0.2.2", + 'port': 53}, + ] zone_transfers_request_fixtures = [{ "description": "Test Transfer", @@ -518,10 +533,24 @@ class TestCase(base.BaseTestCase): _values.update(values) return _values - def get_pool_manager_status_fixture(self, fixture=0, values=None): + def get_pool_nameserver_fixture(self, fixture=0, values=None): values = values or {} - _values = copy.copy(self.pool_manager_status_fixtures[fixture]) + _values = copy.copy(self.pool_nameserver_fixtures[fixture]) + _values.update(values) + return _values + + def get_pool_target_fixture(self, fixture=0, values=None): + values = values or {} + + _values = copy.copy(self.pool_target_fixtures[fixture]) + _values.update(values) + return _values + + def get_pool_also_notify_fixture(self, fixture=0, values=None): + values = values or {} + + _values = copy.copy(self.pool_also_notify_fixtures[fixture]) _values.update(values) return _values diff --git a/designate/tests/test_storage/__init__.py 
b/designate/tests/test_storage/__init__.py index bae40213a..ba22aaa7f 100644 --- a/designate/tests/test_storage/__init__.py +++ b/designate/tests/test_storage/__init__.py @@ -30,6 +30,24 @@ LOG = logging.getLogger(__name__) class StorageTestCase(object): + # TODO(kiall): Someone, Somewhere, could probably make use of a + # assertNestedDictContainsSubset(), cleanup and put somewhere + # better. + def assertNestedDictContainsSubset(self, expected, actual): + for key, value in expected.items(): + if isinstance(value, dict): + self.assertNestedDictContainsSubset(value, actual.get(key, {})) + + elif isinstance(value, list): + self.assertEqual(len(value), len(actual[key])) + + for index, item in enumerate(value): + self.assertNestedDictContainsSubset( + item, actual[key][index]) + + else: + self.assertEqual(value, actual[key]) + def create_quota(self, **kwargs): """ This create method has been kept in the StorageTestCase class as quotas @@ -46,6 +64,57 @@ class StorageTestCase(object): return self.storage.create_quota(context, values) + def create_pool_nameserver(self, pool, **kwargs): + # NOTE(kiall): We add this method here, rather than in the base test + # case, as the base methods expect to make a central API + # call. If a central API method is exposed for this, we + # should remove this and add to the base. + context = kwargs.pop('context', self.admin_context) + fixture = kwargs.pop('fixture', 0) + + values = self.get_pool_nameserver_fixture( + fixture=fixture, values=kwargs) + + if 'pool_id' not in values: + values['pool_id'] = pool.id + + return self.storage.create_pool_nameserver( + context, pool.id, objects.PoolNameserver.from_dict(values)) + + def create_pool_target(self, pool, **kwargs): + # NOTE(kiall): We add this method here, rather than in the base test + # case, as the base methods expect to make a central API + # call. If a central API method is exposed for this, we + # should remove this and add to the base. 
+ context = kwargs.pop('context', self.admin_context) + fixture = kwargs.pop('fixture', 0) + + values = self.get_pool_target_fixture( + fixture=fixture, values=kwargs) + + if 'pool_id' not in values: + values['pool_id'] = pool.id + + return self.storage.create_pool_target( + context, pool.id, objects.PoolTarget.from_dict(values)) + + def create_pool_also_notify(self, pool, **kwargs): + # NOTE(kiall): We add this method here, rather than in the base test + # case, as the base methods expect to make a central API + # call. If a central API method is exposed for this, we + # should remove this and add to the base. + context = kwargs.pop('context', self.admin_context) + fixture = kwargs.pop('fixture', 0) + + values = self.get_pool_also_notify_fixture( + fixture=fixture, values=kwargs) + + if 'pool_id' not in values: + values['pool_id'] = pool.id + + return self.storage.create_pool_also_notify( + context, pool.id, objects.PoolAlsoNotify.from_dict(values)) + # Paging Tests def _ensure_paging(self, data, method): """ @@ -1827,6 +1896,31 @@ class StorageTestCase(object): self.assertEqual(values['tenant_id'], result['tenant_id']) self.assertEqual(values['provisioner'], result['provisioner']) + def test_create_pool_with_all_relations(self): + values = { + 'name': u'Pool', + 'description': u'Pool description', + 'attributes': [{'key': 'scope', 'value': 'public'}], + 'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}], + 'nameservers': [{'host': "192.0.2.1", 'port': 53}], + 'targets': [{ + 'type': "fake", + 'description': u"FooBar", + 'masters': [{'host': "192.0.2.2", 'port': 5354}], + 'options': [{'key': 'fake_option', 'value': 'fake_value'}], + }], + 'also_notifies': [{'host': "192.0.2.3", 'port': 53}] + } + + # Create the Pool, and check all values are OK + result = self.storage.create_pool( + self.admin_context, objects.Pool.from_dict(values)) + self.assertNestedDictContainsSubset(values, result.to_dict()) + + # Re-Fetch the pool, and check everything is still 
OK + result = self.storage.get_pool(self.admin_context, result.id) + self.assertNestedDictContainsSubset(values, result.to_dict()) + def test_create_pool_duplicate(self): # Create the first pool self.create_pool(fixture=0) @@ -1958,6 +2052,60 @@ class StorageTestCase(object): with testtools.ExpectedException(exceptions.PoolNotFound): self.storage.update_pool(self.admin_context, pool) + def test_update_pool_with_all_relations(self): + values = { + 'name': u'Pool-A', + 'description': u'Pool-A description', + 'attributes': [{'key': 'scope', 'value': 'public'}], + 'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}], + 'nameservers': [{'host': "192.0.2.1", 'port': 53}], + 'targets': [{ + 'type': "fake", + 'description': u"FooBar", + 'masters': [{'host': "192.0.2.2", 'port': 5354}], + 'options': [{'key': 'fake_option', 'value': 'fake_value'}], + }], + 'also_notifies': [{'host': "192.0.2.3", 'port': 53}] + } + + # Create the Pool + result = self.storage.create_pool( + self.admin_context, objects.Pool.from_dict(values)) + + created_pool_id = result.id + + # Prepare a new set of data for the Pool, copying over the ID so + # we trigger an update rather than a create. 
+ values = { + 'id': created_pool_id, + 'name': u'Pool-B', + 'description': u'Pool-B description', + 'attributes': [{'key': 'scope', 'value': 'private'}], + 'ns_records': [{'priority': 1, 'hostname': 'ns2.example.org.'}], + 'nameservers': [{'host': "192.0.2.5", 'port': 53}], + 'targets': [{ + 'type': "fake", + 'description': u"NewFooBar", + 'masters': [{'host': "192.0.2.2", 'port': 5354}], + 'options': [{'key': 'fake_option', 'value': 'fake_value'}], + }, { + 'type': "fake", + 'description': u"FooBar2", + 'masters': [{'host': "192.0.2.7", 'port': 5355}], + 'options': [{'key': 'fake_option', 'value': 'new_fake_value'}], + }], + 'also_notifies': [] + } + + # Update the pool, and check everything is OK + result = self.storage.update_pool( + self.admin_context, objects.Pool.from_dict(values)) + self.assertNestedDictContainsSubset(values, result.to_dict()) + + # Re-Fetch the pool, and check everything is still OK + result = self.storage.get_pool(self.admin_context, created_pool_id) + self.assertNestedDictContainsSubset(values, result.to_dict()) + def test_delete_pool(self): pool = self.create_pool() @@ -2001,217 +2149,6 @@ class StorageTestCase(object): self.storage.update_pool_ns_record( self.admin_context, ns2) - def test_create_zone_transfer_request(self): - zone = self.create_zone() - - values = { - 'tenant_id': self.admin_context.tenant, - 'zone_id': zone.id, - 'key': 'qwertyuiop' - } - - result = self.storage.create_zone_transfer_request( - self.admin_context, objects.ZoneTransferRequest.from_dict(values)) - - self.assertEqual(self.admin_context.tenant, result['tenant_id']) - self.assertIn('status', result) - - def test_create_zone_transfer_request_scoped(self): - zone = self.create_zone() - tenant_2_context = self.get_context(tenant='2') - tenant_3_context = self.get_context(tenant='3') - - values = { - 'tenant_id': self.admin_context.tenant, - 'zone_id': zone.id, - 'key': 'qwertyuiop', - 'target_tenant_id': tenant_2_context.tenant, - } - - result = 
self.storage.create_zone_transfer_request( - self.admin_context, objects.ZoneTransferRequest.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(self.admin_context.tenant, result['tenant_id']) - self.assertEqual(tenant_2_context.tenant, result['target_tenant_id']) - self.assertIn('status', result) - - stored_ztr = self.storage.get_zone_transfer_request( - tenant_2_context, result.id) - - self.assertEqual(self.admin_context.tenant, stored_ztr['tenant_id']) - self.assertEqual(stored_ztr['id'], result['id']) - - with testtools.ExpectedException( - exceptions.ZoneTransferRequestNotFound): - self.storage.get_zone_transfer_request( - tenant_3_context, result.id) - - def test_find_zone_transfer_requests(self): - zone = self.create_zone() - - values = { - 'tenant_id': self.admin_context.tenant, - 'zone_id': zone.id, - 'key': 'qwertyuiop' - } - - self.storage.create_zone_transfer_request( - self.admin_context, objects.ZoneTransferRequest.from_dict(values)) - - requests = self.storage.find_zone_transfer_requests( - self.admin_context, {"tenant_id": self.admin_context.tenant}) - self.assertEqual(1, len(requests)) - - def test_delete_zone_transfer_request(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - - self.storage.delete_zone_transfer_request( - self.admin_context, zt_request.id) - - with testtools.ExpectedException( - exceptions.ZoneTransferRequestNotFound): - self.storage.get_zone_transfer_request( - self.admin_context, zt_request.id) - - def test_update_zone_transfer_request(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - - zt_request.description = 'New description' - result = self.storage.update_zone_transfer_request( - self.admin_context, zt_request) - self.assertEqual('New description', result.description) - - def test_get_zone_transfer_request(self): - zone = 
self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - - result = self.storage.get_zone_transfer_request( - self.admin_context, zt_request.id) - self.assertEqual(zt_request.id, result.id) - self.assertEqual(zt_request.zone_id, result.zone_id) - - def test_create_zone_transfer_accept(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - values = { - 'tenant_id': self.admin_context.tenant, - 'zone_transfer_request_id': zt_request.id, - 'zone_id': zone.id, - 'key': zt_request.key - } - - result = self.storage.create_zone_transfer_accept( - self.admin_context, objects.ZoneTransferAccept.from_dict(values)) - - self.assertIsNotNone(result['id']) - self.assertIsNotNone(result['created_at']) - self.assertIsNone(result['updated_at']) - - self.assertEqual(self.admin_context.tenant, result['tenant_id']) - self.assertIn('status', result) - - def test_find_zone_transfer_accepts(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - values = { - 'tenant_id': self.admin_context.tenant, - 'zone_transfer_request_id': zt_request.id, - 'zone_id': zone.id, - 'key': zt_request.key - } - - self.storage.create_zone_transfer_accept( - self.admin_context, objects.ZoneTransferAccept.from_dict(values)) - - accepts = self.storage.find_zone_transfer_accepts( - self.admin_context, {"tenant_id": self.admin_context.tenant}) - self.assertEqual(1, len(accepts)) - - def test_find_zone_transfer_accept(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - values = { - 'tenant_id': self.admin_context.tenant, - 'zone_transfer_request_id': zt_request.id, - 'zone_id': zone.id, - 'key': zt_request.key - } - - result = self.storage.create_zone_transfer_accept( - self.admin_context, objects.ZoneTransferAccept.from_dict(values)) - - accept = self.storage.find_zone_transfer_accept( - self.admin_context, {"id": result.id}) - self.assertEqual(result.id, accept.id) - - def 
test_transfer_zone_ownership(self): - tenant_1_context = self.get_context(tenant='1') - tenant_2_context = self.get_context(tenant='2') - admin_context = self.get_admin_context() - admin_context.all_tenants = True - - zone = self.create_zone(context=tenant_1_context) - recordset = self.create_recordset(zone, context=tenant_1_context) - record = self.create_record( - zone, recordset, context=tenant_1_context) - - updated_zone = zone - - updated_zone.tenant_id = tenant_2_context.tenant - - self.storage.update_zone( - admin_context, updated_zone) - - saved_zone = self.storage.get_zone( - admin_context, zone.id) - saved_recordset = self.storage.get_recordset( - admin_context, recordset.id) - saved_record = self.storage.get_record( - admin_context, record.id) - - self.assertEqual(tenant_2_context.tenant, saved_zone.tenant_id) - self.assertEqual(tenant_2_context.tenant, saved_recordset.tenant_id) - self.assertEqual(tenant_2_context.tenant, saved_record.tenant_id) - - def test_delete_zone_transfer_accept(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - zt_accept = self.create_zone_transfer_accept(zt_request) - - self.storage.delete_zone_transfer_accept( - self.admin_context, zt_accept.id) - - with testtools.ExpectedException( - exceptions.ZoneTransferAcceptNotFound): - self.storage.get_zone_transfer_accept( - self.admin_context, zt_accept.id) - - def test_update_zone_transfer_accept(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - zt_accept = self.create_zone_transfer_accept(zt_request) - - zt_accept.status = 'COMPLETE' - result = self.storage.update_zone_transfer_accept( - self.admin_context, zt_accept) - self.assertEqual('COMPLETE', result.status) - - def test_get_zone_transfer_accept(self): - zone = self.create_zone() - zt_request = self.create_zone_transfer_request(zone) - zt_accept = self.create_zone_transfer_accept(zt_request) - - result = self.storage.get_zone_transfer_accept( - 
self.admin_context, zt_accept.id) - self.assertEqual(zt_accept.id, result.id) - self.assertEqual(zt_accept.zone_id, result.zone_id) - # PoolAttribute tests def test_create_pool_attribute(self): values = { @@ -2384,6 +2321,764 @@ class StorageTestCase(object): with testtools.ExpectedException(exceptions.DuplicatePoolAttribute): self.create_pool_attribute(fixture=0) + # PoolNameserver tests + def test_create_pool_nameserver(self): + pool = self.create_pool(fixture=0) + + values = { + 'pool_id': pool.id, + 'host': "192.0.2.1", + 'port': 53 + } + + result = self.storage.create_pool_nameserver( + self.admin_context, + pool.id, + objects.PoolNameserver.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['host'], result['host']) + self.assertEqual(values['port'], result['port']) + + def test_create_pool_nameserver_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create the initial PoolNameserver + self.create_pool_nameserver(pool, fixture=0) + + with testtools.ExpectedException(exceptions.DuplicatePoolNameserver): + self.create_pool_nameserver(pool, fixture=0) + + def test_find_pool_nameservers(self): + pool = self.create_pool(fixture=0) + + # Verify that there are no pool_nameservers created + actual = self.storage.find_pool_nameservers(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a PoolNameserver + pool_nameserver = self.create_pool_nameserver(pool, fixture=0) + + # Fetch the PoolNameservers and ensure only 1 exists + actual = self.storage.find_pool_nameservers(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_nameserver['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_nameserver['host'], actual[0]['host']) + self.assertEqual(pool_nameserver['port'], actual[0]['port']) + + def 
test_find_pool_nameservers_paging(self): + pool = self.create_pool(fixture=0) + + # Create 10 PoolNameservers + created = [self.create_pool_nameserver(pool, host='192.0.2.%d' % i) + for i in range(10)] + + # Ensure we can page through the results. + self._ensure_paging(created, self.storage.find_pool_nameservers) + + def test_find_pool_nameservers_with_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_nameservers + pool_nameserver_one = self.create_pool_nameserver(pool, fixture=0) + pool_nameserver_two = self.create_pool_nameserver(pool, fixture=1) + + # Verify pool_nameserver_one + criterion = dict(host=pool_nameserver_one['host']) + + results = self.storage.find_pool_nameservers( + self.admin_context, criterion) + + self.assertEqual(1, len(results)) + self.assertEqual(pool_nameserver_one['host'], results[0]['host']) + + # Verify pool_nameserver_two + criterion = dict(host=pool_nameserver_two['host']) + + results = self.storage.find_pool_nameservers(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(pool_nameserver_two['host'], results[0]['host']) + + def test_get_pool_nameserver(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_nameserver(pool, fixture=0) + actual = self.storage.get_pool_nameserver( + self.admin_context, expected['id']) + + self.assertEqual(expected['host'], actual['host']) + + def test_get_pool_nameserver_missing(self): + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_nameserver(self.admin_context, uuid) + + def test_find_pool_nameserver_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_nameservers + pool_nameserver_one = self.create_pool_nameserver(pool, fixture=0) + pool_nameserver_two = self.create_pool_nameserver(pool, fixture=1) + + # Verify pool_nameserver_one + criterion = dict(host=pool_nameserver_one['host']) + + result = 
self.storage.find_pool_nameserver( + self.admin_context, criterion) + + self.assertEqual(pool_nameserver_one['host'], result['host']) + + # Verify pool_nameserver_two + criterion = dict(host=pool_nameserver_two['host']) + + result = self.storage.find_pool_nameserver( + self.admin_context, criterion) + + self.assertEqual(pool_nameserver_two['host'], result['host']) + + def test_find_pool_nameserver_criterion_missing(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_nameserver(pool, fixture=0) + + criterion = dict(host=expected['host'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + self.storage.find_pool_nameserver(self.admin_context, criterion) + + def test_update_pool_nameserver(self): + pool = self.create_pool(fixture=0) + + pool_nameserver = self.create_pool_nameserver(pool, host='192.0.2.1') + + # Update the pool_nameserver + pool_nameserver.host = '192.0.2.2' + + pool_nameserver = self.storage.update_pool_nameserver( + self.admin_context, pool_nameserver) + + # Verify the new values + self.assertEqual('192.0.2.2', pool_nameserver.host) + + # Ensure the version column was incremented + self.assertEqual(2, pool_nameserver.version) + + def test_update_pool_nameserver_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create two pool_nameservers + pool_nameserver_one = self.create_pool_nameserver( + pool, fixture=0, host='192.0.2.1') + pool_nameserver_two = self.create_pool_nameserver( + pool, fixture=0, host='192.0.2.2') + + # Update the second one to be a duplicate of the first + pool_nameserver_two.host = pool_nameserver_one.host + + with testtools.ExpectedException(exceptions.DuplicatePoolNameserver): + self.storage.update_pool_nameserver( + self.admin_context, pool_nameserver_two) + + def test_update_pool_nameserver_missing(self): + pool_nameserver = objects.PoolNameserver( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with 
testtools.ExpectedException(exceptions.PoolNameserverNotFound): + self.storage.update_pool_nameserver( + self.admin_context, pool_nameserver) + + def test_delete_pool_nameserver(self): + pool = self.create_pool(fixture=0) + pool_nameserver = self.create_pool_nameserver(pool, fixture=0) + + self.storage.delete_pool_nameserver( + self.admin_context, pool_nameserver['id']) + + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + self.storage.get_pool_nameserver( + self.admin_context, pool_nameserver['id']) + + def test_delete_pool_nameserver_missing(self): + with testtools.ExpectedException(exceptions.PoolNameserverNotFound): + uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' + self.storage.delete_pool_nameserver(self.admin_context, uuid) + + # PoolTarget tests + def test_create_pool_target(self): + pool = self.create_pool(fixture=0) + + values = { + 'pool_id': pool.id, + 'type': "fake" + } + + result = self.storage.create_pool_target( + self.admin_context, + pool.id, + objects.PoolTarget.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['type'], result['type']) + + def test_find_pool_targets(self): + pool = self.create_pool(fixture=0) + + # Verify that there are no pool_targets created + actual = self.storage.find_pool_targets(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a PoolTarget + pool_target = self.create_pool_target(pool, fixture=0) + + # Fetch the PoolTargets and ensure only 1 exists + actual = self.storage.find_pool_targets(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_target['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_target['type'], actual[0]['type']) + + def test_find_pool_targets_paging(self): + pool = self.create_pool(fixture=0) + + # Create 10 
PoolTargets + created = [self.create_pool_target(pool, description=u'Target %d' % i) + for i in range(10)] + + # Ensure we can page through the results. + self._ensure_paging(created, self.storage.find_pool_targets) + + def test_find_pool_targets_with_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_targets + pool_target_one = self.create_pool_target( + pool, fixture=0, description=u'One') + pool_target_two = self.create_pool_target( + pool, fixture=1, description=u'Two') + + # Verify pool_target_one + criterion = dict(description=pool_target_one['description']) + + results = self.storage.find_pool_targets( + self.admin_context, criterion) + + self.assertEqual(1, len(results)) + self.assertEqual( + pool_target_one['description'], results[0]['description']) + + # Verify pool_target_two + criterion = dict(description=pool_target_two['description']) + + results = self.storage.find_pool_targets(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual( + pool_target_two['description'], results[0]['description']) + + def test_get_pool_target(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_target(pool, fixture=0) + actual = self.storage.get_pool_target( + self.admin_context, expected['id']) + + self.assertEqual(expected['type'], actual['type']) + + def test_get_pool_target_missing(self): + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_target(self.admin_context, uuid) + + def test_find_pool_target_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_targets + pool_target_one = self.create_pool_target( + pool, fixture=0, description=u'One') + pool_target_two = self.create_pool_target( + pool, fixture=1, description=u'Two') + + # Verify pool_target_one + criterion = dict(description=pool_target_one['description']) + + result = self.storage.find_pool_target( + self.admin_context, 
criterion) + + self.assertEqual(pool_target_one['description'], result['description']) + + # Verify pool_target_two + criterion = dict(description=pool_target_two['description']) + + result = self.storage.find_pool_target( + self.admin_context, criterion) + + self.assertEqual(pool_target_two['description'], result['description']) + + def test_find_pool_target_criterion_missing(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_target(pool, fixture=0) + + criterion = dict(description=expected['description'] + u"NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + self.storage.find_pool_target(self.admin_context, criterion) + + def test_update_pool_target(self): + pool = self.create_pool(fixture=0) + + pool_target = self.create_pool_target(pool, description=u'One') + + # Update the pool_target + pool_target.description = u'Two' + + pool_target = self.storage.update_pool_target( + self.admin_context, pool_target) + + # Verify the new values + self.assertEqual(u'Two', pool_target.description) + + # Ensure the version column was incremented + self.assertEqual(2, pool_target.version) + + def test_update_pool_target_missing(self): + pool_target = objects.PoolTarget( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + self.storage.update_pool_target( + self.admin_context, pool_target) + + def test_delete_pool_target(self): + pool = self.create_pool(fixture=0) + pool_target = self.create_pool_target(pool, fixture=0) + + self.storage.delete_pool_target( + self.admin_context, pool_target['id']) + + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + self.storage.get_pool_target( + self.admin_context, pool_target['id']) + + def test_delete_pool_target_missing(self): + with testtools.ExpectedException(exceptions.PoolTargetNotFound): + uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183' + self.storage.delete_pool_target(self.admin_context, uuid) + + # 
PoolAlsoNotify tests + def test_create_pool_also_notify(self): + pool = self.create_pool(fixture=0) + + values = { + 'pool_id': pool.id, + 'host': "192.0.2.1", + 'port': 53 + } + + result = self.storage.create_pool_also_notify( + self.admin_context, + pool.id, + objects.PoolAlsoNotify.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNotNone(result['version']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(values['pool_id'], result['pool_id']) + self.assertEqual(values['host'], result['host']) + self.assertEqual(values['port'], result['port']) + + def test_create_pool_also_notify_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create the initial PoolAlsoNotify + self.create_pool_also_notify(pool, fixture=0) + + with testtools.ExpectedException(exceptions.DuplicatePoolAlsoNotify): + self.create_pool_also_notify(pool, fixture=0) + + def test_find_pool_also_notifies(self): + pool = self.create_pool(fixture=0) + + # Verify that there are no pool_also_notifies created + actual = self.storage.find_pool_also_notifies(self.admin_context) + self.assertEqual(0, len(actual)) + + # Create a PoolAlsoNotify + pool_also_notify = self.create_pool_also_notify(pool, fixture=0) + + # Fetch the PoolAlsoNotifies and ensure only 1 exists + actual = self.storage.find_pool_also_notifies(self.admin_context) + self.assertEqual(1, len(actual)) + + self.assertEqual(pool_also_notify['pool_id'], actual[0]['pool_id']) + self.assertEqual(pool_also_notify['host'], actual[0]['host']) + self.assertEqual(pool_also_notify['port'], actual[0]['port']) + + def test_find_pool_also_notifies_paging(self): + pool = self.create_pool(fixture=0) + + # Create 10 PoolAlsoNotifies + created = [self.create_pool_also_notify(pool, host='192.0.2.%d' % i) + for i in range(10)] + + # Ensure we can page through the results. 
+ self._ensure_paging(created, self.storage.find_pool_also_notifies) + + def test_find_pool_also_notifies_with_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_also_notifies + pool_also_notify_one = self.create_pool_also_notify(pool, fixture=0) + pool_also_notify_two = self.create_pool_also_notify(pool, fixture=1) + + # Verify pool_also_notify_one + criterion = dict(host=pool_also_notify_one['host']) + + results = self.storage.find_pool_also_notifies( + self.admin_context, criterion) + + self.assertEqual(1, len(results)) + self.assertEqual(pool_also_notify_one['host'], results[0]['host']) + + # Verify pool_also_notify_two + criterion = dict(host=pool_also_notify_two['host']) + + results = self.storage.find_pool_also_notifies(self.admin_context, + criterion) + self.assertEqual(1, len(results)) + self.assertEqual(pool_also_notify_two['host'], results[0]['host']) + + def test_get_pool_also_notify(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_also_notify(pool, fixture=0) + actual = self.storage.get_pool_also_notify( + self.admin_context, expected['id']) + + self.assertEqual(expected['host'], actual['host']) + + def test_get_pool_also_notify_missing(self): + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + uuid = '2c102ffd-7146-4b4e-ad62-b530ee0873fb' + self.storage.get_pool_also_notify(self.admin_context, uuid) + + def test_find_pool_also_notify_criterion(self): + pool = self.create_pool(fixture=0) + + # Create two pool_also_notifies + pool_also_notify_one = self.create_pool_also_notify(pool, fixture=0) + pool_also_notify_two = self.create_pool_also_notify(pool, fixture=1) + + # Verify pool_also_notify_one + criterion = dict(host=pool_also_notify_one['host']) + + result = self.storage.find_pool_also_notify( + self.admin_context, criterion) + + self.assertEqual(pool_also_notify_one['host'], result['host']) + + # Verify pool_also_notify_two + criterion = dict(host=pool_also_notify_two['host']) 
+ + result = self.storage.find_pool_also_notify( + self.admin_context, criterion) + + self.assertEqual(pool_also_notify_two['host'], result['host']) + + def test_find_pool_also_notify_criterion_missing(self): + pool = self.create_pool(fixture=0) + + expected = self.create_pool_also_notify(pool, fixture=0) + + criterion = dict(host=expected['host'] + "NOT FOUND") + + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + self.storage.find_pool_also_notify(self.admin_context, criterion) + + def test_update_pool_also_notify(self): + pool = self.create_pool(fixture=0) + + pool_also_notify = self.create_pool_also_notify(pool, host='192.0.2.1') + + # Update the pool_also_notify + pool_also_notify.host = '192.0.2.2' + + pool_also_notify = self.storage.update_pool_also_notify( + self.admin_context, pool_also_notify) + + # Verify the new values + self.assertEqual('192.0.2.2', pool_also_notify.host) + + # Ensure the version column was incremented + self.assertEqual(2, pool_also_notify.version) + + def test_update_pool_also_notify_duplicate(self): + pool = self.create_pool(fixture=0) + + # Create two pool_also_notifies + pool_also_notify_one = self.create_pool_also_notify( + pool, fixture=0, host='192.0.2.1') + pool_also_notify_two = self.create_pool_also_notify( + pool, fixture=0, host='192.0.2.2') + + # Update the second one to be a duplicate of the first + pool_also_notify_two.host = pool_also_notify_one.host + + with testtools.ExpectedException(exceptions.DuplicatePoolAlsoNotify): + self.storage.update_pool_also_notify( + self.admin_context, pool_also_notify_two) + + def test_update_pool_also_notify_missing(self): + pool_also_notify = objects.PoolAlsoNotify( + id='e8cee063-3a26-42d6-b181-bdbdc2c99d08') + + with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound): + self.storage.update_pool_also_notify( + self.admin_context, pool_also_notify) + + def test_delete_pool_also_notify(self): + pool = self.create_pool(fixture=0) + pool_also_notify = 
self.create_pool_also_notify(pool, fixture=0)
+
+        self.storage.delete_pool_also_notify(
+            self.admin_context, pool_also_notify['id'])
+
+        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
+            self.storage.get_pool_also_notify(
+                self.admin_context, pool_also_notify['id'])
+
+    def test_delete_pool_also_notify_missing(self):
+        with testtools.ExpectedException(exceptions.PoolAlsoNotifyNotFound):
+            uuid = '97f57960-f41b-4e93-8e22-8fd6c7e2c183'
+            self.storage.delete_pool_also_notify(self.admin_context, uuid)
+
+    # Zone Transfer Request tests
+    def test_create_zone_transfer_request(self):
+        zone = self.create_zone()
+
+        values = {
+            'tenant_id': self.admin_context.tenant,
+            'zone_id': zone.id,
+            'key': 'qwertyuiop'
+        }
+
+        result = self.storage.create_zone_transfer_request(
+            self.admin_context, objects.ZoneTransferRequest.from_dict(values))
+
+        self.assertEqual(self.admin_context.tenant, result['tenant_id'])
+        self.assertIn('status', result)
+
+    def test_create_zone_transfer_request_scoped(self):
+        zone = self.create_zone()
+        tenant_2_context = self.get_context(tenant='2')
+        tenant_3_context = self.get_context(tenant='3')
+
+        values = {
+            'tenant_id': self.admin_context.tenant,
+            'zone_id': zone.id,
+            'key': 'qwertyuiop',
+            'target_tenant_id': tenant_2_context.tenant,
+        }
+
+        result = self.storage.create_zone_transfer_request(
+            self.admin_context, objects.ZoneTransferRequest.from_dict(values))
+
+        self.assertIsNotNone(result['id'])
+        self.assertIsNotNone(result['created_at'])
+        self.assertIsNone(result['updated_at'])
+
+        self.assertEqual(self.admin_context.tenant, result['tenant_id'])
+        self.assertEqual(tenant_2_context.tenant, result['target_tenant_id'])
+        self.assertIn('status', result)
+
+        stored_ztr = self.storage.get_zone_transfer_request(
+            tenant_2_context, result.id)
+
+        self.assertEqual(self.admin_context.tenant, stored_ztr['tenant_id'])
+        self.assertEqual(stored_ztr['id'], result['id'])
+
+        with testtools.ExpectedException(
+
exceptions.ZoneTransferRequestNotFound): + self.storage.get_zone_transfer_request( + tenant_3_context, result.id) + + def test_find_zone_transfer_requests(self): + zone = self.create_zone() + + values = { + 'tenant_id': self.admin_context.tenant, + 'zone_id': zone.id, + 'key': 'qwertyuiop' + } + + self.storage.create_zone_transfer_request( + self.admin_context, objects.ZoneTransferRequest.from_dict(values)) + + requests = self.storage.find_zone_transfer_requests( + self.admin_context, {"tenant_id": self.admin_context.tenant}) + self.assertEqual(1, len(requests)) + + def test_delete_zone_transfer_request(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + + self.storage.delete_zone_transfer_request( + self.admin_context, zt_request.id) + + with testtools.ExpectedException( + exceptions.ZoneTransferRequestNotFound): + self.storage.get_zone_transfer_request( + self.admin_context, zt_request.id) + + def test_update_zone_transfer_request(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + + zt_request.description = 'New description' + result = self.storage.update_zone_transfer_request( + self.admin_context, zt_request) + self.assertEqual('New description', result.description) + + def test_get_zone_transfer_request(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + + result = self.storage.get_zone_transfer_request( + self.admin_context, zt_request.id) + self.assertEqual(zt_request.id, result.id) + self.assertEqual(zt_request.zone_id, result.zone_id) + + # Zone Transfer Accept tests + def test_create_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + values = { + 'tenant_id': self.admin_context.tenant, + 'zone_transfer_request_id': zt_request.id, + 'zone_id': zone.id, + 'key': zt_request.key + } + + result = self.storage.create_zone_transfer_accept( + self.admin_context, 
objects.ZoneTransferAccept.from_dict(values)) + + self.assertIsNotNone(result['id']) + self.assertIsNotNone(result['created_at']) + self.assertIsNone(result['updated_at']) + + self.assertEqual(self.admin_context.tenant, result['tenant_id']) + self.assertIn('status', result) + + def test_find_zone_transfer_accepts(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + values = { + 'tenant_id': self.admin_context.tenant, + 'zone_transfer_request_id': zt_request.id, + 'zone_id': zone.id, + 'key': zt_request.key + } + + self.storage.create_zone_transfer_accept( + self.admin_context, objects.ZoneTransferAccept.from_dict(values)) + + accepts = self.storage.find_zone_transfer_accepts( + self.admin_context, {"tenant_id": self.admin_context.tenant}) + self.assertEqual(1, len(accepts)) + + def test_find_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + values = { + 'tenant_id': self.admin_context.tenant, + 'zone_transfer_request_id': zt_request.id, + 'zone_id': zone.id, + 'key': zt_request.key + } + + result = self.storage.create_zone_transfer_accept( + self.admin_context, objects.ZoneTransferAccept.from_dict(values)) + + accept = self.storage.find_zone_transfer_accept( + self.admin_context, {"id": result.id}) + self.assertEqual(result.id, accept.id) + + def test_transfer_zone_ownership(self): + tenant_1_context = self.get_context(tenant='1') + tenant_2_context = self.get_context(tenant='2') + admin_context = self.get_admin_context() + admin_context.all_tenants = True + + zone = self.create_zone(context=tenant_1_context) + recordset = self.create_recordset(zone, context=tenant_1_context) + record = self.create_record( + zone, recordset, context=tenant_1_context) + + updated_zone = zone + + updated_zone.tenant_id = tenant_2_context.tenant + + self.storage.update_zone( + admin_context, updated_zone) + + saved_zone = self.storage.get_zone( + admin_context, zone.id) + 
saved_recordset = self.storage.get_recordset( + admin_context, recordset.id) + saved_record = self.storage.get_record( + admin_context, record.id) + + self.assertEqual(tenant_2_context.tenant, saved_zone.tenant_id) + self.assertEqual(tenant_2_context.tenant, saved_recordset.tenant_id) + self.assertEqual(tenant_2_context.tenant, saved_record.tenant_id) + + def test_delete_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + zt_accept = self.create_zone_transfer_accept(zt_request) + + self.storage.delete_zone_transfer_accept( + self.admin_context, zt_accept.id) + + with testtools.ExpectedException( + exceptions.ZoneTransferAcceptNotFound): + self.storage.get_zone_transfer_accept( + self.admin_context, zt_accept.id) + + def test_update_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + zt_accept = self.create_zone_transfer_accept(zt_request) + + zt_accept.status = 'COMPLETE' + result = self.storage.update_zone_transfer_accept( + self.admin_context, zt_accept) + self.assertEqual('COMPLETE', result.status) + + def test_get_zone_transfer_accept(self): + zone = self.create_zone() + zt_request = self.create_zone_transfer_request(zone) + zt_accept = self.create_zone_transfer_accept(zt_request) + + result = self.storage.get_zone_transfer_accept( + self.admin_context, zt_accept.id) + self.assertEqual(zt_accept.id, result.id) + self.assertEqual(zt_accept.zone_id, result.zone_id) + # Zone Import Tests def test_create_zone_import(self): values = { diff --git a/designate/tests/test_storage/test_sqlalchemy.py b/designate/tests/test_storage/test_sqlalchemy.py index 0eaa8b7ca..02def7e36 100644 --- a/designate/tests/test_storage/test_sqlalchemy.py +++ b/designate/tests/test_storage/test_sqlalchemy.py @@ -39,10 +39,26 @@ class SqlalchemyStorageTest(StorageTestCase, TestCase): def test_schema_table_names(self): table_names = [ - u'blacklists', u'migrate_version', 
u'pool_attributes', - u'pool_ns_records', u'pools', u'quotas', u'records', u'recordsets', - u'tlds', u'tsigkeys', u'zone_attributes', u'zone_masters', - u'zone_tasks', u'zone_transfer_accepts', u'zone_transfer_requests', + u'blacklists', + u'migrate_version', + u'pool_also_notifies', + u'pool_attributes', + u'pool_nameservers', + u'pool_ns_records', + u'pool_target_masters', + u'pool_target_options', + u'pool_targets', + u'pools', + u'quotas', + u'records', + u'recordsets', + u'tlds', + u'tsigkeys', + u'zone_attributes', + u'zone_masters', + u'zone_tasks', + u'zone_transfer_accepts', + u'zone_transfer_requests', u'zones' ] self.assertEqual(table_names, self.storage.engine.table_names())