Merge "Removed deprecated pool-manager implementation"

Zuul 2019-09-25 18:29:47 +00:00 committed by Gerrit Code Review
commit 5c232b2181
76 changed files with 89 additions and 4440 deletions

View File

@ -63,16 +63,6 @@
voting: false
- job:
name: designate-bind9-manager-model
parent: designate-bind9
vars:
devstack_services:
designate-worker: false
designate-producer: false
designate-pool-manager: true
designate-zone-manager: true
- job:
name: designate-bind9-py27
parent: designate-bind9
@ -139,7 +129,6 @@
jobs:
- designate-bind9
- designate-pdns4
- designate-bind9-manager-model
- designate-bind9-py27
- designate-pdns4-py27
- designate-pdns4-postgres
@ -152,7 +141,6 @@
jobs:
- designate-bind9
- designate-pdns4
- designate-bind9-manager-model
- designate-bind9-py27
- designate-pdns4-py27
- designate-grenade-pdns4

View File

@ -32,7 +32,6 @@ import pecan.routing
from designate import exceptions
from designate.central import rpcapi as central_rpcapi
from designate.pool_manager import rpcapi as pool_mgr_rpcapi
class RestController(pecan.rest.RestController):
@ -50,10 +49,6 @@ class RestController(pecan.rest.RestController):
def central_api(self):
return central_rpcapi.CentralAPI.get_instance()
@property
def pool_mgr_api(self):
return pool_mgr_rpcapi.PoolManagerAPI.get_instance()
def _apply_filter_params(self, params, accepted_filters, criterion):
invalid = []
for k in params:

View File

@ -57,9 +57,9 @@ class AgentPoolBackend(base.Backend):
super(AgentPoolBackend, self).__init__(target)
self.host = self.options.get('host', '127.0.0.1')
self.port = int(self.options.get('port', DEFAULT_AGENT_PORT))
self.timeout = CONF['service:pool_manager'].poll_timeout
self.retry_interval = CONF['service:pool_manager'].poll_retry_interval
self.max_retries = CONF['service:pool_manager'].poll_max_retries
self.timeout = CONF['service:worker'].poll_timeout
self.retry_interval = CONF['service:worker'].poll_retry_interval
self.max_retries = CONF['service:worker'].poll_max_retries
# FIXME: the agent retries creating zones without any interval
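The three poll options above drive mdns polling; a minimal illustrative helper (hypothetical, not part of this diff) showing how they interact:

import time

def poll_until_live(check_soa, timeout, retry_interval, max_retries):
    # Each attempt waits up to `timeout` seconds for a SOA answer; up to
    # `max_retries` retries are spaced `retry_interval` seconds apart.
    for _attempt in range(max_retries + 1):
        if check_soa(timeout=timeout):
            return True
        time.sleep(retry_interval)
    return False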

View File

@ -25,9 +25,6 @@ from designate.mdns import rpcapi as mdns_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('pool_id', 'designate.pool_manager',
group='service:pool_manager')
class Backend(DriverPlugin):
"""Base class for backend implementations"""
@ -50,10 +47,10 @@ class Backend(DriverPlugin):
self.admin_context.all_tenants = True
# Options for sending NOTIFYs
self.timeout = CONF['service:pool_manager'].poll_timeout
self.retry_interval = CONF['service:pool_manager'].poll_retry_interval
self.max_retries = CONF['service:pool_manager'].poll_max_retries
self.delay = CONF['service:pool_manager'].poll_delay
self.timeout = CONF['service:worker'].poll_timeout
self.retry_interval = CONF['service:worker'].poll_retry_interval
self.max_retries = CONF['service:worker'].poll_max_retries
self.delay = CONF['service:worker'].poll_delay
def start(self):
LOG.info('Starting %s backend', self.get_canonical_name())

View File

@ -49,7 +49,6 @@ from designate import scheduler
from designate import storage
from designate import utils
from designate.mdns import rpcapi as mdns_rpcapi
from designate.pool_manager import rpcapi as pool_manager_rpcapi
from designate.storage import transaction
from designate.worker import rpcapi as worker_rpcapi
@ -242,20 +241,13 @@ class Service(service.RPCService):
def mdns_api(self):
return mdns_rpcapi.MdnsAPI.get_instance()
@property
def pool_manager_api(self):
return pool_manager_rpcapi.PoolManagerAPI.get_instance()
@property
def worker_api(self):
return worker_rpcapi.WorkerAPI.get_instance()
@property
def zone_api(self):
# TODO(timsim): Remove this when pool_manager_api is gone
if cfg.CONF['service:worker'].enabled:
return self.worker_api
return self.pool_manager_api
return self.worker_api
def _is_valid_zone_name(self, context, zone_name):
# Validate zone name length
@ -1794,7 +1786,7 @@ class Service(service.RPCService):
# Diagnostics Methods
def _sync_zone(self, context, zone):
return self.pool_manager_api.update_zone(context, zone)
return self.zone_api.update_zone(context, zone)
@rpc.expected_exceptions()
@transaction
@ -2875,40 +2867,9 @@ class Service(service.RPCService):
created_zone_export = self.storage.create_zone_export(context,
zone_export)
if not cfg.CONF['service:worker'].enabled:
# So that we can maintain async behavior during the time that this
# lives in central, we'll return the 'PENDING' object, and then the
# 'COMPLETE'/'ERROR' status will be available on the first poll.
export = copy.deepcopy(created_zone_export)
synchronous = cfg.CONF['service:zone_manager'].export_synchronous
criterion = {'zone_id': zone_id}
count = self.storage.count_recordsets(context, criterion)
if synchronous:
try:
self.quota.limit_check(
context, context.project_id, api_export_size=count)
except exceptions.OverQuota:
LOG.debug('Zone Export too large to perform synchronously')
export.status = 'ERROR'
export.message = 'Zone is too large to export'
return export
export.location = \
"designate://v2/zones/tasks/exports/%(eid)s/export" % \
{'eid': export.id}
export.status = 'COMPLETE'
else:
LOG.debug('No method found to export zone')
export.status = 'ERROR'
export.message = 'No suitable method for export'
self.update_zone_export(context, export)
else:
export = copy.deepcopy(created_zone_export)
self.worker_api.start_zone_export(context, zone, export)
export = copy.deepcopy(created_zone_export)
self.worker_api.start_zone_export(context, zone, export)
return created_zone_export

View File

@ -1,61 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
import debtcollector
import designate.conf
from designate import service
from designate import utils
from designate import version
from designate import hookpoints
from designate.pool_manager import service as pool_manager_service
LOG = logging.getLogger(__name__)
CONF = designate.conf.CONF
CONF.import_opt('workers', 'designate.pool_manager',
group='service:pool_manager')
def main():
utils.read_config('designate', sys.argv)
logging.setup(CONF, 'designate')
gmr.TextGuruMeditation.setup_autorun(version)
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if CONF['service:worker'].enabled:
LOG.error('You have designate-worker enabled, starting '
'designate-pool-manager is incompatible with '
'designate-worker. You need to start '
'designate-worker instead.')
sys.exit(1)
debtcollector.deprecate('designate-pool-manager is deprecated in favor of '
'designate-worker', version='newton',
removal_version='rocky')
server = pool_manager_service.Service()
heartbeat = service.Heartbeat(server.service_name, server.tg)
hookpoints.log_hook_setup()
service.serve(server, workers=CONF['service:pool_manager'].workers)
heartbeat.start()
service.wait()

View File

@ -35,14 +35,6 @@ def main():
logging.setup(CONF, 'designate')
gmr.TextGuruMeditation.setup_autorun(version)
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if not CONF['service:worker'].enabled:
LOG.error('You do not have designate-worker enabled, starting '
'designate-producer is not allowed. '
'You need to start designate-zone-manager instead.')
sys.exit(1)
hookpoints.log_hook_setup()
server = producer_service.Service()

View File

@ -35,14 +35,6 @@ def main():
logging.setup(CONF, 'designate')
gmr.TextGuruMeditation.setup_autorun(version)
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if not CONF['service:worker'].enabled:
LOG.error('You do not have designate-worker enabled, starting '
'designate-worker is not allowed. '
'You need to start designate-pool-manager instead.')
sys.exit(1)
hookpoints.log_hook_setup()
server = worker_service.Service()

View File

@ -1,61 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
import debtcollector
import designate.conf
from designate import service
from designate import utils
from designate import version
from designate.producer import service as producer_service
LOG = logging.getLogger(__name__)
CONF = designate.conf.CONF
CONF.import_opt('workers', 'designate.producer',
group='service:zone_manager')
def main():
utils.read_config('designate', sys.argv)
logging.setup(CONF, 'designate')
gmr.TextGuruMeditation.setup_autorun(version)
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if CONF['service:worker'].enabled:
LOG.error('You have designate-worker enabled, starting '
'designate-zone-manager is incompatible with '
'designate-worker. You need to start '
'designate-producer instead.')
sys.exit(1)
debtcollector.deprecate('designate-zone-manager is deprecated in '
'favor of the designate-producer',
version='newton',
removal_version='rocky')
LOG.warning('Starting designate-producer under the zone-manager name')
server = producer_service.Service()
heartbeat = service.Heartbeat(server.service_name, server.tg)
service.serve(server, workers=CONF['service:zone_manager'].workers)
heartbeat.start()
service.wait()

View File

@ -1,84 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Super simple fake memcache client."""
from oslo_config import cfg
from oslo_utils import timeutils
CONF = cfg.CONF
def get_client(memcached_servers=None):
client_cls = Client
if not memcached_servers:
memcached_servers = CONF.memcached_servers
if memcached_servers:
import memcache
client_cls = memcache.Client
return client_cls(memcached_servers, debug=0)
class Client(object):
"""Replicates a tiny subset of memcached client interface."""
def __init__(self, *args, **kwargs):
"""Ignores the passed in args."""
self.cache = {}
def get(self, key):
"""Retrieves the value for a key or None.
This expunges expired keys during each get.
"""
now = timeutils.utcnow_ts()
for k in list(self.cache):
(timeout, _value) = self.cache[k]
if timeout and now >= timeout:
del self.cache[k]
return self.cache.get(key, (0, None))[1]
def set(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key."""
timeout = 0
if time != 0:
timeout = timeutils.utcnow_ts() + time
self.cache[key] = (timeout, value)
return True
def add(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key if it doesn't exist."""
if self.get(key) is not None:
return False
return self.set(key, value, time, min_compress_len)
def incr(self, key, delta=1):
"""Increments the value for a key."""
value = self.get(key)
if value is None:
return None
new_value = int(value) + delta
self.cache[key] = (self.cache[key][0], str(new_value))
return new_value
def delete(self, key, time=0):
"""Deletes the value associated with a key."""
if key in self.cache:
del self.cache[key]
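For reference, a usage sketch of the removed in-process fake client, based only on the methods shown above:

cache = Client()
cache.set('serial', '41', time=60)   # expires 60 seconds from now
cache.add('serial', '0')             # returns False: key already present
cache.incr('serial')                 # returns 42, stored back as a string
cache.delete('serial')
cache.get('serial')                  # returns None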

View File

@ -31,7 +31,6 @@ from designate.conf import mdns
from designate.conf import metrics
from designate.conf import msdns
from designate.conf import network_api
from designate.conf import pool_manager
from designate.conf import producer
from designate.conf import proxy
from designate.conf import service_status
@ -59,7 +58,6 @@ mdns.register_opts(CONF)
metrics.register_opts(CONF)
msdns.register_opts(CONF)
network_api.register_opts(CONF)
pool_manager.register_opts(CONF)
producer.register_opts(CONF)
proxy.register_opts(CONF)
service_status.register_opts(CONF)

View File

@ -70,10 +70,6 @@ DESIGNATE_OPTS = [
default='sudo designate-rootwrap /etc/designate/rootwrap.conf',
help='designate-rootwrap configuration'),
# Memcached
cfg.ListOpt('memcached_servers',
help='Memcached servers or None for in process cache.'),
cfg.StrOpt('network_api', default='neutron', help='Which API to use.'),
# Notifications

View File

@ -1,112 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import options
POOL_MANAGER_GROUP = cfg.OptGroup(
name='service:pool_manager',
title="Configuration for Pool Manager Service"
)
POOL_MANAGER_SQLALCHEMY_GROUP = cfg.OptGroup(
name='pool_manager_cache:sqlalchemy',
title="Configuration for SQLAlchemy Pool Manager Cache"
)
POOL_MANAGER_MEMCACHE_GROUP = cfg.OptGroup(
name='pool_manager_cache:memcache',
title="Configuration for memcache Pool Manager Cache"
)
POOL_MANAGER_OPTS = [
cfg.IntOpt('workers',
help='Number of Pool Manager worker processes to spawn'),
cfg.IntOpt('threads', default=1000,
help='Number of Pool Manager greenthreads to spawn'),
cfg.StrOpt('pool_id', default='794ccc2c-d751-44fe-b57f-8894c9f5c842',
help='The ID of the pool managed by this instance of the '
'Pool Manager'),
cfg.IntOpt('threshold_percentage', default=100,
help='The percentage of servers requiring a successful update '
'for a zone change to be considered active',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll_timeout', default=30,
help='The time to wait for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll_retry_interval', default=15,
help='The time between retrying to send a request and '
'waiting for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll_max_retries', default=10,
help='The maximum number of times to retry sending a request '
'and wait for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll_delay', default=5,
help='The time to wait before sending the first request '
'to a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.BoolOpt('enable_recovery_timer', default=True,
help='The flag for the recovery timer'),
cfg.IntOpt('periodic_recovery_interval', default=120,
help='The time between recovering from failures'),
cfg.BoolOpt('enable_sync_timer', default=True,
help='The flag for the sync timer'),
cfg.IntOpt('periodic_sync_interval', default=1800,
help='The time between synchronizing the servers with storage'),
cfg.IntOpt('periodic_sync_seconds', default=21600,
help='Zones updated within the last N seconds will be synced. '
'Use an empty value to sync all zones.'),
cfg.IntOpt('periodic_sync_max_attempts', default=3,
help='Number of attempts to update a zone during sync'),
cfg.IntOpt('periodic_sync_retry_interval', default=30,
help='Interval between zone update attempts during sync'),
cfg.StrOpt('cache_driver', default='memcache',
help='The cache driver to use'),
cfg.StrOpt('topic', default='pool_manager',
help='RPC topic name for pool-manager'),
]
POOL_MANAGER_MEMCACHE_OPTS = [
cfg.ListOpt('memcached_servers',
help='Memcached servers or None for in process cache.'),
cfg.IntOpt('expiration', default=3600,
help='Time in seconds to expire cache.'),
]
def register_opts(conf):
conf.register_group(POOL_MANAGER_GROUP)
conf.register_opts(POOL_MANAGER_OPTS,
group=POOL_MANAGER_GROUP)
conf.register_group(POOL_MANAGER_SQLALCHEMY_GROUP)
conf.register_opts(options.database_opts,
group=POOL_MANAGER_SQLALCHEMY_GROUP)
conf.register_group(POOL_MANAGER_MEMCACHE_GROUP)
conf.register_opts(POOL_MANAGER_MEMCACHE_OPTS,
group=POOL_MANAGER_MEMCACHE_GROUP)
def list_opts():
return {
POOL_MANAGER_GROUP: POOL_MANAGER_OPTS,
POOL_MANAGER_MEMCACHE_GROUP: POOL_MANAGER_MEMCACHE_OPTS,
POOL_MANAGER_SQLALCHEMY_GROUP: options.database_opts,
}
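A sketch of how these options were consumed before removal, mirroring the registration call removed from designate/conf/__init__.py earlier in this diff:

from oslo_config import cfg

conf = cfg.CONF
register_opts(conf)
# The defaults defined above then become readable, e.g.:
assert conf['service:pool_manager'].poll_timeout == 30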

View File

@ -20,11 +20,6 @@ PRODUCER_GROUP = cfg.OptGroup(
title='Configuration for Producer Service'
)
ZONE_MANAGER_GROUP = cfg.OptGroup(
name='service:zone_manager',
title='Configuration for Zone Manager Service'
)
PRODUCER_TASK_DELAYED_NOTIFY_GROUP = cfg.OptGroup(
name='producer_task:delayed_notify',
title='Configuration for Producer Task: Delayed Notify'
@ -67,29 +62,6 @@ PRODUCER_OPTS = [
help='RPC topic name for producer'),
]
ZONE_MANAGER_OPTS = [
cfg.IntOpt('workers',
help='Number of Zone Manager worker processes to spawn',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('threads', default=1000,
help='Number of Zone Manager greenthreads to spawn',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.ListOpt('enabled_tasks',
help='Enabled tasks to run',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.StrOpt('storage_driver', default='sqlalchemy',
help='The storage driver to use',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.BoolOpt('export_synchronous', default=True,
help='Whether to allow synchronous zone exports',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
]
PRODUCER_TASK_DELAYED_NOTIFY_OPTS = [
cfg.IntOpt('interval', default=5,
help='Run interval in seconds'),
@ -136,8 +108,6 @@ PRODUCER_TASK_ZONE_PURGE_OPTS = [
def register_opts(conf):
conf.register_group(PRODUCER_GROUP)
conf.register_opts(PRODUCER_OPTS, group=PRODUCER_GROUP)
conf.register_group(ZONE_MANAGER_GROUP)
conf.register_opts(ZONE_MANAGER_OPTS, group=ZONE_MANAGER_GROUP)
conf.register_group(PRODUCER_TASK_DELAYED_NOTIFY_GROUP)
conf.register_opts(PRODUCER_TASK_DELAYED_NOTIFY_OPTS,
group=PRODUCER_TASK_DELAYED_NOTIFY_GROUP)
@ -158,7 +128,6 @@ def register_opts(conf):
def list_opts():
return {
PRODUCER_GROUP: PRODUCER_OPTS,
ZONE_MANAGER_GROUP: ZONE_MANAGER_OPTS,
PRODUCER_TASK_DELAYED_NOTIFY_GROUP:
PRODUCER_TASK_DELAYED_NOTIFY_OPTS,
PRODUCER_TASK_PERIODIC_EXISTS_GROUP:

View File

@ -20,12 +20,6 @@ WORKER_GROUP = cfg.OptGroup(
)
WORKER_OPTS = [
cfg.BoolOpt('enabled', default=True,
help='Whether to send events to worker instead of '
'Pool Manager',
deprecated_for_removal=True,
deprecated_reason='In Train, this option will disappear '
'because pool manager will be removed'),
cfg.IntOpt('workers',
help='Number of Worker worker processes to spawn'),
cfg.IntOpt('threads', default=200,
@ -49,6 +43,9 @@ WORKER_OPTS = [
help='The time to wait before sending the first request '
'to a server'),
cfg.BoolOpt('notify', default=True,
deprecated_for_removal=True,
deprecated_reason='This option is being removed to reduce '
'complexity',
help='Whether to allow worker to send NOTIFYs, this will '
'noop NOTIFYs in mdns if true'),
cfg.BoolOpt('export_synchronous', default=True,

View File

@ -218,86 +218,3 @@ class Partitioner(object):
def unwatch_partition_change(self, callback):
self._callbacks.remove(callback)
class LeaderElection(object):
def __init__(self, coordinator, group_id):
self._coordinator = coordinator
self._group_id = group_id
self._callbacks = []
self._started = False
self._leader = False
def _warn_no_backend(self):
LOG.warning('No coordination backend configured, assuming we are the '
'leader. Please configure a coordination backend')
def start(self):
self._started = True
if self._coordinator:
LOG.info('Starting leader election for group %(group)s',
{'group': self._group_id})
# Nominate myself for election
self._coordinator.watch_elected_as_leader(
self._group_id, self._on_elected_leader)
else:
self._warn_no_backend()
self._leader = True
for callback in self._callbacks:
callback(None)
def stop(self):
self._started = False
if self._coordinator:
LOG.info('Stopping leader election for group %(group)s',
{'group': self._group_id})
# Remove the elected_as_leader callback
self._coordinator.unwatch_elected_as_leader(
self._group_id, self._on_elected_leader)
if self._leader:
# Tell Tooz we no longer wish to be the leader
LOG.info('Standing down as leader candidate for group '
'%(group)s', {'group': self._group_id})
self._leader = False
self._coordinator.stand_down_group_leader(self._group_id)
elif self._leader:
LOG.info('Standing down as leader candidate for group %(group)s',
{'group': self._group_id})
self._leader = False
@property
def is_leader(self):
return self._leader
def _on_elected_leader(self, event):
LOG.info('Successfully elected as leader of group %(group)s',
{'group': self._group_id})
self._leader = True
for callback in self._callbacks:
callback(event)
def watch_elected_as_leader(self, callback):
self._callbacks.append(callback)
if self._started and self._leader:
# We're started, and we're the leader, we should trigger the
# callback
callback(None)
elif self._started and not self._coordinator:
# We're started, and there's no coordination backend configured,
# we assume we're leader and call the callback.
self._warn_no_backend()
callback(None)
def unwatch_elected_as_leader(self, callback):
self._callbacks.remove(callback)
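An illustrative usage of the removed LeaderElection helper, using only the methods defined above ('coordinator' is a started tooz backend, or None to assume leadership):

election = LeaderElection(coordinator, group_id='zone-periodic-tasks')
election.watch_elected_as_leader(lambda event: LOG.info('elected leader'))
election.start()
if election.is_leader:
    pass  # safe to run singleton periodic work here
election.stop()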

View File

@ -299,10 +299,6 @@ class DuplicateBlacklist(Duplicate):
error_type = 'duplicate_blacklist'
class DuplicatePoolManagerStatus(Duplicate):
error_type = 'duplication_pool_manager_status'
class DuplicatePool(Duplicate):
error_type = 'duplicate_pool'
@ -419,10 +415,6 @@ class ReportNotFound(NotFound):
error_type = 'report_not_found'
class PoolManagerStatusNotFound(NotFound):
error_type = 'pool_manager_status_not_found'
class PoolNotFound(NotFound):
error_type = 'pool_not_found'

View File

@ -18,7 +18,6 @@ import os
from migrate.versioning import api as versioning_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_db import exception
from designate.manage import base
from designate.sqlalchemy import utils
@ -31,26 +30,13 @@ REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'migrate_repo'))
cfg.CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
cfg.CONF.import_opt('connection',
'designate.pool_manager.cache.impl_sqlalchemy',
group='pool_manager_cache:sqlalchemy')
CONF = cfg.CONF
INIT_VERSION = 69
def get_manager():
storage_db = CONF['storage:sqlalchemy'].connection
pool_manager_cache_db = CONF['pool_manager_cache:sqlalchemy'].connection
if storage_db == pool_manager_cache_db:
raise exception.DBMigrationError(
message=(
"Storage requires its own database. "
"Please check your config file."
))
else:
return utils.get_migration_manager(
REPOSITORY, CONF['storage:sqlalchemy'].connection, INIT_VERSION)
return utils.get_migration_manager(
REPOSITORY, CONF['storage:sqlalchemy'].connection, INIT_VERSION)
class DatabaseCommands(base.Commands):

View File

@ -61,36 +61,6 @@ class PoolCommands(base.Commands):
default_flow_style=False
)
@base.args('--file', help='The path to the file the yaml output should be '
'written to',
default='/etc/designate/pools.yaml')
def export_from_config(self, file):
self._startup()
# Avoid circular dependency imports
from designate import pool_manager
pool_manager.register_dynamic_pool_options()
try:
pools = self.central_api.find_pools(self.context)
except messaging.exceptions.MessagingTimeout:
LOG.critical("No response received from designate-central. "
"Check it is running, and retry")
sys.exit(1)
r_pools = objects.PoolList()
for pool in pools:
r_pool = objects.Pool.from_config(CONF, pool.id)
r_pool.id = pool.id
r_pool.ns_records = pool.ns_records
r_pool.attributes = pool.attributes
r_pools.append(r_pool)
with open(file, 'w') as stream:
yaml.dump(
DesignateAdapter.render('YAML', r_pools),
stream,
default_flow_style=False
)
@base.args('--pool_id', help='ID of the pool to be examined',
default=CONF['service:central'].default_pool_id)
def show_config(self, pool_id):

View File

@ -1,66 +0,0 @@
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from migrate.versioning import api as versioning_api
from oslo_config import cfg
from oslo_db import exception
from designate.manage import base
from designate.sqlalchemy import utils
REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'pool_manager',
'cache', 'impl_sqlalchemy',
'migrate_repo'))
cfg.CONF.import_opt('connection',
'designate.pool_manager.cache.impl_sqlalchemy',
group='pool_manager_cache:sqlalchemy')
cfg.CONF.import_opt('connection',
'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
CONF = cfg.CONF
def get_manager():
storage_db = CONF['storage:sqlalchemy'].connection
pool_manager_cache_db = CONF['pool_manager_cache:sqlalchemy'].connection
if storage_db == pool_manager_cache_db:
raise exception.DBMigrationError(
message=(
"Pool Manager Cache requires its own database."
" Please check your config file."
))
else:
return utils.get_migration_manager(REPOSITORY, pool_manager_cache_db)
class DatabaseCommands(base.Commands):
def version(self):
current = get_manager().version()
latest = versioning_api.version(repository=REPOSITORY).value
print("Current: %s Latest: %s" % (current, latest))
def sync(self):
get_manager().upgrade(None)
@base.args('revision', nargs='?')
def upgrade(self, revision):
get_manager().upgrade(revision)

View File

@ -16,7 +16,6 @@
import oslo_messaging as messaging
from oslo_log import log as logging
from designate.pool_manager import rpcapi as pool_mngr_api
from designate.central import rpcapi as central_api
LOG = logging.getLogger(__name__)
@ -37,7 +36,3 @@ class BaseEndpoint(object):
@property
def central_api(self):
return central_api.CentralAPI.get_instance()
@property
def pool_manager_api(self):
return pool_mngr_api.PoolManagerAPI.get_instance()

View File

@ -72,27 +72,7 @@ class NotifyEndpoint(base.BaseEndpoint):
def poll_for_serial_number(self, context, zone, nameserver, timeout,
retry_interval, max_retries, delay):
"""Get the serial number of a zone on a resolver, then call update_status
on Pool Manager to update the zone status.
:param context: The user context.
:param zone: The designate zone object. This contains the zone
name. zone.serial = expected_serial
:param nameserver: Destination for the poll
:param timeout: The time (in seconds) to wait for a SOA response from
nameserver.
:param retry_interval: The time (in seconds) between retries.
:param max_retries: The maximum number of retries mdns would do for
an expected serial number. After this many retries, mdns returns
an ERROR.
:param delay: The time to wait before sending the first request.
:return: None
"""
status, actual_serial, retries = self.get_serial_number(
context, zone, nameserver.host, nameserver.port, timeout,
retry_interval, max_retries, delay)
self.pool_manager_api.update_status(
context, zone, nameserver, status, actual_serial)
return
def get_serial_number(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):

View File

@ -77,7 +77,7 @@ class MdnsAPI(object):
def notify_zone_changed(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
if CONF['service:worker'].notify and CONF['service:worker'].enabled:
if CONF['service:worker'].notify:
LOG.debug('Letting worker send NOTIFYs instead')
return True

View File

@ -21,7 +21,6 @@ from designate.objects.zone import Zone, ZoneList # noqa
from designate.objects.zone_attribute import ZoneAttribute, ZoneAttributeList # noqa
from designate.objects.zone_master import ZoneMaster, ZoneMasterList # noqa
from designate.objects.floating_ip import FloatingIP, FloatingIPList # noqa
from designate.objects.pool_manager_status import PoolManagerStatus, PoolManagerStatusList # noqa
from designate.objects.pool import Pool, PoolList # noqa
from designate.objects.pool_also_notify import PoolAlsoNotify, PoolAlsoNotifyList # noqa
from designate.objects.pool_attribute import PoolAttribute, PoolAttributeList # noqa

View File

@ -13,7 +13,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate import utils
from designate.objects import base
from designate.objects import fields
@ -35,71 +34,6 @@ class Pool(base.DictObjectMixin, base.PersistentObjectMixin,
nullable=True),
}
@classmethod
def from_config(cls, CONF, pool_id):
pool_target_ids = CONF['pool:%s' % pool_id].targets
pool_nameserver_ids = CONF['pool:%s' % pool_id].nameservers
pool_also_notifies = CONF['pool:%s' % pool_id].also_notifies
# Build Base Pool
pool = {
'id': pool_id,
'description': 'Pool built from configuration on %s' % CONF.host,
'targets': [],
'nameservers': [],
'also_notifies': [],
}
# Build Pool Also Notifies
for pool_also_notify in pool_also_notifies:
host, port = utils.split_host_port(pool_also_notify)
pool['also_notifies'].append({
'host': host,
'port': port,
})
# Build Pool Targets
for pool_target_id in pool_target_ids:
pool_target_group = 'pool_target:%s' % pool_target_id
pool_target = {
'id': pool_target_id,
'type': CONF[pool_target_group].type,
'masters': [],
'options': [],
}
# Build Pool Target Masters
for pool_target_master in CONF[pool_target_group].masters:
host, port = utils.split_host_port(pool_target_master)
pool_target['masters'].append({
'host': host,
'port': port,
})
# Build Pool Target Options
for k, v in CONF[pool_target_group].options.items():
pool_target['options'].append({
'key': k,
'value': v,
})
pool['targets'].append(pool_target)
# Build Pool Nameservers
for pool_nameserver_id in pool_nameserver_ids:
pool_nameserver_group = 'pool_nameserver:%s' % pool_nameserver_id
pool_nameserver = {
'id': pool_nameserver_id,
'host': CONF[pool_nameserver_group].host,
'port': CONF[pool_nameserver_group].port,
}
pool['nameservers'].append(pool_nameserver)
return cls.from_dict(pool)
STRING_KEYS = [
'id', 'name'
]
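Before this commit, the removed classmethod was exercised by `designate-manage pool export_from_config` (see the manage/pool.py hunk above); a condensed sketch of that call path:

# Register the dynamic [pool:<id>] / [pool_target:<id>] groups first,
# then hydrate a Pool object from the flat config sections.
pool_manager.register_dynamic_pool_options()
r_pool = Pool.from_config(CONF, pool_id)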

View File

@ -1,43 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects import base
from designate.objects import fields
@base.DesignateRegistry.register
class PoolManagerStatus(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
fields = {
'nameserver_id': fields.UUIDFields(),
'zone_id': fields.UUIDFields(),
'status': fields.EnumField(['ACTIVE', 'PENDING', 'ERROR',
'SUCCESS', 'COMPLETE'], nullable=True),
'serial_number': fields.IntegerFields(minimum=0, maximum=4294967295),
'action': fields.EnumField(['CREATE', 'DELETE',
'UPDATE', 'NONE'], nullable=True),
}
STRING_KEYS = [
'id', 'action', 'status', 'server_id', 'zone_id'
]
@base.DesignateRegistry.register
class PoolManagerStatusList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = PoolManagerStatus
fields = {
'objects': fields.ListOfObjectsField('PoolManagerStatus'),
}

View File

@ -1,71 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
CONF = cfg.CONF
def register_dynamic_pool_options():
# Pool Options Registration Pass One
# Find the Current Pool ID
pool_id = CONF['service:pool_manager'].pool_id
# Build the [pool:<id>] config section
pool_group = cfg.OptGroup('pool:%s' % pool_id)
pool_opts = [
cfg.ListOpt('targets', default=[]),
cfg.ListOpt('nameservers', default=[]),
cfg.ListOpt('also_notifies', default=[]),
]
CONF.register_group(pool_group)
CONF.register_opts(pool_opts, group=pool_group)
# Pool Options Registration Pass Two
# Find the Current Pools Target ID's
pool_target_ids = CONF['pool:%s' % pool_id].targets
# Build the [pool_target:<id>] config sections
pool_target_opts = [
cfg.StrOpt('type'),
cfg.ListOpt('masters', default=[]),
cfg.DictOpt('options', default={}, secret=True),
]
for pool_target_id in pool_target_ids:
pool_target_group = cfg.OptGroup('pool_target:%s' % pool_target_id)
CONF.register_group(pool_target_group)
CONF.register_opts(pool_target_opts, group=pool_target_group)
# Find the Current Pools Nameserver ID's
pool_nameserver_ids = CONF['pool:%s' % pool_id].nameservers
# Build the [pool_nameserver:<id>] config sections
pool_nameserver_opts = [
cfg.StrOpt('host'),
cfg.IntOpt('port'),
]
for pool_nameserver_id in pool_nameserver_ids:
pool_nameserver_group = cfg.OptGroup(
'pool_nameserver:%s' % pool_nameserver_id)
CONF.register_group(pool_nameserver_group)
CONF.register_opts(pool_nameserver_opts, group=pool_nameserver_group)
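To illustrate the two registration passes above, assume a designate.conf containing (hypothetical IDs):

# [pool:794ccc2c-d751-44fe-b57f-8894c9f5c842]
# targets = target-1
#
# [pool_target:target-1]
# type = bind9
# masters = 192.0.2.10:5354
#
# After register_dynamic_pool_options(), the values are readable as:
CONF['pool_target:target-1'].type     # -> 'bind9'
CONF['pool_target:target-1'].masters  # -> ['192.0.2.10:5354']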

View File

@ -1,27 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.pool_manager.cache import base
LOG = logging.getLogger(__name__)
def get_pool_manager_cache(cache_driver):
"""Return the engine class from the provided engine name"""
cls = base.PoolManagerCache.get_driver(cache_driver)
return cls()

View File

@ -1,63 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from designate.plugin import DriverPlugin
@six.add_metaclass(abc.ABCMeta)
class PoolManagerCache(DriverPlugin):
"""Base class for cache plugins"""
__plugin_ns__ = 'designate.pool_manager.cache'
__plugin_type__ = 'pool_manager_cache'
@abc.abstractmethod
def clear(self, context, pool_manager_status):
"""
Clear the pool manager status object from the cache.
:param context: Security context information
:param pool_manager_status: Pool manager status object to clear
"""
@abc.abstractmethod
def store(self, context, pool_manager_status):
"""
Store the pool manager status object in the cache.
:param context: Security context information
:param pool_manager_status: Pool manager status object to store
:return:
"""
@abc.abstractmethod
def retrieve(self, context, nameserver_id, zone_id, action):
"""
Retrieve the pool manager status object.
:param context: Security context information
:param nameserver_id: the nameserver ID of the pool manager status
object
:param zone_id: the zone ID of the pool manger status object
:param action: the action of the pool manager status object
:return: the pool manager status object
"""

View File

@ -1,115 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
impl_memcache.__init__
~~~~~~~~~~~~~~~~~~~~~~
Memcache interface
Docs: doc/source/memory-caching.rst
"""""""""
import six
from oslo_config import cfg
from designate import exceptions
from designate import objects
from designate.common import memorycache
from designate.pool_manager.cache import base as cache_base
CONF = cfg.CONF
DEFAULT_STATUS = 'NONE'
class MemcachePoolManagerCache(cache_base.PoolManagerCache):
__plugin_name__ = 'memcache'
def __init__(self):
super(MemcachePoolManagerCache, self).__init__()
self.cache = memorycache.get_client(
CONF['pool_manager_cache:memcache'].memcached_servers)
self.expiration = CONF['pool_manager_cache:memcache'].expiration
def get_name(self):
return self.name
def clear(self, context, pool_manager_status):
status_key = self._build_status_key(pool_manager_status)
self.cache.delete(status_key)
serial_number_key = self._build_serial_number_key(pool_manager_status)
self.cache.delete(serial_number_key)
def store(self, context, pool_manager_status):
status_key = self._build_status_key(pool_manager_status)
# TODO(vinod): memcache does not seem to store None as the values
# Investigate if we can do a different default value for status
if pool_manager_status.status:
self.cache.set(
status_key, pool_manager_status.status, self.expiration)
else:
self.cache.set(status_key, DEFAULT_STATUS, self.expiration)
serial_number_key = self._build_serial_number_key(pool_manager_status)
self.cache.set(
serial_number_key, pool_manager_status.serial_number,
self.expiration)
def retrieve(self, context, nameserver_id, zone_id, action):
values = {
'nameserver_id': nameserver_id,
'zone_id': zone_id,
'action': action,
}
pool_manager_status = objects.PoolManagerStatus(**values)
status_key = self._build_status_key(pool_manager_status)
status = self.cache.get(status_key)
if status is None:
raise exceptions.PoolManagerStatusNotFound
serial_number_key = self._build_serial_number_key(pool_manager_status)
serial_number = self.cache.get(serial_number_key)
if serial_number is None:
raise exceptions.PoolManagerStatusNotFound
pool_manager_status.serial_number = serial_number
if status == DEFAULT_STATUS:
pool_manager_status.status = None
else:
pool_manager_status.status = status
return pool_manager_status
@staticmethod
def _status_key(pool_manager_status, tail):
key = '{nameserver}-{zone}-{action}-{tail}'.format(
nameserver=pool_manager_status.nameserver_id,
zone=pool_manager_status.zone_id,
action=pool_manager_status.action,
tail=tail
)
if six.PY2:
return key.encode('utf-8')
else:
return key
def _build_serial_number_key(self, pool_manager_status):
return self._status_key(pool_manager_status, 'serial_number')
def _build_status_key(self, pool_manager_status):
return self._status_key(pool_manager_status, 'status')
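A round-trip sketch against the removed memcache cache, using only names from the code above (the context and IDs are assumed):

status = objects.PoolManagerStatus(
    nameserver_id=nameserver_id, zone_id=zone_id, action='UPDATE',
    status='SUCCESS', serial_number=42)
cache = MemcachePoolManagerCache()
cache.store(context, status)
# Raises exceptions.PoolManagerStatusNotFound once the keys expire.
cache.retrieve(context, nameserver_id, zone_id, 'UPDATE')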

View File

@ -1,36 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate import exceptions
from designate.pool_manager.cache import base as cache_base
class NoopPoolManagerCache(cache_base.PoolManagerCache):
__plugin_name__ = 'noop'
def __init__(self):
super(NoopPoolManagerCache, self).__init__()
def get_name(self):
return self.name
def clear(self, context, pool_manager_status):
pass
def store(self, context, pool_manager_status):
pass
def retrieve(self, context, nameserver_id, zone_id, action):
raise exceptions.PoolManagerStatusNotFound

View File

@ -1,69 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from designate import exceptions
from designate import objects
from designate.pool_manager.cache import base as cache_base
from designate.sqlalchemy import base as sqlalchemy_base
from designate.pool_manager.cache.impl_sqlalchemy import tables
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SQLAlchemyPoolManagerCache(sqlalchemy_base.SQLAlchemy,
cache_base.PoolManagerCache):
__plugin_name__ = 'sqlalchemy'
def __init__(self):
super(SQLAlchemyPoolManagerCache, self).__init__()
def get_name(self):
return self.name
def clear(self, context, pool_manager_status):
# If there is no id retrieve the relevant pool manager status
if not pool_manager_status.id:
pool_manager_status = self.retrieve(
context, pool_manager_status.nameserver_id,
pool_manager_status.zone_id, pool_manager_status.action)
self._delete(
context, tables.pool_manager_statuses, pool_manager_status,
exceptions.PoolManagerStatusNotFound)
def store(self, context, pool_manager_status):
if pool_manager_status.id:
self._update(
context, tables.pool_manager_statuses, pool_manager_status,
exceptions.DuplicatePoolManagerStatus,
exceptions.PoolManagerStatusNotFound)
else:
self._create(
tables.pool_manager_statuses, pool_manager_status,
exceptions.DuplicatePoolManagerStatus)
def retrieve(self, context, nameserver_id, zone_id, action):
criterion = {
'nameserver_id': nameserver_id,
'zone_id': zone_id,
'action': action
}
return self._find(
context, tables.pool_manager_statuses, objects.PoolManagerStatus,
objects.PoolManagerStatusList,
exceptions.PoolManagerStatusNotFound, criterion, one=True)

View File

@ -1,4 +0,0 @@
This is a database migration repository for the project Designate.
More information at
http://code.google.com/p/sqlalchemy-migrate/

View File

@ -1,23 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.versioning.shell import main
if __name__ == '__main__':
main(debug='False')
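This manage.py drove the removed repository through sqlalchemy-migrate's shell; typical invocations (illustrative, assuming a configured database URL):

# python manage.py version            -> latest revision in the repo
# python manage.py db_version <url>   -> revision the database is at
# python manage.py upgrade <url>      -> apply pending migrations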

View File

@ -1,25 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=DesignatePoolManager
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering=False

View File

@ -1,76 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Integer, DateTime, Enum, UniqueConstraint
from sqlalchemy.schema import Table, Column, MetaData
from designate.sqlalchemy.types import UUID
UPDATE_STATUSES = ['SUCCESS', 'ERROR']
UPDATE_ACTIONS = ['CREATE', 'DELETE', 'UPDATE']
meta = MetaData()
pool_manager_statuses = Table(
'pool_manager_statuses', meta,
Column('id', UUID(), primary_key=True),
Column('version', Integer(), nullable=False),
Column('created_at', DateTime()),
Column('updated_at', DateTime()),
Column('server_id', UUID(), nullable=False),
Column('domain_id', UUID(), nullable=False),
Column('action', Enum(name='update_actions', *UPDATE_ACTIONS),
nullable=False),
Column('status', Enum(name='update_statuses', *UPDATE_STATUSES),
nullable=True),
Column('serial_number', Integer, nullable=False),
UniqueConstraint('server_id', 'domain_id', 'action',
name='unique_pool_manager_status'),
mysql_engine='InnoDB',
mysql_charset='utf8')
def upgrade(migrate_engine):
meta.bind = migrate_engine
with migrate_engine.begin() as conn:
if migrate_engine.name == "mysql":
conn.execute("SET foreign_key_checks = 0;")
elif migrate_engine.name == "postgresql":
conn.execute("SET CONSTRAINTS ALL DEFERRED;")
pool_manager_statuses.create(conn)
if migrate_engine.name == "mysql":
conn.execute("SET foreign_key_checks = 1;")
def downgrade(migrate_engine):
meta.bind = migrate_engine
with migrate_engine.begin() as conn:
if migrate_engine.name == "mysql":
conn.execute("SET foreign_key_checks = 0;")
elif migrate_engine.name == "postgresql":
conn.execute("SET CONSTRAINTS ALL DEFERRED;")
pool_manager_statuses.drop()
if migrate_engine.name == "mysql":
conn.execute("SET foreign_key_checks = 1;")

View File

@ -1,33 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import Table, MetaData
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
pms_table = Table('pool_manager_statuses', meta, autoload=True)
pms_table.c.server_id.alter(name='nameserver_id')
def downgrade(migrate_engine):
meta.bind = migrate_engine
pms_table = Table('pool_manager_statuses', meta, autoload=True)
pms_table.c.nameserver_id.alter(name='server_id')

View File

@ -1,30 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -1,30 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -1,30 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -1,30 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -1,30 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -1,34 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from sqlalchemy.schema import Table, MetaData
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
status_table = Table('pool_manager_statuses', meta, autoload=True)
status_table.c.domain_id.alter(name='zone_id')
def downgrade(migrate_engine):
pass

View File

@ -1,50 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import (Table, MetaData, Column, Integer, DateTime, Enum,
UniqueConstraint, ForeignKeyConstraint)
from oslo_utils import timeutils
from designate import utils
from designate.sqlalchemy.types import UUID
UPDATE_STATUSES = ['SUCCESS', 'ERROR']
UPDATE_ACTIONS = ['CREATE', 'DELETE', 'UPDATE']
metadata = MetaData()
pool_manager_statuses = Table(
'pool_manager_statuses', metadata,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer, default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('nameserver_id', UUID, nullable=False),
Column('zone_id', UUID, nullable=False),
Column('action', Enum(name='update_actions', *UPDATE_ACTIONS),
nullable=False),
Column('status', Enum(name='update_statuses', *UPDATE_STATUSES),
nullable=True),
Column('serial_number', Integer, nullable=False),
UniqueConstraint('nameserver_id', 'zone_id', 'action',
name='unique_pool_manager_status'),
ForeignKeyConstraint(['zone_id'], ['zones.id']),
mysql_engine='InnoDB',
mysql_charset='utf8',
)

View File

@ -1,114 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate import rpc
from designate.loggingutils import rpc_logging
LOG = logging.getLogger(__name__)
MNGR_API = None
def reset():
global MNGR_API
MNGR_API = None
@rpc_logging(LOG, 'pool_manager')
class PoolManagerAPI(object):
"""
Client side of the Pool Manager RPC API.
API version history:
1.0 - Initial version
2.0 - Rename domains to zones
2.1 - Add target_sync
"""
RPC_API_VERSION = '2.1'
def __init__(self, topic=None):
self.topic = topic if topic else cfg.CONF['service:pool_manager'].topic
target = messaging.Target(topic=self.topic,
version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='2.1')
@classmethod
def get_instance(cls):
"""
Calling rpc.get_client() during API object initialization raises an
assertion error unless designate.rpc.TRANSPORT has already been set up
by rpc.init().
This avoids that by creating the rpcapi on demand.
"""
global MNGR_API
if not MNGR_API:
MNGR_API = cls()
return MNGR_API
def target_sync(self, context, pool_id, target_id, timestamp):
LOG.info(
"target_sync: Syncing target %(target) since %(timestamp)d.",
{
'target': target_id,
'timestamp': timestamp
})
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.call(
context, 'target_sync', pool_id=pool_id, target_id=target_id,
timestamp=timestamp)
def create_zone(self, context, zone):
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'create_zone', zone=zone)
def delete_zone(self, context, zone):
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'delete_zone', zone=zone)
def update_zone(self, context, zone):
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'update_zone', zone=zone)
def update_status(self, context, zone, nameserver, status,
actual_serial):
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'update_status', zone=zone, nameserver=nameserver,
status=status, actual_serial=actual_serial)
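# Illustrative sketch (not part of the original module): every cast above
# scopes the topic to a single pool, so only the pool-manager instance
# responsible for that pool consumes the message. With a 'pool_manager'
# topic and a hypothetical pool id, the prepared topic would be:
#
#   topic = '%s.%s' % ('pool_manager', '794ccc2c-d751-44fe-b57f-8894c9f5c842')
#   # -> 'pool_manager.794ccc2c-d751-44fe-b57f-8894c9f5c842'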

View File

@ -1,976 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
from decimal import Decimal
import time
from datetime import datetime
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_log import log as logging
from oslo_concurrency import lockutils
from designate import backend
from designate import coordination
from designate import exceptions
from designate import objects
from designate import utils
from designate.central import rpcapi as central_api
from designate.pool_manager import rpcapi as pool_manager_rpcapi
from designate.mdns import rpcapi as mdns_api
from designate import service
from designate.context import DesignateContext
from designate.pool_manager import cache
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SUCCESS_STATUS = 'SUCCESS'
PENDING_STATUS = 'PENDING'
ERROR_STATUS = 'ERROR'
NO_ZONE_STATUS = 'NO_ZONE'
CREATE_ACTION = 'CREATE'
DELETE_ACTION = 'DELETE'
UPDATE_ACTION = 'UPDATE'
MAXIMUM_THRESHOLD = 100
@contextmanager
def wrap_backend_call():
"""
Wraps backend calls, ensuring any exception raised is a Backend exception.
"""
try:
yield
except exceptions.Backend:
raise
except Exception as e:
raise exceptions.Backend('Unknown backend failure: %r' % e)
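# Usage sketch (illustrative, not original code): wrap_backend_call()
# normalizes arbitrary driver failures into exceptions.Backend, so callers
# only need a single except clause. The names below are assumed:
#
#   with wrap_backend_call():
#       backend.create_zone(context, zone)  # any error -> exceptions.Backend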
def _constant_retries(num_attempts, sleep_interval):
"""Generate a sequence of False terminated by a True
Sleep `sleep_interval` between cycles but not at the end.
"""
for cnt in range(num_attempts):
if cnt != 0:
LOG.debug("Executing retry n. %d", cnt)
if cnt < num_attempts - 1:
yield False
time.sleep(sleep_interval)
else:
yield True
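# Consumption sketch (illustrative): the generator yields False for every
# attempt except the last, sleeping between cycles, and True on the final
# attempt, so callers can detect the last cycle in a plain for loop:
#
#   list(_constant_retries(3, 0))  # -> [False, False, True]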
class Service(service.RPCService):
"""
Service side of the Pool Manager RPC API.
API version history:
1.0 - Initial version
2.0 - The Big Rename
2.1 - Add target_sync
"""
RPC_API_VERSION = '2.1'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self):
self._scheduler = None
self._storage = None
self._quota = None
self._pool_election = None
self._central_api = None
self._mdns_api = None
self._pool_manager_api = None
topic = '%s.%s' % (
cfg.CONF['service:pool_manager'].topic,
CONF['service:pool_manager'].pool_id
)
super(Service, self).__init__(
self.service_name, topic,
threads=cfg.CONF['service:worker'].threads,
)
self.coordination = coordination.Coordination(
self.service_name, self.tg
)
# Get a pool manager cache connection.
self.cache = cache.get_pool_manager_cache(
CONF['service:pool_manager'].cache_driver)
# Store some settings for quick access later
self.threshold = CONF['service:pool_manager'].threshold_percentage
self.timeout = CONF['service:pool_manager'].poll_timeout
self.retry_interval = CONF['service:pool_manager'].poll_retry_interval
self.max_retries = CONF['service:pool_manager'].poll_max_retries
self.delay = CONF['service:pool_manager'].poll_delay
self._periodic_sync_max_attempts = \
CONF['service:pool_manager'].periodic_sync_max_attempts
self._periodic_sync_retry_interval = \
CONF['service:pool_manager'].periodic_sync_retry_interval
# Compute a time (seconds) by which things should have propagated
self.max_prop_time = utils.max_prop_time(
self.timeout, self.max_retries, self.retry_interval, self.delay
)
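# Worked example (illustrative values, and assuming utils.max_prop_time()
# sums the per-retry timeout and retry interval plus the initial delay):
# with poll_timeout=30, poll_max_retries=10, poll_retry_interval=15 and
# poll_delay=5, the worst-case window is 30*10 + 15*10 + 5 = 455 seconds.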
def _setup_target_backends(self):
self.target_backends = {}
for target in self.pool.targets:
# Fetch an instance of the Backend class, passing in the options
# and masters
self.target_backends[target.id] = backend.get_backend(target)
LOG.info('%d targets setup', len(self.pool.targets))
if not self.target_backends:
raise exceptions.NoPoolTargetsConfigured()
@property
def service_name(self):
return 'pool_manager'
def start(self):
# Build the Pool (and related) Object from Config
context = DesignateContext.get_admin_context()
pool_id = CONF['service:pool_manager'].pool_id
has_targets = False
# TODO(kiall): This block of code should be replaced with a cleaner,
# limited version. e.g. should retry for X minutes, and
# backoff rather than fixed retry intervals.
while not has_targets:
try:
self.pool = self.central_api.get_pool(context, pool_id)
if len(self.pool.targets) > 0:
has_targets = True
else:
LOG.error("No targets for %s found.", self.pool)
time.sleep(5)
# Pool data may not have migrated to the DB yet
except exceptions.PoolNotFound:
LOG.error("Pool ID %s not found.", pool_id)
time.sleep(5)
# designate-central service may not have started yet
except messaging.exceptions.MessagingTimeout:
time.sleep(0.2)
# designate-central failed in an unknown way, don't allow another
# failing / not started service to cause pool-manager to crash.
except Exception:
LOG.exception("An unknown exception occurred while "
"fetching pool details")
time.sleep(5)
# Create the necessary Backend instances for each target
self._setup_target_backends()
for target in self.pool.targets:
self.target_backends[target.id].start()
super(Service, self).start()
self.coordination.start()
# Set up a Leader Election, used for ensuring certain tasks are
# executed on exactly one pool-manager instance at a time
self._pool_election = coordination.LeaderElection(
self.coordination.coordinator,
'%s:%s' % (self.service_name, self.pool.id))
self._pool_election.start()
if CONF['service:pool_manager'].enable_recovery_timer:
interval = CONF['service:pool_manager'].periodic_recovery_interval
LOG.info('Starting periodic recovery timer every'
' %(interval)s s', {'interval': interval})
self.tg.add_timer(interval, self.periodic_recovery, interval)
if CONF['service:pool_manager'].enable_sync_timer:
interval = CONF['service:pool_manager'].periodic_sync_interval
LOG.info('Starting periodic synchronization timer every'
' %(interval)s s', {'interval': interval})
self.tg.add_timer(interval, self.periodic_sync, interval)
def stop(self, graceful=True):
self._pool_election.stop()
# self.coordination.stop()
super(Service, self).stop(graceful)
for target in self.pool.targets:
self.target_backends[target.id].stop()
@property
def central_api(self):
if not self._central_api:
self._central_api = central_api.CentralAPI.get_instance()
return self._central_api
@property
def mdns_api(self):
if not self._mdns_api:
self._mdns_api = mdns_api.MdnsAPI.get_instance()
return self._mdns_api
@property
def pool_manager_api(self):
if not self._pool_manager_api:
pool_mgr_api = pool_manager_rpcapi.PoolManagerAPI
self._pool_manager_api = pool_mgr_api.get_instance()
return self._pool_manager_api
def _get_admin_context_all_tenants(self):
return DesignateContext.get_admin_context(all_tenants=True)
# Periodic Tasks
def periodic_recovery(self):
"""
Runs only on the pool leader
:return: None
"""
if not self._pool_election.is_leader:
return
context = self._get_admin_context_all_tenants()
LOG.info("Starting Periodic Recovery")
try:
# Handle Deletion Failures
zones = self._get_failed_zones(context, DELETE_ACTION)
LOG.info("periodic_recovery:delete_zone needed on %d zones",
len(zones))
for zone in zones:
self.pool_manager_api.delete_zone(context, zone)
# Handle Creation Failures
zones = self._get_failed_zones(context, CREATE_ACTION)
LOG.info("periodic_recovery:create_zone needed on %d zones",
len(zones))
for zone in zones:
self.pool_manager_api.create_zone(context, zone)
# Handle Update Failures
zones = self._get_failed_zones(context, UPDATE_ACTION)
LOG.info("periodic_recovery:update_zone needed on %d zones",
len(zones))
for zone in zones:
self.pool_manager_api.update_zone(context, zone)
except Exception:
LOG.exception('An unhandled exception in periodic '
'recovery occurred')
def periodic_sync(self):
"""Periodically sync all the zones that are not in ERROR status
Runs only on the pool leader
:return: None
"""
if not self._pool_election.is_leader:
return
LOG.info("Starting Periodic Synchronization")
context = self._get_admin_context_all_tenants()
zones = self._fetch_healthy_zones(context)
zones = set(zones)
# TODO(kiall): If the zone was created within the last
# periodic_sync_seconds, attempt to recreate
# to fill in targets which may have failed.
retry_gen = _constant_retries(
self._periodic_sync_max_attempts,
self._periodic_sync_retry_interval
)
for is_last_cycle in retry_gen:
zones_in_error = []
for zone in zones:
try:
success = self.update_zone(context, zone)
if not success:
zones_in_error.append(zone)
except Exception:
LOG.exception('An unhandled exception in periodic '
'synchronization occurred.')
zones_in_error.append(zone)
if not zones_in_error:
return
zones = zones_in_error
for zone in zones_in_error:
self.central_api.update_status(context, zone.id, ERROR_STATUS,
zone.serial)
def target_sync(self, context, pool_id, target_id, timestamp):
"""
Replay all the events that we can since a certain timestamp
"""
context = self._get_admin_context_all_tenants()
context.show_deleted = True
target = None
for tar in self.pool.targets:
if tar.id == target_id:
target = tar
if target is None:
raise exceptions.BadRequest('Please supply a valid target id.')
LOG.info('Starting Target Sync')
criterion = {
'pool_id': pool_id,
'updated_at': '>%s' % datetime.fromtimestamp(
timestamp).isoformat(),
}
zones = self.central_api.find_zones(context, criterion=criterion,
sort_key='updated_at', sort_dir='asc')
self.tg.add_thread(self._target_sync,
context, zones, target, timestamp)
return 'Syncing %(len)s zones on %(target)s' % {'len': len(zones),
'target': target_id}
def _target_sync(self, context, zones, target, timestamp):
zone_ops = []
timestamp_dt = datetime.fromtimestamp(timestamp)
for zone in zones:
if isinstance(zone.created_at, datetime):
zone_created_at = zone.created_at
elif isinstance(zone.created_at, str):
zone_created_at = datetime.strptime(zone.created_at,
"%Y-%m-%dT%H:%M:%S.%f")
else:
raise Exception("zone.created_at is of type %s" %
str(type(zone.created_at)))
if zone.status == 'DELETED':
# Remove any other ops for this zone
for zone_op in zone_ops:
if zone.name == zone_op[0].name:
zone_ops.remove(zone_op)
# If the zone was created before the timestamp delete it,
# otherwise, it will just never be created
if (zone_created_at <= timestamp_dt):
zone_ops.append((zone, 'DELETE'))
elif (zone_created_at > timestamp_dt):
# If the zone was created after the timestamp
for zone_op in zone_ops:
if (
zone.name == zone_op[0].name and
zone_op[1] == 'DELETE'
):
zone_ops.remove(zone_op)
zone_ops.append((zone, 'CREATE'))
else:
zone_ops.append((zone, 'UPDATE'))
for zone, action in zone_ops:
if action == 'CREATE':
self._create_zone_on_target(context, target, zone)
elif action == 'UPDATE':
self._update_zone_on_target(context, target, zone)
elif action == 'DELETE':
self._delete_zone_on_target(context, target, zone)
zone.serial = 0
for nameserver in self.pool.nameservers:
self.mdns_api.poll_for_serial_number(
context, zone, nameserver, self.timeout,
self.retry_interval, self.max_retries, self.delay)
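# Replay sketch (illustrative): for a zone that ends up DELETED, any ops
# already queued for the same zone name are dropped first; then, if the
# zone was created before the sync timestamp, a single DELETE is
# replayed, and if it was created after, nothing is replayed at all
# (the target never saw it):
#
#   zone_ops = [(zone, 'CREATE'), (zone, 'UPDATE')]
#   # zone.status == 'DELETED', created before timestamp:
#   #   -> zone_ops becomes [(zone, 'DELETE')]
#   # zone.status == 'DELETED', created after timestamp:
#   #   -> zone_ops becomes []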
# Standard Create/Update/Delete Methods
def create_zone(self, context, zone):
"""Called by Central or by periodic_recovery, instruct the backends to
create a zone, then poll for consensus.
On success, send NOTIFY to also_notifies and nameservers
Finally, poll for zone serial number on nameservers.
:param context: Security context information.
:param zone: Zone to be created
:return: None
"""
LOG.info("Creating new zone %s", zone.name)
results = []
# Create the zone on each of the Pool Targets
for target in self.pool.targets:
results.append(
self._create_zone_on_target(context, target, zone)
)
if self._exceed_or_meet_threshold(results.count(True)):
LOG.debug('Consensus reached for creating zone %(zone)s '
'on pool targets', {'zone': zone.name})
# The zone status will be updated asynchronously by MiniDNS
else:
LOG.warning('Consensus not reached for creating zone %(zone)s '
'on pool targets', {'zone': zone.name})
self.central_api.update_status(
context, zone.id, ERROR_STATUS, zone.serial)
return
# Send a NOTIFY to each also-notifies
for also_notify in self.pool.also_notifies:
self._update_zone_on_also_notify(context, also_notify, zone)
# Ensure the change has propagated to each nameserver
for nameserver in self.pool.nameservers:
create_status = self._build_status_object(
nameserver, zone, CREATE_ACTION)
self.cache.store(context, create_status)
self.mdns_api.poll_for_serial_number(
context, zone, nameserver, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def _create_zone_on_target(self, context, target, zone):
"""Called by create_zone, run create_zone on backends
:param context: Security context information.
:param target: Target to create Zone on
:param zone: Zone to be created
:return: True/False
"""
LOG.debug("Creating zone %s on target %s", zone.name, target.id)
backend = self.target_backends[target.id]
retries = 0
while retries < self.max_retries:
try:
backend.create_zone(context, zone)
return True
except Exception:
retries += 1
LOG.exception(
"Failed to create zone %(zone)s on "
"target %(target)s on attempt %(attempt)d",
{
'zone': zone.name,
'target': target.id,
'attempt': retries
}) # noqa
time.sleep(self.retry_interval)
return False
def update_zone(self, context, zone):
"""Update a zone across every pool target, check for consensus and
for propagation.
:param context: Security context information.
:param zone: Zone to be updated
:return: consensus reached (bool)
"""
LOG.info("Updating zone %s", zone.name)
# Update the zone on each of the Pool Targets
success_count = 0
for target in self.pool.targets:
ok_status = self._update_zone_on_target(context, target, zone)
if ok_status:
success_count += 1
if not self._exceed_or_meet_threshold(success_count):
LOG.warning('Consensus not reached for updating zone %(zone)s '
'on pool targets', {'zone': zone.name})
self.central_api.update_status(context, zone.id, ERROR_STATUS,
zone.serial)
return False
LOG.debug('Consensus reached for updating zone %(zone)s '
'on pool targets', {'zone': zone.name})
# The zone status will be updated asynchronously by MiniDNS
# Send a NOTIFY to each also-notifies
for also_notify in self.pool.also_notifies:
self._update_zone_on_also_notify(context, also_notify, zone)
# Ensure the change has propagated to each nameserver
for nameserver in self.pool.nameservers:
# See if there is already another update in progress
try:
self.cache.retrieve(context, nameserver.id, zone.id,
UPDATE_ACTION)
except exceptions.PoolManagerStatusNotFound:
update_status = self._build_status_object(
nameserver, zone, UPDATE_ACTION)
self.cache.store(context, update_status)
self.mdns_api.poll_for_serial_number(
context, zone, nameserver, self.timeout,
self.retry_interval, self.max_retries, self.delay)
return True
def _update_zone_on_target(self, context, target, zone):
"""Instruct the appropriate backend to update a zone on a target
:param context: Security context information.
:param target: Target to update Zone on
:param zone: Zone to be updated
:return: True/False
"""
LOG.debug("Updating zone %s on target %s", zone.name, target.id)
backend = self.target_backends[target.id]
try:
backend.update_zone(context, zone)
return True
except Exception:
LOG.exception("Failed to update zone %(zone)s on target "
"%(target)s",
{'zone': zone.name, 'target': target.id})
return False
def _update_zone_on_also_notify(self, context, also_notify, zone):
LOG.info('Updating zone %(zone)s on also_notify %(server)s.',
{'zone': zone.name,
'server': self._get_destination(also_notify)})
self.mdns_api.notify_zone_changed(
context, zone, also_notify.host, also_notify.port, self.timeout,
self.retry_interval, self.max_retries, 0)
def delete_zone(self, context, zone):
"""
:param context: Security context information.
:param zone: Zone to be deleted
:return: None
"""
LOG.info("Deleting zone %s", zone.name)
results = []
# Delete the zone on each of the Pool Targets
for target in self.pool.targets:
results.append(
self._delete_zone_on_target(context, target, zone))
if not self._exceed_or_meet_threshold(
results.count(True), MAXIMUM_THRESHOLD):
LOG.warning('Consensus not reached for deleting zone %(zone)s '
'on pool targets', {'zone': zone.name})
self.central_api.update_status(
context, zone.id, ERROR_STATUS, zone.serial)
zone.serial = 0
# Ensure the change has propagated to each nameserver
for nameserver in self.pool.nameservers:
# See if there is already another update in progress
try:
self.cache.retrieve(context, nameserver.id, zone.id,
DELETE_ACTION)
except exceptions.PoolManagerStatusNotFound:
update_status = self._build_status_object(
nameserver, zone, DELETE_ACTION)
self.cache.store(context, update_status)
self.mdns_api.poll_for_serial_number(
context, zone, nameserver, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def _delete_zone_on_target(self, context, target, zone):
"""
:param context: Security context information.
:param target: Target to delete Zone from
:param zone: Zone to be deleted
:return: True/False
"""
LOG.debug("Deleting zone %s on target %s", zone.name, target.id)
backend = self.target_backends[target.id]
retries = 0
while retries < self.max_retries:
try:
backend.delete_zone(context, zone)
return True
except Exception:
retries += 1
LOG.exception(
"Failed to delete zone %(zone)s on "
"target %(target)s on attempt %(attempt)d",
{
'zone': zone.name,
'target': target.id,
'attempt': retries
})
time.sleep(self.retry_interval)
return False
def update_status(self, context, zone, nameserver, status,
actual_serial):
"""
update_status is called by mdns for creates and updates.
Deletes are handled entirely by the backend, and their status is
determined at the time of the delete itself.
:param context: Security context information.
:param zone: The designate zone object.
:param nameserver: The nameserver for which a status update is being
sent.
:param status: The status, 'SUCCESS' or 'ERROR'.
:param actual_serial: The actual serial number received from the name
server for the zone.
:return: None
"""
LOG.debug("Calling update_status for %s : %s : %s : %s",
zone.name, zone.action, status, actual_serial)
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
with lockutils.lock('update-status-%s' % zone.id):
try:
current_status = self.cache.retrieve(
context, nameserver.id, zone.id, action)
except exceptions.PoolManagerStatusNotFound:
current_status = self._build_status_object(
nameserver, zone, action)
self.cache.store(context, current_status)
cache_serial = current_status.serial_number
LOG.debug('For zone %s : %s on nameserver %s the cache serial '
'is %s and the actual serial is %s.',
zone.name, action, self._get_destination(nameserver),
cache_serial, actual_serial)
if actual_serial and cache_serial <= actual_serial:
current_status.status = status
current_status.serial_number = actual_serial
self.cache.store(context, current_status)
LOG.debug('Attempting to get consensus serial for %s',
zone.name)
consensus_serial = self._get_consensus_serial(context, zone)
LOG.debug('Consensus serial for %s is %s',
zone.name, consensus_serial)
# If there is a valid consensus serial we can still send a success
# for that serial.
# If there is a higher error serial we can also send an error for
# the error serial.
if consensus_serial != 0 and cache_serial <= consensus_serial \
and zone.status != 'ACTIVE':
LOG.info('For zone %(zone)s the consensus serial is '
'%(consensus_serial)s.',
{
'zone': zone.name,
'consensus_serial': consensus_serial
})
self.central_api.update_status(
context, zone.id, SUCCESS_STATUS, consensus_serial)
if status == ERROR_STATUS:
error_serial = self._get_error_serial(
context, zone, consensus_serial)
if error_serial > consensus_serial or error_serial == 0:
LOG.warning('For zone %(zone)s '
'the error serial is %(error_serial)s.',
{
'zone': zone.name,
'error_serial': error_serial
})
self.central_api.update_status(
context, zone.id, ERROR_STATUS, error_serial)
if status == NO_ZONE_STATUS:
if action == DELETE_ACTION:
self.central_api.update_status(
context, zone.id, NO_ZONE_STATUS, 0)
else:
LOG.warning('Zone %(zone)s is not present in some targets',
{'zone': zone.name})
self.central_api.update_status(
context, zone.id, NO_ZONE_STATUS, 0)
if consensus_serial == zone.serial and self._is_consensus(
context, zone, action, SUCCESS_STATUS,
MAXIMUM_THRESHOLD):
self._clear_cache(context, zone, action)
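# Flow sketch (illustrative): suppose a zone's desired serial is 7 and
# one nameserver reports 7 while the other is still at 5. The consensus
# serial is 5, so central is told SUCCESS at 5; once the second server
# reaches 7, consensus hits 7 and, with full consensus at
# MAXIMUM_THRESHOLD, the cached statuses for the action are cleared.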
# Utility Methods
def _get_failed_zones(self, context, action):
"""
Fetch zones that are in ERROR status or have been PENDING for a long
time. Used by periodic recovery.
After a certain time changes either should have successfully
propagated or gone to an ERROR state.
However, random failures and undiscovered bugs leave zones hanging out
in PENDING state forever. By treating those "stale" zones as failed,
periodic recovery will attempt to restore them.
:return: :class:`ZoneList` zones
"""
criterion = {
'pool_id': CONF['service:pool_manager'].pool_id,
'action': action,
'status': ERROR_STATUS
}
error_zones = self.central_api.find_zones(context, criterion)
# Include things that have been hanging out in PENDING
# status for longer than they should
# Generate the current serial, will provide a UTC Unix TS.
current = utils.increment_serial()
stale_criterion = {
'pool_id': CONF['service:pool_manager'].pool_id,
'action': action,
'status': PENDING_STATUS,
'serial': "<%s" % (current - self.max_prop_time)
}
LOG.debug('Including zones with action %(action)s and %(status)s '
'older than %(seconds)ds',
{'action': action,
'status': PENDING_STATUS,
'seconds': self.max_prop_time})
stale_zones = self.central_api.find_zones(context, stale_criterion)
if stale_zones:
LOG.warning(
'Found %(len)d zones PENDING for more than %(sec)d seconds',
{
'len': len(stale_zones),
'sec': self.max_prop_time
})
error_zones.extend(stale_zones)
return error_zones
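# Worked example (illustrative): zone serials here are UTC Unix
# timestamps produced by utils.increment_serial(). With a max_prop_time
# of 455 seconds and a current serial of 1422062952, the stale criterion
# matches any PENDING zone whose serial is below 1422062952 - 455 =
# 1422062497, i.e. changes that should long since have propagated or
# moved to ERROR.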
def _fetch_healthy_zones(self, context):
"""Fetch all zones not in error
:return: :class:`ZoneList` zones
"""
criterion = {
'pool_id': CONF['service:pool_manager'].pool_id,
'status': '!%s' % ERROR_STATUS
}
periodic_sync_seconds = \
CONF['service:pool_manager'].periodic_sync_seconds
if periodic_sync_seconds is not None:
# Generate the current serial, will provide a UTC Unix TS.
current = utils.increment_serial()
criterion['serial'] = ">%s" % (current - periodic_sync_seconds)
zones = self.central_api.find_zones(context, criterion)
return zones
@staticmethod
def _get_destination(nameserver):
return '%s:%s' % (nameserver.host, nameserver.port)
@staticmethod
def _percentage(count, total_count):
return (Decimal(count) / Decimal(total_count)) * Decimal(100)
def _exceed_or_meet_threshold(self, count, threshold=None):
"""Evaluate if count / the number of pool targets >= threshold
Used to implement consensus
"""
threshold = threshold or self.threshold
perc = self._percentage(count, len(self.pool.targets))
return perc >= Decimal(threshold)
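# Worked example (illustrative, assuming a threshold_percentage of 100):
# with two pool targets, _exceed_or_meet_threshold(1) computes
# Decimal(1) / Decimal(2) * 100 = 50 < 100 and returns False, while
# _exceed_or_meet_threshold(2) returns True. Deletes pass
# MAXIMUM_THRESHOLD explicitly so every target must confirm removal.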
@staticmethod
def _get_sorted_serials(pool_manager_statuses, descending=False):
serials = []
for pool_manager_status in pool_manager_statuses:
serials.append(pool_manager_status.serial_number)
serials.sort(reverse=descending)
return serials
def _get_serials_ascending(self, pool_manager_statuses):
return self._get_sorted_serials(pool_manager_statuses)
def _get_serials_descending(self, pool_manager_statuses):
return self._get_sorted_serials(pool_manager_statuses, descending=True)
def _is_consensus(self, context, zone, action, status, threshold=None):
"""Fetch zone status across all nameservers through MiniDNS and compare
it with the expected `status`
:return: consensus reached (bool)
"""
status_count = 0
pool_manager_statuses = self._retrieve_statuses(
context, zone, action)
for pool_manager_status in pool_manager_statuses:
if pool_manager_status.status == status:
status_count += 1
if threshold is None:
threshold = self.threshold
return self._exceed_or_meet_threshold(status_count, threshold)
def _get_consensus_serial(self, context, zone):
consensus_serial = 0
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
pm_statuses = self._retrieve_statuses(context, zone, action)
for serial in self._get_serials_descending(pm_statuses):
serial_count = 0
for pm_status in pm_statuses:
if pm_status.serial_number >= serial:
serial_count += 1
if self._exceed_or_meet_threshold(serial_count, self.threshold):
consensus_serial = serial
break
return consensus_serial
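# Worked example (illustrative): with two targets and nameserver serials
# [5, 3], the descending scan tries 5 first (only 1 of 2 statuses is
# >= 5, i.e. 50% < 100%), then 3 (2 of 2 are >= 3, 100%), so the
# consensus serial is 3: the newest serial enough servers have reached.
# Note the percentage is taken against len(self.pool.targets).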
def _get_error_serial(self, context, zone, consensus_serial):
error_serial = 0
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
if self._is_consensus(context, zone, action, ERROR_STATUS):
pm_statuses = self._retrieve_statuses(context, zone, action)
for serial in self._get_serials_ascending(pm_statuses):
if serial > consensus_serial:
error_serial = serial
break
return error_serial
# When we hear back from the nameserver, the serial_number is set to the
# value the nameserver reported for the zone.
@staticmethod
def _build_status_object(nameserver, zone, action):
"""
:return: :class:`objects.PoolManagerStatus`
"""
values = {
'nameserver_id': nameserver.id,
'zone_id': zone.id,
'status': None,
'serial_number': 0,
'action': action
}
return objects.PoolManagerStatus(**values)
# Methods for manipulating the cache.
def _clear_cache(self, context, zone, action=None):
LOG.debug('Clearing cache for zone %s with action %s.',
zone.name, action)
pool_manager_statuses = []
if action:
actions = [action]
else:
actions = [CREATE_ACTION, UPDATE_ACTION, DELETE_ACTION]
for nameserver in self.pool.nameservers:
for action in actions:
pool_manager_status = self._build_status_object(
nameserver, zone, action)
pool_manager_statuses.append(pool_manager_status)
for pool_manager_status in pool_manager_statuses:
# Ignore any not found errors while clearing the cache
try:
self.cache.clear(context, pool_manager_status)
except exceptions.PoolManagerStatusNotFound:
pass
def _retrieve_from_mdns(self, context, nameserver, zone, action):
"""Instruct MiniDNS to get a zone serial number from a nameserver
Set error status if the zone is unexpectedly missing.
:return: :class:`objects.PoolManagerStatus` or None
"""
try:
(status, actual_serial, retries) = \
self.mdns_api.get_serial_number(
context, zone, nameserver.host, nameserver.port,
self.timeout, self.retry_interval, self.max_retries,
self.delay)
except messaging.MessagingException as msg_ex:
LOG.debug('Could not retrieve status and serial for zone %s on '
'nameserver %s with action %s (%s: %s)',
zone.name, self._get_destination(nameserver), action,
type(msg_ex), str(msg_ex))
return None
pool_manager_status = self._build_status_object(
nameserver, zone, action)
if status == NO_ZONE_STATUS:
if action == CREATE_ACTION:
pool_manager_status.status = ERROR_STATUS
elif action == DELETE_ACTION:
pool_manager_status.status = SUCCESS_STATUS
elif action == UPDATE_ACTION:
pool_manager_status.action = CREATE_ACTION
pool_manager_status.status = ERROR_STATUS
else:
pool_manager_status.status = status
pool_manager_status.serial_number = actual_serial or 0
LOG.debug('Retrieved status %s and serial %s for zone %s '
'on nameserver %s with action %s from mdns.',
pool_manager_status.status,
pool_manager_status.serial_number,
zone.name, self._get_destination(nameserver), action)
self.cache.store(context, pool_manager_status)
return pool_manager_status
def _retrieve_statuses(self, context, zone, action):
"""Instruct MiniDNS to get a zone serial number from all nameservers,
unless a cached value is available.
Set error status if the zone is unexpectedly missing.
:return: list of :class:`objects.PoolManagerStatus`
"""
pool_manager_statuses = []
for nameserver in self.pool.nameservers:
try:
pool_manager_status = self.cache.retrieve(
context, nameserver.id, zone.id, action)
LOG.debug('Cache hit! Retrieved status %s and serial %s '
'for zone %s on nameserver %s with action %s from '
'the cache.',
pool_manager_status.status,
pool_manager_status.serial_number,
zone.name,
self._get_destination(nameserver), action)
except exceptions.PoolManagerStatusNotFound:
LOG.debug('Cache miss! Did not retrieve status and serial '
'for zone %s on nameserver %s with action %s from '
'the cache. Getting it from the server.',
zone.name,
self._get_destination(nameserver),
action)
pool_manager_status = self._retrieve_from_mdns(
context, nameserver, zone, action)
if pool_manager_status is not None:
pool_manager_statuses.append(pool_manager_status)
return pool_manager_statuses

View File

@ -54,10 +54,7 @@ class Service(service.RPCService):
@property
def storage(self):
if not self._storage:
# TODO(timsim): Remove this when zone_mgr goes away
storage_driver = cfg.CONF['service:zone_manager'].storage_driver
if cfg.CONF['service:producer'].storage_driver != storage_driver:
storage_driver = cfg.CONF['service:producer'].storage_driver
storage_driver = cfg.CONF['service:producer'].storage_driver
self._storage = storage.get_storage(storage_driver)
return self._storage
@ -88,14 +85,8 @@ class Service(service.RPCService):
self._partitioner.start()
self._partitioner.watch_partition_change(self._rebalance)
# TODO(timsim): Remove this when zone_mgr goes away
zmgr_enabled_tasks = CONF['service:zone_manager'].enabled_tasks
producer_enabled_tasks = CONF['service:producer'].enabled_tasks
enabled = zmgr_enabled_tasks
if producer_enabled_tasks:
enabled = producer_enabled_tasks
for task in tasks.PeriodicTask.get_extensions(enabled):
enabled_tasks = CONF['service:producer'].enabled_tasks
for task in tasks.PeriodicTask.get_extensions(enabled_tasks):
LOG.debug("Registering task %s", task)
# Instantiate the task

View File

@ -20,7 +20,6 @@ from designate import plugin
from designate import rpc
from designate.central import rpcapi
from designate.worker import rpcapi as worker_rpcapi
from designate.pool_manager import rpcapi as pool_manager_rpcapi
from oslo_config import cfg
from oslo_log import log as logging
@ -48,16 +47,9 @@ class PeriodicTask(plugin.ExtensionPlugin):
def worker_api(self):
return worker_rpcapi.WorkerAPI.get_instance()
@property
def pool_manager_api(self):
return pool_manager_rpcapi.PoolManagerAPI.get_instance()
@property
def zone_api(self):
# TODO(timsim): Remove this when pool_manager_api is gone
if cfg.CONF['service:worker'].enabled:
return self.worker_api
return self.pool_manager_api
return self.worker_api
def on_partition_change(self, my_partitions, members, event):
"""Refresh partitions attribute
@ -276,10 +268,6 @@ class WorkerPeriodicRecovery(PeriodicTask):
__plugin_name__ = 'worker_periodic_recovery'
def __call__(self):
# TODO(timsim): Remove this when worker is always on
if not cfg.CONF['service:worker'].enabled:
return
pstart, pend = self._my_range()
LOG.info(
"Recovering zones for shards %(start)s to %(end)s",

View File

@ -50,11 +50,6 @@ CONF.import_opt('auth_strategy', 'designate.api',
group='service:api')
CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
CONF.import_opt('cache_driver', 'designate.pool_manager',
group='service:pool_manager')
CONF.import_opt('connection',
'designate.pool_manager.cache.impl_sqlalchemy',
group='pool_manager_cache:sqlalchemy')
CONF.import_opt('emitter_type', 'designate.service_status',
group="heartbeat_emitter")
CONF.import_opt('scheduler_filters', 'designate.scheduler',
@ -377,8 +372,6 @@ class TestCase(base.BaseTestCase):
group='storage:sqlalchemy'
)
self._setup_pool_manager_cache()
self.config(network_api='fake')
self.config(
@ -400,31 +393,6 @@ class TestCase(base.BaseTestCase):
# Setup the Default Pool with some useful settings
self._setup_default_pool()
def _setup_pool_manager_cache(self):
self.config(
cache_driver='sqlalchemy',
group='service:pool_manager')
repository = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'pool_manager',
'cache',
'impl_sqlalchemy',
'migrate_repo'))
db_fixture = self.useFixture(
fixtures.DatabaseFixture.get_fixture(repository))
if os.getenv('DESIGNATE_SQL_DEBUG', "False").lower() in _TRUE_VALUES:
connection_debug = 50
else:
connection_debug = 0
self.config(
connection=db_fixture.url,
connection_debug=connection_debug,
group='pool_manager_cache:sqlalchemy')
def _setup_default_pool(self):
# Fetch the default pool
pool = self.storage.get_pool(self.admin_context, default_pool_id)

View File

@ -211,111 +211,3 @@ class TestPartitionerWithoutBackend(TestCase):
partitioner.start()
partitioner.watch_partition_change(cb)
cb.assert_called_with(partitions, None, None)
class TestLeaderElection(TestCase):
def setUp(self):
super(TestLeaderElection, self).setUp()
self.coord_fixture = self.useFixture(fixtures.CoordinatorFixture(
'zake://', b'InsertNameHere'))
self.election = coordination.LeaderElection(
self.coordinator, 'President')
@property
def coordinator(self):
"""Helper for quick access to the raw coordinator"""
return self.coord_fixture.coordinator
def test_is_leader(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Start the election
self.election.start()
self.coordinator.run_watchers()
# We should now be the leader.
self.assertTrue(self.election.is_leader)
# Stop the election
self.election.stop()
# We should no longer be the leader.
self.assertFalse(self.election.is_leader)
def test_callbacks(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Create and attach a callback
mock_callback_one = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_one)
# Ensure the callback has not yet been called.
self.assertFalse(mock_callback_one.called)
# Start the election
self.election.start()
self.coordinator.run_watchers()
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_one.call_count)
# Create and attach a second callback after we start
mock_callback_two = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_two)
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_two.call_count)
class TestLeaderElectionWithoutBackend(TestCase):
def setUp(self):
super(TestLeaderElectionWithoutBackend, self).setUp()
# coordinator = None indicates no coordination backend has been
# configured
coordinator = None
self.election = coordination.LeaderElection(coordinator, 'President')
def test_is_leader(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Start the election
self.election.start()
# We should now be the leader.
self.assertTrue(self.election.is_leader)
# Stop the election
self.election.stop()
# We should no longer be the leader.
self.assertFalse(self.election.is_leader)
def test_callbacks(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Create and attach a callback
mock_callback_one = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_one)
# Ensure the callback has not yet been called.
self.assertFalse(mock_callback_one.called)
# Start the election
self.election.start()
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_one.call_count)
# Create and attach a second callback after we start
mock_callback_two = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_two)
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_two.call_count)

View File

@ -1,65 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.tests import TestCase
from designate.utils import DEFAULT_MDNS_PORT
POOL_DICT = {
'id': u'794ccc2c-d751-44fe-b57f-8894c9f5c842',
'name': u'default',
'targets': [
{
'id': 'f278782a-07dc-4502-9177-b5d85c5f7c7e',
'type': 'fake',
'masters': [
{
'host': '127.0.0.1',
'port': DEFAULT_MDNS_PORT
}
],
'options': {}
},
{
'id': 'a38703f2-b71e-4e5b-ab22-30caaed61dfd',
'type': 'fake',
'masters': [
{
'host': '127.0.0.1',
'port': DEFAULT_MDNS_PORT
}
],
'options': {}
},
],
'nameservers': [
{
'id': 'c5d64303-4cba-425a-9f3c-5d708584dde4',
'host': '127.0.0.1',
'port': 5355
},
{
'id': 'c67cdc95-9a9e-4d2a-98ed-dc78cbd85234',
'host': '127.0.0.1',
'port': 5356
},
],
'also_notifies': [],
}
class PoolManagerTestCase(TestCase):
pass

View File

@ -1,53 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from designate import exceptions
from designate import objects
from designate.pool_manager.cache import base
class PoolManagerCacheTestCase(object):
def create_pool_manager_status(self):
values = {
'nameserver_id': '896aa661-198c-4379-bccd-5d8de7007030',
'zone_id': 'bce45113-4a22-418d-a54d-c9777d056312',
'action': 'CREATE',
'status': 'SUCCESS',
'serial_number': 1
}
return objects.PoolManagerStatus.from_dict(values)
def test_interface(self):
self._ensure_interface(base.PoolManagerCache, self.cache.__class__)
def test_store_and_clear_and_retrieve(self):
expected = self.create_pool_manager_status()
self.cache.store(self.admin_context, expected)
self.cache.clear(self.admin_context, expected)
with testtools.ExpectedException(exceptions.PoolManagerStatusNotFound):
self.cache.retrieve(
self.admin_context, expected.nameserver_id, expected.zone_id,
expected.action)
def test_retrieve(self):
expected = self.create_pool_manager_status()
with testtools.ExpectedException(exceptions.PoolManagerStatusNotFound):
self.cache.retrieve(
self.admin_context, expected.nameserver_id, expected.zone_id,
expected.action)

View File

@ -1,66 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock
from designate.pool_manager import cache
from designate.tests import TestCase
from designate.tests.test_pool_manager.cache import PoolManagerCacheTestCase
class MemcachePoolManagerCacheTest(PoolManagerCacheTestCase, TestCase):
def setUp(self):
super(MemcachePoolManagerCacheTest, self).setUp()
self.cache = cache.get_pool_manager_cache('memcache')
self.mock_status = Mock(
nameserver_id='nameserver_id',
zone_id='zone_id',
action='CREATE',
)
def test_store_and_retrieve(self):
expected = self.create_pool_manager_status()
self.cache.store(self.admin_context, expected)
actual = self.cache.retrieve(
self.admin_context, expected.nameserver_id, expected.zone_id,
expected.action)
self.assertEqual(expected.nameserver_id, actual.nameserver_id)
self.assertEqual(expected.zone_id, actual.zone_id)
self.assertEqual(expected.status, actual.status)
self.assertEqual(expected.serial_number, actual.serial_number)
self.assertEqual(expected.action, actual.action)
def test_serial_number_key_is_a_string(self):
"""Memcache requires keys be strings.
RabbitMQ messages are unicode by default, so any string
interpolation requires explicit encoding.
"""
key = self.cache._build_serial_number_key(self.mock_status)
self.assertIsInstance(key, str)
self.assertEqual(key, 'nameserver_id-zone_id-CREATE-serial_number')
def test_status_key_is_a_string(self):
"""Memcache requires keys be strings.
RabbitMQ messages are unicode by default, so any string
interpolation requires explicit encoding.
"""
key = self.cache._build_status_key(self.mock_status)
self.assertIsInstance(key, str)
self.assertEqual(key, 'nameserver_id-zone_id-CREATE-status')

View File

@ -1,37 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from designate import exceptions
from designate.pool_manager import cache
from designate.tests import TestCase
from designate.tests.test_pool_manager.cache import PoolManagerCacheTestCase
class NoopPoolManagerCacheTest(PoolManagerCacheTestCase, TestCase):
def setUp(self):
super(NoopPoolManagerCacheTest, self).setUp()
self.cache = cache.get_pool_manager_cache('noop')
def test_store_and_retrieve(self):
expected = self.create_pool_manager_status()
self.cache.store(self.admin_context, expected)
with testtools.ExpectedException(exceptions.PoolManagerStatusNotFound):
self.cache.retrieve(
self.admin_context, expected.nameserver_id, expected.zone_id,
expected.action)

View File

@ -1,39 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.pool_manager import cache
from designate.tests import TestCase
from designate.tests.test_pool_manager.cache import PoolManagerCacheTestCase
class SqlalchemyPoolManagerCacheTest(PoolManagerCacheTestCase, TestCase):
def setUp(self):
super(SqlalchemyPoolManagerCacheTest, self).setUp()
self.cache = cache.get_pool_manager_cache('sqlalchemy')
def test_store_and_retrieve(self):
expected = self.create_pool_manager_status()
self.cache.store(self.admin_context, expected)
actual = self.cache.retrieve(
self.admin_context, expected.nameserver_id, expected.zone_id,
expected.action)
self.assertEqual(expected.nameserver_id, actual.nameserver_id)
self.assertEqual(expected.zone_id, actual.zone_id)
self.assertEqual(expected.status, actual.status)
self.assertEqual(expected.serial_number, actual.serial_number)
self.assertEqual(expected.action, actual.action)

View File

@ -1,104 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
import oslo_messaging as messaging
from designate import objects
from designate.pool_manager.rpcapi import PoolManagerAPI
from designate.tests.test_pool_manager import PoolManagerTestCase
class PoolManagerAPITest(PoolManagerTestCase):
@patch.object(messaging.RPCClient, 'prepare')
def test_create_zone(self, mock_prepare):
inner_mock = mock.Mock()
inner_mock.cast = mock.Mock(return_value=None)
mock_prepare.return_value = inner_mock
values = {
'name': 'example.org.',
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842'
}
zone = objects.Zone.from_dict(values)
PoolManagerAPI.get_instance().create_zone(self.admin_context, zone)
mock_prepare.assert_called_once_with(
topic='pool_manager.%s' % zone.pool_id)
mock_prepare.return_value.cast.assert_called_once_with(
self.admin_context, 'create_zone', zone=zone)
@patch.object(messaging.RPCClient, 'prepare')
def test_delete_zone(self, mock_prepare):
inner_mock = mock.Mock()
inner_mock.cast = mock.Mock(return_value=None)
mock_prepare.return_value = inner_mock
values = {
'name': 'example.org.',
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842'
}
zone = objects.Zone.from_dict(values)
PoolManagerAPI.get_instance().delete_zone(self.admin_context, zone)
mock_prepare.assert_called_once_with(
topic='pool_manager.%s' % zone.pool_id)
mock_prepare.return_value.cast.assert_called_once_with(
self.admin_context, 'delete_zone', zone=zone)
@patch.object(messaging.RPCClient, 'prepare')
def test_update_zone(self, mock_prepare):
inner_mock = mock.Mock()
inner_mock.cast = mock.Mock(return_value=None)
mock_prepare.return_value = inner_mock
values = {
'name': 'example.org.',
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842'
}
zone = objects.Zone.from_dict(values)
PoolManagerAPI.get_instance().update_zone(self.admin_context, zone)
mock_prepare.assert_called_once_with(
topic='pool_manager.%s' % zone.pool_id)
mock_prepare.return_value.cast.assert_called_once_with(
self.admin_context, 'update_zone', zone=zone)
@patch.object(messaging.RPCClient, 'prepare')
def test_update_status(self, mock_prepare):
inner_mock = mock.Mock()
inner_mock.cast = mock.Mock(return_value=None)
mock_prepare.return_value = inner_mock
values = {
'name': 'example.org.',
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842'
}
zone = objects.Zone.from_dict(values)
values = {
'host': '127.0.0.1',
'port': 53
}
nameserver = objects.PoolNameserver.from_dict(values)
PoolManagerAPI.get_instance().update_status(
self.admin_context, zone, nameserver, 'SUCCESS', 1)
mock_prepare.assert_called_once_with(
topic='pool_manager.%s' % zone.pool_id)
mock_prepare.return_value.cast.assert_called_once_with(
self.admin_context, 'update_status', zone=zone,
nameserver=nameserver, status='SUCCESS', actual_serial=1)

View File

@ -1,480 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import oslo_messaging as messaging
from mock import call
from mock import Mock
from mock import patch
from designate import exceptions
from designate import objects
from designate.utils import generate_uuid
from designate.backend import impl_fake
from designate.central import rpcapi as central_rpcapi
from designate.mdns import rpcapi as mdns_rpcapi
from designate.storage.impl_sqlalchemy import tables
from designate.tests.test_pool_manager import PoolManagerTestCase
from designate.tests.test_pool_manager import POOL_DICT
import designate.pool_manager.service as pm_module
LOG = log.getLogger(__name__)
class PoolManagerServiceNoopTest(PoolManagerTestCase):
def setUp(self):
super(PoolManagerServiceNoopTest, self).setUp()
self.config(
threshold_percentage=100,
enable_recovery_timer=False,
enable_sync_timer=False,
poll_retry_interval=0,
poll_max_retries=1,
cache_driver='noop',
group='service:pool_manager')
# TODO(kiall): Rework all this pool config etc into a fixture..
# Configure the Pool ID
self.config(
pool_id='794ccc2c-d751-44fe-b57f-8894c9f5c842',
group='service:pool_manager')
# Start the Service
with patch.object(
central_rpcapi.CentralAPI,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service = self.start_service('pool_manager')
self.cache = self.service.cache
@staticmethod
def _build_zone(name, action, status, id=None):
zid = id or '75ea1626-eea7-46b5-acb7-41e5897c2d40'
values = {
'id': zid,
'name': name,
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842',
'action': action,
'serial': 1422062497,
'status': status
}
return objects.Zone.from_dict(values)
def _build_zones(self, n, action, status):
return [
self._build_zone("zone%02X.example." % cnt, action,
status, id=generate_uuid())
for cnt in range(n)
]
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_zone(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, _):
zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
self.service.create_zone(self.admin_context, zone)
create_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'CREATE')
# Even though _retrieve_statuses tries to get from mdns, mdns does
# not return any status
self.assertEqual(0, len(create_statuses))
# Ensure poll_for_serial_number was called for each nameserver.
self.assertEqual(2, mock_poll_for_serial_number.call_count)
self.assertEqual(
[call(self.admin_context, zone,
self.service.pool.nameservers[0], 30, 0, 1, 5),
call(self.admin_context, zone,
self.service.pool.nameservers[1], 30, 0, 1, 5)],
mock_poll_for_serial_number.call_args_list)
# Pool manager needs to call into mdns to calculate consensus as
# there is no cache. So update_status is never called.
self.assertFalse(mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(impl_fake.FakeBackend, 'create_zone')
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_zone_target_both_failure(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, mock_create_zone, _):
zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
mock_create_zone.side_effect = exceptions.Backend
self.service.create_zone(self.admin_context, zone)
create_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'CREATE')
self.assertEqual(0, len(create_statuses))
# Since consensus is not reached this early, we immediately call
# central's update_status.
self.assertTrue(mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(impl_fake.FakeBackend, 'create_zone')
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_zone_target_one_failure(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, mock_create_zone, _):
zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
mock_create_zone.side_effect = [exceptions.Backend, None]
self.service.create_zone(self.admin_context, zone)
create_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'CREATE')
self.assertEqual(0, len(create_statuses))
# Since consensus is not reached this early, we immediately call
# central's update_status.
self.assertTrue(mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(impl_fake.FakeBackend, 'create_zone')
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_zone_target_one_failure_consensus(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, mock_create_zone, _):
self.service.stop()
self.config(
threshold_percentage=50,
group='service:pool_manager')
with patch.object(
central_rpcapi.CentralAPI,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service = self.start_service('pool_manager')
zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
mock_create_zone.side_effect = [None, exceptions.Backend]
self.service.create_zone(self.admin_context, zone)
create_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'CREATE')
self.assertEqual(0, len(create_statuses))
# With threshold_percentage=50, the single successful target already
# meets consensus, so central's update_status is not called.
self.assertFalse(mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status(self, mock_update_status, _):
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[0],
'SUCCESS', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertFalse(mock_update_status.called)
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[1],
'SUCCESS', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertFalse(mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status_both_failure(self, mock_update_status, _):
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[0],
'ERROR', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, zone.id, 'ERROR', 0)
# Reset the mock call attributes.
mock_update_status.reset_mock()
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[1],
'ERROR', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, zone.id, 'ERROR', 0)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status_one_failure(self, mock_update_status, _):
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[0],
'SUCCESS', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertFalse(mock_update_status.called)
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[1],
'ERROR', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, zone.id, 'ERROR', 0)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status_one_failure_consensus(self, mock_update_status, _):
self.service.stop()
self.config(
threshold_percentage=50,
group='service:pool_manager')
with patch.object(
central_rpcapi.CentralAPI,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service = self.start_service('pool_manager')
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[0],
'SUCCESS', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertFalse(mock_update_status.called)
# Reset the mock call attributes.
mock_update_status.reset_mock()
self.service.update_status(self.admin_context, zone,
self.service.pool.nameservers[1],
'ERROR', zone.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, zone, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, zone.id, 'ERROR', 0)
@patch.object(central_rpcapi.CentralAPI, 'find_zones')
def test_periodic_sync_not_leader(self, mock_find_zones):
self.service._update_zone_on_target = Mock(return_value=False)
self.service._pool_election = Mock()
self.service._pool_election.is_leader = False
self.service.update_zone = Mock()
self.service.periodic_sync()
self.assertFalse(mock_find_zones.called)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_zone_no_consensus(self, mock_cent_update_status):
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')
self.service._update_zone_on_target = Mock(return_value=True)
self.service._exceed_or_meet_threshold = Mock(return_value=False)
ret = self.service.update_zone(self.admin_context, zone)
self.assertFalse(ret)
self.assertEqual(2, self.service._update_zone_on_target.call_count)
self.assertEqual(1, mock_cent_update_status.call_count)
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
def test_update_zone(self, mock_mdns_poll):
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')
self.service._update_zone_on_target = Mock(return_value=True)
self.service._update_zone_on_also_notify = Mock()
self.service.pool.also_notifies = objects.PoolAlsoNotifyList(
objects=[objects.PoolAlsoNotify(host='1.0.0.0', port=1)]
)
self.service._exceed_or_meet_threshold = Mock(return_value=True)
# cache.retrieve will throw exceptions.PoolManagerStatusNotFound
# mdns_api.poll_for_serial_number will be called twice
ret = self.service.update_zone(self.admin_context, zone)
self.assertTrue(ret)
self.assertEqual(2, self.service._update_zone_on_target.call_count)
self.assertEqual(1, self.service._update_zone_on_also_notify.call_count) # noqa
self.assertEqual(2, mock_mdns_poll.call_count)
# Periodic sync
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
@patch.object(central_rpcapi.CentralAPI, 'find_zones')
def test_periodic_sync(self, mock_find_zones,
mock_cent_update_status, *a):
self.service.update_zone = Mock()
mock_find_zones.return_value = self._build_zones(2, 'UPDATE',
'PENDING')
self.service.periodic_sync()
self.assertEqual(1, mock_find_zones.call_count)
criterion = mock_find_zones.call_args_list[0][0][1]
self.assertEqual('!ERROR', criterion['status'])
self.assertEqual(2, self.service.update_zone.call_count)
self.assertEqual(0, mock_cent_update_status.call_count)
@patch.object(pm_module.time, 'sleep')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
@patch.object(central_rpcapi.CentralAPI, 'find_zones')
def test_periodic_sync_with_failing_update(
self, mock_find_zones, mock_cent_update_status, *mocks):
self.service.update_zone = Mock(return_value=False) # fail update
mock_find_zones.return_value = self._build_zones(3, 'UPDATE',
'PENDING')
self.service.periodic_sync()
self.assertEqual(1, mock_find_zones.call_count)
criterion = mock_find_zones.call_args_list[0][0][1]
self.assertEqual('!ERROR', criterion['status'])
# 3 zones, all failing, with 3 attempts: 9 calls
self.assertEqual(9, self.service.update_zone.call_count)
# the zones have been put in ERROR status
self.assertEqual(3, mock_cent_update_status.call_count)
@patch.object(pm_module.time, 'sleep')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
@patch.object(central_rpcapi.CentralAPI, 'find_zones')
def test_periodic_sync_with_failing_update_with_exception(
self, mock_find_zones, mock_cent_update_status, *mocks):
self.service.update_zone = Mock(side_effect=Exception)
mock_find_zones.return_value = self._build_zones(3, 'UPDATE',
'PENDING')
self.service.periodic_sync()
self.assertEqual(1, mock_find_zones.call_count)
criterion = mock_find_zones.call_args_list[0][0][1]
self.assertEqual('!ERROR', criterion['status'])
# 3 zones, all failing, with 3 attempts: 9 calls
self.assertEqual(9, self.service.update_zone.call_count)
# the zones have been put in ERROR status
self.assertEqual(3, mock_cent_update_status.call_count)
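# A rough sketch of the retry shape these two tests pin down (names are
# illustrative, not the removed implementation): each zone gets up to
# three attempts, with a sleep between rounds (hence the patched
# pm_module.time.sleep), and a zone whose attempts all fail or raise is
# pushed to ERROR via central's update_status. Three zones times three
# attempts yields the 9 update_zone calls and 3 update_status calls
# asserted above.
def sketch_sync_with_retries(zones, update_zone, mark_error, attempts=3):
    for zone in zones:
        for _ in range(attempts):
            try:
                if update_zone(zone):
                    break
            except Exception:
                continue
        else:
            mark_error(zone)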
class PoolManagerServiceEndToEndTest(PoolManagerServiceNoopTest):
def setUp(self):
super(PoolManagerServiceEndToEndTest, self).setUp()
def _fetch_all_zones(self):
"""Fetch all zones including deleted ones
"""
query = tables.zones.select()
return self.storage.session.execute(query).fetchall()
def _log_all_zones(self, zones, msg=None):
"""Log out a summary of zones
"""
if msg:
LOG.debug("--- %s ---" % msg)
cols = ('name', 'status', 'action', 'deleted', 'deleted_at',
'parent_zone_id')
tpl = "%-35s | %-11s | %-11s | %-32s | %-20s | %s"
LOG.debug(tpl % cols)
for z in zones:
LOG.debug(tpl % tuple(z[k] for k in cols))
def _assert_count_all_zones(self, n):
"""Assert count ALL zones including deleted ones
"""
zones = self._fetch_all_zones()
if len(zones) == n:
return
msg = "failed: %d zones expected, %d found" % (n, len(zones))
self._log_all_zones(zones, msg=msg)
raise Exception("Unexpected number of zones")
def _assert_num_failed_zones(self, action, n):
zones = self.service._get_failed_zones(
self.admin_context, action)
if len(zones) != n:
LOG.error("Expected %d failed zones, got %d", n, len(zones))
self._log_all_zones(zones, msg='listing zones')
self.assertEqual(n, len(zones))
def _assert_num_healthy_zones(self, action, n):
criterion = {
'action': action,
'pool_id': pm_module.CONF['service:pool_manager'].pool_id,
'status': '!%s' % pm_module.ERROR_STATUS
}
zones = self.service.central_api.find_zones(self.admin_context,
criterion)
if len(zones) != n:
LOG.error("Expected %d healthy zones, got %d", n, len(zones))
self._log_all_zones(zones, msg='listing zones')
self.assertEqual(n, len(zones))
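# Note the '!%s' prefix in the status criterion above: Designate's
# storage filters treat a leading '!' as negation ("status is not
# ERROR"). A tiny illustrative matcher (a hypothetical helper, not part
# of the storage layer) makes the semantics concrete:
def sketch_criterion_matches(zone, criterion):
    for field, expected in criterion.items():
        value = zone.get(field)
        if isinstance(expected, str) and expected.startswith('!'):
            if str(value) == expected[1:]:
                return False
        elif value != expected:
            return False
    return True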

View File

@ -28,11 +28,11 @@ from designate.tests.unit import RoObject
class AgentBackendTestCase(tests.TestCase):
def setUp(self):
super(AgentBackendTestCase, self).setUp()
self.CONF.set_override('poll_timeout', 1, 'service:pool_manager')
self.CONF.set_override('poll_timeout', 1, 'service:worker')
self.CONF.set_override('poll_retry_interval', 4,
'service:pool_manager')
self.CONF.set_override('poll_max_retries', 5, 'service:pool_manager')
self.CONF.set_override('poll_delay', 6, 'service:pool_manager')
'service:worker')
self.CONF.set_override('poll_max_retries', 5, 'service:worker')
self.CONF.set_override('poll_delay', 6, 'service:worker')
self.context = self.get_context()
self.zone = objects.Zone(

View File

@ -20,7 +20,6 @@ import dns.rdataclass
import dns.rdatatype
import mock
import designate.mdns.base as mdnsbase
import designate.mdns.notify as notify
import designate.tests
from designate.tests.unit import RoObject
@ -42,24 +41,6 @@ class MdnsNotifyTest(designate.tests.TestCase):
1, 2, 3, 4, 5, 6, notify=True
)
@mock.patch.object(mdnsbase.pool_mngr_api.PoolManagerAPI, 'get_instance')
def test_poll_for_serial_number(self, mock_get_instance):
self.notify.get_serial_number = mock.Mock(
return_value=('status', 99, 9)
)
ns = RoObject(host='host', port=1234)
self.notify.poll_for_serial_number(
'c', 'z', ns, 1, 2, 3, 4
)
self.notify.get_serial_number.assert_called_with(
'c', 'z', 'host', 1234, 1, 2, 3, 4
)
self.notify.pool_manager_api.update_status.assert_called_with(
'c', 'z', ns, 'status', 99
)
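# A hedged sketch of the removed poll flow this test pinned down: ask
# the nameserver for the zone's serial via get_serial_number, then
# report the resulting status and serial back to the pool manager. The
# callables are passed in here only to keep the sketch self-contained.
def sketch_poll_for_serial_number(context, zone, nameserver, timeout,
                                  retry_interval, max_retries, delay,
                                  get_serial_number, update_status):
    status, serial, _retries = get_serial_number(
        context, zone, nameserver.host, nameserver.port, timeout,
        retry_interval, max_retries, delay)
    update_status(context, zone, nameserver, status, serial)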
@mock.patch('time.sleep')
def test_get_serial_number_nxdomain(self, mock_sleep):
# The zone is not found but it was supposed to be there

View File

@ -1,326 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from datetime import datetime
import mock
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
from designate import exceptions
from designate import objects
from designate import tests
from designate.pool_manager import service
CONF = cfg.CONF
POOL_DICT = {
'also_notifies': [
{
'host': u'192.0.2.4',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
}
],
'attributes': [],
'description': u'Default PowerDNS Pool',
'id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'name': u'default',
'nameservers': [
{
'host': u'192.0.2.2',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
},
{
'host': u'192.0.2.3',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
}
],
'ns_records': [
{
'hostname': u'ns1-1.example.org.',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'priority': 1,
},
{
'hostname': u'ns1-2.example.org.',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'priority': 2,
}
],
'provisioner': u'UNMANAGED',
'targets': [
{
'description': u'PowerDNS Database Cluster',
'masters': [],
'options': [],
'type': 'fake',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
}
]
}
class PoolManagerInitTest(tests.TestCase):
def setUp(self):
super(PoolManagerInitTest, self).setUp()
self.useFixture(cfg_fixture.Config(CONF))
self.service = service.Service()
def test_init_no_pool_targets(self):
pool_dict = dict(POOL_DICT)
pool_dict['targets'] = []
self.service.pool = objects.Pool.from_dict(pool_dict)
self.assertRaises(
exceptions.NoPoolTargetsConfigured,
self.service._setup_target_backends
)
def test_service_name(self):
self.assertEqual('pool_manager', self.service.service_name)
def test_pool_manager_rpc_topic(self):
CONF.set_override('topic', 'test-topic', 'service:pool_manager')
self.service = service.Service()
self.assertEqual('test-topic.794ccc2c-d751-44fe-b57f-8894c9f5c842',
self.service.rpc_topic)
self.assertEqual('pool_manager', self.service.service_name)
@mock.patch('designate.service.RPCService.start')
def test_start(self, mock_rpc_start):
self.service.tg.add_timer = mock.Mock()
self.service._pool_election = mock.Mock()
with mock.patch.object(
self.service.central_api,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service.start()
call1 = self.service.tg.add_timer.call_args_list[0][0]
self.assertEqual(120, call1[0])
self.assertEqual(120, call1[-1])
call2 = self.service.tg.add_timer.call_args_list[1][0]
self.assertEqual(1800, call2[0])
self.assertEqual(1800, call2[-1])
@mock.patch.object(time, 'sleep')
def test_constant_retries(self, mock_sleep):
gen = service._constant_retries(5, 2)
out = list(gen)
self.assertEqual(
[False, False, False, False, True],
out
)
self.assertEqual(4, mock_sleep.call_count)
mock_sleep.assert_called_with(2)
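# A plausible reconstruction of _constant_retries consistent with the
# assertions above: a generator that sleeps a fixed interval between
# attempts and yields False until the final attempt, which yields True.
# Five attempts with interval 2 give exactly four sleep(2) calls. Uses
# the module-level "import time" above.
def sketch_constant_retries(num_attempts, sleep_interval):
    for attempt in range(num_attempts):
        if attempt > 0:
            time.sleep(sleep_interval)
        yield attempt == num_attempts - 1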
class PoolManagerTest(tests.TestCase):
def setUp(self):
super(PoolManagerTest, self).setUp()
self.context = self.get_context()
self.zone = objects.Zone(
name="example.com.",
type="PRIMARY",
email="hostmaster@example.com",
serial=1,
)
self.service = service.Service()
self.service.tg.add_timer = mock.Mock()
self.service.pool = mock.Mock()
setattr(self.service.pool, 'targets', ())
setattr(self.service.pool, 'also_notifies', ())
setattr(self.service.pool, 'nameservers', ())
self.service._pool_election = mock.Mock()
self.service.target_backends = {}
@mock.patch.object(service.central_api.CentralAPI, 'find_zones')
@mock.patch.object(service.utils, 'increment_serial')
def test_get_failed_zones(self, mock_increment_serial, mock_find_zones):
mock_increment_serial.return_value = 1453758656
self.service._get_failed_zones(self.context, service.DELETE_ACTION)
call_one = mock.call(
self.context,
{
'action': 'DELETE',
'status': 'ERROR',
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842'
}
)
call_two = mock.call(
self.context,
{
'action': 'DELETE',
'status': 'PENDING',
'serial': '<1453758201', # 1453758656-455
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842'
}
)
# any_order because Mock may record additional unrelated calls in between
mock_find_zones.assert_has_calls([call_one, call_two],
any_order=True)
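# A hypothetical reconstruction of the criteria behind the two
# find_zones calls asserted above: zones already in ERROR, plus PENDING
# zones whose serial lags the freshly incremented serial by more than a
# fixed margin (455 here: 1453758656 - 455 = 1453758201).
def sketch_failed_zone_criteria(action, pool_id, current_serial,
                                margin=455):
    return [
        {'action': action, 'status': 'ERROR', 'pool_id': pool_id},
        {'action': action, 'status': 'PENDING',
         'serial': '<%d' % (current_serial - margin),
         'pool_id': pool_id},
    ]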
def test_periodic_recover(self):
def mock_get_failed_zones(ctx, action):
if action == service.DELETE_ACTION:
return [self.zone] * 3
if action == service.CREATE_ACTION:
return [self.zone] * 4
if action == service.UPDATE_ACTION:
return [self.zone] * 5
self.service._get_admin_context_all_tenants = mock.Mock(
return_value=self.context
)
self.service._get_failed_zones = mock_get_failed_zones
self.service.pool_manager_api.delete_zone = mock.Mock()
self.service.pool_manager_api.create_zone = mock.Mock()
self.service.pool_manager_api.update_zone = mock.Mock()
self.service.periodic_recovery()
self.service.pool_manager_api.delete_zone.assert_called_with(
self.context, self.zone
)
self.assertEqual(
3, self.service.pool_manager_api.delete_zone.call_count
)
self.service.pool_manager_api.create_zone.assert_called_with(
self.context, self.zone
)
self.assertEqual(
4, self.service.pool_manager_api.create_zone.call_count
)
self.service.pool_manager_api.update_zone.assert_called_with(
self.context, self.zone
)
self.assertEqual(
5, self.service.pool_manager_api.update_zone.call_count
)
def test_periodic_recover_exception(self):
# Raise an exception half through the recovery
def mock_get_failed_zones(ctx, action):
if action == service.DELETE_ACTION:
return [self.zone] * 3
if action == service.CREATE_ACTION:
return [self.zone] * 4
self.service._get_admin_context_all_tenants = mock.Mock(
return_value=self.context
)
self.service._get_failed_zones = mock_get_failed_zones
self.service.pool_manager_api.delete_zone = mock.Mock()
self.service.pool_manager_api.create_zone = mock.Mock(
side_effect=Exception('oops')
)
self.service.pool_manager_api.update_zone = mock.Mock()
self.service.periodic_recovery()
self.service.pool_manager_api.delete_zone.assert_called_with(
self.context, self.zone
)
self.assertEqual(
3, self.service.pool_manager_api.delete_zone.call_count
)
self.service.pool_manager_api.create_zone.assert_called_with(
self.context, self.zone
)
self.assertEqual(
1, self.service.pool_manager_api.create_zone.call_count
)
self.assertEqual(
0, self.service.pool_manager_api.update_zone.call_count
)
def test_periodic_sync(self):
self.service._fetch_healthy_zones = mock.Mock(return_value=[
objects.Zone(name='a_zone.'),
objects.Zone(name='b_zone.'),
objects.Zone(name='c_zone.'),
])
self.service.update_zone = mock.Mock()
self.service._exceed_or_meet_threshold = mock.Mock(return_value=True)
self.service.periodic_sync()
self.assertEqual(3, self.service.update_zone.call_count)
def test_target_sync(self):
date = 1463154200
older_date = datetime.fromtimestamp(1463154000)
newer_date = datetime.fromtimestamp(1463154300)
zones = [
objects.Zone(name='a_zone.', status='ACTIVE',
created_at=older_date),
objects.Zone(name='b_zone.', status='ACTIVE',
created_at=newer_date),
objects.Zone(name='c_zone.', status='DELETED',
created_at=older_date, serial=1),
]
self.service._delete_zone_on_target = mock.Mock()
self.service._create_zone_on_target = mock.Mock()
self.service._update_zone_on_target = mock.Mock()
self.service.mdns_api.poll_for_serial_number = mock.Mock()
target = mock.Mock()
self.service._target_sync(self.context, zones, target, date)
self.assertEqual(1, self.service._delete_zone_on_target.call_count)
self.assertEqual(1, self.service._create_zone_on_target.call_count)
self.assertEqual(1, self.service._update_zone_on_target.call_count)
def test_create_zone(self):
self.service._exceed_or_meet_threshold = mock.Mock(return_value=True)
self.service.create_zone(self.context, self.zone)
def test_update_zone(self):
self.service._exceed_or_meet_threshold = mock.Mock(return_value=True)
self.service.update_zone(self.context, self.zone)
def test_delete_zone(self):
self.service._exceed_or_meet_threshold = mock.Mock(return_value=True)
self.service.delete_zone(self.context, self.zone)

View File

@ -40,11 +40,6 @@ class ProducerTest(oslotest.base.BaseTestCase):
'service:producer': RoObject({
'enabled_tasks': None, # enable all tasks
}),
# TODO(timsim): Remove this
'service:zone_manager': RoObject({
'enabled_tasks': None, # enable all tasks
'export_synchronous': True
}),
'producer_task:zone_purge': '',
})
super(ProducerTest, self).setUp()

View File

@ -208,15 +208,6 @@ mdns_api = mock.PropertyMock(
])
)
fx_pool_manager = fixtures.MockPatch(
'designate.central.service.pool_manager_rpcapi.Pool'
'ManagerAPI.get_instance',
mock.MagicMock(spec_set=[
'create_zone',
'update_zone',
'delete_zone'
])
)
fx_worker = fixtures.MockPatch(
'designate.central.service.worker_rpcapi.WorkerAPI.get_instance',
@ -277,7 +268,6 @@ class CentralBasic(TestCase):
'get_storage',
])
designate.central.service.rpcapi = mock.Mock()
designate.central.service.pool_manager_rpcapi = mock.Mock()
designate.central.service.worker_rpcapi = mock.Mock()
self.context = mock.NonCallableMock(spec_set=[
'elevated',
@ -1013,8 +1003,7 @@ class CentralZoneTestCase(CentralBasic):
self.service.delete_zone(self.context,
CentralZoneTestCase.zone__id)
self.assertTrue(self.service.storage.delete_zone.called)
self.assertFalse(
self.service.pool_manager_api.delete_zone.called)
self.assertFalse(self.service.zone_api.delete_zone.called)
pcheck, _, _ = designate.central.service.policy.check.call_args[0]
self.assertEqual('abandon_zone', pcheck)
@ -1141,40 +1130,22 @@ class CentralZoneTestCase(CentralBasic):
self.assertEqual(exceptions.ReportNotFound, exc.exc_info[0])
def _test_touch_zone(self, worker_enabled=True):
if not worker_enabled:
self.config(
enabled="False",
group="service:worker"
)
def test_touch_zone_with_worker_model(self):
self.service._touch_zone_in_storage = Mock()
self.service.storage.get_zone.return_value = RoObject(
name='example.org.',
tenant_id='2',
)
if worker_enabled:
with fx_worker:
self.service.touch_zone(self.context,
CentralZoneTestCase.zone__id)
else:
with fx_pool_manager:
self.service.touch_zone(self.context,
CentralZoneTestCase.zone__id)
with fx_worker:
self.service.touch_zone(self.context,
CentralZoneTestCase.zone__id)
self.assertTrue(designate.central.service.policy.check.called)
self.assertEqual(
'touch_zone',
designate.central.service.policy.check.call_args[0][0]
)
def test_touch_zone_with_worker_model(self):
self._test_touch_zone(worker_enabled=True)
def test_touch_zone_with_pool_manager_model(self):
self._test_touch_zone(worker_enabled=False)
def test_get_recordset_not_found(self):
self.service.storage.get_zone.return_value = RoObject(
id=CentralZoneTestCase.zone__id,
@ -1301,12 +1272,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertEqual(exceptions.BadRequest, exc.exc_info[0])
def _test_update_recordset(self, worker_enabled=True):
if not worker_enabled:
self.config(
enabled="False",
group="service:worker"
)
def test_update_recordset_worker_model(self):
self.service.storage.get_zone.return_value = RoObject(
type='foo',
name='example.org.',
@ -1322,12 +1288,8 @@ class CentralZoneTestCase(CentralBasic):
return_value=('x', 'y')
)
if worker_enabled:
with fx_worker:
self.service.update_recordset(self.context, recordset)
else:
with fx_pool_manager:
self.service.update_recordset(self.context, recordset)
with fx_worker:
self.service.update_recordset(self.context, recordset)
self.assertTrue(
self.service._update_recordset_in_storage.called)
@ -1340,12 +1302,6 @@ class CentralZoneTestCase(CentralBasic):
'recordset_id': '9c85d9b0-1e9d-4e99-aede-a06664f1af2e',
'tenant_id': '2'}, target)
def test_update_recordset_worker_model(self):
self._test_update_recordset(worker_enabled=True)
def test_update_recordset_pool_manager_model(self):
self._test_update_recordset(worker_enabled=False)
def test__update_recordset_in_storage(self):
recordset = Mock()
recordset.name = 'n'
@ -1500,12 +1456,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertEqual(exceptions.BadRequest, exc.exc_info[0])
def _test_delete_recordset(self, worker_enabled=True):
if not worker_enabled:
self.config(
enabled="False",
group="service:worker"
)
def test_delete_recordset_worker(self):
mock_zone = RoObject(
action='foo',
id=CentralZoneTestCase.zone__id_2,
@ -1528,29 +1479,15 @@ class CentralZoneTestCase(CentralBasic):
return_value=(mock_rs, mock_zone)
)
if worker_enabled:
with fx_worker:
self.service.delete_recordset(self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id)
self.assertTrue(
self.service.zone_api.update_zone.called)
else:
with fx_pool_manager:
self.service.delete_recordset(self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id)
self.assertTrue(
self.service.zone_api.update_zone.called)
with fx_worker:
self.service.delete_recordset(self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id)
self.assertTrue(
self.service._delete_recordset_in_storage.called)
self.service.zone_api.update_zone.called)
def test_delete_recordset_worker(self):
self._test_delete_recordset(worker_enabled=True)
def test_delete_recordset_pool_manager(self):
self._test_delete_recordset(worker_enabled=False)
self.assertTrue(
self.service._delete_recordset_in_storage.called)
def test__delete_recordset_in_storage(self):
def mock_uds(c, zone, inc):
@ -1620,9 +1557,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertEqual(exceptions.BadRequest, exc.exc_info[0])
def _test_create_record(self, worker_enabled=True):
if not worker_enabled:
self.config(enabled="False", group="service:worker")
def _test_create_record(self):
self.service._create_record_in_storage = Mock(
return_value=(None, None)
)
@ -1637,24 +1572,14 @@ class CentralZoneTestCase(CentralBasic):
name='rs',
)
if worker_enabled:
with fx_worker:
self.service.create_record(
self.context,
CentralZoneTestCase.zone__id,
CentralZoneTestCase.recordset__id,
RoObject())
self.assertTrue(
self.service.zone_api.update_zone.called)
else:
with fx_pool_manager:
self.service.create_record(
self.context,
CentralZoneTestCase.zone__id,
CentralZoneTestCase.recordset__id,
RoObject())
self.assertTrue(
self.service.zone_api.update_zone.called)
with fx_worker:
self.service.create_record(
self.context,
CentralZoneTestCase.zone__id,
CentralZoneTestCase.recordset__id,
RoObject())
self.assertTrue(
self.service.zone_api.update_zone.called)
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('create_record', n)
@ -1667,10 +1592,7 @@ class CentralZoneTestCase(CentralBasic):
'tenant_id': '2'}, target)
def test_create_record_worker(self):
self._test_create_record(worker_enabled=True)
def test_create_record_pool_manager(self):
self._test_create_record(worker_enabled=False)
self._test_create_record()
def test__create_record_in_storage(self):
self.service._enforce_record_quota = Mock()
@ -1835,9 +1757,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertEqual(exceptions.BadRequest, exc.exc_info[0])
def _test_update_record(self, worker_enabled=True):
if not worker_enabled:
self.config(enabled="False", group="service:worker")
def test_update_record_worker(self):
self.service.storage.get_zone.return_value = RoObject(
action='a',
name='n',
@ -1857,12 +1777,9 @@ class CentralZoneTestCase(CentralBasic):
return_value=('x', 'y')
)
if worker_enabled:
with fx_worker:
self.service.update_record(self.context, record)
else:
with fx_pool_manager:
self.service.update_record(self.context, record)
with fx_worker:
self.service.update_record(self.context, record)
self.assertTrue(self.service._update_record_in_storage.called)
n, ctx, target = designate.central.service.policy.check.call_args[0]
@ -1876,12 +1793,6 @@ class CentralZoneTestCase(CentralBasic):
'recordset_name': 'rsn',
'tenant_id': 'tid'}, target)
def test_update_record_worker(self):
self._test_update_record(worker_enabled=True)
def test_update_record_pool_manager(self):
self._test_update_record(worker_enabled=False)
def test__update_record_in_storage(self):
self.service._update_zone_in_storage = Mock()
self.service._update_record_in_storage(
@ -1947,9 +1858,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertEqual(exceptions.RecordNotFound, exc.exc_info[0])
def _test_delete_record(self, worker_enabled=True):
if not worker_enabled:
self.config(enabled="False", group="service:worker")
def test_delete_record_worker(self):
self.service._delete_record_in_storage = Mock(
return_value=(None, None)
)
@ -1971,18 +1880,11 @@ class CentralZoneTestCase(CentralBasic):
managed=False,
)
if worker_enabled:
with fx_worker:
self.service.delete_record(self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id_2,
CentralZoneTestCase.record__id)
else:
with fx_pool_manager:
self.service.delete_record(self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id_2,
CentralZoneTestCase.record__id)
with fx_worker:
self.service.delete_record(self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id_2,
CentralZoneTestCase.record__id)
t, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('delete_record', t)
@ -1995,46 +1897,6 @@ class CentralZoneTestCase(CentralBasic):
'recordset_name': 'rsn',
'tenant_id': 'tid'}, target)
def test_delete_record_worker(self):
self._test_delete_record(worker_enabled=True)
def test_delete_record_pool_manager(self):
self._test_delete_record(worker_enabled=False)
def test_delete_record_fail_on_managed(self):
self.service._delete_record_in_storage = Mock(
return_value=(None, None)
)
self.service.storage.get_zone.return_value = RoObject(
action='a',
id=CentralZoneTestCase.zone__id,
name='dn',
tenant_id='tid',
type='t',
)
self.service.storage.get_record.return_value = RoObject(
id=CentralZoneTestCase.record__id,
zone_id=CentralZoneTestCase.zone__id,
recordset_id=CentralZoneTestCase.recordset__id,
)
self.service.storage.get_recordset.return_value = RoObject(
name='rsn',
id=CentralZoneTestCase.recordset__id,
managed=True,
)
self.context = Mock()
self.context.edit_managed_records = False
with fx_pool_manager:
exc = self.assertRaises(rpc_dispatcher.ExpectedException,
self.service.delete_record,
self.context,
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id_2,
CentralZoneTestCase.record__id_2)
self.assertEqual(exceptions.BadRequest, exc.exc_info[0])
def test_delete_record_in_storage(self):
self.service._delete_record_in_storage(
self.context,

View File

@ -1,112 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslotest.base
from designate import objects
LOG = logging.getLogger(__name__)
def create_test_pool():
return objects.Pool(
name='pool1',
description='desc',
)
class RoObject(dict):
def __setitem__(self, *a):
raise NotImplementedError
def __setattr__(self, *a):
raise NotImplementedError
def __getattr__(self, k):
return self[k]
mock_conf = RoObject(**{
'host': 'foohost',
'pool:769ca3fc-5924-4a44-8c1f-7efbe52fbd59': RoObject(
targets=['1588652b-50e7-46b9-b688-a9bad40a873e',
'2588652b-50e7-46b9-b688-a9bad40a873e'],
nameservers=['169ca3fc-5924-4a44-8c1f-7efbe52fbd59',
'269ca3fc-5924-4a44-8c1f-7efbe52fbd59'],
also_notifies=['1.0.0.0:1', '2.0.0.0:2']
),
'pool_nameserver:169ca3fc-5924-4a44-8c1f-7efbe52fbd59': RoObject(
host='pool_host_1.example.',
port=123
),
'pool_nameserver:269ca3fc-5924-4a44-8c1f-7efbe52fbd59': RoObject(
host='pool_host_2.example.',
port=456
),
'pool_target:1588652b-50e7-46b9-b688-a9bad40a873e': RoObject(
type='t1',
masters=[],
options=dict(a='1', b='2'),
),
'pool_target:2588652b-50e7-46b9-b688-a9bad40a873e': RoObject(
type='t2',
masters=['1.1.1.1:11'],
options={},
),
})
def deep_sort_lists(obj):
"""Sort lists nested in dictionaries
"""
if isinstance(obj, dict):
return sorted((k, deep_sort_lists(obj[k])) for k in obj)
if isinstance(obj, list):
return sorted(deep_sort_lists(v) for v in obj)
return obj
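# A tiny usage illustration with hypothetical values: dicts become
# sorted (key, value) pairs and nested lists are sorted recursively, so
# two logically equal pool dicts compare equal regardless of list order.
assert deep_sort_lists({'b': [3, 1], 'a': 'x'}) == [('a', 'x'), ('b', [1, 3])]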
class poolTest(oslotest.base.BaseTestCase):
def test_init_from_config(self):
pool = objects.Pool.from_config(mock_conf,
'769ca3fc-5924-4a44-8c1f-7efbe52fbd59')
expected = [('also_notifies', [[('host', '1.0.0.0'), ('port', 1)],
[('host', '2.0.0.0'), ('port', 2)]]),
('description', 'Pool built from configuration on foohost'), # noqa
('id', '769ca3fc-5924-4a44-8c1f-7efbe52fbd59'),
('nameservers', [[('host', 'pool_host_1.example.'),
('id', '169ca3fc-5924-4a44-8c1f-7efbe52fbd59'), # noqa
('port', 123)],
[('host', 'pool_host_2.example.'),
('id', '269ca3fc-5924-4a44-8c1f-7efbe52fbd59'), # noqa
('port', 456)]]),
('targets', [[('id', '1588652b-50e7-46b9-b688-a9bad40a873e'), # noqa
('masters', []),
('options', [[('key', 'a'), ('value', '1')],
[('key', 'b'), ('value', '2')]]),
('type', 't1')],
[('id', '2588652b-50e7-46b9-b688-a9bad40a873e'), # noqa
('masters', [[('host', '1.1.1.1'),
('port', 11)]]),
('options', []),
('type', 't2')]])]
actual = deep_sort_lists(pool.to_dict())
self.assertEqual(actual, expected)

View File

@ -7,7 +7,7 @@ pushd $BASE/new/devstack
DEVSTACK_GATE_DESIGNATE_DRIVER=${DEVSTACK_GATE_DESIGNATE_DRIVER:-powerdns}
export KEEP_LOCALRC=1
export ENABLED_SERVICES=designate,designate-api,designate-central,designate-sink,designate-mdns,designate-pool-manager,designate-zone-manager
export ENABLED_SERVICES=designate,designate-api,designate-central,designate-sink,designate-mdns,designate-worker,designate-producer
echo "DESIGNATE_SERVICE_PORT_DNS=5322" >> $BASE/new/devstack/localrc
echo "DESIGNATE_BACKEND_DRIVER=$DEVSTACK_GATE_DESIGNATE_DRIVER" >> $BASE/new/devstack/localrc

View File

@ -52,19 +52,6 @@ function configure_designate {
iniset $DESIGNATE_CONF coordination backend_url $DESIGNATE_COORDINATION_URL
fi
if is_service_enabled designate-pool-manager; then
# Pool Manager Configuration
iniset $DESIGNATE_CONF service:pool_manager pool_id $DESIGNATE_POOL_ID
iniset $DESIGNATE_CONF service:pool_manager cache_driver $DESIGNATE_POOL_MANAGER_CACHE_DRIVER
iniset $DESIGNATE_CONF service:pool_manager periodic_recovery_interval $DESIGNATE_PERIODIC_RECOVERY_INTERVAL
iniset $DESIGNATE_CONF service:pool_manager periodic_sync_interval $DESIGNATE_PERIODIC_SYNC_INTERVAL
# Pool Manager Cache
if [ "$DESIGNATE_POOL_MANAGER_CACHE_DRIVER" == "sqlalchemy" ]; then
iniset $DESIGNATE_CONF pool_manager_cache:sqlalchemy connection `database_connection_url designate_pool_manager`
fi
fi
# API Configuration
sudo cp $DESIGNATE_DIR/etc/designate/api-paste.ini $DESIGNATE_APIPASTE_CONF
iniset $DESIGNATE_CONF service:api enabled_extensions_v2 $DESIGNATE_ENABLED_EXTENSIONS_V2
@ -226,14 +213,6 @@ function init_designate {
# Init and migrate designate database
$DESIGNATE_BIN_DIR/designate-manage database sync
if [ "$DESIGNATE_POOL_MANAGER_CACHE_DRIVER" == "sqlalchemy" ]; then
# (Re)create designate_pool_manager cache
recreate_database designate_pool_manager utf8
# Init and migrate designate pool-manager-cache
$DESIGNATE_BIN_DIR/designate-manage pool-manager-cache sync
fi
init_designate_backend
}
@ -290,15 +269,9 @@ function start_designate {
run_process designate-mdns "$DESIGNATE_BIN_DIR/designate-mdns --config-file $DESIGNATE_CONF"
run_process designate-agent "$DESIGNATE_BIN_DIR/designate-agent --config-file $DESIGNATE_CONF"
run_process designate-sink "$DESIGNATE_BIN_DIR/designate-sink --config-file $DESIGNATE_CONF"
if is_service_enabled designate-pool-manager; then
run_process designate-pool-manager "$DESIGNATE_BIN_DIR/designate-pool-manager --config-file $DESIGNATE_CONF"
run_process designate-zone-manager "$DESIGNATE_BIN_DIR/designate-zone-manager --config-file $DESIGNATE_CONF"
else
run_process designate-worker "$DESIGNATE_BIN_DIR/designate-worker --config-file $DESIGNATE_CONF"
run_process designate-producer "$DESIGNATE_BIN_DIR/designate-producer --config-file $DESIGNATE_CONF"
fi
run_process designate-worker "$DESIGNATE_BIN_DIR/designate-worker --config-file $DESIGNATE_CONF"
run_process designate-producer "$DESIGNATE_BIN_DIR/designate-producer --config-file $DESIGNATE_CONF"
# Start proxies if enabled
if is_service_enabled designate-api && is_service_enabled tls-proxy; then
@ -314,8 +287,6 @@ function start_designate {
function stop_designate {
stop_process designate-central
stop_process designate-api
stop_process designate-pool-manager
stop_process designate-zone-manager
stop_process designate-mdns
stop_process designate-agent
stop_process designate-sink

View File

@ -1,7 +1,6 @@
# Default options
DESIGNATE_BACKEND_DRIVER=${DESIGNATE_BACKEND_DRIVER:=bind9}
DESIGNATE_AGENT_BACKEND_DRIVER=${DESIGNATE_AGENT_BACKEND_DRIVER:-"fake"}
DESIGNATE_POOL_MANAGER_CACHE_DRIVER=${DESIGNATE_POOL_MANAGER_CACHE_DRIVER:-memcache}
DESIGNATE_POOL_ID=${DESIGNATE_POOL_ID:-794ccc2c-d751-44fe-b57f-8894c9f5c842}
DESIGNATE_DEFAULT_NS_RECORD=${DESIGNATE_DEFAULT_NS_RECORD:-ns1.devstack.org.}
DESIGNATE_NOTIFICATION_DRIVER=${DESIGNATE_NOTIFICATION_DRIVER:-messagingv2}

View File

@ -21,18 +21,10 @@ stop_process designate-api
stop_process designate-mdns
stop_process designate-agent
stop_process designate-sink
if is_service_enabled designate-worker; then
stop_process designate-worker
stop_process designate-producer
else
stop_process designate-pool-manager
stop_process designate-zone-manager
fi
stop_process designate-worker
stop_process designate-producer
# sanity check that service is actually down
ensure_services_stopped designate-api designate-central designate-mdns designate-agent designate-sink
if is_service_enabled designate-worker; then
ensure_services_stopped designate-worker designate-producer
else
ensure_services_stopped designate-pool-manager designate-zone-manager
fi
ensure_services_stopped designate-api designate-central designate-mdns designate-agent designate-sink designate-worker designate-producer

View File

@ -16,14 +16,13 @@
Agent Backend
=============
This page documents using the Pool Manager Agent backend, and its accompanying
This page documents using the various Agent backends, and their accompanying
service, `designate-agent`. This backend uses an extension of the DNS protocol
itself to send management requests to the remote agent processes, where the
requests will be actioned.
The traffic between `designate-pool-manager` and `designate-agent` is both
unauthenticated and unencrypted. Do not run this traffic over unsecured
networks.
The `rpc` traffic between designate and the `agent` is both unauthenticated and
unencrypted. Do not run this traffic over unsecured networks.
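As a rough illustration of "an extension of the DNS protocol itself": a
management request is an ordinary DNS message sent to the agent's host
and port, distinguished by a private opcode. The opcode value, address,
and port below are assumptions for illustration only; see the agent
backend source for the exact wire format.

.. code-block:: python

    import dns.message
    import dns.query

    # Build a SOA query for the zone and mark it with a private opcode
    # so the agent treats it as a management request, not a lookup.
    msg = dns.message.make_query('example.org.', 'SOA')
    msg.set_opcode(14)  # assumed private "management" opcode
    response = dns.query.tcp(msg, '192.0.2.10', port=5358, timeout=10)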
Designate Configuration
-----------------------

View File

@ -18,7 +18,7 @@
Bind9 Backend
=============
This page documents using the Pool Manager Bind 9 backend.
This page documents using the Bind 9 backend.
The backend uses the rndc utility to create and delete zones remotely.
The traffic between rndc and Bind is authenticated with a key.
@ -32,7 +32,7 @@ One section for each pool target
.. literalinclude:: sample_yaml_snippets/bind.yaml
:language: yaml
The key and config files are relative to the host running Pool Manager
The key and config files are relative to the host running Designate
(and can be different from the hosts running Bind)
Then update the pools in designate - see :ref:`designate_manage_pool`
@ -46,7 +46,7 @@ Bind9 Configuration
-------------------
Ensure Bind can access the /etc/bind/rndc.conf and /etc/bind/rndc.key files and
receive rndc traffic from Pool Manager.
receive rndc traffic from Designate.
Enable rndc addzone/delzone functionality by editing named.conf.options
or named.conf and adding this line under options
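The directive in question is ``allow-new-zones yes;``. As a hedged
sketch of the rndc interaction described above (the real backend
assembles the command from the pool target options, so the helper name
and values here are placeholders):

.. code-block:: python

    import subprocess

    def rndc_addzone(zone, mdns_ip, mdns_port, key_file):
        # Designate-managed zones are slaved from designate-mdns,
        # hence the "type slave" clause.
        clause = ('{ type slave; masters { %s port %d; }; '
                  'file "slave.%s"; };'
                  % (mdns_ip, mdns_port, zone.rstrip('.')))
        subprocess.check_call(
            ['rndc', '-k', key_file, 'addzone', zone, clause])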

View File

@ -127,27 +127,6 @@ designate optional arguments
designate-manage pool
=====================
.. _designate_manage_pool_export_from_config:
designate-manage pool export_from_config
----------------------------------------
.. code-block:: console
usage: designate-manage pool export_from_config [-h] [--file FILE]
Export a YAML copy of a Kilo/Liberty pool config.
**Optional arguments:**
``-h, --help``
show this help message and exit
``--file FILE``
The path to the file the yaml output should be written to
(Defaults to /etc/designate/pools.yaml)
.. _designate_manage_pool_generate_file:
designate-manage pool generate_file
@ -240,52 +219,6 @@ Show what version of the database schema is currently in place
**Optional arguments:**
``-h, --help``
show this help message and exit
.. _designate_manage_pool_manager_cache:
designate-manage pool_manager_cache
===================================
.. _designate_manage_pool_manager_cache_sync:
designate-manage pool_manager_cache sync
----------------------------------------
.. code-block:: console
usage: designate-manage pool_manager_cache sync [-h] [--revision REVISION]
Update the designate pool manager cache database schema
**Optional arguments:**
``-h, --help``
show this help message and exit
``--revision REVISION``
The version that the designate pool manager cache database
should be synced to.
(Defaults to latest version)
.. _designate_manage_pool_manager_cache_version:
designate-manage pool_manager_cache version
-------------------------------------------
.. code-block:: console
usage: designate-manage pool_manager_cache version [-h]
Show what version of the pool manager cache database schema
is currently in place
**Optional arguments:**
``-h, --help``
show this help message and exit
@ -316,13 +249,9 @@ Update the designate powerdns database schema
show this help message and exit
``--revision REVISION``
The version that the designate pool manager cache database
should be synced to.
The version that the designate powerdns database should be synced to.
(Defaults to latest version)
.. _designate_manage_powerdns_version:
designate-manage powerdns version
@ -344,5 +273,3 @@ Show what version of the powerdns database schema is currently in place
``-h, --help``
show this help message and exit

View File

@ -16,9 +16,7 @@ The DNS service consists of the following components:
them to the ``designate-central`` over Remote Procedure Call (RPC).
``designate-central`` component
Orchestrates the creation, deletion and update of Zones and RecordSets, and
delegates work to ``designate-pool-manager`` for interaction with the DNS
servers
Orchestrates the creation, deletion and update of Zones and RecordSets.
``designate-producer`` component
Orchestrates periodic tasks that are run by designate.

View File

@ -0,0 +1,11 @@
---
prelude: >
Pool-Manager removal is complete in this version of designate.
upgrade:
- |
The pool-manager has been removed and can no longer be used. Instead,
the worker and producer services should be used.
Because of this, the ``[service:worker]`` option ``enabled`` has been
permanently removed, and the ``notify`` option under the same section
has been deprecated for removal.
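For reference, a hedged sketch (assumed option name and defaults) of how
the remaining ``notify`` option would be flagged for removal with
oslo.config, so that operators see a deprecation warning at startup:

from oslo_config import cfg

worker_opts = [
    cfg.BoolOpt('notify',
                default=True,
                deprecated_for_removal=True,
                help='Whether the worker should send DNS NOTIFY messages'),
]
cfg.CONF.register_opts(worker_opts, group='service:worker')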

View File

@ -51,8 +51,6 @@ console_scripts =
designate-central = designate.cmd.central:main
designate-manage = designate.cmd.manage:main
designate-mdns = designate.cmd.mdns:main
designate-pool-manager = designate.cmd.pool_manager:main
designate-zone-manager = designate.cmd.zone_manager:main
designate-sink = designate.cmd.sink:main
designate-agent = designate.cmd.agent:main
designate-worker = designate.cmd.worker:main
@ -69,11 +67,6 @@ designate.api.admin.extensions =
designate.storage =
sqlalchemy = designate.storage.impl_sqlalchemy:SQLAlchemyStorage
designate.pool_manager.cache =
memcache = designate.pool_manager.cache.impl_memcache:MemcachePoolManagerCache
noop = designate.pool_manager.cache.impl_noop:NoopPoolManagerCache
sqlalchemy = designate.pool_manager.cache.impl_sqlalchemy:SQLAlchemyPoolManagerCache
designate.notification.handler =
fake = designate.notification_handler.fake:FakeHandler
nova_fixed = designate.notification_handler.nova:NovaFixedHandler
@ -119,7 +112,6 @@ designate.scheduler.filters =
designate.manage =
database = designate.manage.database:DatabaseCommands
pool = designate.manage.pool:PoolCommands
pool-manager-cache = designate.manage.pool_manager_cache:DatabaseCommands
powerdns = designate.manage.powerdns:DatabaseCommands
tlds = designate.manage.tlds:TLDCommands