Enable use of Pools YAML

This change adds the tooling to use the DB tables created for pool
config data, along with the tooling to migrate the existing config info into them.

Change-Id: If99dbf527ef1ac0f05f15fe77f68f64e357fe0a5
Kiall Mac Innes 2016-02-23 14:07:36 +00:00 committed by Graham Hayes
parent 1375a45a71
commit e612a3974f
44 changed files with 1969 additions and 305 deletions

View File

@ -16,6 +16,7 @@ import pecan
from oslo_log import log as logging from oslo_log import log as logging
from designate import utils from designate import utils
from designate.i18n import _LW
from designate.api.v2.controllers import rest from designate.api.v2.controllers import rest
from designate.objects import Pool from designate.objects import Pool
from designate.objects.adapters import DesignateAdapter from designate.objects.adapters import DesignateAdapter
@ -63,6 +64,11 @@ class PoolsController(rest.RestController):
@pecan.expose(template='json:', content_type='application/json') @pecan.expose(template='json:', content_type='application/json')
def post_all(self): def post_all(self):
"""Create a Pool""" """Create a Pool"""
LOG.warning(_LW("Use of this API Method is DEPRICATED. This will have "
"unforseen side affects when used with the "
"designate-manage pool commands"))
request = pecan.request request = pecan.request
response = pecan.response response = pecan.response
context = request.environ['context'] context = request.environ['context']
@ -86,6 +92,11 @@ class PoolsController(rest.RestController):
@utils.validate_uuid('pool_id') @utils.validate_uuid('pool_id')
def patch_one(self, pool_id): def patch_one(self, pool_id):
"""Update the specific pool""" """Update the specific pool"""
LOG.warning(_LW("Use of this API Method is DEPRICATED. This will have "
"unforseen side affects when used with the "
"designate-manage pool commands"))
request = pecan.request request = pecan.request
context = request.environ['context'] context = request.environ['context']
body = request.body_dict body = request.body_dict
@ -111,6 +122,11 @@ class PoolsController(rest.RestController):
@utils.validate_uuid('pool_id') @utils.validate_uuid('pool_id')
def delete_one(self, pool_id): def delete_one(self, pool_id):
"""Delete the specific pool""" """Delete the specific pool"""
LOG.warning(_LW("Use of this API Method is DEPRICATED. This will have "
"unforseen side affects when used with the "
"designate-manage pool commands"))
request = pecan.request request = pecan.request
response = pecan.response response = pecan.response
context = request.environ['context'] context = request.environ['context']

View File

@ -13,24 +13,172 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import pprint import yaml
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import oslo_messaging as messaging
from designate.manage import base from designate import exceptions
from designate import rpc
from designate.i18n import _LI
from designate.i18n import _LC
from designate import objects from designate import objects
from designate.central import rpcapi as central_rpcapi
from designate.manage import base
from designate.objects.adapters import DesignateAdapter
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
CONF = cfg.CONF CONF = cfg.CONF
class PoolCommands(base.Commands): class PoolCommands(base.Commands):
def __init__(self):
super(PoolCommands, self).__init__()
rpc.init(cfg.CONF)
self.central_api = central_rpcapi.CentralAPI()
@base.args('--file', help='The path to the file the yaml output should be '
'written to',
default='/etc/designate/pools.yaml')
def generate_file(self, file):
try:
pools = self.central_api.find_pools(self.context)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response recieved from designate-central. "
"Check it is running, and retry"))
with open(file, 'w') as stream:
yaml.dump(
DesignateAdapter.render('YAML', pools),
stream,
default_flow_style=False
)
@base.args('--file', help='The path to the file the yaml output should be '
'written to',
default='/etc/designate/pools.yaml')
def export_from_config(self, file):
try:
pools = self.central_api.find_pools(self.context)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response recieved from designate-central. "
"Check it is running, and retry"))
r_pools = objects.PoolList()
for pool in pools:
r_pool = objects.Pool.from_config(CONF, pool.id)
r_pool.id = pool.id
r_pool.ns_records = pool.ns_records
r_pool.attributes = pool.attributes
r_pools.append(r_pool)
with open(file, 'w') as stream:
yaml.dump(
DesignateAdapter.render('YAML', r_pools),
stream,
default_flow_style=False
)
@base.args('--pool_id', help='ID of the pool to be examined', @base.args('--pool_id', help='ID of the pool to be examined',
default=CONF['service:central'].default_pool_id) default=CONF['service:central'].default_pool_id)
def show_config(self, pool_id): def show_config(self, pool_id):
print('*' * 100) try:
pool = self.central_api.find_pool(self.context, {"id": pool_id})
print('Pool Configuration:') print('Pool Configuration:')
print('*' * 100) print('-------------------')
pprint.pprint(objects.Pool.from_config(CONF, pool_id).to_dict())
print(yaml.dump(DesignateAdapter.render('YAML', pool),
default_flow_style=False))
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response recieved from designate-central. "
"Check it is running, and retry"))
@base.args('--file', help='The path to the yaml file describing the pools',
default='/etc/designate/pools.yaml')
@base.args(
'--delete',
help='Any Pools not listed in the config file will be deleted. '
' WARNING: This will delete any zones left in this pool',
default=False)
@base.args(
'--dry_run',
help='This will simulate what will happen when you run this command',
default=False)
def update(self, file, delete, dry_run):
print('Updating Pools Configuration')
print('****************************')
output_msg = ['']
with open(file, 'r') as stream:
xpools = yaml.safe_load(stream)
if dry_run:
output_msg.append("The following changes will occur:")
output_msg.append("*********************************")
for xpool in xpools:
try:
if 'id' in xpool:
try:
pool = self.central_api.get_pool(
self.context, xpool['id'])
except Exception:
LOG.critical(
_LC("Bad ID Supplied for pool %s"), xpool['name'])
continue
else:
pool = self.central_api.find_pool(
self.context, {"name": xpool['name']})
LOG.info(_LI('Updating existing pool: %s'), pool)
# TODO(kiall): Move the below into the pool object
pool = DesignateAdapter.parse('YAML', xpool, pool)
if dry_run:
output_msg.append("Update Pool: %s" % pool)
else:
pool = self.central_api.update_pool(self.context, pool)
except exceptions.PoolNotFound:
pool = DesignateAdapter.parse('YAML', xpool, objects.Pool())
# pool = objects.Pool.from_dict(xpool)
if dry_run:
output_msg.append("Create Pool: %s" % pool)
else:
LOG.info(_LI('Creating new pool: %s'), pool)
self.central_api.create_pool(self.context, pool)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response recieved from designate-central."
" Check it is running, and retry"))
if delete:
pools = self.central_api.find_pools(self.context)
pools_in_db = {pool.name for pool in pools}
pools_in_yaml = {xpool['name'] for xpool in xpools}
pools_to_delete = pools_in_db - pools_in_yaml
for pool in pools_to_delete:
try:
p = self.central_api.find_pool(
self.context,
criterion={'name': pool})
if dry_run:
output_msg.append("Delete Pool: %s" % p)
else:
LOG.info(_LI('Deleting %s'), p)
self.central_api.delete_pool(self.context, p.id)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response recieved from "
"designate-central. "
"Check it is running, and retry"))
for line in output_msg:
print(line)
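For context on the update flow above, here is a minimal sketch (not part of this change, and assuming the usual validate() behaviour of DesignateObject) of how a pools.yaml file could be checked with the new YAML adapters before running `designate-manage pool update --dry_run`:

import yaml

from designate import objects
from designate.objects import adapters


def validate_pools_yaml(path='/etc/designate/pools.yaml'):
    # Load the same file format the update command consumes.
    with open(path) as stream:
        xpools = yaml.safe_load(stream)

    for xpool in xpools:
        # Parse each pool definition through the YAML adapter; a bad key or
        # schema violation here would also trip up the manage command.
        pool = adapters.DesignateAdapter.parse('YAML', xpool, objects.Pool())
        pool.validate()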

View File

@ -21,7 +21,9 @@ from oslo_db.sqlalchemy.migration_cli import manager as migration_manager
from oslo_log import log as logging from oslo_log import log as logging
from designate.manage import base from designate.manage import base
from designate import rpc
from designate import utils from designate import utils
from designate.central import rpcapi as central_rpcapi
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -32,33 +34,50 @@ CONF = cfg.CONF
utils.register_plugin_opts() utils.register_plugin_opts()
def get_manager(pool_target_id): def get_manager(pool_target):
pool_target_options = CONF['pool_target:%s' % pool_target_id].options connection = pool_target.options.get('connection', None)
connection = pool_target_options['connection']
migration_config = { migration_config = {
'migration_repo_path': REPOSITORY, 'migration_repo_path': REPOSITORY,
'db_url': connection} 'db_url': connection}
return migration_manager.MigrationManager(migration_config) return migration_manager.MigrationManager(migration_config)
class DatabaseCommands(base.Commands): class DatabaseCommands(base.Commands):
@base.args('pool-target-id', help="Pool Target to Migrate", type=str) def __init__(self):
def version(self, pool_target_id): super(DatabaseCommands, self).__init__()
current = get_manager(pool_target_id).version() rpc.init(cfg.CONF)
self.central_api = central_rpcapi.CentralAPI()
@base.args('pool-id', help="Pool to Migrate", type=str)
def version(self, pool_id):
pool = self.central_api.find_pool(self.context, {"id": pool_id})
for pool_target in pool.targets:
current = get_manager(pool_target).version()
latest = versioning_api.version(repository=REPOSITORY).value latest = versioning_api.version(repository=REPOSITORY).value
print("Current: %s Latest: %s" % (current, latest)) print("Current: %s Latest: %s" % (current, latest))
@base.args('pool-target-id', help="Pool Target to Migrate", type=str) @base.args('pool-id', help="Pool to Migrate", type=str)
def sync(self, pool_target_id): def sync(self, pool_id):
get_manager(pool_target_id).upgrade(None) pool = self.central_api.find_pool(self.context, {"id": pool_id})
@base.args('pool-target-id', help="Pool Target to Migrate", type=str) for pool_target in pool.targets:
@base.args('revision', nargs='?') get_manager(pool_target).upgrade(None)
def upgrade(self, pool_target_id, revision):
get_manager(pool_target_id).upgrade(revision)
@base.args('pool-target-id', help="Pool Target to Migrate", type=str) @base.args('pool-id', help="Pool to Migrate", type=str)
@base.args('revision', nargs='?') @base.args('revision', nargs='?')
def downgrade(self, pool_target_id, revision): def upgrade(self, pool_id, revision):
get_manager(pool_target_id).downgrade(revision) pool = self.central_api.find_pool(self.context, {"id": pool_id})
for pool_target in pool.targets:
get_manager(pool_target).upgrade(revision)
@base.args('pool-id', help="Pool to Migrate", type=str)
@base.args('revision', nargs='?')
def downgrade(self, pool_id, revision):
pool = self.central_api.find_pool(self.context, {"id": pool_id})
for pool_target in pool.targets:
get_manager(pool_target).downgrade(revision)
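Since the collapsed diff above is hard to follow, here is an illustrative sketch (assuming PoolTargetOptionList keeps its dict-style get()) of why the migration commands now take a pool ID: each pool target carries its own database connection in its options, so a migration manager is built once per target.

from designate import objects

# The 'connection' key matches the sample pools.yaml shipped with this change.
target = objects.PoolTarget.from_dict({
    'type': 'powerdns',
    'options': {
        'connection': 'mysql+pymysql://designate:password@127.0.0.1/designate_pdns',
    },
})

# PoolTargetOptionList behaves like a mapping for lookups, which is what the
# new get_manager(pool_target) relies on when it reads the connection string.
connection = target.options.get('connection', None)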

View File

@ -32,3 +32,14 @@ from designate.objects.adapters.api_v2.zone_transfer_request import ZoneTransfer
from designate.objects.adapters.api_v2.validation_error import ValidationErrorAPIv2Adapter, ValidationErrorListAPIv2Adapter # noqa from designate.objects.adapters.api_v2.validation_error import ValidationErrorAPIv2Adapter, ValidationErrorListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.zone_import import ZoneImportAPIv2Adapter, ZoneImportListAPIv2Adapter # noqa from designate.objects.adapters.api_v2.zone_import import ZoneImportAPIv2Adapter, ZoneImportListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.zone_export import ZoneExportAPIv2Adapter, ZoneExportListAPIv2Adapter # noqa from designate.objects.adapters.api_v2.zone_export import ZoneExportAPIv2Adapter, ZoneExportListAPIv2Adapter # noqa
# YAML
from designate.objects.adapters.yaml.pool import PoolYAMLAdapter, PoolListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_attribute import PoolAttributeYAMLAdapter, PoolAttributeListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_also_notify import PoolAlsoNotifyYAMLAdapter, PoolAlsoNotifyListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_nameserver import PoolNameserverYAMLAdapter, PoolNameserverListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_ns_record import PoolNsRecordYAMLAdapter, PoolNsRecordListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_target import PoolTargetYAMLAdapter, PoolTargetListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_target_master import PoolTargetMasterYAMLAdapter, PoolTargetMasterListYAMLAdapter # noqa
from designate.objects.adapters.yaml.pool_target_option import PoolTargetOptionYAMLAdapter, PoolTargetOptionListYAMLAdapter # noqa

View File

@ -0,0 +1,81 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters import base
LOG = logging.getLogger(__name__)
class YAMLAdapter(base.DesignateAdapter):
ADAPTER_FORMAT = 'YAML'
#####################
# Parsing methods #
#####################
@classmethod
def parse(cls, values, output_object, *args, **kwargs):
obj = super(YAMLAdapter, cls).parse(
cls.ADAPTER_FORMAT, values, output_object, *args, **kwargs)
return obj
@classmethod
def _render_object(cls, object, *args, **kwargs):
# The dict we will return to be rendered to JSON / output format
r_obj = {}
# Loop over all fields that are supposed to be output
for key, value in cls.MODIFICATIONS['fields'].items():
# Get properties for this field
field_props = cls.MODIFICATIONS['fields'][key]
# Check if it has to be renamed
if field_props.get('rename', False):
obj = getattr(object, field_props.get('rename'))
# if rename is specified we need to change the key
obj_key = field_props.get('rename')
else:
# if not, move on
obj = getattr(object, key, None)
obj_key = key
# Check if this item is a relation (another DesignateObject that
# will need to be converted itself
if object.FIELDS.get(obj_key, {}).get('relation'):
# Get a adapter for the nested object
# Get the class the object is and get its adapter, then set
# the item in the dict to the output
r_obj[key] = cls.get_object_adapter(
cls.ADAPTER_FORMAT,
object.FIELDS[obj_key].get('relation_cls')).render(
cls.ADAPTER_FORMAT, obj, *args, **kwargs)
elif object.FIELDS.get(
obj_key, {}).get('schema', {}).get('type') == 'integer':
r_obj[key] = int(obj)
elif obj is not None:
# Just attach the damn item if there is no weird edge cases
r_obj[key] = str(obj)
# Send it back
return r_obj
@classmethod
def _render_list(cls, list_object, *args, **kwargs):
# The list we will return to be rendered to JSON / output format
r_list = []
# iterate and convert each DesignateObject in the list, and append to
# the object we are returning
for object in list_object:
r_list.append(cls.get_object_adapter(
cls.ADAPTER_FORMAT,
object).render(cls.ADAPTER_FORMAT, object, *args, **kwargs))
return r_list
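As a quick illustration of the render path above, the sketch below pushes a real PoolNsRecord through the generic adapter entry point; the expected output assumes the field schemas behave as elsewhere in this change (integers stay integers, everything else becomes a string).

import yaml

from designate import objects
from designate.objects import adapters

ns_record = objects.PoolNsRecord(hostname='ns1.example.org.', priority=1)

print(yaml.dump(
    adapters.DesignateAdapter.render('YAML', ns_record),
    default_flow_style=False))
# Expected, under the assumptions above:
#   hostname: ns1.example.org.
#   priority: 1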

View File

@ -0,0 +1,59 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.Pool
MODIFICATIONS = {
'fields': {
'id': {
'read_only': False
},
'name': {
'read_only': False
},
'description': {
'read_only': False
},
'attributes': {
'read_only': False
},
'ns_records': {
'read_only': False
},
'nameservers': {
'read_only': False
},
'targets': {
'read_only': False
},
'also_notifies': {
'read_only': False
},
}
}
class PoolListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolList
MODIFICATIONS = {}

View File

@ -0,0 +1,41 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolAlsoNotifyYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolAlsoNotify
MODIFICATIONS = {
'fields': {
'host': {
'read_only': False
},
'port': {
'read_only': False
}
}
}
class PoolAlsoNotifyListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolAlsoNotifyList
MODIFICATIONS = {}

View File

@ -0,0 +1,86 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolAttributeYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolAttribute
MODIFICATIONS = {
'fields': {
'key': {
'read_only': False
},
'value': {
'read_only': False
},
}
}
@classmethod
def _render_object(cls, object, *arg, **kwargs):
return {str(object.key): str(object.value)}
@classmethod
def _parse_object(cls, values, object, *args, **kwargs):
for key in six.iterkeys(values):
object.key = key
object.value = values[key]
return object
class PoolAttributeListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolAttributeList
MODIFICATIONS = {}
@classmethod
def _render_list(cls, list_object, *args, **kwargs):
r_list = {}
for object in list_object:
value = cls.get_object_adapter(
cls.ADAPTER_FORMAT,
object).render(cls.ADAPTER_FORMAT, object, *args, **kwargs)
for key in six.iterkeys(value):
r_list[key] = value[key]
return r_list
@classmethod
def _parse_list(cls, values, output_object, *args, **kwargs):
for key, value in values.items():
# Add the object to the list
output_object.append(
# Get the right Adapter
cls.get_object_adapter(
cls.ADAPTER_FORMAT,
# This gets the internal type of the list, and parses it
# We need to do `get_object_adapter` as we need a new
# instance of the Adapter
output_object.LIST_ITEM_TYPE()).parse(
{key: value}, output_object.LIST_ITEM_TYPE()))
# Return the filled list
return output_object
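The overridden _render_list and _parse_list above flatten attributes into a plain mapping rather than a list of objects. A minimal sketch of that round trip, assuming the adapters are registered as in this change:

from designate import objects
from designate.objects import adapters

attrs = objects.PoolAttributeList.from_list([
    {'key': 'internal', 'value': 'true'},
])

# Renders to a flat {key: value} mapping, which is what appears under
# `attributes:` in pools.yaml.
rendered = adapters.DesignateAdapter.render('YAML', attrs)
assert rendered == {'internal': 'true'}

# Parsing the mapping back rebuilds one PoolAttribute per key.
parsed = adapters.DesignateAdapter.parse(
    'YAML', rendered, objects.PoolAttributeList())
assert parsed[0].key == 'internal' and parsed[0].value == 'true'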

View File

@ -0,0 +1,41 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolNameserverYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolNameserver
MODIFICATIONS = {
'fields': {
'host': {
'read_only': False
},
'port': {
'read_only': False
}
}
}
class PoolNameserverListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolNameserverList
MODIFICATIONS = {}

View File

@ -0,0 +1,41 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolNsRecordYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolNsRecord
MODIFICATIONS = {
'fields': {
'priority': {
'read_only': False
},
'hostname': {
'read_only': False
}
}
}
class PoolNsRecordListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolNsRecordList
MODIFICATIONS = {}

View File

@ -0,0 +1,50 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolTargetYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTarget
MODIFICATIONS = {
'fields': {
'type': {
'read_only': False
},
'tsigkey_id': {
'read_only': False
},
'description': {
'read_only': False
},
'masters': {
'read_only': False
},
'options': {
'read_only': False
}
}
}
class PoolTargetListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetList
MODIFICATIONS = {}

View File

@ -0,0 +1,41 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolTargetMasterYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetMaster
MODIFICATIONS = {
'fields': {
'host': {
'read_only': False
},
'port': {
'read_only': False
},
}
}
class PoolTargetMasterListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetMasterList
MODIFICATIONS = {}

View File

@ -0,0 +1,86 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslo_log import log as logging
from designate.objects.adapters.yaml import base
from designate import objects
LOG = logging.getLogger(__name__)
class PoolTargetOptionYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetOption
MODIFICATIONS = {
'fields': {
'key': {
'read_only': False
},
'value': {
'read_only': False
},
}
}
@classmethod
def _render_object(cls, object, *arg, **kwargs):
return {str(object.key): str(object.value)}
@classmethod
def _parse_object(cls, values, object, *args, **kwargs):
for key in six.iterkeys(values):
object.key = key
object.value = values[key]
return object
class PoolTargetOptionListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetOptionList
MODIFICATIONS = {}
@classmethod
def _render_list(cls, list_object, *args, **kwargs):
r_list = {}
for object in list_object:
value = cls.get_object_adapter(
cls.ADAPTER_FORMAT,
object).render(cls.ADAPTER_FORMAT, object, *args, **kwargs)
for key in six.iterkeys(value):
r_list[key] = value[key]
return r_list
@classmethod
def _parse_list(cls, values, output_object, *args, **kwargs):
for key, value in values.items():
# Add the object to the list
output_object.append(
# Get the right Adapter
cls.get_object_adapter(
cls.ADAPTER_FORMAT,
# This gets the internal type of the list, and parses it
# We need to do `get_object_adapter` as we need a new
# instance of the Adapter
output_object.LIST_ITEM_TYPE()).parse(
{key: value}, output_object.LIST_ITEM_TYPE()))
# Return the filled list
return output_object

View File

@ -18,10 +18,33 @@ from designate.objects import base
class PoolAlsoNotify(base.DictObjectMixin, base.PersistentObjectMixin, class PoolAlsoNotify(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject): base.DesignateObject):
FIELDS = { FIELDS = {
'pool_id': {}, 'pool_id': {
'host': {}, 'schema': {
'port': {}, 'type': 'string',
'description': 'Pool identifier',
'format': 'uuid',
},
},
'host': {
'schema': {
'type': 'string',
'format': 'ip-or-host',
'required': True,
},
},
'port': {
'schema': {
'type': 'integer',
'minimum': 1,
'maximum': 65535,
'required': True,
},
} }
}
STRING_KEYS = [
'id', 'host', 'port', 'pool_id'
]
class PoolAlsoNotifyList(base.ListObjectMixin, base.DesignateObject): class PoolAlsoNotifyList(base.ListObjectMixin, base.DesignateObject):

View File

@ -18,10 +18,33 @@ from designate.objects import base
class PoolNameserver(base.DictObjectMixin, base.PersistentObjectMixin, class PoolNameserver(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject): base.DesignateObject):
FIELDS = { FIELDS = {
'pool_id': {}, 'pool_id': {
'host': {}, 'schema': {
'port': {}, 'type': 'string',
'description': 'Pool identifier',
'format': 'uuid',
},
},
'host': {
'schema': {
'type': 'string',
'format': 'ip-or-host',
'required': True,
},
},
'port': {
'schema': {
'type': 'integer',
'minimum': 1,
'maximum': 65535,
'required': True,
},
} }
}
STRING_KEYS = [
'id', 'host', 'port', 'pool_id'
]
class PoolNameserverList(base.ListObjectMixin, base.DesignateObject): class PoolNameserverList(base.ListObjectMixin, base.DesignateObject):

View File

@ -18,11 +18,28 @@ from designate.objects import base
class PoolTarget(base.DictObjectMixin, base.PersistentObjectMixin, class PoolTarget(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject): base.DesignateObject):
FIELDS = { FIELDS = {
'pool_id': {}, 'pool_id': {
'schema': {
'type': 'string',
'description': 'Pool identifier',
'format': 'uuid',
},
},
'type': {}, 'type': {},
'tsigkey_id': {}, 'tsigkey_id': {
'description': {}, 'schema': {
'type': ['string', 'null'],
'description': 'TSIG identifier',
'format': 'uuid',
},
},
'description': {
'schema': {
'type': ['string', 'null'],
'description': 'Description for the pool',
'maxLength': 160
}
},
'masters': { 'masters': {
'relation': True, 'relation': True,
'relation_cls': 'PoolTargetMasterList' 'relation_cls': 'PoolTargetMasterList'
@ -33,6 +50,10 @@ class PoolTarget(base.DictObjectMixin, base.PersistentObjectMixin,
}, },
} }
STRING_KEYS = [
'id', 'type', 'pool_id'
]
class PoolTargetList(base.ListObjectMixin, base.DesignateObject): class PoolTargetList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = PoolTarget LIST_ITEM_TYPE = PoolTarget

View File

@ -18,10 +18,33 @@ from designate.objects import base
class PoolTargetMaster(base.DictObjectMixin, base.PersistentObjectMixin, class PoolTargetMaster(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject): base.DesignateObject):
FIELDS = { FIELDS = {
'pool_target_id': {}, 'pool_target_id': {
'host': {}, 'schema': {
'port': {} 'type': 'string',
'description': 'Pool Target identifier',
'format': 'uuid',
},
},
'host': {
'schema': {
'type': 'string',
'format': 'ip-or-host',
'required': True,
},
},
'port': {
'schema': {
'type': 'integer',
'minimum': 1,
'maximum': 65535,
'required': True,
},
} }
}
STRING_KEYS = [
'id', 'host', 'port', 'pool_target_id'
]
class PoolTargetMasterList(base.ListObjectMixin, base.DesignateObject): class PoolTargetMasterList(base.ListObjectMixin, base.DesignateObject):

View File

@ -18,10 +18,32 @@ from designate.objects import base
class PoolTargetOption(base.DictObjectMixin, base.PersistentObjectMixin, class PoolTargetOption(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject): base.DesignateObject):
FIELDS = { FIELDS = {
'pool_target_id': {}, 'pool_target_id': {
'key': {}, 'schema': {
'value': {}, 'type': 'string',
'description': 'Pool Target identifier',
'format': 'uuid',
},
},
'key': {
'schema': {
'type': 'string',
'maxLength': 255,
},
'required': True,
},
'value': {
'schema': {
'type': 'string',
'maxLength': 255,
},
'required': True
} }
}
STRING_KEYS = [
'id', 'key', 'value', 'pool_target_id'
]
class PoolTargetOptionList(base.AttributeListObjectMixin, class PoolTargetOptionList(base.AttributeListObjectMixin,

View File

@ -97,10 +97,6 @@ class Service(service.RPCService, coordination.CoordinationMixin,
def __init__(self, threads=None): def __init__(self, threads=None):
super(Service, self).__init__(threads=threads) super(Service, self).__init__(threads=threads)
# Build the Pool (and related) Object from Config
self.pool = objects.Pool.from_config(
CONF, CONF['service:pool_manager'].pool_id)
# Get a pool manager cache connection. # Get a pool manager cache connection.
self.cache = cache.get_pool_manager_cache( self.cache = cache.get_pool_manager_cache(
CONF['service:pool_manager'].cache_driver) CONF['service:pool_manager'].cache_driver)
@ -121,9 +117,6 @@ class Service(service.RPCService, coordination.CoordinationMixin,
self.max_retries * self.retry_interval + self.max_retries * self.retry_interval +
self.delay) self.delay)
# Create the necessary Backend instances for each target
self._setup_target_backends()
def _setup_target_backends(self): def _setup_target_backends(self):
self.target_backends = {} self.target_backends = {}
@ -154,6 +147,34 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return topic return topic
def start(self): def start(self):
# Build the Pool (and related) Object from Config
context = DesignateContext.get_admin_context()
pool_id = CONF['service:pool_manager'].pool_id
has_targets = False
while not has_targets:
try:
self.pool = self.central_api.get_pool(context, pool_id)
if len(self.pool.targets) > 0:
has_targets = True
else:
LOG.error(_LE("No targets for %s found."), self.pool)
time.sleep(5)
# Pool data may not have migrated to the DB yet
except exceptions.PoolNotFound:
LOG.error(_LE("Pool ID %s not found."), pool_id)
time.sleep(5)
# designate-central service may not have started yet
except messaging.exceptions.MessagingTimeout:
time.sleep(0.2)
# Create the necessary Backend instances for each target
self._setup_target_backends()
for target in self.pool.targets: for target in self.pool.targets:
self.target_backends[target.id].start() self.target_backends[target.id].start()

View File

@ -382,9 +382,14 @@ class TestCase(base.BaseTestCase):
# Fetch the default pool # Fetch the default pool
pool = self.storage.get_pool(self.admin_context, default_pool_id) pool = self.storage.get_pool(self.admin_context, default_pool_id)
# Add a NS record to it # Fill out the necessary pool details
pool.ns_records.append( pool.ns_records = objects.PoolNsRecordList.from_list([
objects.PoolNsRecord(priority=0, hostname='ns1.example.org.')) {'hostname': 'ns1.example.org.', 'priority': 0}
])
pool.targets = objects.PoolTargetList.from_list([
{'type': 'fake', u'description': "Fake PoolTarget for Tests"}
])
# Save the default pool # Save the default pool
self.storage.update_pool(self.admin_context, pool) self.storage.update_pool(self.admin_context, pool)

View File

@ -0,0 +1,63 @@
---
- name: pool-1
description: Default PowerDNS Pool
attributes:
internal: true
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
targets:
- type: powerdns
description: PowerDNS Database Cluster
masters:
- host: 192.0.2.1
port: 5354
options:
connection: 'mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8'
also_notifies:
- host: 192.0.2.4
port: 53
- name: pool-2
id: cf2e8eab-76cd-4162-bf76-8aeee3556de0
description: Default PowerDNS Pool
attributes:
external: true
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
targets:
- type: bind
description: BIND9 Server 1
masters:
- host: 192.0.2.1
port: 5354
options:
rndc_host: 192.0.2.2
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key
- type: bind
description: BIND9 Server 2
masters:
- host: 192.0.2.1
port: 5354
options:
rndc_host: 192.0.2.3
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key

View File

@ -0,0 +1,32 @@
---
- name: default
description: Default PowerDNS Pool
attributes:
type: internal
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
targets:
- type: powerdns
description: PowerDNS Database Cluster
masters:
- host: 192.0.2.1
port: 5354
options:
connection: 'mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8'
also_notifies:
- host: 192.0.2.4
port: 53

View File

@ -0,0 +1,25 @@
- also_notifies:
- host: 192.0.2.4
port: 53
attributes: {}
description: Default PowerDNS Pool
id: cf2e8eab-76cd-4162-bf76-8aeee3556de0
name: default
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
targets:
- description: PowerDNS Database Cluster
masters:
- host: 192.0.2.1
port: 5354
options:
connection: mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8
type: powerdns
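One non-obvious detail about this expected-output file: yaml.dump() sorts mapping keys alphabetically by default, which is why the rendered pool starts with also_notifies and each target ends with type. A tiny standalone demonstration:

import yaml

print(yaml.dump({'name': 'default', 'also_notifies': [], 'targets': []},
                default_flow_style=False))
# also_notifies: []
# name: default
# targets: []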

View File

@ -15,6 +15,49 @@
# under the License. # under the License.
from designate.tests import TestCase from designate.tests import TestCase
POOL_DICT = {
'id': u'794ccc2c-d751-44fe-b57f-8894c9f5c842',
'name': u'default',
'targets': [
{
'id': 'f278782a-07dc-4502-9177-b5d85c5f7c7e',
'type': 'fake',
'masters': [
{
'host': '127.0.0.1',
'port': 5354
}
],
'options': {}
},
{
'id': 'a38703f2-b71e-4e5b-ab22-30caaed61dfd',
'type': 'fake',
'masters': [
{
'host': '127.0.0.1',
'port': 5354
}
],
'options': {}
},
],
'nameservers': [
{
'id': 'c5d64303-4cba-425a-9f3c-5d708584dde4',
'host': '127.0.0.1',
'port': 5355
},
{
'id': 'c67cdc95-9a9e-4d2a-98ed-dc78cbd85234',
'host': '127.0.0.1',
'port': 5356
},
],
'also_notifies': [],
}
class PoolManagerTestCase(TestCase): class PoolManagerTestCase(TestCase):
pass pass
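A minimal sketch of how this fixture is consumed by the service tests further down: it is turned into a Pool object and stands in for the designate-central response when the pool manager starts.

from mock import patch

from designate import objects
from designate.central import rpcapi as central_rpcapi

pool = objects.Pool.from_dict(POOL_DICT)
assert len(pool.targets) == 2

# The tests below wrap start_service('pool_manager') in a patch like this
# one, so no live designate-central is needed.
get_pool_patch = patch.object(
    central_rpcapi.CentralAPI, 'get_pool', return_value=pool)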

View File

@ -18,7 +18,6 @@ import logging
import uuid import uuid
import oslo_messaging as messaging import oslo_messaging as messaging
from oslo_config import cfg
from mock import call from mock import call
from mock import Mock from mock import Mock
from mock import patch from mock import patch
@ -30,6 +29,7 @@ from designate.central import rpcapi as central_rpcapi
from designate.mdns import rpcapi as mdns_rpcapi from designate.mdns import rpcapi as mdns_rpcapi
from designate.storage.impl_sqlalchemy import tables from designate.storage.impl_sqlalchemy import tables
from designate.tests.test_pool_manager import PoolManagerTestCase from designate.tests.test_pool_manager import PoolManagerTestCase
from designate.tests.test_pool_manager import POOL_DICT
import designate.pool_manager.service as pm_module import designate.pool_manager.service as pm_module
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -55,59 +55,11 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
pool_id='794ccc2c-d751-44fe-b57f-8894c9f5c842', pool_id='794ccc2c-d751-44fe-b57f-8894c9f5c842',
group='service:pool_manager') group='service:pool_manager')
# Configure the Pool
section_name = 'pool:794ccc2c-d751-44fe-b57f-8894c9f5c842'
section_opts = [
cfg.ListOpt('targets', default=[
'f278782a-07dc-4502-9177-b5d85c5f7c7e',
'a38703f2-b71e-4e5b-ab22-30caaed61dfd',
]),
cfg.ListOpt('nameservers', default=[
'c5d64303-4cba-425a-9f3c-5d708584dde4',
'c67cdc95-9a9e-4d2a-98ed-dc78cbd85234',
]),
cfg.ListOpt('also_notifies', default=[]),
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
# Configure the Pool Targets
section_name = 'pool_target:f278782a-07dc-4502-9177-b5d85c5f7c7e'
section_opts = [
cfg.StrOpt('type', default='fake'),
cfg.ListOpt('masters', default=['127.0.0.1:5354']),
cfg.DictOpt('options', default={})
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
section_name = 'pool_target:a38703f2-b71e-4e5b-ab22-30caaed61dfd'
section_opts = [
cfg.StrOpt('type', default='fake'),
cfg.ListOpt('masters', default=['127.0.0.1:5354']),
cfg.DictOpt('options', default={})
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
# Configure the Pool Nameservers
section_name = 'pool_nameserver:c5d64303-4cba-425a-9f3c-5d708584dde4'
section_opts = [
cfg.StrOpt('host', default='127.0.0.1'),
cfg.StrOpt('port', default=5355),
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
section_name = 'pool_nameserver:c67cdc95-9a9e-4d2a-98ed-dc78cbd85234'
section_opts = [
cfg.StrOpt('host', default='127.0.0.1'),
cfg.StrOpt('port', default=5356),
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
# Start the Service # Start the Service
with patch.object(
central_rpcapi.CentralAPI,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service = self.start_service('pool_manager') self.service = self.start_service('pool_manager')
self.cache = self.service.cache self.cache = self.service.cache
@ -230,6 +182,10 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
self.config( self.config(
threshold_percentage=50, threshold_percentage=50,
group='service:pool_manager') group='service:pool_manager')
with patch.object(
central_rpcapi.CentralAPI,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service = self.start_service('pool_manager') self.service = self.start_service('pool_manager')
zone = self._build_zone('example.org.', 'CREATE', 'PENDING') zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
@ -349,6 +305,10 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
self.config( self.config(
threshold_percentage=50, threshold_percentage=50,
group='service:pool_manager') group='service:pool_manager')
with patch.object(
central_rpcapi.CentralAPI,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
self.service = self.start_service('pool_manager') self.service = self.start_service('pool_manager')
zone = self._build_zone('example.org.', 'UPDATE', 'PENDING') zone = self._build_zone('example.org.', 'UPDATE', 'PENDING')

View File

@ -116,7 +116,7 @@ class StorageTestCase(object):
context, pool.id, objects.PoolAlsoNotify.from_dict(values)) context, pool.id, objects.PoolAlsoNotify.from_dict(values))
# Paging Tests # Paging Tests
def _ensure_paging(self, data, method): def _ensure_paging(self, data, method, criterion=None):
""" """
Given an array of created items we iterate through them making sure Given an array of created items we iterate through them making sure
they match up to things returned by paged results. they match up to things returned by paged results.
@ -124,19 +124,26 @@ class StorageTestCase(object):
results = None results = None
item_number = 0 item_number = 0
criterion = criterion or {}
for current_page in range(0, int(math.ceil(float(len(data)) / 2))): for current_page in range(0, int(math.ceil(float(len(data)) / 2))):
LOG.debug('Validating results on page %d', current_page) LOG.critical('Validating results on page %d', current_page)
if results is not None: if results is not None:
results = method( results = method(
self.admin_context, limit=2, marker=results[-1]['id']) self.admin_context,
limit=2,
marker=results[-1]['id'],
criterion=criterion
)
else: else:
results = method(self.admin_context, limit=2) results = method(self.admin_context, limit=2,
criterion=criterion)
LOG.critical('Results: %d', len(results)) LOG.critical('Results: %d', len(results))
for result_number, result in enumerate(results): for result_number, result in enumerate(results):
LOG.debug('Validating result %d on page %d', result_number, LOG.critical('Validating result %d on page %d', result_number,
current_page) current_page)
self.assertEqual( self.assertEqual(
data[item_number]['id'], results[result_number]['id']) data[item_number]['id'], results[result_number]['id'])
@ -2535,15 +2542,19 @@ class StorageTestCase(object):
def test_find_pool_targets(self): def test_find_pool_targets(self):
pool = self.create_pool(fixture=0) pool = self.create_pool(fixture=0)
# Verify that there are no pool_targets created # Verify that there are no new pool_targets created
actual = self.storage.find_pool_targets(self.admin_context) actual = self.storage.find_pool_targets(
self.admin_context,
criterion={'pool_id': pool.id})
self.assertEqual(0, len(actual)) self.assertEqual(0, len(actual))
# Create a PoolTarget # Create a PoolTarget
pool_target = self.create_pool_target(pool, fixture=0) pool_target = self.create_pool_target(pool, fixture=0)
# Fetch the PoolTargets and ensure only 1 exists # Fetch the PoolTargets and ensure only 2 exist
actual = self.storage.find_pool_targets(self.admin_context) actual = self.storage.find_pool_targets(
self.admin_context,
criterion={'pool_id': pool.id})
self.assertEqual(1, len(actual)) self.assertEqual(1, len(actual))
self.assertEqual(pool_target['pool_id'], actual[0]['pool_id']) self.assertEqual(pool_target['pool_id'], actual[0]['pool_id'])
@ -2557,7 +2568,8 @@ class StorageTestCase(object):
for i in range(10)] for i in range(10)]
# Ensure we can page through the results. # Ensure we can page through the results.
self._ensure_paging(created, self.storage.find_pool_targets) self._ensure_paging(created, self.storage.find_pool_targets,
criterion={'pool_id': pool.id})
def test_find_pool_targets_with_criterion(self): def test_find_pool_targets_with_criterion(self):
pool = self.create_pool(fixture=0) pool = self.create_pool(fixture=0)

View File

@ -0,0 +1,156 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from oslo_log import log as logging
import oslotest.base
from designate import objects
from designate.objects import adapters
from designate.tests import resources
LOG = logging.getLogger(__name__)
class DesignateYAMLAdapterTest(oslotest.base.BaseTestCase):
def assertNestedDictContainsSubset(self, expected, actual):
for key, value in expected.items():
if isinstance(value, dict):
self.assertNestedDictContainsSubset(value, actual.get(key, {}))
elif isinstance(value, list):
self.assertEqual(len(value), len(actual[key]))
for index, item in enumerate(value):
self.assertNestedDictContainsSubset(
item, actual[key][index])
else:
self.assertEqual(value, actual[key])
def test_yaml_parsing(self):
file = os.path.join(resources.path, 'pools_yaml/pools.yaml')
with open(file, 'r') as stream:
xpools = yaml.safe_load(stream)
for xpool in xpools:
r_pool = adapters.DesignateAdapter.parse(
'YAML', xpool, objects.Pool())
self.assertEqual('default', r_pool.name)
self.assertEqual('Default PowerDNS Pool', r_pool.description)
self.assertEqual(2, len(r_pool.ns_records))
self.assertEqual(1, r_pool.ns_records[0].priority)
self.assertEqual(2, r_pool.ns_records[1].priority)
self.assertEqual(
'ns1-1.example.org.', r_pool.ns_records[0].hostname)
self.assertEqual(
'ns1-2.example.org.', r_pool.ns_records[1].hostname)
self.assertEqual(1, len(r_pool.targets))
self.assertEqual('powerdns', r_pool.targets[0].type)
self.assertEqual(
'PowerDNS Database Cluster', r_pool.targets[0].description)
self.assertEqual(1, len(r_pool.targets[0].masters))
self.assertEqual('192.0.2.1', r_pool.targets[0].masters[0].host)
self.assertEqual(5354, r_pool.targets[0].masters[0].port)
self.assertEqual(1, len(r_pool.targets[0].options))
self.assertEqual('connection', r_pool.targets[0].options[0].key)
self.assertEqual(
'mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8', # noqa
r_pool.targets[0].options[0].value)
self.assertEqual(1, len(r_pool.also_notifies))
self.assertEqual('192.0.2.4', r_pool.also_notifies[0].host)
self.assertEqual(53, r_pool.also_notifies[0].port)
self.assertEqual(1, len(r_pool.attributes))
self.assertEqual('type', r_pool.attributes[0].key)
self.assertEqual('internal', r_pool.attributes[0].value)
def test_yaml_rendering(self):
pool_dict = {
'also_notifies': [
{
'host': u'192.0.2.4',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
}
],
'attributes': [],
'description': u'Default PowerDNS Pool',
'id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'name': u'default',
'nameservers': [
{
'host': u'192.0.2.2',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
},
{
'host': u'192.0.2.3',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
}
],
'ns_records': [
{
'hostname': u'ns1-1.example.org.',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'priority': 1,
},
{
'hostname': u'ns1-2.example.org.',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'priority': 2,
}
],
'provisioner': u'UNMANAGED',
'targets': [
{
'description': u'PowerDNS Database Cluster',
'masters': [
{
'host': u'192.0.2.1',
'pool_target_id': u'd567d569-2d69-41d5-828d-f7054bb10b5c', # noqa
'port': 5354,
}
],
'options': [
{
'key': u'connection',
'pool_target_id': u'd567d569-2d69-41d5-828d-f7054bb10b5c', # noqa
'value': u'mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8', # noqa
}
],
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'type': u'powerdns',
}
]
}
file = os.path.join(resources.path, 'pools_yaml/sample_output.yaml')
with open(file, 'r') as stream:
self.assertEqual(
stream.read(),
yaml.dump(
adapters.DesignateAdapter.render(
'YAML', objects.PoolList.from_list([pool_dict])
),
default_flow_style=False
)
)

View File

@ -33,6 +33,55 @@ from designate.tests.unit import RwObject
import designate.pool_manager.service as pm_module import designate.pool_manager.service as pm_module
POOL_DICT = {
'also_notifies': [
{
'host': u'192.0.2.4',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
}
],
'attributes': [],
'description': u'Default PowerDNS Pool',
'id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'name': u'default',
'nameservers': [
{
'host': u'192.0.2.2',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
},
{
'host': u'192.0.2.3',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'port': 53,
}
],
'ns_records': [
{
'hostname': u'ns1-1.example.org.',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'priority': 1,
},
{
'hostname': u'ns1-2.example.org.',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
'priority': 2,
}
],
'provisioner': u'UNMANAGED',
'targets': [
{
'description': u'PowerDNS Database Cluster',
'masters': [],
'options': [],
'type': 'fake',
'pool_id': u'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
}
]
}
class PoolManagerInitTest(test.BaseTestCase): class PoolManagerInitTest(test.BaseTestCase):
def __setUp(self): def __setUp(self):
super(PoolManagerTest, self).setUp() super(PoolManagerTest, self).setUp()
@ -44,20 +93,19 @@ class PoolManagerInitTest(test.BaseTestCase):
self.assertRaises(exceptions.NoPoolTargetsConfigured, Service) self.assertRaises(exceptions.NoPoolTargetsConfigured, Service)
def test_init(self): def test_init(self):
with patch.object(objects.Pool, 'from_config',
return_value=Mock()):
Service._setup_target_backends = Mock()
Service() Service()
def test_start(self): def test_start(self):
with patch.object(objects.Pool, 'from_config', with patch.object(objects.Pool, 'from_config',
return_value=Mock()): return_value=Mock()):
Service._setup_target_backends = Mock()
pm = Service() pm = Service()
pm.pool.targets = ()
pm.tg.add_timer = Mock() pm.tg.add_timer = Mock()
pm._pool_election = Mock() pm._pool_election = Mock()
with patch("designate.service.RPCService.start"): with patch("designate.service.RPCService.start"):
with patch.object(
pm.central_api,
'get_pool',
return_value=objects.Pool.from_dict(POOL_DICT)):
pm.start() pm.start()
call1 = pm.tg.add_timer.call_args_list[0][0] call1 = pm.tg.add_timer.call_args_list[0][0]
@ -88,8 +136,11 @@ class PoolManagerTest(test.BaseTestCase):
def setUp(self, *mocks): def setUp(self, *mocks):
super(PoolManagerTest, self).setUp() super(PoolManagerTest, self).setUp()
self.pm = Service() self.pm = Service()
self.pm.pool.targets = ()
self.pm.tg.add_timer = Mock() self.pm.tg.add_timer = Mock()
self.pm.pool = Mock()
setattr(self.pm.pool, 'targets', ())
setattr(self.pm.pool, 'also_notifies', ())
setattr(self.pm.pool, 'nameservers', ())
self.pm._pool_election = Mock() self.pm._pool_election = Mock()
self.pm.target_backends = {} self.pm.target_backends = {}

View File

@ -29,7 +29,7 @@ DESIGNATE_AKAMAI_USERNAME=${DESIGNATE_AKAMAI_USERNAME:-username}
DESIGNATE_AKAMAI_PASSWORD=${DESIGNATE_AKAMAI_PASSWORD:-password} DESIGNATE_AKAMAI_PASSWORD=${DESIGNATE_AKAMAI_PASSWORD:-password}
DESIGNATE_AKAMAI_MASTERS=${DESIGNATE_AKAMAI_MASTERS:-"$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_MDNS"} DESIGNATE_AKAMAI_MASTERS=${DESIGNATE_AKAMAI_MASTERS:-"$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_MDNS"}
DESIGNATE_AKAMAI_NAMESERVERS=${DESIGNATE_AKAMAI_NAMESERVERS:-""} DESIGNATE_AKAMAI_NAMESERVERS=${DESIGNATE_AKAMAI_NAMESERVERS:-""}
DESIGNATE_AKAMAI_ALSO_NOTIFIES=${DESIGNATE_AKAMAI_ALSO_NOTIFIES:-"193.108.155.34:53,23.73.134.141:53,80.67.64.148:53,23.73.134.237:53,23.73.133.141:53,23.73.133.237:53,80.67.64.10:53,72.246.0.10:53,72.247.45.157:53,72.246.192.168:53,193.108.152.143:53,60.254.128.45:53,72.247.45.110:53,72.247.45.65:53,72.247.45.25:53"} DESIGNATE_AKAMAI_ALSO_NOTIFIES=${DESIGNATE_AKAMAI_ALSO_NOTIFIES:-"193.108.155.34,23.73.134.141,80.67.64.148,23.73.134.237,23.73.133.141,23.73.133.237,80.67.64.10,72.246.0.10,72.247.45.157,72.246.192.168,193.108.152.143,60.254.128.45,72.247.45.110,72.247.45.65,72.247.45.25"}
# Pull in DESIGNATE_3RDPARTY_CREDS user/pass if set # Pull in DESIGNATE_3RDPARTY_CREDS user/pass if set
if [ -n "$DESIGNATE_3RDPARTY_CREDS" ]; then if [ -n "$DESIGNATE_3RDPARTY_CREDS" ]; then
@ -57,38 +57,72 @@ function install_designate_backend {
# configure_designate_backend - make configuration changes, including those to other services # configure_designate_backend - make configuration changes, including those to other services
function configure_designate_backend { function configure_designate_backend {
iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID type akamai # Generate Designate pool.yaml file
iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID masters $DESIGNATE_AKAMAI_MASTERS sudo tee $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID options "username: $DESIGNATE_AKAMAI_USERNAME, password: $DESIGNATE_AKAMAI_PASSWORD" ---
- name: default
description: DevStack Akamai Pool
attributes: {}
# Create a Pool Nameserver for each of the Akamai nameservers targets:
local nameserver_ids="" - type: akamai
IFS=',' read -a nameservers <<< "$DESIGNATE_AKAMAI_NAMESERVERS" description: Akamai API
for nameserver in "${nameservers[@]}"; do options:
local nameserver_id=`uuidgen` username: $DESIGNATE_AKAMAI_USERNAME
iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id host $(dig +short A $nameserver | head -n 1) password: $DESIGNATE_AKAMAI_PASSWORD
iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id port 53
# Append the Nameserver ID to the list masters:
nameserver_ids+=${nameserver_id}, EOF
# Create a Pool Master for each of the Akamai Masters
IFS=',' read -a masters <<< "$DESIGNATE_AKAMAI_MASTERS"
for master in "${masters[@]}"; do
sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
- host: $master
port: 53
EOF
done done
# Configure the Pool for the set of nameserver IDs, minus the trailing comma # Create a Pool NS Record for each of the Akamai Nameservers
iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID nameservers "${nameserver_ids:0:-1}" IFS=',' read -a nameservers <<< "$DESIGNATE_AKAMAI_NAMESERVERS"
# Configure the Pool to Notify Akamai's Transfer Agents sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID also_notifies "$DESIGNATE_AKAMAI_ALSO_NOTIFIES" ns_records:
} EOF
# create_designate_ns_records - Create Pool NS Records for nameserver in "${nameservers[@]}"; do
function create_designate_ns_records_backend { sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
# Build an array of the Akamai nameservers. - hostname: $nameserver
IFS=',' read -a ns_records <<< "$DESIGNATE_AKAMAI_NAMESERVERS" priority: 1
EOF
done
# Create a NS Record for each of the Akamai nameservers # Create a Pool Nameserver for each of the Akamai Nameservers
for ns_record in "${ns_records[@]}"; do sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
designate server-create --name "${ns_record%%.}." nameservers:
EOF
for nameserver in "${nameservers[@]}"; do
sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
- host: `dig +short A $nameserver | head -n 1`
port: 53
EOF
done
# Create a Pool Also Notifies for each of the Akamai Also Notifies
IFS=',' read -a also_notifies <<< "$DESIGNATE_AKAMAI_ALSO_NOTIFIES"
sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
also_notifies:
EOF
for also_notify in "${also_notifies[@]}"; do
sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
- host: $also_notify
port: 53
EOF
done done
} }

View File

@@ -74,15 +74,37 @@ EOF
 # configure_designate_backend - make configuration changes, including those to other services
 function configure_designate_backend {
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID type bind9
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID masters $DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_MDNS
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID options "rndc_host: $DESIGNATE_SERVICE_HOST, rndc_port: $DESIGNATE_SERVICE_PORT_RNDC, rndc_config_file: $BIND_CFG_DIR/rndc.conf, rndc_key_file: $BIND_CFG_DIR/rndc.key, host: $DESIGNATE_SERVICE_HOST, port: $DESIGNATE_SERVICE_PORT_DNS"
-    # DevStack Managed BIND NameServer
-    local nameserver_id=`uuidgen`
-    iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID nameservers $nameserver_id
-    iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id host $DESIGNATE_SERVICE_HOST
-    iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id port $DESIGNATE_SERVICE_PORT_DNS
+    # Generate Designate pool.yaml file
+    sudo tee $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+---
+- name: default
+  description: DevStack BIND Pool
+  attributes: {}
+  ns_records:
+    - hostname: $DESIGNATE_DEFAULT_NS_RECORD
+      priority: 1
+  nameservers:
+    - host: $DESIGNATE_SERVICE_HOST
+      port: $DESIGNATE_SERVICE_PORT_DNS
+  targets:
+    - type: bind9
+      description: BIND Instance
+      masters:
+        - host: $DESIGNATE_SERVICE_HOST
+          port: $DESIGNATE_SERVICE_PORT_MDNS
+      options:
+        host: $DESIGNATE_SERVICE_HOST
+        port: $DESIGNATE_SERVICE_PORT_DNS
+        rndc_host: $DESIGNATE_SERVICE_HOST
+        rndc_port: $DESIGNATE_SERVICE_PORT_RNDC
+        rndc_config_file: $BIND_CFG_DIR/rndc.conf
+        rndc_key_file: $BIND_CFG_DIR/rndc.key
+EOF
     sudo chown $STACK_USER $BIND_CFG_DIR
View File
@@ -33,7 +33,7 @@ DESIGNATE_DYNECT_JOB_TIMEOUT=${DESIGNATE_DYNECT_JOB_TIMEOUT:-}
 DESIGNATE_DYNECT_TIMEOUT=${DESIGNATE_DYNECT_TIMEOUT:-}
 DESIGNATE_DYNECT_MASTERS=${DESIGNATE_DYNECT_MASTERS:-"$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_MDNS"}
 DESIGNATE_DYNECT_NAMESERVERS=${DESIGNATE_DYNECT_NAMESERVERS:-""}
-DESIGNATE_DYNECT_ALSO_NOTIFIES=${DESIGNATE_DYNECT_ALSO_NOTIFIES:-"204.13.249.65:53,208.78.68.65:53"}
+DESIGNATE_DYNECT_ALSO_NOTIFIES=${DESIGNATE_DYNECT_ALSO_NOTIFIES:-"204.13.249.65,208.78.68.65"}
 # Pull in DESIGNATE_3RDPARTY_CREDS user/pass if set
 if [ -n "$DESIGNATE_3RDPARTY_CREDS" ]; then
@@ -62,47 +62,73 @@ function install_designate_backend {
 # configure_designate_backend - make configuration changes, including those to other services
 function configure_designate_backend {
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID type dynect
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID masters $DESIGNATE_DYNECT_MASTERS
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID options "customer_name: $DESIGNATE_DYNECT_CUSTOMER, username: $DESIGNATE_DYNECT_USERNAME, password: $DESIGNATE_DYNECT_PASSWORD"
-    # Create a Pool Nameserver for each of the DynECT nameservers
-    local nameserver_ids=""
-    IFS=',' read -a nameservers <<< "$DESIGNATE_DYNECT_NAMESERVERS"
-    for nameserver in "${nameservers[@]}"; do
-        local nameserver_id=`uuidgen`
-        iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id host $(dig +short A $nameserver | head -n 1)
-        iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id port 53
-        # Append the Nameserver ID to the list
-        nameserver_ids+=${nameserver_id},
+    # Generate Designate pool.yaml file
+    sudo tee $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+---
+- name: default
+  description: DevStack DynECT Pool
+  attributes: {}
+  targets:
+    - type: dynect
+      description: DynECT API
+      options:
+        customer_name: $DESIGNATE_DYNECT_CUSTOMER
+        username: $DESIGNATE_DYNECT_USERNAME
+        password: $DESIGNATE_DYNECT_PASSWORD
+      masters:
+EOF
+    # Create a Pool Master for each of the DynECT Masters
+    IFS=',' read -a masters <<< "$DESIGNATE_DYNECT_MASTERS"
+    for master in "${masters[@]}"; do
+        sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+        - host: $master
+          port: 53
+EOF
     done
-    # Configure the Pool for the set of nameserver IDs, minus the trailing comma
-    iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID nameservers "${nameserver_ids:0:-1}"
-    # Configure the Pool to Notify DynECT's Transfer Agents
-    iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID also_notifies "$DESIGNATE_DYNECT_ALSO_NOTIFIES"
-    # Global DynECT Backend Settings
-    if [ ! -z $DESIGNATE_DYNECT_JOB_TIMEOUT ]; then
-        iniset $DESIGNATE_CONF backend:dynect job_timeout "$DESIGNATE_DYNECT_JOB_TIMEOUT"
-    fi
-    if [ ! -z $DESIGNATE_DYNECT_TIMEOUT ]; then
-        iniset $DESIGNATE_CONF backend:dynect timeout "$DESIGNATE_DYNECT_TIMEOUT"
-    fi
-}
-
-# create_designate_ns_records - Create Pool NS Records
-function create_designate_ns_records_backend {
-    # Build an array of the DynECT nameservers.
-    IFS=',' read -a ns_records <<< "$DESIGNATE_DYNECT_NAMESERVERS"
-    # Create a NS Record for each of the DynECT nameservers
-    for ns_record in "${ns_records[@]}"; do
-        designate server-create --name "${ns_record%%.}."
+    # Create a Pool NS Record for each of the DynECT Nameservers
+    IFS=',' read -a nameservers <<< "$DESIGNATE_DYNECT_NAMESERVERS"
+    sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+  ns_records:
+EOF
+    for nameserver in "${nameservers[@]}"; do
+        sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+    - hostname: $nameserver
+      priority: 1
+EOF
+    done
+    # Create a Pool Nameserver for each of the DynECT Nameservers
+    sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+  nameservers:
+EOF
+    for nameserver in "${nameservers[@]}"; do
+        sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+    - host: `dig +short A $nameserver | head -n 1`
+      port: 53
+EOF
+    done
+    # Create a Pool Also Notifies for each of the DynECT Also Notifies
+    IFS=',' read -a also_notifies <<< "$DESIGNATE_DYNECT_ALSO_NOTIFIES"
+    sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+  also_notifies:
+EOF
+    for also_notify in "${also_notifies[@]}"; do
+        sudo tee -a $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+    - host: $also_notify
+      port: 53
+EOF
     done
 }
View File
@@ -31,7 +31,21 @@ function install_designate_backend {
 # configure_designate_backend - make configuration changes, including those to other services
 function configure_designate_backend {
-    :
+    # Generate Designate pool.yaml file
+    sudo tee $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+---
+- name: default
+  description: DevStack Fake Pool
+  attributes: {}
+  ns_records:
+    - hostname: $DESIGNATE_DEFAULT_NS_RECORD
+      priority: 1
+  targets:
+    - type: fake
+      description: Fake Backend
+EOF
 }
 
 # init_designate_backend - initialize databases, etc.
View File
@@ -53,15 +53,6 @@ function install_designate_backend {
 # configure_designate_backend - make configuration changes, including those to other services
 function configure_designate_backend {
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID type infoblox
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID masters $DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_MDNS
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID options "host: $DESIGNATE_INFOBLOX_NAMESERVER_IP, port: $DESIGNATE_SERVICE_PORT_DNS"
-    local nameserver_id=`uuidgen`
-    iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID nameservers $nameserver_id
-    iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id host $DESIGNATE_INFOBLOX_NAMESERVER_IP
-    iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id port $DESIGNATE_SERVICE_PORT_DNS
     iniset $DESIGNATE_CONF backend:infoblox wapi_url $DESIGNATE_INFOBLOX_WAPI_URL
     iniset $DESIGNATE_CONF backend:infoblox username $DESIGNATE_INFOBLOX_USERNAME
     iniset $DESIGNATE_CONF backend:infoblox password $DESIGNATE_INFOBLOX_PASSWORD

@@ -72,6 +63,34 @@ function configure_designate_backend {
     iniset $DESIGNATE_CONF backend:infoblox dns_view $DESIGNATE_INFOBLOX_DNS_VIEW
     iniset $DESIGNATE_CONF backend:infoblox net_view $DESIGNATE_INFOBLOX_NET_VIEW
     iniset $DESIGNATE_CONF backend:infoblox ns_group $DESIGNATE_INFOBLOX_NS_GROUP
+    # Generate Designate pool.yaml file
+    sudo tee $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+---
+- name: default
+  description: DevStack InfoBlox Pool
+  attributes: {}
+  ns_records:
+    - hostname: $DESIGNATE_DEFAULT_NS_RECORD
+      priority: 1
+  nameservers:
+    - host: $DESIGNATE_INFOBLOX_NAMESERVER_IP
+      port: $DESIGNATE_SERVICE_PORT_DNS
+  targets:
+    - type: powerdns
+      description: InfoBlox Cluster API
+      masters:
+        - host: $DESIGNATE_SERVICE_HOST
+          port: $DESIGNATE_SERVICE_PORT_MDNS
+      options:
+        host: $DESIGNATE_INFOBLOX_NAMESERVER_IP
+        port: $DESIGNATE_SERVICE_PORT_DNS
+EOF
 }
 
 # init_designate_backend - initialize databases, etc.
View File
@ -1,77 +0,0 @@
# Configure the ipa backend
# Enable with:
# DESIGNATE_BACKEND_DRIVER=ipa
# Dependencies:
# ``functions`` file
# ``designate`` configuration
# install_designate_backend - install any external requirements
# configure_designate_backend - make configuration changes, including those to other services
# init_designate_backend - initialize databases, etc.
# start_designate_backend - start any external services
# stop_designate_backend - stop any external services
# cleanup_designate_backend - remove transient data and cache
# Save trace setting
DP_IPA_XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Defaults
# --------
DESIGNATE_IPA_HOST=${DESIGNATE_IPA_HOST:-$(hostname)}
DESIGNATE_IPA_CA_CERT=${DESIGNATE_IPA_CA_CERT:-/etc/ipa/ca.crt}
DESIGNATE_IPA_CLIENT_KEYTAB=${DESIGNATE_IPA_CLIENT_KEYTAB:-$HOME/ipaadmin.keytab}
# Entry Points
# ------------
# install_designate_backend - install any external requirements
function install_designate_backend {
:
}
# configure_designate_backend - make configuration changes, including those to other services
function configure_designate_backend {
rc=0
if [[ ! -f $DESIGNATE_IPA_CA_CERT ]]; then
echo ERROR: IPA CA cert file $DESIGNATE_IPA_CA_CERT not found
rc=1
fi
if [[ ! -f $DESIGNATE_IPA_CLIENT_KEYTAB ]]; then
echo ERROR: IPA client keytab file $DESIGNATE_IPA_CLIENT_KEYTAB not found
rc=1
fi
if [[ $rc = 1 ]]; then
die $LINENO "Error with IPA configuration"
fi
iniset $DESIGNATE_CONF backend:ipa ipa_host $DESIGNATE_IPA_HOST
iniset $DESIGNATE_CONF backend:ipa ipa_ca_cert $DESIGNATE_IPA_CA_CERT
iniset $DESIGNATE_CONF backend:ipa ipa_client_keytab $DESIGNATE_IPA_CLIENT_KEYTAB
# devstack tests use dummy NS records, so tell IPA to allow this
iniset $DESIGNATE_CONF backend:ipa ipa_force_ns_use True
}
# init_designate_backend - initialize databases, etc.
function init_designate_backend {
:
}
# start_designate_backend - start any external services
function start_designate_backend {
:
}
# stop_designate_backend - stop any external services
function stop_designate_backend {
:
}
# cleanup_designate_backend - remove transient data and cache
function cleanup_designate_backend {
:
}
# Restore xtrace
$DP_IPA_XTRACE
View File
@@ -53,16 +53,36 @@ function install_designate_backend {
 # configure_designate_backend - make configuration changes, including those to other services
 function configure_designate_backend {
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID type powerdns
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID masters $DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_MDNS
-    iniset $DESIGNATE_CONF pool_target:$DESIGNATE_TARGET_ID options "connection: `database_connection_url designate_pdns`, host: $DESIGNATE_SERVICE_HOST, port: $DESIGNATE_SERVICE_PORT_DNS"
-    # DevStack Managed PDNS NameServer
-    local nameserver_id=`uuidgen`
-    iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID nameservers $nameserver_id
-    iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id host $DESIGNATE_SERVICE_HOST
-    iniset $DESIGNATE_CONF pool_nameserver:$nameserver_id port $DESIGNATE_SERVICE_PORT_DNS
+    # Generate Designate pool.yaml file
+    sudo tee $DESIGNATE_CONF_DIR/pools.yaml > /dev/null <<EOF
+---
+- name: default
+  description: DevStack PowerDNS Pool
+  attributes: {}
+  ns_records:
+    - hostname: $DESIGNATE_DEFAULT_NS_RECORD
+      priority: 1
+  nameservers:
+    - host: $DESIGNATE_SERVICE_HOST
+      port: $DESIGNATE_SERVICE_PORT_DNS
+  targets:
+    - type: powerdns
+      description: PowerDNS Database Cluster
+      masters:
+        - host: $DESIGNATE_SERVICE_HOST
+          port: $DESIGNATE_SERVICE_PORT_MDNS
+      options:
+        host: $DESIGNATE_SERVICE_HOST
+        port: $DESIGNATE_SERVICE_PORT_DNS
+        connection: '`database_connection_url designate_pdns`'
+EOF
+    # Generate PowerDNS pdns.conf file
     sudo tee $POWERDNS_CFG_DIR/pdns.conf > /dev/null <<EOF
 # General Config
 setgid=pdns

@@ -123,9 +143,12 @@ function init_designate_backend {
     # (Re)create designate_pdns database
     recreate_database designate_pdns utf8
+}
+
+# create_designate_pool_configuration_backend - Perform post-pool config tasks
+function create_designate_pool_configuration_backend {
     # Init and migrate designate_pdns database
-    designate-manage powerdns sync $DESIGNATE_TARGET_ID
+    designate-manage powerdns sync $DESIGNATE_POOL_ID
 }
 
 # start_designate_backend - start any external services

@@ -133,6 +156,7 @@ function start_designate_backend {
     start_service pdns
 }
 
 # stop_designate_backend - stop any external services
 function stop_designate_backend {
     stop_service pdns
View File
@@ -49,6 +49,7 @@ function configure_designate {
     # General Configuration
     iniset_rpc_backend designate $DESIGNATE_CONF DEFAULT
+    iniset $DESIGNATE_CONF DEFAULT rpc_response_timeout 5
     iniset $DESIGNATE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $DESIGNATE_CONF DEFAULT verbose True

@@ -76,9 +77,6 @@ function configure_designate {
         iniset $DESIGNATE_CONF pool_manager_cache:sqlalchemy connection `database_connection_url designate_pool_manager`
     fi
-    # Pool Options
-    iniset $DESIGNATE_CONF pool:$DESIGNATE_POOL_ID targets $DESIGNATE_TARGET_ID
     # API Configuration
     sudo cp $DESIGNATE_DIR/etc/designate/api-paste.ini $DESIGNATE_APIPASTE_CONF
     iniset $DESIGNATE_CONF service:api enabled_extensions_v1 $DESIGNATE_ENABLED_EXTENSIONS_V1

@@ -185,13 +183,14 @@ function create_designate_accounts {
     fi
 }
 
-# create_designate_ns_records - Create Pool NS Records
-function create_designate_ns_records {
-    # Allow Backends to install their own NS Records rather than the default
-    if function_exists create_designate_ns_records_backend; then
-        create_designate_ns_records_backend
-    else
-        designate server-create --name $DESIGNATE_DEFAULT_NS_RECORD
+# create_designate_pool_configuration - Create Pool Configuration
+function create_designate_pool_configuration {
+    # Sync Pools Config
+    designate-manage pool update --file $DESIGNATE_CONF_DIR/pools.yaml
+    # Allow Backends to do backend specific tasks
+    if function_exists create_designate_pool_configuration_backend; then
+        create_designate_pool_configuration_backend
     fi
 }

@@ -320,14 +319,14 @@ if is_service_enabled designate; then
         echo_summary "Initializing Designate"
         init_designate
-        echo "Configuring Tempest options for Designate"
+        echo_summary "Configuring Tempest options for Designate"
         configure_designate_tempest
         echo_summary "Starting Designate"
         start_designate
-        echo_summary "Creating pool NS records"
-        create_designate_ns_records
+        echo_summary "Creating Pool Configuration"
+        create_designate_pool_configuration
     fi
 
     if [[ "$1" == "unstack" ]]; then
View File
@@ -2,7 +2,6 @@
 DESIGNATE_BACKEND_DRIVER=${DESIGNATE_BACKEND_DRIVER:=powerdns}
 DESIGNATE_POOL_MANAGER_CACHE_DRIVER=${DESIGNATE_POOL_MANAGER_CACHE_DRIVER:-memcache}
 DESIGNATE_POOL_ID=${DESIGNATE_POOL_ID:-794ccc2c-d751-44fe-b57f-8894c9f5c842}
-DESIGNATE_TARGET_ID=${DESIGNATE_TARGET_ID:-f26e0b32-736f-4f0a-831b-039a415c481e}
 DESIGNATE_DEFAULT_NS_RECORD=${DESIGNATE_DEFAULT_NS_RECORD:-ns1.devstack.org.}
 DESIGNATE_NOTIFICATION_DRIVER=${DESIGNATE_NOTIFICATION_DRIVER:-}
 DESIGNATE_NOTIFICATION_TOPICS=${DESIGNATE_NOTIFICATION_TOPICS:-notifications}
View File
@ -62,3 +62,64 @@ but it will **not** trigger zones to be moved from one pool to another.
As more filters are merged there will be support for dynamic filters. As more filters are merged there will be support for dynamic filters.
Managing Pools
==============
In Mitaka the method of updating pools moved to a ``designate-manage`` command.
A YAML file defines each pool, and is used to load this information into the database.
.. literalinclude:: ../../etc/designate/pools.yaml.sample
:language: yaml
Designate Manage Pools Command Reference
----------------------------------------
Update Pools Information
^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: console
designate-manage pool update [options]
Options:
""""""""
--file Input file (Default: ``/etc/designate/pools.yaml``)
--dry_run Simulate an update. (Default: False)
--delete Delete Pools that are not in the input file (Default: False)
.. warning::
| Running with ``--delete True`` can be **extremely** dangerous.
| It will delete any pools that are not in the supplied YAML file, and any
| zones that are in that Pool.
| Before running with ``--delete True`` we recommend operators run with
| ``--delete True --dry_run True`` to view the outcome.
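
For example, a typical workflow (using the default file location, shown here purely
as an illustration) is to preview the change first, then apply it:

.. code-block:: console

    designate-manage pool update --file /etc/designate/pools.yaml --dry_run True --delete True
    designate-manage pool update --file /etc/designate/pools.yaml --delete True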
Generate YAML File
^^^^^^^^^^^^^^^^^^
.. code-block:: console
designate-manage pool generate_file [options]
Options:
""""""""
--file File to write the YAML output to (Default: ``/etc/designate/pools.yaml``)
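
For example, to dump the pools currently defined in the database to a scratch file
for review (the path here is only illustrative):

.. code-block:: console

    designate-manage pool generate_file --file /tmp/pools.yaml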
Generate YAML File from Liberty Config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: console
designate-manage pool export_from_config [options]
Options:
""""""""
--file File to write the YAML output to (Default: ``/etc/designate/pools.yaml``)
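
For example, to export the pool settings from a Liberty-era configuration into a
file for review before loading it (the path here is only illustrative):

.. code-block:: console

    designate-manage pool export_from_config --file /tmp/pools.yaml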
View File
@ -0,0 +1,94 @@
..
Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
********************************
Upgrading to Mitaka from Liberty
********************************
.. note:: This is a WIP - it will be updated as more items are added to Mitaka
Pools Configuration
===================
We have updated how the config data for pools is stored.
Previously there was a mix of content in the ``designate.conf`` file and in the
designate database.
In Mitaka all of this data has been moved to the database, to avoid confusion and
the complexity that existed in the config file.
.. warning:: This part of the upgrade **requires** downtime.
There are two new commands in the ``designate-manage`` utility to assist with
the migration.
To make the config syntax simpler, there is a new YAML-based config file that is
used to load this information into the database.
.. literalinclude:: ../../../etc/designate/pools.yaml.sample
:language: yaml
We have a command that will allow you to take your current running config, and
export it to the new YAML format.
.. note::
You will need to have at least one instance of ``designate-central`` running, and
the machine ``designate-manage`` is running on will need access to the messaging queue.
.. code-block:: console
designate-manage pool generate_file --file output.yml
This will create a YAML file containing all the currently defined pools and all of their configuration.
We suggest this is then migrated into a config management system, or other document management system.
From this point on, all updates to pools should be done by updating this file and
running:
.. code-block:: console
designate-manage pool update --file /path/to/file.yml
Pools - Step by Step
--------------------
1. Ensure there are not two pools with the same name.
2. Stop all Designate Services.
3. Deploy new Mitaka code
4. Start ``designate-central``
5. Run
.. code-block:: console
designate-manage pool export_from_config --file output.yml
6. Ensure the output file is correct (reference the sample file for each value)
7. Run
.. code-block:: console
designate-manage pool update --file output.yml --dry_run True --delete True
8. Ensure the output of this command is not removing any Pools
9. Run
.. code-block:: console
designate-manage pool update --file output.yml --delete True
10. Start the remaining designate services.
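
As a condensed sketch, the commands run in the steps above (using the same example
file name throughout) are:

.. code-block:: console

    designate-manage pool export_from_config --file output.yml
    designate-manage pool update --file output.yml --dry_run True --delete True
    designate-manage pool update --file output.yml --delete True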
View File
@ -0,0 +1,54 @@
---
- name: default
# The name is immutable. There will be no option to change the name after
# creation and the only way to change it will be to delete it
# (and all zones associated with it) and recreate it.
description: Default PowerDNS Pool
# Attributes are Key:Value pairs that describe the pool. For example, the level
# of service (e.g. service_tier:GOLD), capabilities (e.g. anycast: true) or
# other metadata. Users can use this information to point their zones to the
# correct pool
attributes: {}
# List out the NS records for zones hosted within this pool
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
# List out the nameservers for this pool. These are the actual PowerDNS
# servers. We use these to verify changes have propagated to all nameservers.
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
# List out the targets for this pool. For PowerDNS, this is the database
# (or databases, if you deploy a separate DB for each PowerDNS server)
targets:
- type: powerdns
description: PowerDNS Database Cluster
# List out the designate-mdns servers from which PowerDNS servers should
# request zone transfers (AXFRs) from.
masters:
- host: 192.0.2.1
port: 5354
# PowerDNS Configuration options
options:
host: 192.0.2.1
port: 53
connection: 'mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8'
# Optional list of additional IP/Port's for which designate-mdns will send
# DNS NOTIFY packets to
also_notifies:
- host: 192.0.2.4
port: 53
View File
@ -0,0 +1,66 @@
- name: default-bind
# The name is immutable. There will be no option to change the name after
# creation and the only way to change it will be to delete it
# (and all zones associated with it) and recreate it.
description: Default BIND9 Pool
attributes: {}
# List out the NS records for zones hosted within this pool
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
# List out the nameservers for this pool. These are the actual BIND servers.
# We use these to verify changes have propagated to all nameservers.
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
# List out the targets for this pool. For BIND, most often, there will be one
# entry for each BIND server.
targets:
- type: bind
description: BIND9 Server 1
# List out the designate-mdns servers from which BIND servers should
# request zone transfers (AXFRs) from.
masters:
- host: 192.0.2.1
port: 5354
# BIND Configuration options
options:
host: 192.0.2.2
port: 53
rndc_host: 192.0.2.2
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key
- type: bind
description: BIND9 Server 2
# List out the designate-mdns servers from which BIND servers should
# request zone transfers (AXFRs) from.
masters:
- host: 192.0.2.1
port: 5354
# BIND Configuration options
options:
host: 192.0.2.3
port: 53
rndc_host: 192.0.2.3
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key
# Optional list of additional IP/Port's for which designate-mdns will send
# DNS NOTIFY packets to
# also_notifies:
# - host: 192.0.2.4
# port: 53

View File
@ -0,0 +1,115 @@
---
- name: pool-1
# The name is immutable. There will be no option to change the name after
# creation and the only way to change it will be to delete it
# (and all zones associated with it) and recreate it.
description: Default PowerDNS Pool
attributes:
internal: true
# List out the NS records for zones hosted within this pool
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
# List out the nameservers for this pool. These are the actual PowerDNS
# servers. We use these to verify changes have propagated to all nameservers.
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
# List out the targets for this pool. For PowerDNS, this is the database
# (or databases, if you deploy a separate DB for each PowerDNS server)
targets:
- type: powerdns
description: PowerDNS Database Cluster
# List out the designate-mdns servers from which PowerDNS servers should
# request zone transfers (AXFRs) from.
masters:
- host: 192.0.2.1
port: 5354
# PowerDNS Configuration options
options:
host: 192.0.2.2
port: 53
connection: 'mysql+pymysql://designate:password@127.0.0.1/designate_pdns?charset=utf8'
# Optional list of additional IP/Port's for which designate-mdns will send
# DNS NOTIFY packets to
also_notifies:
- host: 192.0.2.4
port: 53
- name: pool-2
# The name is immutable. There will be no option to change the name after
# creation and the only way to change it will be to delete it
# (and all zones associated with it) and recreate it.
description: Default BIND9 Pool
attributes:
external: true
# List out the NS records for zones hosted within this pool
ns_records:
- hostname: ns1-1.example.org.
priority: 1
- hostname: ns1-2.example.org.
priority: 2
# List out the nameservers for this pool. These are the actual BIND servers.
# We use these to verify changes have propagated to all nameservers.
nameservers:
- host: 192.0.2.2
port: 53
- host: 192.0.2.3
port: 53
# List out the targets for this pool. For BIND, most often, there will be one
# entry for each BIND server.
targets:
- type: bind
description: BIND9 Server 1
# List out the designate-mdns servers from which BIND servers should
# request zone transfers (AXFRs) from.
masters:
- host: 192.0.2.1
port: 5354
# BIND Configuration options
options:
host: 192.0.2.2
port: 53
rndc_host: 192.0.2.2
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key
- type: bind
description: BIND9 Server 2
# List out the designate-mdns servers from which BIND servers should
# request zone transfers (AXFRs) from.
masters:
- host: 192.0.2.1
port: 5354
# BIND Configuration options
options:
host: 192.0.2.3
port: 53
rndc_host: 192.0.2.3
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key
# Optional list of additional IP/Port's for which designate-mdns will send
# DNS NOTIFY packets to
also_notifies:
- host: 192.0.2.4
port: 53
View File
@ -0,0 +1,14 @@
---
features:
- New method of updating Pools.
We have a new sub-command on the ``designate-manage`` utility.
upgrade:
- Upgrade from Liberty to Mitaka *will* require downtime due to migration
of Pool Configuration data from config files to the database.
See the Upgrade Documentation for full details.
- Pool Names are now required to be unique. If there are two pools with the same
name, this will need to be changed before the upgrade is performed.
deprecations:
- Create / Update / Delete API methods on /v2/pools/ are deprecated.
Using these methods **will** cause unforeseen issues with pools. We highly recommend
that the policy file be updated to restrict create / update / delete access for all users