From 0e9294aca19d1909209a8ef730fda60f9914df0f Mon Sep 17 00:00:00 2001
From: dekehn
Date: Thu, 15 Sep 2022 20:28:18 +0000
Subject: [PATCH] Multi-pools implementation

This sets up two pools, with an instance of named running for each. The
second instance uses /etc/bind-2 and /var/cache/named-2 as its
locations. The build-up of /etc/designate/pools.yaml reflects these
changes.

Co-Authored-By: Don Kehn
Co-Authored-By: Omer Schwartz
Change-Id: Icf73e730b31ab26b8be65347239636c0137ab4bd
---
 designate/manage/pool.py                      | 29 +-
 designate/mdns/handler.py                     | 17 +
 .../tests/functional/manage/test_pool.py      | 69 +++-
 devstack/README.rst                           |  7 +
 .../designate_plugins/backend-multipool-bind9 | 387 ++++++++++++++++++
 devstack/plugin.sh                            | 24 ++
 devstack/settings                             |  5 +
 doc/source/user/manage-zones.rst              | 44 ++
 8 files changed, 568 insertions(+), 14 deletions(-)
 create mode 100644 devstack/designate_plugins/backend-multipool-bind9

diff --git a/designate/manage/pool.py b/designate/manage/pool.py
index 34f9f53c5..973a2e548 100644
--- a/designate/manage/pool.py
+++ b/designate/manage/pool.py
@@ -67,22 +67,29 @@ class PoolCommands(base.Commands):

     @base.args('--pool_id', help='ID of the pool to be examined',
                default=CONF['service:central'].default_pool_id)
-    def show_config(self, pool_id):
+    @base.args('--all_pools', help='show the config of all the pools',
+               default=False, required=False, action='store_true')
+    def show_config(self, pool_id, all_pools):
         self._setup()
         self.output_message.append('Pool Configuration:')
         self.output_message.append('-------------------')

         try:
-            if not uuidutils.is_uuid_like(pool_id):
-                self.output_message.append('Not a valid uuid: %s' % pool_id)
-                raise SystemExit(1)
-
-            pool = self.central_api.find_pool(self.context, {'id': pool_id})
+            pools = objects.PoolList()
+            if all_pools:
+                pools.extend(self.central_api.find_pools(self.context))
+            else:
+                if not uuidutils.is_uuid_like(pool_id):
+                    self.output_message.append(
+                        'Not a valid uuid: %s' % pool_id)
+                    raise SystemExit(1)
+                pools.append(
+                    self.central_api.find_pool(self.context, {'id': pool_id}))

             self.output_message.append(
                 yaml.dump(
-                    DesignateAdapter.render('YAML', pool),
+                    DesignateAdapter.render('YAML', pools),
                     default_flow_style=False
                 )
             )
@@ -131,7 +138,13 @@ class PoolCommands(base.Commands):
             self.output_message.append('*********************************')

         for pool_data in pools_data:
-            self._create_or_update_pool(pool_data)
+            try:
+                self._create_or_update_pool(pool_data)
+            except exceptions.DuplicatePool:
+                raise exceptions.DuplicatePool(
+                    f'Pool {pool_data["name"]} already exists with id '
+                    f'{pool_data["id"]}. You cannot change the id of an '
+                    'existing pool.')

         if delete:
             pools = self.central_api.find_pools(self.context)
diff --git a/designate/mdns/handler.py b/designate/mdns/handler.py
index 236040467..85ab7642d 100644
--- a/designate/mdns/handler.py
+++ b/designate/mdns/handler.py
@@ -420,6 +420,23 @@ class RequestHandler:
             # Make the space we reserved for TSIG available for use
             renderer.max_size += TSIG_RRSIZE

+            # The following is a series of checks for oddities in the
+            # DNS server requests.
+            # If the message fudge value is not present, set it to the
+            # default; RFC2845 (Record Format, rrdata.Fudge, and Section
+            # 6.4) recommends a value of 300 seconds (5 mins).
+            if not hasattr(request, 'fudge'):
+                request.fudge = int(300)
+
+            # If the original_id is not present, use the request.id, see
+            # https://github.com/rthalley/dnspython/blob/2.2/dns/message.py#L125
+            if not hasattr(request, 'original_id'):
+                request.original_id = request.id
+
+            # If the other_data is not present, set it to an empty value.
+            if not hasattr(request, 'other_data'):
+                request.other_data = b""
+
         if multi_messages:
             # The first message context will be None then the
             # context for the prev message is used for the next
diff --git a/designate/tests/functional/manage/test_pool.py b/designate/tests/functional/manage/test_pool.py
index 36c033f5d..4a7c8862e 100644
--- a/designate/tests/functional/manage/test_pool.py
+++ b/designate/tests/functional/manage/test_pool.py
@@ -19,6 +19,7 @@ from oslo_log import log as logging
 import oslo_messaging

 from designate.central import service
+from designate import exceptions
 from designate.manage import base
 from designate.manage import pool
 from designate.tests import base_fixtures
@@ -60,7 +61,7 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
         pool_id = self.central_service.find_pool(
             self.admin_context, {'name': 'default'}).id

-        self.command.show_config(pool_id)
+        self.command.show_config(pool_id, all_pools=False)

         self.print_result.assert_called_once()
         self.assertIn('Pool Configuration', self.command.output_message[1])
@@ -75,7 +76,7 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
         pool_id = self.central_service.find_pool(
             self.admin_context, {'name': 'default'}).id

-        self.command.show_config(pool_id)
+        self.command.show_config(pool_id, all_pools=False)

         self.print_result.assert_called_once()
         self.assertIn('Pool Configuration', self.command.output_message[1])
@@ -88,7 +89,8 @@
     def test_show_config_rpc_timeout(self, mock_find_pool):
         self.assertRaises(
             SystemExit,
-            self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a'
+            self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a',
+            all_pools=False
         )

         mock_find_pool.assert_called_once()
@@ -96,7 +98,8 @@
     def test_show_config_pool_not_found(self):
         self.assertRaises(
             SystemExit,
-            self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a'
+            self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a',
+            all_pools=False
         )
         self.assertIn(
             'Pool not found', ''.join(self.command.output_message)
@@ -105,7 +108,7 @@
     def test_show_config_invalid_uuid(self):
         self.assertRaises(
             SystemExit,
-            self.command.show_config, 'None'
+            self.command.show_config, 'None', all_pools=False
         )
         self.print_result.assert_called_once()
         self.assertIn(
@@ -115,11 +118,39 @@
     def test_show_config_empty(self):
         self.assertRaises(
             SystemExit,
-            self.command.show_config, 'a36bb018-9584-420c-acc6-2b5cf89714ad'
+            self.command.show_config, 'a36bb018-9584-420c-acc6-2b5cf89714ad',
+            all_pools=False
         )
         self.print_result.assert_called_once()
         self.assertIn('Pool not found', ''.join(self.command.output_message))

+    def test_show_config_multiple_pools(self):
+        self.command._setup()
+        self.command._create_pool(get_pools(name='multiple-pools.yaml')[0])
+        self.command._create_pool(get_pools(name='multiple-pools.yaml')[1])
+
+        # Calling show_config --all_pools without specifying pool_id
+        self.command.show_config(None, all_pools=True)
+
+        self.print_result.assert_called_once()
+
+        pools = self.central_service.find_pools(self.admin_context, {})
+        self.assertIn('Pool Configuration', self.command.output_message[1])
+        for p in pools:
+            self.assertIn(p.id, ''.join(self.command.output_message))
+            self.assertIn(p.description,
+                          ''.join(self.command.output_message))
+
+        # Calling show_config --all_pools with pool_id
+        # (should ignore the pool_id)
+        self.command.show_config('a36bb018-9584-420c-acc6-2b5cf89714ad',
+                                 all_pools=True)
+        for p in pools:
+            self.assertEqual(2, sum(
+                p.id in s for s in self.command.output_message))
+            self.assertEqual(2, sum(
+                p.description in s for s in self.command.output_message))
+
     def test_update(self):
         self.command.update(
             get_pools_path('pools.yaml'), delete=False, dry_run=False
         )
@@ -170,6 +201,32 @@
         pools = self.central_service.find_pools(self.admin_context, {})
         self.assertEqual(2, len(pools))

+    def test_update_multiple_pools_name(self):
+        self.command.update(
+            get_pools_path('pools.yaml'), delete=False, dry_run=False
+        )
+
+        pools = self.central_service.find_pools(self.admin_context, {})
+        self.assertEqual(1, len(pools))
+
+        # Updating an existing pool (same name) to a different id should fail
+        self.assertRaises(
+            exceptions.DuplicatePool,
+            self.command.update,
+            get_pools_path('sample_output.yaml'), delete=False, dry_run=False
+        )
+
+        pools = self.central_service.find_pools(self.admin_context, {})
+        self.assertEqual(1, len(pools))
+
+        # Updating pools with a different name will only add pools
+        self.command.update(
+            get_pools_path('multiple-pools.yaml'), delete=False, dry_run=False
+        )
+
+        pools = self.central_service.find_pools(self.admin_context, {})
+        self.assertEqual(3, len(pools))
+
     @mock.patch.object(service.Service, 'find_pool',
                        side_effect=oslo_messaging.MessagingTimeout())
     def test_update_rpc_timeout(self, mock_find_pool):
diff --git a/devstack/README.rst b/devstack/README.rst
index 8893c4412..b5d334227 100644
--- a/devstack/README.rst
+++ b/devstack/README.rst
@@ -17,4 +17,11 @@ repository. See contrib/vagrant to create a vagrant VM.
       [[local|localrc]]
       enable_plugin designate https://opendev.org/openstack/designate

+   **Note:** To run with the multipool option, perform the above step
+   and, in addition, set the backend driver and the scheduler
+   filters::
+
+       SCHEDULER_FILTERS=attribute,pool_id_attributes,in_doubt_default_pool
+       DESIGNATE_BACKEND_DRIVER=multipool-bind9
+
 3. run ``stack.sh``
diff --git a/devstack/designate_plugins/backend-multipool-bind9 b/devstack/designate_plugins/backend-multipool-bind9
new file mode 100644
index 000000000..03f3b2373
--- /dev/null
+++ b/devstack/designate_plugins/backend-multipool-bind9
@@ -0,0 +1,387 @@
+#!/usr/bin/env bash
+# Configure the bind9 pool backend for a multi-pool implementation
+
+# Enable with:
+# DESIGNATE_BACKEND_DRIVER=multipool-bind9
+
+# Dependencies:
+# ``functions`` file
+# ``designate`` configuration
+
+# install_designate_backend - install any external requirements
+# configure_designate_backend - make configuration changes, including those to other services
+# init_designate_backend - initialize databases, etc.
+# start_designate_backend - start any external services
+# stop_designate_backend - stop any external services
+# cleanup_designate_backend - remove transient data and cache
+
+# Save trace setting
+DP_BIND9_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+BIND2_DNS_PORT=${DESIGNATE_SERVICE_PORT2_DNS:-1053}
+BIND_SERVICE_NAME=bind9
+BIND2_SERVICE_NAME=bind9-2
+BIND2_DEFAULT_FILE=/etc/default/named-2
+BIND2_SERVICE_FILE=/etc/systemd/system/$BIND2_SERVICE_NAME.service
+BIND_CFG_DIR=/etc/bind
+BIND2_CFG_DIR=/etc/bind-2
+BIND2_TSIGKEY_FILE=$BIND2_CFG_DIR/named.conf.tsigkeys
+BIND_VAR_DIR=/var/cache/bind
+BIND2_VAR_DIR=/var/cache/bind-2
+BIND_RUN_DIR=/run/named
+BIND2_RUN_DIR=/run/named-2
+BIND_CFG_FILE=$BIND_CFG_DIR/named.conf.options
+BIND2_CFG_FILE=$BIND2_CFG_DIR/named.conf.options
+BIND_USER=bind
+BIND_GROUP=bind
+DESIGNATE_SERVICE_PORT_RNDC=${DESIGNATE_SERVICE_PORT_RNDC:-953}
+DESIGNATE_SERVICE_PORT2_RNDC=${DESIGNATE_SERVICE_PORT2_RNDC:-1953}
+
+if is_fedora; then
+    BIND_SERVICE_NAME=named
+    BIND2_SERVICE_NAME=named-2
+    BIND2_SERVICE_FILE=/etc/systemd/system/$BIND2_SERVICE_NAME.service
+    BIND_CFG_DIR=/etc/$BIND_SERVICE_NAME
+    BIND2_CFG_DIR=/etc/$BIND2_SERVICE_NAME
+    BIND_CFG_FILE=/etc/$BIND_SERVICE_NAME.conf
+    BIND2_CFG_FILE=/etc/$BIND2_SERVICE_NAME.conf
+    BIND_VAR_DIR=/var/$BIND_SERVICE_NAME
+    BIND2_VAR_DIR=/var/$BIND2_SERVICE_NAME
+    BIND_USER=named
+    BIND_GROUP=named
+    BIND2_UNIT_CFG_FILE=/etc/sysconfig/$BIND2_SERVICE_NAME
+    BIND2_TSIGKEY_FILE=$BIND2_CFG_DIR/named.conf.tsigkeys
+fi
+
+# Entry Points
+# ------------
+
+# install_designate_backend - install any external requirements
+function install_designate_backend {
+    # The user that designate runs as needs to be a member of the **$BIND_GROUP** group.
+    # The designate bind9 backend needs read/write access to $BIND_VAR_DIR.
+    sudo groupadd -f $BIND_GROUP
+    add_user_to_group $STACK_USER $BIND_GROUP
+    sudo mkdir -p $BIND2_CFG_DIR
+    sudo chown -R $STACK_USER:$BIND_GROUP $BIND2_CFG_DIR
+    sudo mkdir -p $BIND2_RUN_DIR
+    sudo chgrp $BIND_GROUP $BIND2_RUN_DIR
+
+    if is_ubuntu; then
+        install_package bind9
+        # generate a defaults/named2 file
+        sudo tee $BIND2_DEFAULT_FILE >/dev/null <>$BIND2_UNIT_CFG_FILE
+        sudo echo "$NAMEDCONF" >>$BIND2_UNIT_CFG_FILE
+
+        sudo cp -a /lib/systemd/system/named.service $BIND2_SERVICE_FILE
+
+        # set the various declarations
+        iniset -sudo $BIND2_SERVICE_FILE "Service" "Environment=NAMEDCONF" "$BIND2_CFG_FILE"
+        iniset -sudo $BIND2_SERVICE_FILE "Service" "EnvironmentFile" "$BIND2_UNIT_CFG_FILE"
+        iniset -sudo $BIND2_SERVICE_FILE "Service" "Environment=KRB5_KTNAME" "$BIND2_CFG_DIR.keytab"
+        iniset -sudo $BIND2_SERVICE_FILE "Service" "PIDFile" "$BIND2_RUN_DIR/named.pid"
+
+        sudo chmod 750 $BIND2_CFG_DIR
+    fi
+
+    sudo chown -R $BIND_USER:$BIND_GROUP $BIND2_RUN_DIR
+    sudo chown -R $BIND_USER:$BIND_GROUP $BIND_RUN_DIR
+
+    # copy the /var/named default data
+    sudo cp -arf $BIND_VAR_DIR $BIND2_VAR_DIR
+
+    for cfg_dir in "$BIND_CFG_DIR" "$BIND2_CFG_DIR"; do
+        sudo chmod -R g+r $cfg_dir
+    done
+
+    for var_dir in "$BIND_VAR_DIR" "$BIND2_VAR_DIR"; do
+        sudo chmod -R g+rw $var_dir
+    done
+
+    # Customize the bind9 apparmor profile if installed; include the necessary
+    # bits for the second named instance, bind-2 and named-2
+    if [[ -d /etc/apparmor.d ]]; then
+        sudo tee /etc/apparmor.d/local/usr.sbin.named >/dev/null </dev/null </dev/null <$BIND2_TSIGKEY_FILE
+    NAME=$(cat $BIND2_TSIGKEY_FILE | grep 'key' |
+        awk '{split($0, a, " "); print a[2];}' |
+        sed -e 's/^"//' -e 's/"$//' |
+        awk '{split($0, a, "{"); print a[1];}')
+    sudo echo -e "server $HOST_IP {\n keys { $NAME };\n};" >>$BIND2_TSIGKEY_FILE
+
+    # create rndc key and config
+    sudo rndc-confgen -a -p $DESIGNATE_SERVICE_PORT2_RNDC -c $BIND2_CFG_DIR/rndc.key
+    sudo chown $BIND_USER:$BIND_GROUP $BIND2_CFG_DIR/rndc.key
+    sudo chmod g+r $BIND2_CFG_DIR/rndc.key
+    sudo tee $BIND2_CFG_FILE >/dev/null </dev/null </dev/null <
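
A minimal sketch of how the change above might be exercised end to end (not part of the patch). The local.conf settings are taken from the devstack/README.rst hunk in this change; the designate-manage calls use the existing pool update command plus the --all_pools flag added here. It assumes /etc/designate/pools.yaml (the designate-manage default) has been edited to describe both pools:

    [[local|localrc]]
    enable_plugin designate https://opendev.org/openstack/designate
    SCHEDULER_FILTERS=attribute,pool_id_attributes,in_doubt_default_pool
    DESIGNATE_BACKEND_DRIVER=multipool-bind9

    # after stack.sh completes, load the two-pool definition and inspect it
    designate-manage pool update --file /etc/designate/pools.yaml
    designate-manage pool show_config --all_pools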