From 69b3762dda47272513e02340e7942d5a39f825c5 Mon Sep 17 00:00:00 2001
From: Lujin
Date: Thu, 16 Mar 2017 15:42:47 +0900
Subject: [PATCH] Add binding_index to NetworkDhcpAgentBinding

The patch proposes adding a new binding_index to the
NetworkDhcpAgentBinding table, with an additional Unique Constraint
that enforces a single binding_index per network.

1. When a network is triggered to be auto-scheduled to DHCP agents, the
   number of DHCP agents is constrained by dhcp_agents_per_network in
   neutron.conf. This prevents too many DHCP agents from being scheduled
   in the first place.

2. If users manually schedule a network to specific DHCP agents, the
   binding_index keeps incrementing to reflect the number of DHCP agents
   hosting this network.

Co-Authored-By: Oleg Bondarev
Change-Id: I1bc3f8b69c337f7c1cf7375509a0da61def9baf1
Closes-Bug: #1535554
---
 neutron/db/agentschedulers_db.py              |  4 +-
 .../alembic_migrations/versions/EXPAND_HEAD   |  2 +-
 .../c3e9d13c4367_add_binding_index_to_.py     | 68 ++++++++++++++
 .../db/network_dhcp_agent_binding/models.py   | 13 +++
 neutron/objects/network.py                    |  5 +-
 neutron/scheduler/base_resource_filter.py     |  2 +-
 neutron/scheduler/base_scheduler.py           |  4 +-
 neutron/scheduler/dhcp_agent_scheduler.py     | 64 +++++++++++--
 ...test_c3e9d13c4367_add_binding_index_to_.py | 90 +++++++++++++++++++
 .../scheduler/test_dhcp_agent_scheduler.py    | 60 +++++--------
 neutron/tests/unit/objects/test_objects.py    |  2 +-
 .../scheduler/test_dhcp_agent_scheduler.py    | 46 ++++++----
 12 files changed, 291 insertions(+), 69 deletions(-)
 create mode 100644 neutron/db/migration/alembic_migrations/versions/train/expand/c3e9d13c4367_add_binding_index_to_.py
 create mode 100644 neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index_to_.py

diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py
index a767a66a198..3d89f6cb683 100644
--- a/neutron/db/agentschedulers_db.py
+++ b/neutron/db/agentschedulers_db.py
@@ -390,8 +390,8 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
                 if id == dhcp_agent.id:
                     raise das_exc.NetworkHostedByDHCPAgent(
                         network_id=network_id, agent_id=id)
-            network.NetworkDhcpAgentBinding(context, dhcp_agent_id=id,
-                                            network_id=network_id).create()
+            self.network_scheduler.resource_filter.bind(
+                context, [agent_db], network_id, force_scheduling=True)
         dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
         if dhcp_notifier:
             dhcp_notifier.network_added_to_agent(
diff --git a/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD b/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
index ffa2bbaaf66..e7189617eb7 100644
--- a/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
+++ b/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
@@ -1 +1 @@
-c613d0b82681
+c3e9d13c4367
diff --git a/neutron/db/migration/alembic_migrations/versions/train/expand/c3e9d13c4367_add_binding_index_to_.py b/neutron/db/migration/alembic_migrations/versions/train/expand/c3e9d13c4367_add_binding_index_to_.py
new file mode 100644
index 00000000000..2f4a2a57189
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/train/expand/c3e9d13c4367_add_binding_index_to_.py
@@ -0,0 +1,68 @@
+# Copyright 2019 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from collections import defaultdict
+
+from alembic import op
+import sqlalchemy as sa
+
+
+"""Add binding index to NetworkDhcpAgentBindings
+
+Revision ID: c3e9d13c4367
+Revises: c613d0b82681
+Create Date: 2019-08-20 18:42:39.647676
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c3e9d13c4367'
+down_revision = 'c613d0b82681'
+
+
+NETWORK_DHCP_AGENT_BINDING = 'networkdhcpagentbindings'
+
+
+def upgrade():
+    op.add_column(NETWORK_DHCP_AGENT_BINDING,
+                  sa.Column('binding_index', sa.Integer(), nullable=False,
+                            server_default='1', autoincrement=True))
+
+    bindings_table = sa.Table(
+        NETWORK_DHCP_AGENT_BINDING,
+        sa.MetaData(),
+        sa.Column('network_id', sa.String(36)),
+        sa.Column('dhcp_agent_id', sa.String(36)),
+        sa.Column('binding_index', sa.Integer,
+                  nullable=False, server_default='1'),
+    )
+
+    networks_to_bindings = defaultdict(list)
+    session = sa.orm.Session(bind=op.get_bind())
+    with session.begin(subtransactions=True):
+        for result in session.query(bindings_table):
+            networks_to_bindings[result.network_id].append(result)
+
+    for bindings in networks_to_bindings.values():
+        for index, result in enumerate(bindings):
+            session.execute(bindings_table.update().values(
+                binding_index=index + 1).where(
+                bindings_table.c.network_id == result.network_id).where(
+                bindings_table.c.dhcp_agent_id == result.dhcp_agent_id))
+    session.commit()
+
+    op.create_unique_constraint(
+        'uniq_network_dhcp_agent_binding0network_id0binding_index0',
+        NETWORK_DHCP_AGENT_BINDING, ['network_id', 'binding_index'])
diff --git a/neutron/db/network_dhcp_agent_binding/models.py b/neutron/db/network_dhcp_agent_binding/models.py
index f63da01b178..6edca51d438 100644
--- a/neutron/db/network_dhcp_agent_binding/models.py
+++ b/neutron/db/network_dhcp_agent_binding/models.py
@@ -17,9 +17,19 @@ from sqlalchemy import orm
 
 from neutron.db.models import agent as agent_model
 
+LOWEST_BINDING_INDEX = 1
+
+
 class NetworkDhcpAgentBinding(model_base.BASEV2):
     """Represents binding between neutron networks and DHCP agents."""
 
+    __table_args__ = (
+        sa.UniqueConstraint(
+            'network_id', 'binding_index',
+            name='uniq_network_dhcp_agent_binding0network_id0binding_index0'),
+        model_base.BASEV2.__table_args__
+    )
+
     network_id = sa.Column(sa.String(36),
                            sa.ForeignKey("networks.id", ondelete='CASCADE'),
                            primary_key=True)
@@ -28,3 +38,6 @@ class NetworkDhcpAgentBinding(model_base.BASEV2):
                               sa.ForeignKey("agents.id", ondelete='CASCADE'),
                               primary_key=True)
+    binding_index = sa.Column(sa.Integer, nullable=False,
+                              server_default=str(LOWEST_BINDING_INDEX),
+                              autoincrement=True)
diff --git a/neutron/objects/network.py b/neutron/objects/network.py
index 907dc8efb42..22f0bade516 100644
--- a/neutron/objects/network.py
+++ b/neutron/objects/network.py
@@ -72,7 +72,9 @@ class NetworkRBAC(rbac.RBACBaseObject):
 @base.NeutronObjectRegistry.register
 class NetworkDhcpAgentBinding(base.NeutronDbObject):
     # Version 1.0: Initial version
-    VERSION = '1.0'
+    # Version 1.1: Added 'binding_index'
+
+    VERSION = '1.1'
 
     db_model = ndab_models.NetworkDhcpAgentBinding
 
@@ -81,6 +83,7 @@ class NetworkDhcpAgentBinding(base.NeutronDbObject):
     fields = {
         'network_id': common_types.UUIDField(),
         'dhcp_agent_id': common_types.UUIDField(),
+        'binding_index': obj_fields.IntegerField(),
     }
 
     # NOTE(ndahiwade): The join was implemented this way as get_objects
diff --git a/neutron/scheduler/base_resource_filter.py b/neutron/scheduler/base_resource_filter.py
index 17f13bdafe6..d49201c3e43 100644
--- a/neutron/scheduler/base_resource_filter.py
+++ b/neutron/scheduler/base_resource_filter.py
@@ -26,7 +26,7 @@ class BaseResourceFilter(object):
     def filter_agents(self, plugin, context, resource):
         """Return the agents that can host the resource."""
 
-    def bind(self, context, agents, resource_id):
+    def bind(self, context, agents, resource_id, force_scheduling=False):
         """Bind the resource to the agents."""
         with db_api.CONTEXT_WRITER.using(context):
             for agent in agents:
diff --git a/neutron/scheduler/base_scheduler.py b/neutron/scheduler/base_scheduler.py
index 4223deda2e8..6928cb79a2a 100644
--- a/neutron/scheduler/base_scheduler.py
+++ b/neutron/scheduler/base_scheduler.py
@@ -50,7 +50,9 @@ class BaseScheduler(object):
         chosen_agents = self.select(plugin, context, hostable_agents,
                                     hosted_agents, num_agents)
         # bind the resource to the agents
-        self.resource_filter.bind(context, chosen_agents, resource['id'])
+        force_scheduling = bool(resource.get('candidate_hosts'))
+        self.resource_filter.bind(
+            context, chosen_agents, resource['id'], force_scheduling)
         debug_data = ['(%s, %s, %s)' % (agent['agent_type'], agent['host'],
                                         resource['id'])
                       for agent in chosen_agents]
diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py
index 94d92d3a7ea..9f2e9be82e2 100644
--- a/neutron/scheduler/dhcp_agent_scheduler.py
+++ b/neutron/scheduler/dhcp_agent_scheduler.py
@@ -25,6 +25,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 
 from neutron.agent.common import utils as agent_utils
+from neutron.db.network_dhcp_agent_binding import models as ndab_model
 from neutron.objects import agent as agent_obj
 from neutron.objects import network
 from neutron.scheduler import base_resource_filter
@@ -89,12 +90,15 @@ class AutoScheduler(object):
                 if (az_hints and
                         dhcp_agent['availability_zone'] not in az_hints):
                     continue
-                bindings_to_add.append((dhcp_agent, net_id))
+                bindings_to_add.append(
+                    (dhcp_agent, net_id, is_routed_network))
         # do it outside transaction so particular scheduling results don't
         # make other to fail
         debug_data = []
-        for agent, net_id in bindings_to_add:
-            self.resource_filter.bind(context, [agent], net_id)
+        for agent, net_id, is_routed_network in bindings_to_add:
+            self.resource_filter.bind(
+                context, [agent], net_id,
+                force_scheduling=is_routed_network)
             debug_data.append('(%s, %s, %s)' % (agent['agent_type'],
                                                 agent['host'], net_id))
         LOG.debug('Resources bound (agent type, host, resource id): %s',
@@ -174,26 +178,72 @@ class AZAwareWeightScheduler(WeightScheduler):
 
 class DhcpFilter(base_resource_filter.BaseResourceFilter):
 
-    def bind(self, context, agents, network_id):
+    def get_vacant_network_dhcp_agent_binding_index(
+            self, context, network_id, force_scheduling):
+        """Return a vacant binding_index to use.
+
+        Each NetworkDhcpAgentBinding has a binding_index which is unique per
+        network_id, and when creating a single binding we need to find a
+        'vacant' binding_index which isn't yet used - for example if we have
+        bindings with indices 1 and 3, then clearly binding_index == 2 is free.
+
+        :returns: binding_index to use, or -1 if no vacant index exists.
+        """
+        num_agents = agent_obj.Agent.count(
+            context, agent_type=constants.AGENT_TYPE_DHCP)
+        num_agents = min(num_agents, cfg.CONF.dhcp_agents_per_network)
+
+        bindings = network.NetworkDhcpAgentBinding.get_objects(
+            context, network_id=network_id)
+
+        binding_indices = [b.binding_index for b in bindings]
+        all_indices = set(range(ndab_model.LOWEST_BINDING_INDEX,
+                                num_agents + 1))
+        open_slots = sorted(list(all_indices - set(binding_indices)))
+
+        if open_slots:
+            return open_slots[0]
+
+        # Last chance: if this is a manual scheduling, we're gonna allow
+        # creation of a binding_index even if it will exceed
+        # dhcp_agents_per_network.
+        if force_scheduling:
+            return max(all_indices) + 1
+
+        return -1
+
+    def bind(self, context, agents, network_id, force_scheduling=False):
         """Bind the network to the agents."""
         # customize the bind logic
         bound_agents = agents[:]
         for agent in agents:
+            binding_index = self.get_vacant_network_dhcp_agent_binding_index(
+                context, network_id, force_scheduling)
+            if binding_index < ndab_model.LOWEST_BINDING_INDEX:
+                LOG.debug('Unable to find a vacant binding_index for '
+                          'network %(network_id)s and agent %(agent_id)s',
+                          {'network_id': network_id,
+                           'agent_id': agent.id})
+                continue
+
             # saving agent_id to use it after rollback to avoid
             # DetachedInstanceError
             agent_id = agent.id
             try:
                 network.NetworkDhcpAgentBinding(
                     context, dhcp_agent_id=agent_id,
-                    network_id=network_id).create()
+                    network_id=network_id,
+                    binding_index=binding_index).create()
             except exceptions.NeutronDbObjectDuplicateEntry:
                 # it's totally ok, someone just did our job!
                 bound_agents.remove(agent)
                 LOG.info('Agent %s already present', agent_id)
             LOG.debug('Network %(network_id)s is scheduled to be '
-                      'hosted by DHCP agent %(agent_id)s',
+                      'hosted by DHCP agent %(agent_id)s with binding_index '
+                      '%(binding_index)d',
                       {'network_id': network_id,
-                       'agent_id': agent_id})
+                       'agent_id': agent_id,
+                       'binding_index': binding_index})
         super(DhcpFilter, self).bind(context, bound_agents, network_id)
 
     def filter_agents(self, plugin, context, network):
diff --git a/neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index_to_.py b/neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index_to_.py
new file mode 100644
index 00000000000..8668f3b33b5
--- /dev/null
+++ b/neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index_to_.py
@@ -0,0 +1,90 @@
+# Copyright 2017 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# + +import collections + +from oslo_db.sqlalchemy import utils as db_utils +from oslo_utils import uuidutils + +from neutron.tests.functional.db import test_migrations + + +class NetworkDhcpAgentBindingMigrationMixin(object): + """Validates binding_index for NetworkDhcpAgentBinding migration.""" + + def _create_so(self, o_type, values): + """create standard attr object.""" + stan = db_utils.get_table(self.engine, 'standardattributes') + # find next available id taking into account existing records + rec_ids = [r.id for r in self.engine.execute(stan.select()).fetchall()] + next_id = max([0] + rec_ids) + 1 + self.engine.execute(stan.insert().values({'id': next_id, + 'resource_type': o_type})) + values['standard_attr_id'] = next_id + return self._create_rec(o_type, values) + + def _create_rec(self, o_type, values): + otable = db_utils.get_table(self.engine, o_type) + self.engine.execute(otable.insert().values(values)) + + def _make_network_agents_and_bindings(self, network_id): + self._create_so('networks', {'id': network_id}) + # each network gets a couple of agents + for _ in range(2): + agent_id = uuidutils.generate_uuid() + timestamp = '2000-04-06T14:34:23' + self._create_rec('agents', {'id': agent_id, + 'topic': 'x', + 'agent_type': 'L3', + 'binary': 'x', + 'host': agent_id, + 'created_at': timestamp, + 'started_at': timestamp, + 'heartbeat_timestamp': timestamp, + 'configurations': ''}) + self._create_rec('networkdhcpagentbindings', + {'network_id': network_id, + 'dhcp_agent_id': agent_id}) + + def _create_networks(self, engine): + for nid in [uuidutils.generate_uuid() for i in range(10)]: + self._make_network_agents_and_bindings(nid) + + def _pre_upgrade_c3e9d13c4367(self, engine): + self._create_networks(engine) + return True # return True so check function is invoked after migrate + + def _check_c3e9d13c4367(self, engine, data): + bindings_table = db_utils.get_table(engine, 'networkdhcpagentbindings') + rows = engine.execute(bindings_table.select()).fetchall() + + networks_to_bindings = collections.defaultdict(list) + for network_id, agent_id, binding_index in rows: + networks_to_bindings[network_id].append(binding_index) + + for binding_indices in networks_to_bindings.values(): + self.assertEqual(list(range(1, 3)), sorted(binding_indices)) + + +class TestNetworkDhcpAgentBindingMigrationMysql( + NetworkDhcpAgentBindingMigrationMixin, + test_migrations.TestWalkMigrationsMysql): + pass + + +class TestNetworkDhcpAgentBindingMigrationPsql( + NetworkDhcpAgentBindingMigrationMixin, + test_migrations.TestWalkMigrationsPsql): + pass diff --git a/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py index 368b4ba77cf..550350a7659 100644 --- a/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py @@ -283,15 +283,12 @@ class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, 'network-3'], 'agent-1': ['network-0', 'network-1', - 'network-2', 'network-3'], 'agent-2': ['network-1', 'network-2', 'network-3'], 'agent-3': ['network-0', - 'network-1', - 'network-2', - 'network-3']})), + 'network-2']})), ('No agents scheduled if networks already hosted and' ' max_agents_per_network reached', @@ -340,25 +337,18 @@ class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, no_network_with_az_match=True)), ] - def _strip_host_index(self, name): - """Strips the host index. - - Eg. if name = '2-agent-3', then 'agent-3' is returned. 
- """ - return name[name.find('-') + 1:] - def _extract_index(self, name): """Extracts the index number and returns. - Eg. if name = '2-agent-3', then 3 is returned + Eg. if name = 'agent-3', then 3 is returned """ return int(name.split('-')[-1]) def get_subnets(self, context, fields=None): subnets = [] for net in self._networks: - enable_dhcp = (self._strip_host_index(net['name']) not in - self.networks_with_dhcp_disabled) + enable_dhcp = (net['name'] not in + self.networks_with_dhcp_disabled) subnets.append({'network_id': net.id, 'enable_dhcp': enable_dhcp, 'segment_id': None}) @@ -375,15 +365,13 @@ class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, self.ctx, dhcp_agent_id=agent_id) return [item.network_id for item in binding_objs] - def _test_auto_schedule(self, host_index): + def test_auto_schedule(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.ChanceScheduler() self.ctx = context.get_admin_context() - msg = 'host_index = %s' % host_index # create dhcp agents - hosts = ['%s-agent-%s' % (host_index, i) - for i in range(self.agent_count)] + hosts = ['agent-%s' % i for i in range(self.agent_count)] dhcp_agents = self._create_and_set_agents_down(hosts) # create networks @@ -391,7 +379,7 @@ class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, network.Network( self.ctx, id=uuidutils.generate_uuid(), - name='%s-network-%s' % (host_index, i)) + name='network-%s' % i) for i in range(self.network_count) ] for i in range(len(self._networks)): @@ -407,26 +395,22 @@ class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, scheduler.resource_filter.bind(self.ctx, [dhcp_agents[agent_index]], network_ids[net_index]) + for host_index in range(self.agent_count): + msg = 'host_index = %s' % host_index + retval = scheduler.auto_schedule_networks(self, self.ctx, + hosts[host_index]) + self.assertEqual(self.expected_auto_schedule_return_value, retval, + message=msg) - retval = scheduler.auto_schedule_networks(self, self.ctx, - hosts[host_index]) - self.assertEqual(self.expected_auto_schedule_return_value, retval, - message=msg) - - agent_id = dhcp_agents[host_index].id - hosted_networks = self._get_hosted_networks_on_dhcp_agent(agent_id) - hosted_net_names = [ - self._strip_host_index(net['name']) - for net in network.Network.get_objects( - self.ctx, id=hosted_networks) - ] - expected_hosted_networks = self.expected_hosted_networks['agent-%s' % - host_index] - self.assertItemsEqual(hosted_net_names, expected_hosted_networks, msg) - - def test_auto_schedule(self): - for i in range(self.agent_count): - self._test_auto_schedule(i) + agent_id = dhcp_agents[host_index].id + hosted_net_ids = self._get_hosted_networks_on_dhcp_agent(agent_id) + hosted_net_names = [ + net['name'] for net in + network.Network.get_objects(self.ctx, id=hosted_net_ids)] + expected_hosted_networks = self.expected_hosted_networks[ + 'agent-%s' % host_index] + self.assertItemsEqual( + hosted_net_names, expected_hosted_networks, msg) class TestAZAwareWeightScheduler(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, diff --git a/neutron/tests/unit/objects/test_objects.py b/neutron/tests/unit/objects/test_objects.py index f7e161ae3e0..a72590353b4 100644 --- a/neutron/tests/unit/objects/test_objects.py +++ b/neutron/tests/unit/objects/test_objects.py @@ -58,7 +58,7 @@ object_data = { 'MeteringLabelRule': '1.0-b5c5717e7bab8d1af1623156012a5842', 'Log': '1.0-6391351c0f34ed34375a19202f361d24', 'Network': '1.0-f2f6308f79731a767b92b26b0f4f3849', - 
'NetworkDhcpAgentBinding': '1.0-6eeceb5fb4335cd65a305016deb41c68', + 'NetworkDhcpAgentBinding': '1.1-d9443c88809ffa4c45a0a5a48134b54a', 'NetworkDNSDomain': '1.0-420db7910294608534c1e2e30d6d8319', 'NetworkPortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3', 'NetworkRBAC': '1.2-192845c5ed0718e1c54fac36936fcd7d', diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py index a67b36f8dfd..1688e5a6be0 100644 --- a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py @@ -71,6 +71,7 @@ class TestDhcpSchedulerBaseTestCase(testlib_api.SqlTestCase): network_obj.Network(self.ctx, id=network_id).create() def _test_schedule_bind_network(self, agents, network_id): + cfg.CONF.set_override('dhcp_agents_per_network', len(agents)) scheduler = dhcp_agent_scheduler.ChanceScheduler() scheduler.resource_filter.bind(self.ctx, agents, network_id) binding_objs = network_obj.NetworkDhcpAgentBinding.get_objects( @@ -93,7 +94,7 @@ class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase): def test_schedule_bind_network_multi_agent_fail_one(self): agents = self._create_and_set_agents_down(['host-a']) self._test_schedule_bind_network(agents, self.network_id) - with mock.patch.object(dhcp_agent_scheduler.LOG, 'info') as fake_log: + with mock.patch.object(dhcp_agent_scheduler.LOG, 'debug') as fake_log: self._test_schedule_bind_network(agents, self.network_id) self.assertEqual(1, fake_log.call_count) @@ -138,8 +139,7 @@ class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase): return network_obj.NetworkDhcpAgentBinding.get_objects( self.ctx, dhcp_agent_id=agent[0].id) - def _test_auto_reschedule_vs_network_on_dead_agent(self, - active_hosts_only): + def test_auto_reschedule_vs_network_on_dead_agent(self): dead_agent, alive_agent, scheduler = ( self._test_get_agents_and_scheduler_for_dead_agent()) plugin = mock.Mock() @@ -147,10 +147,7 @@ class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase): "enable_dhcp": True, "segment_id": None}] plugin.get_network.return_value = self.network - if active_hosts_only: - plugin.get_dhcp_agents_hosting_networks.return_value = [] - else: - plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent + plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent network_assigned_to_dead_agent = ( self._get_agent_binding_from_db(dead_agent)) self.assertEqual(1, len(network_assigned_to_dead_agent)) @@ -162,16 +159,9 @@ class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase): network_assigned_to_alive_agent = ( self._get_agent_binding_from_db(alive_agent)) self.assertEqual(1, len(network_assigned_to_dead_agent)) - if active_hosts_only: - self.assertEqual(1, len(network_assigned_to_alive_agent)) - else: - self.assertEqual(0, len(network_assigned_to_alive_agent)) - - def test_network_auto_rescheduled_when_db_returns_active_hosts(self): - self._test_auto_reschedule_vs_network_on_dead_agent(True) - - def test_network_not_auto_rescheduled_when_db_returns_all_hosts(self): - self._test_auto_reschedule_vs_network_on_dead_agent(False) + # network won't be scheduled to new agent unless removed from + # dead agent + self.assertEqual(0, len(network_assigned_to_alive_agent)) class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase): @@ -406,6 +396,27 @@ class TestAutoScheduleSegments(test_plugin.Ml2PluginV2TestCase, class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin): + def 
test_auto_schedule_network_excess_agents(self):
+        plugin = mock.MagicMock()
+        plugin.get_subnets.return_value = (
+            [{"network_id": self.network_id, "enable_dhcp": True}])
+        plugin.get_network.return_value = {'availability_zone_hints': ['nova']}
+        scheduler = dhcp_agent_scheduler.ChanceScheduler()
+        dhcpfilter = 'neutron.scheduler.dhcp_agent_scheduler.DhcpFilter'
+        self._create_and_set_agents_down(['host-a', 'host-b'])
+        expected_hosted_agents = 1
+        binding_index = 1
+        scheduler.auto_schedule_networks(plugin, self.ctx, 'host-a')
+        with mock.patch(
+                dhcpfilter +
+                '.get_vacant_network_dhcp_agent_binding_index') as ndab:
+            ndab.return_value = binding_index
+            scheduler.auto_schedule_networks(plugin, self.ctx, 'host-b')
+        self.assertTrue(ndab.called)
+        num_hosted_agents = network_obj.NetworkDhcpAgentBinding.count(
+            self.ctx, network_id=self.network_id)
+        self.assertEqual(expected_hosted_agents, num_hosted_agents)
+
     def test_reschedule_network_from_down_agent(self):
         net_id = uuidutils.generate_uuid()
         agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
@@ -722,6 +733,7 @@ class DHCPAgentWeightSchedulerTestCase(test_plugin.Ml2PluginV2TestCase):
 class TestDhcpSchedulerFilter(TestDhcpSchedulerBaseTestCase,
                               sched_db.DhcpAgentSchedulerDbMixin):
     def _test_get_dhcp_agents_hosting_networks(self, expected, **kwargs):
+        cfg.CONF.set_override('dhcp_agents_per_network', 4)
         agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
         agents += self._create_and_set_agents_down(['host-c', 'host-d'], 1,
                                                    admin_state_up=False)
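
For illustration only: the vacant binding_index selection that the patch adds in
DhcpFilter.get_vacant_network_dhcp_agent_binding_index boils down to the
standalone sketch below. The helper name pick_vacant_binding_index and its
arguments are invented for this example and are not part of the patch or of
Neutron; the sketch simply mirrors the "lowest free index, with an overflow
index for forced/manual scheduling" behaviour described above.

# Standalone sketch (not Neutron code) of the vacant binding_index selection.
LOWEST_BINDING_INDEX = 1


def pick_vacant_binding_index(existing_indices, dhcp_agents_per_network,
                              force_scheduling=False):
    """Return the lowest free binding_index, or -1 if none is available."""
    all_indices = set(range(LOWEST_BINDING_INDEX,
                            dhcp_agents_per_network + 1))
    open_slots = sorted(all_indices - set(existing_indices))
    if open_slots:
        return open_slots[0]
    # Manual (forced) scheduling may exceed the configured cap, so hand out
    # the next index past it.
    if force_scheduling:
        return max(all_indices) + 1
    return -1


# Indices 1 and 3 are taken and the cap is 3, so index 2 is returned.
assert pick_vacant_binding_index([1, 3], 3) == 2
# The cap is exhausted; only a forced (manual) request gets an extra index.
assert pick_vacant_binding_index([1, 2], 2) == -1
assert pick_vacant_binding_index([1, 2], 2, force_scheduling=True) == 3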