Make DHCP agent scheduler physical_network aware

Currently the neutron DHCP scheduler assumes that every server running
a dhcp-agent can reach every network. As a result, the scheduler can
wrongly schedule a vlan network on a dhcp-agent that has no reachability
to the network it is supposed to serve (e.g. the network's
physical_network is not supported on that host).

Such a situation can typically happen if:

* physical_networks are dedicated to a specific service and we don't
  want to mix dnsmasqs related to different services (for
  isolation/configuration purposes),
* physical_networks are dedicated to a specific rack (see the example
  diagram at http://i.imgur.com/NTBxRxk.png); the rack interconnection
  can be handled outside of neutron, or inside it once routed networks
  are supported.

This change makes the DHCP scheduler network reachability aware by
querying the plugin's filter_hosts_with_network_access method.
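
For illustration, the scheduler-side filtering boils down to keeping only
the candidate dhcp-agents whose host is returned by the plugin. A condensed
sketch of the DhcpFilter change below (the helper name is illustrative, not
part of the change):

    def _agents_with_network_access(plugin, context, network, agents):
        # Ask the plugin which of the candidate hosts can reach the
        # network, then keep only the dhcp-agents on those hosts.
        hosts = plugin.filter_hosts_with_network_access(
            context, network['id'], [agent['host'] for agent in agents])
        return [agent for agent in agents if agent['host'] in hosts]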

This change provides an implementation for the ML2 plugin that delegates
host filtering to its mechanism drivers: it aggregates the filtering done
by each mechanism, or disables filtering entirely if any mechanism does
not overload the default mechanism implementation[1] (for backward
compatibility with out-of-tree mechanisms). Every in-tree mechanism
overloads the default implementation: the OVS/LB/SRIOV mechanisms use
their agent mappings to filter hosts, while the l2pop/test/logger ones
return an empty set (they provide no "L2 capability").

This change also provides a default implementation[2] for other plugins
that filters nothing (for backward compatibility); plugins can overload
it to provide their own filtering logic.
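
As a purely illustrative sketch (the plugin class and its reachability
helper are hypothetical, not part of this change), a plugin with its own
topology knowledge could overload it along these lines:

    class TopologyAwarePlugin(agents_db.AgentDbMixin):

        def filter_hosts_with_network_access(self, context, network_id,
                                             candidate_hosts):
            # Hypothetical helper: the plugin's own view of which hosts
            # can reach this network.
            reachable = self._hosts_reaching_network(context, network_id)
            return set(candidate_hosts) & reachable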

Such host filtering has a limitation: if a dhcp-agent runs on a host
handled by multiple l2 mechanisms, one mechanism may claim network
reachability even though it is not the one handling the dhcp-agent
ports. In that case the host can reach the network, but the dhcp-agent
ports cannot. This limitation will be addressed in a follow-up change
using host+vif_type filtering.

[1] neutron.plugin.ml2.driver_api.MechanismDriver.\
      filter_hosts_with_network_access
[2] neutron.db.agents_db.AgentDbMixin.filter_hosts_with_network_access

Closes-Bug: #1478100
Co-Authored-By: Cedric Brandily <zzelle@gmail.com>
Change-Id: I0501d47404c8adbec4bccb84ac5980e045da68b3
Authored by Assaf Muller on 2015-07-23 18:14:35 -04:00; committed by Cedric Brandily
parent 5b6dab1ac9
commit 0267c6a5ac
16 changed files with 285 additions and 27 deletions


@@ -287,6 +287,18 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
        agent = self._get_agent(context, id)
        return self._make_agent_dict(agent, fields)

    def filter_hosts_with_network_access(
            self, context, network_id, candidate_hosts):
        """Filter hosts with access to network_id.

        This method returns a subset of candidate_hosts with the ones with
        network access to network_id.

        A plugin can overload this method to define its own host network_id
        based filter.
        """
        return candidate_hosts

    def _log_heartbeat(self, state, agent_db, agent_conf):
        if agent_conf.get('log_agent_heartbeats'):
            delta = timeutils.utcnow() - agent_db.heartbeat_timestamp


@@ -906,6 +906,25 @@ class MechanismDriver(object):
        """
        return ()

    @classmethod
    def is_host_filtering_supported(cls):
        return (cls.filter_hosts_with_segment_access !=
                MechanismDriver.filter_hosts_with_segment_access)

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        """Filter hosts with access to at least one segment.

        :returns: a set with a subset of candidate_hosts.

        A driver can overload this method to return a subset of candidate_hosts
        with the ones with access to at least one segment.

        Default implementation returns all hosts to disable filtering
        (backward compatibility).
        """
        return candidate_hosts


@six.add_metaclass(abc.ABCMeta)
class ExtensionDriver(object):


@@ -52,6 +52,12 @@ class L2populationMechanismDriver(api.MechanismDriver):
        self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
                                                        fdb_entries)

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        # NOTE(cbrandily): let other mechanisms (openvswitch, linuxbridge, ...)
        # perform the filtering
        return set()

    def _get_diff_ips(self, orig, port):
        orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
        port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])


@@ -159,6 +159,17 @@ class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase):
        """Is the physical network part of the given mappings?"""
        return physnet in mappings

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        hosts = set()
        filters = {'host': candidate_hosts, 'agent_type': [self.agent_type]}
        for agent in agent_getter(context, filters=filters):
            if any(self.check_segment_for_agent(s, agent) for s in segments):
                hosts.add(agent['host'])
        return hosts

    def check_segment_for_agent(self, segment, agent):
        """Check if segment can be bound for agent.


@@ -143,6 +143,17 @@ class SriovNicSwitchMechanismDriver(api.MechanismDriver):
            return True
        return False

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        hosts = set()
        filters = {'host': candidate_hosts, 'agent_type': [self.agent_type]}
        for agent in agent_getter(context, filters=filters):
            if any(self.check_segment(s, agent) for s in segments):
                hosts.add(agent['host'])
        return hosts

    def check_segment(self, segment, agent=None):
        """Check if segment can be bound.


@@ -311,6 +311,10 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
            name_order=True)
        LOG.info(_LI("Loaded mechanism driver names: %s"), self.names())
        self._register_mechanisms()
        self.host_filtering_supported = self.is_host_filtering_supported()
        if not self.host_filtering_supported:
            LOG.warning(_LW("Host filtering is disabled because at least one "
                            "mechanism doesn't support it."))

    def _register_mechanisms(self):
        """Register all mechanism drivers.

@@ -752,6 +756,33 @@
                     {'port': context.current['id'],
                      'host': binding.host})

    def is_host_filtering_supported(self):
        return all(driver.obj.is_host_filtering_supported()
                   for driver in self.ordered_mech_drivers)

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        """Filter hosts with access to at least one segment.

        :returns: a subset of candidate_hosts.

        This method returns all hosts from candidate_hosts with access to a
        segment according to at least one driver.
        """
        candidate_hosts = set(candidate_hosts)
        if not self.host_filtering_supported:
            return candidate_hosts

        hosts_with_access = set()
        for driver in self.ordered_mech_drivers:
            hosts = driver.obj.filter_hosts_with_segment_access(
                context, segments, candidate_hosts, agent_getter)
            hosts_with_access |= hosts
            candidate_hosts -= hosts
            if not candidate_hosts:
                break
        return hosts_with_access

    def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
        # To prevent a possible binding loop, don't try to bind with
        # this driver if the same driver has already bound at a higher


@@ -1617,3 +1617,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
    def get_workers(self):
        return self.mechanism_manager.get_workers()

    def filter_hosts_with_network_access(
            self, context, network_id, candidate_hosts):
        segments = db.get_network_segments(context.session, network_id)
        return self.mechanism_manager.filter_hosts_with_segment_access(
            context, segments, candidate_hosts, self.get_agents)


@@ -235,6 +235,12 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter):
                context, True, agent)
        ]
        hostable_dhcp_hosts = plugin.filter_hosts_with_network_access(
            context, network['id'],
            [agent['host'] for agent in hostable_dhcp_agents])
        hostable_dhcp_agents = [agent for agent in hostable_dhcp_agents
                                if agent['host'] in hostable_dhcp_hosts]

        if not hostable_dhcp_agents:
            return {'n_agents': 0, 'hostable_agents': [],
                    'hosted_agents': hosted_agents}


@@ -158,7 +158,7 @@ def set_agent_admin_state(agent_id, admin_state_up=False):

def _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
                        tunneling_ip='20.0.0.1', interface_mappings=None,
                        l2pop_network_types=None):
                        bridge_mappings=None, l2pop_network_types=None):
    agent = {
        'binary': binary,
        'host': host,
@@ -169,6 +169,8 @@ def _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
        'tunnel_type': [],
        'start_flag': True}
    if bridge_mappings is not None:
        agent['configurations']['bridge_mappings'] = bridge_mappings
    if interface_mappings is not None:
        agent['configurations']['interface_mappings'] = interface_mappings
    if l2pop_network_types is not None:
@@ -179,11 +181,11 @@ def _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,

def register_ovs_agent(host=HOST, agent_type=constants.AGENT_TYPE_OVS,
                       binary='neutron-openvswitch-agent',
                       tunnel_types=['vxlan'], tunneling_ip='20.0.0.1',
                       interface_mappings=None,
                       interface_mappings=None, bridge_mappings=None,
                       l2pop_network_types=None):
    agent = _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
                                tunneling_ip, interface_mappings,
                                l2pop_network_types)
                                bridge_mappings, l2pop_network_types)
    return _register_agent(agent)


@@ -19,11 +19,15 @@ from operator import attrgetter

import six
import testscenarios

from neutron.api.v2 import attributes
from neutron import context
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import common_db_mixin
from neutron.extensions import providernet
from neutron.scheduler import dhcp_agent_scheduler
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron.tests.unit.scheduler import (test_dhcp_agent_scheduler as
                                          test_dhcp_sch)

@@ -560,3 +564,51 @@ class TestAZAwareWeightScheduler(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
            self.assertEqual(self.scheduled_agent_count[i] +
                             scheduled_azs.get('az%s' % i, 0),
                             hosted_azs.get('az%s' % i, 0))


class TestDHCPSchedulerWithNetworkAccessibility(
        test_plugin.Ml2PluginV2TestCase):

    _mechanism_drivers = ['openvswitch']

    def test_dhcp_scheduler_filters_hosts_without_network_access(self):
        dhcp_agent1 = helpers.register_dhcp_agent(host='host1')
        dhcp_agent2 = helpers.register_dhcp_agent(host='host2')
        dhcp_agent3 = helpers.register_dhcp_agent(host='host3')
        dhcp_agents = [dhcp_agent1, dhcp_agent2, dhcp_agent3]

        helpers.register_ovs_agent(
            host='host1', bridge_mappings={'physnet1': 'br-eth-1'})
        helpers.register_ovs_agent(
            host='host2', bridge_mappings={'physnet2': 'br-eth-1'})
        helpers.register_ovs_agent(
            host='host3', bridge_mappings={'physnet2': 'br-eth-1'})

        admin_context = context.get_admin_context()
        net = self.driver.create_network(
            admin_context,
            {'network': {'name': 'net1',
                         providernet.NETWORK_TYPE: 'vlan',
                         providernet.PHYSICAL_NETWORK: 'physnet1',
                         providernet.SEGMENTATION_ID: 1,
                         'tenant_id': 'tenant_one',
                         'admin_state_up': True,
                         'shared': True}})
        self.driver.create_subnet(
            admin_context,
            {'subnet':
                {'name': 'name',
                 'ip_version': 4,
                 'network_id': net['id'],
                 'cidr': '10.0.0.0/24',
                 'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
                 'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
                 'host_routes': attributes.ATTR_NOT_SPECIFIED,
                 'tenant_id': 'tenant_one',
                 'enable_dhcp': True}})

        self.plugin.schedule_network(admin_context, net)
        dhcp_agents = self.driver.get_dhcp_agents_hosting_networks(
            admin_context, [net['id']])
        self.assertEqual(1, len(dhcp_agents))
        self.assertEqual('host1', dhcp_agents[0]['host'])


@@ -236,6 +236,9 @@ class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin,
        mock.patch('neutron.common.rpc.get_client').start()
        super(OvsAgentSchedulerTestCaseBase, self).setUp(
            self.plugin_str, service_plugins=service_plugins)
        mock.patch.object(
            self.plugin, 'filter_hosts_with_network_access',
            side_effect=lambda context, network_id, hosts: hosts).start()
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()

@@ -1320,6 +1323,9 @@ class OvsDhcpAgentNotifierTestCase(test_agent.AgentDBTestMixIn,
    def setUp(self):
        self.useFixture(tools.AttributeMapMemento())
        super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str)
        mock.patch.object(
            self.plugin, 'filter_hosts_with_network_access',
            side_effect=lambda context, network_id, hosts: hosts).start()
        self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.dhcp_notifier_cast = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'


@@ -130,3 +130,10 @@ class LoggerMechanismDriver(api.MechanismDriver):
    def bind_port(self, context):
        self._log_port_call("bind_port", context)

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        LOG.info(_("filter_hosts_with_segment_access called with segments "
                   "%(segments)s, candidate hosts %(hosts)s "),
                 {'segments': segments, 'hosts': candidate_hosts})
        return set()


@@ -240,3 +240,7 @@ class TestMechanismDriver(api.MechanismDriver):
                                portbindings.VIF_TYPE_BINDING_FAILED,
                                {portbindings.CAP_PORT_FILTER: False})
            self.bound_ports.add((context.current['id'], host))

    def filter_hosts_with_segment_access(
            self, context, segments, candidate_hosts, agent_getter):
        return set()


@@ -56,6 +56,7 @@ from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.services.qos import qos_consts
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair

@@ -1563,6 +1564,65 @@ class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase,
            plugin=PLUGIN_NAME)


class TestMl2HostsNetworkAccess(Ml2PluginV2TestCase):
    _mechanism_drivers = ['openvswitch', 'logger']

    def setUp(self):
        super(TestMl2HostsNetworkAccess, self).setUp()
        helpers.register_ovs_agent(
            host='host1', bridge_mappings={'physnet1': 'br-eth-1'})
        helpers.register_ovs_agent(
            host='host2', bridge_mappings={'physnet2': 'br-eth-2'})
        helpers.register_ovs_agent(
            host='host3', bridge_mappings={'physnet3': 'br-eth-3'})
        self.dhcp_agent1 = helpers.register_dhcp_agent(
            host='host1')
        self.dhcp_agent2 = helpers.register_dhcp_agent(
            host='host2')
        self.dhcp_agent3 = helpers.register_dhcp_agent(
            host='host3')
        self.dhcp_hosts = {'host1', 'host2', 'host3'}

    def test_filter_hosts_with_network_access(self):
        net = self.driver.create_network(
            self.context,
            {'network': {'name': 'net1',
                         pnet.NETWORK_TYPE: 'vlan',
                         pnet.PHYSICAL_NETWORK: 'physnet1',
                         pnet.SEGMENTATION_ID: 1,
                         'tenant_id': 'tenant_one',
                         'admin_state_up': True,
                         'shared': True}})
        observeds = self.driver.filter_hosts_with_network_access(
            self.context, net['id'], self.dhcp_hosts)
        self.assertEqual({self.dhcp_agent1.host}, observeds)

    def test_filter_hosts_with_network_access_multi_segments(self):
        net = self.driver.create_network(
            self.context,
            {'network': {'name': 'net1',
                         mpnet.SEGMENTS: [
                             {pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1},
                             {pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet2',
                              pnet.SEGMENTATION_ID: 2}],
                         'tenant_id': 'tenant_one',
                         'admin_state_up': True,
                         'shared': True}})
        expecteds = {self.dhcp_agent1.host, self.dhcp_agent2.host}
        observeds = self.driver.filter_hosts_with_network_access(
            self.context, net['id'], self.dhcp_hosts)
        self.assertEqual(expecteds, observeds)

    def test_filter_hosts_with_network_access_not_supported(self):
        self.driver.mechanism_manager.host_filtering_supported = False
        observeds = self.driver.filter_hosts_with_network_access(
            self.context, 'fake_id', self.dhcp_hosts)
        self.assertEqual(self.dhcp_hosts, observeds)


class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt):

    def setUp(self, plugin=None):


@@ -26,6 +26,7 @@ from neutron.db import models_v2
from neutron.extensions import dhcpagentscheduler
from neutron.scheduler import dhcp_agent_scheduler
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron.tests.unit import testlib_api
# Required to generate tests from scenarios. Not compatible with nose.
@@ -108,6 +109,8 @@ class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase):
        plugin.get_subnets.return_value = [{"network_id": self.network_id,
                                            "enable_dhcp": True}]
        plugin.get_agents_db.return_value = dead_agent + alive_agent
        plugin.filter_hosts_with_network_access.side_effect = (
            lambda context, network_id, hosts: hosts)
        if active_hosts_only:
            plugin.get_dhcp_agents_hosting_networks.return_value = []
        self.assertTrue(
@@ -375,70 +378,81 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase,
self.assertFalse(rn.called)
class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase):
class DHCPAgentWeightSchedulerTestCase(test_plugin.Ml2PluginV2TestCase):
"""Unit test scenarios for WeightScheduler.schedule."""
def setUp(self):
super(DHCPAgentWeightSchedulerTestCase, self).setUp()
DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
self.setup_coreplugin(DB_PLUGIN_KLASS)
cfg.CONF.set_override("network_scheduler_driver",
weight_scheduler = (
'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
cfg.CONF.set_override('network_scheduler_driver', weight_scheduler)
self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.'
'Ml2Plugin')
self.assertEqual(1, self.patched_dhcp_periodic.call_count)
mock.patch.object(
self.plugin, 'filter_hosts_with_network_access',
side_effect=lambda context, network_id, hosts: hosts).start()
self.plugin.network_scheduler = importutils.import_object(
'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler'
)
cfg.CONF.set_override('dhcp_agents_per_network', 1)
weight_scheduler)
cfg.CONF.set_override("dhcp_load_type", "networks")
self.ctx = context.get_admin_context()
def _create_network(self):
net = self.plugin.create_network(
self.ctx,
{'network': {'name': 'name',
'tenant_id': 'tenant_one',
'admin_state_up': True,
'shared': True}})
return net['id']
def test_scheduler_one_agents_per_network(self):
self._save_networks(['1111'])
net_id = self._create_network()
helpers.register_dhcp_agent(HOST_C)
self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
{'id': '1111'})
{'id': net_id})
agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
['1111'])
[net_id])
self.assertEqual(1, len(agents))
def test_scheduler_two_agents_per_network(self):
cfg.CONF.set_override('dhcp_agents_per_network', 2)
self._save_networks(['1111'])
net_id = self._create_network()
helpers.register_dhcp_agent(HOST_C)
helpers.register_dhcp_agent(HOST_D)
self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
{'id': '1111'})
{'id': net_id})
agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
['1111'])
[net_id])
self.assertEqual(2, len(agents))
def test_scheduler_no_active_agents(self):
self._save_networks(['1111'])
net_id = self._create_network()
self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
{'id': '1111'})
{'id': net_id})
agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
['1111'])
[net_id])
self.assertEqual(0, len(agents))
def test_scheduler_equal_distribution(self):
self._save_networks(['1111', '2222', '3333'])
net_id_1 = self._create_network()
net_id_2 = self._create_network()
net_id_3 = self._create_network()
helpers.register_dhcp_agent(HOST_C)
helpers.register_dhcp_agent(HOST_D, networks=1)
self.plugin.network_scheduler.schedule(
self.plugin, context.get_admin_context(), {'id': '1111'})
self.plugin, context.get_admin_context(), {'id': net_id_1})
helpers.register_dhcp_agent(HOST_D, networks=2)
self.plugin.network_scheduler.schedule(
self.plugin, context.get_admin_context(), {'id': '2222'})
self.plugin, context.get_admin_context(), {'id': net_id_2})
helpers.register_dhcp_agent(HOST_C, networks=4)
self.plugin.network_scheduler.schedule(
self.plugin, context.get_admin_context(), {'id': '3333'})
self.plugin, context.get_admin_context(), {'id': net_id_3})
agent1 = self.plugin.get_dhcp_agents_hosting_networks(
self.ctx, ['1111'])
self.ctx, [net_id_1])
agent2 = self.plugin.get_dhcp_agents_hosting_networks(
self.ctx, ['2222'])
self.ctx, [net_id_2])
agent3 = self.plugin.get_dhcp_agents_hosting_networks(
self.ctx, ['3333'])
self.ctx, [net_id_3])
self.assertEqual('host-c', agent1[0]['host'])
self.assertEqual('host-c', agent2[0]['host'])
self.assertEqual('host-d', agent3[0]['host'])
@@ -494,6 +508,9 @@ class DHCPAgentAZAwareWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase):
            'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler')
        self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.'
                                                'Ml2Plugin')
        mock.patch.object(
            self.plugin, 'filter_hosts_with_network_access',
            side_effect=lambda context, network_id, hosts: hosts).start()
        cfg.CONF.set_override('dhcp_agents_per_network', 1)
        cfg.CONF.set_override("dhcp_load_type", "networks")


@@ -0,0 +1,8 @@
---
prelude: >
    Schedule networks on dhcp-agents with access to network
features:
  - DHCP schedulers use the "filter_hosts_with_network_access" plugin method
    to filter hosts with access to the dhcp network. Plugins can overload it
    to define their own filtering logic. In particular, the ML2 plugin
    delegates the filtering to its mechanism drivers.