Add a routed networks scheduler pre-filter

As explained in the spec, in order to support routed networks, we need
to add a new scheduler pre-filter with a specific conf option
so we can then look at the nova aggregates related to the segments.

Since this pre-filter is called every time we call the scheduler,
it also means that when we move instances, we only accept hosts that
are in the same related aggregates.

NOTE(sbauza): We're just setting the admin_api variable in the
integrated helpers as it's used by _get_provider_uuid_by_host().

Implements: blueprint routed-networks-scheduling

Co-Authored-By: Matt Riedemann <mriedem.os@gmail.com>
Co-Authored-By: Balazs Gibizer <balazs.gibizer@est.tech>

Change-Id: I667f56612d7f63834863476694cb1f4c71a58302
Sylvain Bauza 2021-02-03 15:19:43 +01:00
parent c4b28a5496
commit a1673d3118
8 changed files with 761 additions and 5 deletions

View File

@@ -1,6 +1,7 @@
nova/compute/manager.py
nova/crypto.py
nova/privsep/path.py
nova/scheduler/request_filter.py
nova/scheduler/utils.py
nova/virt/driver.py
nova/virt/hardware.py

View File

@@ -137,6 +137,15 @@ scheduler, this option has no effect.
Number of workers for the nova-scheduler service. The default will be the
number of CPUs available if using the "filter_scheduler" scheduler driver,
otherwise the default will be 1.
"""),
cfg.BoolOpt("query_placement_for_routed_network_aggregates",
default=False,
help="""
Enable the scheduler to filter compute hosts affined to routed network segment
aggregates.
See https://docs.openstack.org/neutron/latest/admin/config-routed-networks.html
for details.
"""),
cfg.BoolOpt("limit_tenants_to_placement_aggregate",
default=False,
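
For reference, this option lives in the [scheduler] group and defaults to False, so operators have to opt in to the new pre-filter. A minimal, hypothetical nova.conf excerpt (not part of this change):

[scheduler]
query_placement_for_routed_network_aggregates = True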

View File

@@ -17,9 +17,12 @@ from oslo_log import log as logging
from oslo_utils import timeutils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
@@ -267,6 +270,91 @@ def accelerators_filter(ctxt, request_spec):
return True
@trace_request_filter
def routed_networks_filter(
ctxt: nova_context.RequestContext,
request_spec: 'objects.RequestSpec'
) -> bool:
"""Adds requested placement aggregates that match requested networks.
This will modify request_spec to request hosts in aggregates that
match segment IDs related to the requested networks.
:param ctxt: The usual suspect for a context object.
:param request_spec: a classic RequestSpec object containing the request.
:returns: True if the filter was used or False if not.
:raises: exception.InvalidRoutedNetworkConfiguration if something went
wrong when trying to get the related segment aggregates.
"""
if not CONF.scheduler.query_placement_for_routed_network_aggregates:
return False
# NOTE(sbauza): On a create operation with no specific network request, we
# allocate the network only after scheduling, when the nova-compute service
# calls Neutron. In that case, we just want to accept any destination here.
# NOTE(sbauza): This could also be a reschedule coming from an old compute.
if 'requested_networks' not in request_spec:
return True
# This object field is not nullable
requested_networks = request_spec.requested_networks
# NOTE(sbauza): This field may not be set yet.
if (
'requested_destination' not in request_spec or
request_spec.requested_destination is None
):
request_spec.requested_destination = objects.Destination()
# Get the clients we need
network_api = neutron.API()
report_api = report.SchedulerReportClient()
for requested_network in requested_networks:
network_id = None
# Check for a specifically requested network ID.
if "port_id" in requested_network and requested_network.port_id:
# We have to lookup the port to see which segment(s) to support.
port = network_api.show_port(ctxt, requested_network.port_id)[
"port"
]
if port['fixed_ips']:
# The instance already exists with a related subnet. We need to
# stick to this subnet.
# NOTE(sbauza): In case of multiple IPs, there could be more than
# one subnet, but since they would all be for the same port,
# looking at the first subnet is enough.
subnet_id = port['fixed_ips'][0]['subnet_id']
aggregates = utils.get_aggregates_for_routed_subnet(
ctxt, network_api, report_api, subnet_id)
else:
# The port was just created without a subnet.
network_id = port["network_id"]
elif (
"network_id" in requested_network and requested_network.network_id
):
network_id = requested_network.network_id
if network_id:
# As the user only requested a network, or a port not bound to a
# segment, we are free to choose any segment from the network.
aggregates = utils.get_aggregates_for_routed_network(
ctxt, network_api, report_api, network_id)
if aggregates:
LOG.debug(
'routed_networks_filter request filter added the following '
'aggregates for network ID %s: %s',
network_id, ', '.join(aggregates))
# NOTE(sbauza): All of the aggregates from this request will be
# accepted, but they will have an AND relationship with any other
# requested aggregate, like for another NIC request in this loop.
request_spec.requested_destination.require_aggregates(aggregates)
return True
ALL_REQUEST_FILTERS = [
require_tenant_aggregate,
map_az_to_placement_aggregate,
@@ -275,6 +363,7 @@ ALL_REQUEST_FILTERS = [
isolate_aggregates,
transform_image_metadata,
accelerators_filter,
routed_networks_filter,
]
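
A quick illustration of the AND relationship mentioned in the NOTE above: each call to require_aggregates() appends one comma-joined entry to requested_destination.aggregates, so aggregates passed together in one call are alternatives while separate calls (one per NIC in the loop) are all required. A minimal sketch, assuming nova's objects.Destination behaves as exercised by the unit tests further below:

from nova import objects

dest = objects.Destination()
# First NIC: either agg1 or agg2 is acceptable (ORed within one call).
dest.require_aggregates(['agg1', 'agg2'])
# Second NIC: agg3 is also required (ANDed across calls).
dest.require_aggregates(['agg3'])
# The resulting field keeps one comma-joined entry per call:
# dest.aggregates == ['agg1,agg2', 'agg3']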

View File

@@ -1401,6 +1401,11 @@ class NeutronFixture(fixtures.Fixture):
],
}
# Fixtures inheriting from NeutronFixture can redefine the default port
# that create_port() duplicates when creating a new port by overriding
# this variable.
default_port = copy.deepcopy(port_2)
# network_2 does not have security groups enabled - that's okay since most
# of these ports are SR-IOV'y anyway
network_2 = {
@@ -1965,6 +1970,11 @@ class NeutronFixture(fixtures.Fixture):
return {'networks': self._list_resource(
self._networks, retrieve_all, **_params)}
def show_subnet(self, subnet_id, **_params):
if subnet_id not in self._subnets:
raise neutron_client_exc.NeutronClientException()
return {'subnet': copy.deepcopy(self._subnets[subnet_id])}
def list_subnets(self, retrieve_all=True, **_params):
# NOTE(gibi): The fixture does not support filtering for subnets
return {'subnets': copy.deepcopy(list(self._subnets.values()))}
@@ -1982,10 +1992,10 @@
# created. This is port_2. So if that port is not created yet then
# that is the one created here.
new_port = copy.deepcopy(body['port'])
new_port.update(copy.deepcopy(self.port_2))
if self.port_2['id'] in self._ports:
# If port_2 is already created then create a new port based on
# the request body, the port_2 as a template, and assign new
new_port.update(copy.deepcopy(self.default_port))
if self.default_port['id'] in self._ports:
# If the port is already created then create a new port based on
# the request body, the default port as a template, and assign new
# port_id and mac_address for the new port
# we need truly random uuids instead of named sentinels as some
# tests need more than 3 ports
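
The default_port hook added above lets fixtures that inherit from NeutronFixture change the template create_port() deep-copies for new ports. A minimal sketch of such an override (hypothetical subclass, not part of this change), assuming the fixture's pre-existing port_1 template; the routed-networks fixture further below does the same thing by assigning self.default_port inside create_port():

import copy

from nova.tests import fixtures as nova_fixtures


class PortTemplateNeutronFixture(nova_fixtures.NeutronFixture):
    # create_port() deep-copies default_port when building a new port and
    # checks default_port['id'] to decide whether to generate a fresh id,
    # so overriding this attribute changes the template new ports use.
    default_port = copy.deepcopy(nova_fixtures.NeutronFixture.port_1)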

View File

@@ -1122,13 +1122,13 @@ class _IntegratedTestBase(test.TestCase, PlacementInstanceHelperMixin):
use_project_id_in_urls=self.USE_PROJECT_ID,
stub_keystone=self.STUB_KEYSTONE))
self.admin_api = self.api_fixture.admin_api
# if the class needs to run as admin, make the api endpoint
# the admin, otherwise it's safer to run as non admin user.
if self.ADMIN_API:
self.api = self.api_fixture.admin_api
else:
self.api = self.api_fixture.api
self.admin_api = self.api_fixture.admin_api
if self.microversion:
self.api.microversion = self.microversion

View File

@@ -0,0 +1,528 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.network import constants
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
class NeutronRoutedNetworksFixture(nova_fixtures.NeutronFixture):
tenant_id = nova_fixtures.NeutronFixture.tenant_id
network_multisegment = {
'id': uuids.network_multisegment,
'name': 'net-multisegment',
'description': '',
'status': 'ACTIVE',
'admin_state_up': True,
'tenant_id': tenant_id,
'project_id': tenant_id,
'shared': False,
'mtu': 1450,
'router:external': False,
'availability_zone_hints': [],
'availability_zones': [
'nova'
],
'port_security_enabled': True,
'ipv4_address_scope': None,
'ipv6_address_scope': None,
'segments': [
{
"provider:network_type": "flat",
"provider:physical_network": "default",
"provider:segmentation_id": 0
},
{
"provider:network_type": "vlan",
"provider:physical_network": "public",
"provider:segmentation_id": 2
},
],
}
segment_id_0 = {
"name": "",
"network_id": network_multisegment['id'],
"segmentation_id": 0,
"network_type": "flat",
"physical_network": "default",
"revision_number": 1,
"id": uuids.segment_id_0,
"created_at": "2018-03-19T19:16:56Z",
"updated_at": "2018-03-19T19:16:56Z",
"description": "",
}
segment_id_2 = {
"name": "",
"network_id": network_multisegment['id'],
"segmentation_id": 2,
"network_type": "vlan",
"physical_network": "public",
"revision_number": 3,
"id": uuids.segment_id_2,
"created_at": "2018-03-19T19:16:56Z",
"updated_at": "2018-03-19T19:16:56Z",
"description": "",
}
segments = [segment_id_0, segment_id_2]
subnet_for_segment_id_0 = {
'id': uuids.subnet_for_segment_id_0,
'name': 'public-subnet',
'description': '',
'ip_version': 4,
'ipv6_address_mode': None,
'ipv6_ra_mode': None,
'enable_dhcp': True,
'network_id': network_multisegment['id'],
'tenant_id': tenant_id,
'project_id': tenant_id,
'dns_nameservers': [],
'gateway_ip': '192.168.1.1',
'allocation_pools': [
{
'start': '192.168.1.1',
'end': '192.168.1.254'
}
],
'host_routes': [],
'cidr': '192.168.1.1/24',
'segment_id': segment_id_0['id'],
}
subnet_for_segment_id_2 = {
'id': uuids.subnet_for_segment_id_2,
'name': 'vlan-subnet',
'description': '',
'ip_version': 4,
'ipv6_address_mode': None,
'ipv6_ra_mode': None,
'enable_dhcp': True,
'network_id': network_multisegment['id'],
'tenant_id': tenant_id,
'project_id': tenant_id,
'dns_nameservers': [],
'gateway_ip': '192.168.2.1',
'allocation_pools': [
{
'start': '192.168.2.1',
'end': '192.168.2.254'
}
],
'host_routes': [],
'cidr': '192.168.2.1/24',
'segment_id': segment_id_2['id'],
}
network_multisegment['subnets'] = [subnet_for_segment_id_0['id'],
subnet_for_segment_id_2['id']]
# Use this port only if you want an unbound port.
port_with_deferred_ip_allocation = {
'id': uuids.port_with_deferred_ip_allocation,
'name': '',
'description': '',
'network_id': network_multisegment['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': 'fa:16:3e:4c:2c:12',
# The allocation is deferred, so fixed_ips should be empty *before*
# the port is bound.
# NOTE(sbauza): Make sure you modify the value if you look at the port
# after it's bound.
'fixed_ips': [],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'binding:profile': {},
'binding:vnic_type': 'normal',
'binding:vif_type': 'ovs',
'ip_allocation': "deferred",
}
# Use this port if you want to fake the port being already bound
port_with_deferred_ip_allocation_bound_to_segment_0 = \
copy.deepcopy(port_with_deferred_ip_allocation)
port_with_deferred_ip_allocation_bound_to_segment_0.update({
'fixed_ips': [{
'ip_address': '192.168.1.4',
'subnet_id': subnet_for_segment_id_0['id']
}],
})
port_on_segment_id_0 = {
'id': uuids.port_on_segment_id_0,
'name': '',
'description': '',
'network_id': network_multisegment['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': 'fa:16:3e:4c:2c:13',
'fixed_ips': [
{
'ip_address': '192.168.1.3',
'subnet_id': subnet_for_segment_id_0['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'binding:profile': {},
'binding:vnic_type': 'normal',
'binding:vif_type': 'ovs',
'ip_allocation': "immediate",
}
port_on_segment_id_2 = {
'id': uuids.port_on_segment_id_2,
'name': '',
'description': '',
'network_id': network_multisegment['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': 'fa:16:3e:4c:2c:13',
'fixed_ips': [
{
'ip_address': '192.168.2.4',
'subnet_id': subnet_for_segment_id_2['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'binding:profile': {},
'binding:vnic_type': 'normal',
'binding:vif_type': 'ovs',
'ip_allocation': "immediate",
}
def __init__(self, test):
super().__init__(test)
# add extra ports and the related network to the neutron fixture
# specifically for these tests.
self._networks[
self.network_multisegment['id']
] = self.network_multisegment
self._ports[
self.port_with_deferred_ip_allocation['id']
] = copy.deepcopy(self.port_with_deferred_ip_allocation)
self._ports[
self.port_on_segment_id_0['id']
] = copy.deepcopy(self.port_on_segment_id_0)
self._ports[
self.port_on_segment_id_2['id']
] = copy.deepcopy(self.port_on_segment_id_2)
self._subnets[
self.subnet_for_segment_id_0['id']
] = copy.deepcopy(self.subnet_for_segment_id_0)
self._subnets[
self.subnet_for_segment_id_2['id']
] = copy.deepcopy(self.subnet_for_segment_id_2)
def list_extensions(self, *args, **kwargs):
return {
'extensions': [
{
# Copied from neutron-lib segment.py
"updated": "2016-02-24T17:00:00-00:00",
"name": constants.SEGMENT,
"links": [],
"alias": "segment",
"description": "Segments extension."
}
]
}
def list_subnets(self, retrieve_all=True, **_params):
if 'network_id' in _params:
network_id = _params['network_id']
assert network_id in self._networks, ('Network %s not in fixture' %
network_id)
filtered_subnets = []
for subnet in list(self._subnets.values()):
if subnet['network_id'] == network_id:
filtered_subnets.append(copy.deepcopy(subnet))
return {'subnets': filtered_subnets}
else:
return super().list_subnets(retrieve_all, **_params)
def create_port(self, body=None):
body = body or {'port': {}}
network_id = body['port'].get('network_id')
assert network_id in self._networks, ('Network %s not in fixture' %
network_id)
# Redefine the default port template used for creating a new port to be
# the port already allocated on segment #0.
# NOTE(sbauza): Segment #0 is always used when booting an instance
# without a provided port as the first supported host is related to it.
# FIXME(sbauza): Do something here to not blindly set the segment
# without verifying which compute service is used by the instance.
self.default_port = (
self.port_with_deferred_ip_allocation_bound_to_segment_0
)
return super().create_port(body)
class RoutedNetworkTests(integrated_helpers._IntegratedTestBase):
compute_driver = 'fake.MediumFakeDriver'
microversion = 'latest'
ADMIN_API = True
def setUp(self):
self.flags(
query_placement_for_routed_network_aggregates=True,
group='scheduler')
# We will create 5 hosts, let's make sure we order them by their index.
weights = {'host1': 500, 'host2': 400, 'host3': 300, 'host4': 200,
'host5': 100}
self.useFixture(nova_fixtures.HostNameWeigherFixture(weights=weights))
super().setUp()
# Amend the usual neutron fixture with specific routed networks
self.neutron = self.useFixture(NeutronRoutedNetworksFixture(self))
# let's create 5 computes with their respective records
for i in range(1, 6):
setattr(self, 'compute%s' % i, self._start_compute('host%s' % i))
setattr(self, 'compute%s_rp_uuid' % i,
self._get_provider_uuid_by_host('host%s' % i))
setattr(self, 'compute%s_service_id' % i,
self.admin_api.get_services(host='host%s' % i,
binary='nova-compute')[0]['id']
)
# Simulate the placement setup neutron does for multi segment networks
segment_ids = [segment["id"] for segment in self.neutron.segments]
self.assertEqual(2, len(segment_ids))
# We have 5 computes and the network has two segments. Let's create a
# setup where the network has segments on host2 to host5 but not on
# host1. The HostNameWeigherFixture prefers host1 over host2 over host3
# over host4 over host5. This way we can check whether the scheduler
# selects a host with an available network segment.
# The segments for this net are:
# * segment 0 is for host2, host4 and host5
# * segment 2 is for host3 and host5
self.segment_id_to_compute_rp_uuid = {
segment_ids[0]: [self.compute2_rp_uuid, self.compute4_rp_uuid,
self.compute5_rp_uuid],
segment_ids[1]: [self.compute3_rp_uuid, self.compute5_rp_uuid],
}
self._create_multisegment_placement_setup(
self.segment_id_to_compute_rp_uuid)
def _create_multisegment_placement_setup(self, segment_to_compute_rp):
self.segment_id_to_aggregate_id = {}
# map each segment to its compute RPs
for segment_id, compute_rp_uuids in segment_to_compute_rp.items():
# create segment RP
segment_rp_req = {
"name": segment_id,
"uuid": segment_id,
"parent_provider_uuid": None,
}
self.placement.post(
"/resource_providers", body=segment_rp_req, version="1.20"
)
# create aggregate around the segment RP and the compute RP
aggregate_uuid = getattr(uuids, segment_id)
self.segment_id_to_aggregate_id[segment_id] = aggregate_uuid
# as we created the segment RP above we assume that it does not
# have any aggregate and its generation is 0
self.assertEqual(
200,
self.placement.put(
"/resource_providers/%s/aggregates" % segment_id,
body={
"aggregates": [aggregate_uuid],
"resource_provider_generation": 0,
},
version="1.20",
).status,
)
# get compute RPs and append the new aggregate to it
for compute_rp_uuid in compute_rp_uuids:
resp = self.placement.get(
"/resource_providers/%s/aggregates" % compute_rp_uuid,
version="1.20",
).body
resp["aggregates"].append(aggregate_uuid)
self.assertEqual(
200,
self.placement.put(
"/resource_providers/%s/aggregates" % compute_rp_uuid,
body=resp,
version="1.20",
).status,
)
def test_boot_with_deferred_port(self):
# Neutron only assigns the deferred port to a segment when the port is
# bound to a host. So the scheduler can select any host that has
# a segment for the network.
port = self.neutron.port_with_deferred_ip_allocation
server = self._create_server(
name='server-with-routed-net',
networks=[{'port': port['id']}])
# HostNameWeigherFixture prefers host1 but the port is in a network
# that has only segments on host2 to host5.
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
def test_boot_with_immediate_port(self):
# Immediate port should be assigned to a network segment by neutron
# during port create. So the scheduler should only select a host that
# is connected to that network segment
port = self.neutron.port_on_segment_id_2
server = self._create_server(
name='server-with-routed-net',
networks=[{'port': port['id']}])
# Since the port is on the segment ID 2, only host3 and host5 are
# accepted, so host3 always wins because of the weigher.
self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
def test_boot_with_network(self):
# Port is created _after_ scheduling to a host so the scheduler can
# select either host2 to host5 initially based on the segment
# availability of the network. But then nova needs to create
# the port in deferred mode so that the already selected host cannot
# conflict with the neutron segment assignment at port create.
net = self.neutron.network_multisegment
server = self._create_server(
name='server-with-routed-net',
networks=[{'uuid': net['id']}])
# host2 always wins over host3 to host5 because of the weigher.
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
# Verify that we used a port with a deferred ip allocation
ip_addr = server['addresses'][net['name']][0][
'addr']
self.assertEqual(
self.neutron.port_with_deferred_ip_allocation_bound_to_segment_0[
'fixed_ips'][0]['ip_address'],
ip_addr)
def test_boot_with_two_nics(self):
# Test a scenario with a user trying to have two different NICs, each
# on a different segment.
port0 = self.neutron.port_on_segment_id_0
port1 = self.neutron.port_on_segment_id_2
# Here we ask for a server with one NIC on each segment.
server = self._create_server(
name='server-with-routed-net',
networks=[{'port': port0['id']}, {'port': port1['id']}])
# host2 should win with the weigher but as we asked for both segments,
# only host5 supports them.
self.assertEqual('host5', server['OS-EXT-SRV-ATTR:host'])
def test_migrate(self):
net = self.neutron.network_multisegment
server = self._create_server(
name='server-with-routed-net',
networks=[{'uuid': net['id']}])
# Make sure we landed on host2 since both segments were accepted
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
server = self._migrate_server(server)
# HostNameWeigherFixture prefers host1 but the port is in a network
# that has only segments on the other hosts.
# host2 is avoided as the source, and host4 wins over host5 on this
# segment because of the weigher.
self.assertEqual('host4', server['OS-EXT-SRV-ATTR:host'])
def test_live_migrate(self):
net = self.neutron.network_multisegment
server = self._create_server(
name='server-with-routed-net',
networks=[{'uuid': net['id']}])
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
self._live_migrate(server)
server = self.api.get_server(server['id'])
# HostNameWeigherFixture prefers host1 but the port is in a network
# that has only segments on the other hosts.
# host2 is avoided as the source, and host4 wins over host5 on this
# segment because of the weigher.
self.assertEqual('host4', server['OS-EXT-SRV-ATTR:host'])
def test_evacuate(self):
net = self.neutron.network_multisegment
server = self._create_server(
name='server-with-routed-net',
networks=[{'uuid': net['id']}])
# The instance landed on host2 as that host is related to one of the
# segments.
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
self.admin_api.put_service_force_down(self.compute2_service_id, True)
server = self._evacuate_server(server)
# HostNameWeigherFixture prefers host1 but the port is in a network
# that has only segments on the other hosts.
# host2 is avoided as the source, and host4 wins over host5 on this
# segment because of the weigher.
self.assertEqual('host4', server['OS-EXT-SRV-ATTR:host'])
def test_unshelve_after_shelve(self):
net = self.neutron.network_multisegment
server = self._create_server(
name='server-with-routed-net',
networks=[{'uuid': net['id']}])
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
# Shelve does offload the instance so unshelve will ask the scheduler
# again.
server = self._shelve_server(server)
server = self._unshelve_server(server)
# HostNameWeigherFixture prefers host1 but the port is in a network
# that has only segments on the other hosts. Since the instance was
# offloaded, we can now again support host2.
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
def test_boot_with_immediate_port_fails_due_to_config(self):
# We will fake the fact that somehow the segment has no related
# aggregate (maybe because Neutron got an exception when trying to
# create the aggregate by calling the Nova API).
port = self.neutron.port_on_segment_id_2
with mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_aggregates',
return_value=None
) as mock_get_aggregates:
server = self._create_server(
name='server-with-routed-net',
networks=[{'port': port['id']}],
expected_state='ERROR')
# Make sure we correctly looked up which aggregates are related to
# segment ID #2.
exp_segment_id = self.neutron.segment_id_2['id']
mock_get_aggregates.assert_called_once_with(
mock.ANY, exp_segment_id)
self.assertIn('No valid host', server['fault']['message'])
self.assertIn(
'Failed to find aggregate related to segment %s' % exp_segment_id,
server['fault']['message'])

View File

@@ -36,6 +36,8 @@ class TestRequestFilter(test.NoDBTestCase):
group='scheduler')
self.flags(enable_isolated_aggregate_filtering=True,
group='scheduler')
self.flags(query_placement_for_routed_network_aggregates=True,
group='scheduler')
def test_process_reqspec(self):
fake_filters = [mock.MagicMock(), mock.MagicMock()]
@@ -474,3 +476,107 @@
# Assert about logging
mock_log.assert_not_called()
def test_routed_networks_filter_not_enabled(self):
self.assertIn(request_filter.routed_networks_filter,
request_filter.ALL_REQUEST_FILTERS)
self.flags(query_placement_for_routed_network_aggregates=False,
group='scheduler')
reqspec = objects.RequestSpec(
requested_destination=objects.Destination())
self.assertFalse(request_filter.routed_networks_filter(
self.context, reqspec))
# We don't add any aggregates
self.assertIsNone(reqspec.requested_destination.aggregates)
def test_routed_networks_filter_no_requested_nets(self):
reqspec = objects.RequestSpec()
self.assertTrue(request_filter.routed_networks_filter(
self.context, reqspec))
@mock.patch('nova.scheduler.utils.get_aggregates_for_routed_subnet')
@mock.patch('nova.network.neutron.API.show_port')
def test_routed_networks_filter_with_requested_port_immediate(
self, mock_show_port, mock_get_aggs_subnet
):
req_net = objects.NetworkRequest(port_id=uuids.port1)
reqspec = objects.RequestSpec(
requested_networks=objects.NetworkRequestList(objects=[req_net]))
# Check whether the port was already bound to a segment
mock_show_port.return_value = {
'port': {
'fixed_ips': [
{
'subnet_id': uuids.subnet1
}]}}
mock_get_aggs_subnet.return_value = [uuids.agg1]
self.assertTrue(request_filter.routed_networks_filter(
self.context, reqspec))
self.assertEqual([uuids.agg1],
reqspec.requested_destination.aggregates)
mock_show_port.assert_called_once_with(self.context, uuids.port1)
mock_get_aggs_subnet.assert_called_once_with(
self.context, mock.ANY, mock.ANY, uuids.subnet1)
@mock.patch('nova.scheduler.utils.get_aggregates_for_routed_network')
@mock.patch('nova.network.neutron.API.show_port')
def test_routed_networks_filter_with_requested_port_deferred(
self, mock_show_port, mock_get_aggs_network
):
req_net = objects.NetworkRequest(port_id=uuids.port1)
reqspec = objects.RequestSpec(
requested_networks=objects.NetworkRequestList(objects=[req_net]))
# The port was created with a deferred allocation so for the moment,
# it's not bound to a specific segment.
mock_show_port.return_value = {
'port': {
'fixed_ips': [],
'network_id': uuids.net1}}
mock_get_aggs_network.return_value = [uuids.agg1]
self.assertTrue(request_filter.routed_networks_filter(
self.context, reqspec))
self.assertEqual([uuids.agg1],
reqspec.requested_destination.aggregates)
mock_show_port.assert_called_once_with(self.context, uuids.port1)
mock_get_aggs_network.assert_called_once_with(
self.context, mock.ANY, mock.ANY, uuids.net1)
@mock.patch('nova.scheduler.utils.get_aggregates_for_routed_network')
def test_routed_networks_filter_with_requested_net(
self, mock_get_aggs_network
):
req_net = objects.NetworkRequest(network_id=uuids.net1)
reqspec = objects.RequestSpec(
requested_networks=objects.NetworkRequestList(objects=[req_net]))
mock_get_aggs_network.return_value = [uuids.agg1]
self.assertTrue(request_filter.routed_networks_filter(
self.context, reqspec))
self.assertEqual([uuids.agg1],
reqspec.requested_destination.aggregates)
mock_get_aggs_network.assert_called_once_with(
self.context, mock.ANY, mock.ANY, uuids.net1)
@mock.patch('nova.scheduler.utils.get_aggregates_for_routed_network')
def test_routed_networks_filter_with_two_requested_nets(
self, mock_get_aggs_network
):
req_net1 = objects.NetworkRequest(network_id=uuids.net1)
req_net2 = objects.NetworkRequest(network_id=uuids.net2)
reqspec = objects.RequestSpec(
requested_networks=objects.NetworkRequestList(
objects=[req_net1, req_net2]))
mock_get_aggs_network.side_effect = ([uuids.agg1, uuids.agg2],
[uuids.agg3])
self.assertTrue(request_filter.routed_networks_filter(
self.context, reqspec))
# require_aggregates() has specific semantics here: multiple
# aggregates provided in the same call have their UUIDs joined.
self.assertEqual([','.join([uuids.agg1, uuids.agg2]), uuids.agg3],
reqspec.requested_destination.aggregates)
mock_get_aggs_network.assert_has_calls([
mock.call(self.context, mock.ANY, mock.ANY, uuids.net1),
mock.call(self.context, mock.ANY, mock.ANY, uuids.net2)])

View File

@@ -0,0 +1,13 @@
---
features:
- |
The scheduler can now verify whether the requested networks or ports are
related to Neutron `routed networks`_ with specific segments to use.
In this case, the routed networks prefilter requires the related
aggregates to be reported in Placement, so only hosts within the
requested aggregates are accepted.
To enable this behaviour, operators need to set the
``[scheduler]/query_placement_for_routed_network_aggregates`` configuration
option, which defaults to ``False``.
.. _`routed networks`: https://docs.openstack.org/neutron/latest/admin/config-routed-networks.html