[AIM] New streamlined Opflex RPC handler

The Opflex request_endpoint_details RPC handler is reimplemented in
the apic_aim mechanism driver, using a minimal set of baked DB
queries. These queries return specific individual column data rather
than entire ORM objects, eliminating the sqlalchemy and DB server
overhead for relationship processing. Joins are used to minimize the
number of queries made to the DB server. Rather than using or
emulating the dictionaries returned by Neutron's get_<resource>()
methods, the RPC response is built directly from the data returned
from the queries.

The older RPC handler implementations remain during testing to allow
performance and scalability comparison. A single implementation will
eventually be selected and the others removed. Until then, the new RPC
handler is enabled by setting the enable_new_rpc config variable.

Change-Id: I614d5bca3f101ceab06e2fad1a59f5514b438473
This commit is contained in:
Robert Kukura 2019-01-24 11:48:03 -05:00
parent fcda43c1b9
commit 6d09dbfddd
9 changed files with 1874 additions and 102 deletions

View File

@ -53,10 +53,15 @@ apic_opts = [
"plugin, formatted as a dictionary mapping Neutron external "
"network IDs (UUIDs) to ACI external network distinguished "
"names."),
# REVISIT: Eliminate the following two options, leaving a single
# RPC implementation.
cfg.BoolOpt('enable_raw_sql_for_device_rpc',
default=False,
help=("This will use those raw SQL statements to speed "
"up the calculation of the EP file.")),
cfg.BoolOpt('enable_new_rpc',
default=False,
help=("Enable new RPC handler.")),
cfg.IntOpt('apic_nova_vm_name_cache_update_interval', default=60,
help=("How many seconds for the polling thread on each "
"controller should wait before it updates the nova vm "

View File

@ -10,8 +10,20 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as n_constants

# Identifiers for GBP-managed resources (presumably used as
# resource/registry keys by the RPC and mapping code -- TODO confirm
# against callers).
GBP_FLOW_CLASSIFIER = 'gbp_flowclassifier'
GBP_PORT = 'gbp_port'
GBP_NETWORK_VRF = 'gbp_network_vrf'
GBP_NETWORK_EPG = 'gbp_network_epg'
GBP_NETWORK_LINK = 'gbp_network_link'

# device_owner values for ports created internally by the apic_aim
# mechanism driver for SNAT pools and SVI interfaces.
DEVICE_OWNER_SNAT_PORT = 'apic:snat-pool'
DEVICE_OWNER_SVI_PORT = 'apic:svi'

# Well-known IPv4 CIDRs: the default route, and the prefix used for
# metadata-service host routes.
IPV4_ANY_CIDR = '0.0.0.0/0'
IPV4_METADATA_CIDR = '169.254.169.254/16'

# Port device_owner types whose endpoints are treated as promiscuous.
PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP,
                     n_constants.DEVICE_OWNER_LOADBALANCER]
PROMISCUOUS_SUFFIX = 'promiscuous'

View File

@ -84,18 +84,22 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import config # noqa
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import exceptions
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import extension_db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import rpc
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import trunk_driver
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
nova_client as nclient)
# REVISIT: We need the aim_mapping policy driver's config until
# advertise_mtu and nested_host_vlan are moved to the mechanism
# driver's own config. Also, the noqa comment has to be on the same
# line as the entire import.
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import config as pd_config # noqa
LOG = log.getLogger(__name__)
BAKERY = baked.bakery(500, _size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s" % __name__))
DEVICE_OWNER_SNAT_PORT = 'apic:snat-pool'
DEVICE_OWNER_SVI_PORT = 'apic:svi'
ANY_FILTER_NAME = 'AnyFilter'
ANY_FILTER_ENTRY_NAME = 'AnyFilterEntry'
DEFAULT_VRF_NAME = 'DefaultVRF'
@ -112,8 +116,6 @@ SUPPORTED_VNIC_TYPES = [portbindings.VNIC_NORMAL,
AGENT_TYPE_DVS = 'DVS agent'
VIF_TYPE_DVS = 'dvs'
PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP,
n_constants.DEVICE_OWNER_LOADBALANCER]
VIF_TYPE_FABRIC = 'fabric'
FABRIC_HOST_ID = 'fabric'
@ -199,7 +201,8 @@ class KeystoneNotificationEndpoint(object):
class ApicMechanismDriver(api_plus.MechanismDriver,
db.DbMixin,
extension_db.ExtensionDbMixin):
extension_db.ExtensionDbMixin,
rpc.ApicRpcHandlerMixin):
NIC_NAME_LEN = 14
class TopologyRpcEndpoint(object):
@ -239,6 +242,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
cfg.CONF.ml2_apic_aim.enable_optimized_metadata)
self.enable_dhcp_opt = (
cfg.CONF.ml2_apic_aim.enable_optimized_dhcp)
# REVISIT: The following 2 items should be moved to
# the ml2_apic_aim group.
self.nested_host_vlan = cfg.CONF.aim_mapping.nested_host_vlan
self.advertise_mtu = cfg.CONF.aim_mapping.advertise_mtu
self.ap_name = 'OpenStack'
self.apic_system_id = cfg.CONF.apic_system_id
self.notifier = ofrpc.AgentNotifierApi(n_topics.AGENT)
@ -261,8 +268,11 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.enable_iptables_firewall = (cfg.CONF.ml2_apic_aim.
enable_iptables_firewall)
self.l3_domain_dn = cfg.CONF.ml2_apic_aim.l3_domain_dn
# REVISIT: Eliminate the following two variables, leaving a
# single RPC implementation.
self.enable_raw_sql_for_device_rpc = (cfg.CONF.ml2_apic_aim.
enable_raw_sql_for_device_rpc)
self.enable_new_rpc = cfg.CONF.ml2_apic_aim.enable_new_rpc
self.apic_nova_vm_name_cache_update_interval = (cfg.CONF.ml2_apic_aim.
apic_nova_vm_name_cache_update_interval)
self._setup_nova_vm_update()
@ -663,7 +673,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
aim_ext_subnet_ipv4 = aim_resource.ExternalSubnet(
tenant_name=tenant_aname,
l3out_name=aname,
external_network_name=L3OUT_EXT_EPG, cidr='0.0.0.0/0',
external_network_name=L3OUT_EXT_EPG,
cidr=aim_cst.IPV4_ANY_CIDR,
scope=scope,
aggregate=aggregate)
self.aim.create(aim_ctx, aim_ext_subnet_ipv4)
@ -3553,7 +3564,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
snat_port_query = ("SELECT id FROM ports "
"WHERE network_id = '" + ext_network['id'] + "' "
"AND device_id = '" + host_or_vrf + "' AND "
"device_owner = '" + DEVICE_OWNER_SNAT_PORT + "'")
"device_owner = '" + aim_cst.DEVICE_OWNER_SNAT_PORT +
"'")
snat_port = session.execute(snat_port_query).first()
if snat_port:
snat_port = dict(snat_port)
@ -3572,7 +3584,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
query += lambda q: q.filter(
models_v2.Port.network_id == sa.bindparam('network_id'),
models_v2.Port.device_id == sa.bindparam('device_id'),
models_v2.Port.device_owner == DEVICE_OWNER_SNAT_PORT)
models_v2.Port.device_owner == aim_cst.DEVICE_OWNER_SNAT_PORT)
snat_port = query(session).params(
network_id=ext_network['id'],
device_id=host_or_vrf).first()
@ -3611,7 +3623,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
for snat_subnet in snat_subnets:
try:
attrs = {'device_id': host_or_vrf,
'device_owner': DEVICE_OWNER_SNAT_PORT,
'device_owner': aim_cst.DEVICE_OWNER_SNAT_PORT,
'tenant_id': ext_network['tenant_id'],
'name': 'snat-pool-port:%s' % host_or_vrf,
'network_id': ext_network['id'],
@ -3658,7 +3670,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
query += lambda q: q.filter(
models_v2.IPAllocation.subnet_id == sa.bindparam('subnet_id'))
query += lambda q: q.filter(
models_v2.Port.device_owner == DEVICE_OWNER_SNAT_PORT)
models_v2.Port.device_owner == aim_cst.DEVICE_OWNER_SNAT_PORT)
return query(session).params(
subnet_id=subnet_id).first()
@ -3683,7 +3695,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
models_v2.Port.id))
query += lambda q: q.filter(
models_v2.Port.network_id == sa.bindparam('ext_network_id'),
models_v2.Port.device_owner == DEVICE_OWNER_SNAT_PORT)
models_v2.Port.device_owner == aim_cst.DEVICE_OWNER_SNAT_PORT)
snat_ports = query(session).params(
ext_network_id=ext_network_id).all()
@ -3893,7 +3905,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
primary_ips.append(ip + '/' + mask)
else:
attrs = {'device_id': '',
'device_owner': DEVICE_OWNER_SVI_PORT,
'device_owner': aim_cst.DEVICE_OWNER_SVI_PORT,
'tenant_id': network['tenant_id'],
'name': 'apic-svi-port:node-%s' % node,
'network_id': network['id'],
@ -4994,7 +5006,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# using other values requires deleting and re-creating the
# external network.
res_dict[cisco_apic.NAT_TYPE] = 'distributed'
res_dict[cisco_apic.EXTERNAL_CIDRS] = ['0.0.0.0/0']
res_dict[cisco_apic.EXTERNAL_CIDRS] = [aim_cst.IPV4_ANY_CIDR]
self.set_network_extn_db(mgr.actual_session, net_db.id, res_dict)
def _missing_subnet_extension_mapping(self, mgr, subnet_db):

File diff suppressed because it is too large Load Diff

View File

@ -60,6 +60,7 @@ from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping_lib as alib)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
nova_client as nclient)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import config # noqa
from gbpservice.neutron.services.grouppolicy import plugin as gbp_plugin
LOG = logging.getLogger(__name__)
@ -98,48 +99,6 @@ COMMON_TENANT_AIM_RESOURCES = [aim_resource.Contract.__name__,
# REVISIT: override add_router_interface L3 API check for now
NO_VALIDATE = cisco_apic_l3.OVERRIDE_NETWORK_ROUTING_TOPOLOGY_VALIDATION
# REVISIT: Auto-PTG is currently config driven to align with the
# config driven behavior of the older driver but is slated for
# removal.
opts = [
cfg.BoolOpt('create_auto_ptg',
default=True,
help=_("Automatically create a PTG when a L2 Policy "
"gets created. This is currently an aim_mapping "
"policy driver specific feature.")),
cfg.BoolOpt('create_per_l3p_implicit_contracts',
default=True,
help=_("This configuration is set to True to migrate a "
"deployment that has l3_policies without implicit "
"AIM contracts (these are deployments which have "
"AIM implicit contracts per tenant). A Neutron server "
"restart is required for this configuration to take "
"effect. The creation of the implicit contracts "
"happens at the time of the AIM policy driver "
"initialization. The configuration can be set to "
"False to avoid recreating the implicit contracts "
"on subsequent Neutron server restarts. This "
"option will be removed in the O release")),
cfg.BoolOpt('advertise_mtu',
default=True,
help=_('If True, advertise network MTU values if core plugin '
'calculates them. MTU is advertised to running '
'instances via DHCP and RA MTU options.')),
cfg.IntOpt('nested_host_vlan',
default=4094,
help=_("This is a locally siginificant VLAN used to provide "
"connectivity to the OpenStack VM when configured "
"to host the nested domain (Kubernetes/OpenShift). "
"Any traffic originating from the VM and intended "
"to go on the Neutron network, is tagged with this "
"VLAN. The VLAN is stripped by the Opflex installed "
"flows on the integration bridge and the traffic is "
"forwarded on the Neutron network.")),
]
cfg.CONF.register_opts(opts, "aim_mapping")
class InvalidVrfForDualStackAddressScopes(exc.GroupPolicyBadRequest):
message = _("User-specified address scopes for both address families, "

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
import sqlalchemy as sa
from sqlalchemy.ext import baked
@ -25,6 +26,13 @@ from neutron_lib.api.definitions import portbindings
from opflexagent import rpc as o_rpc
from oslo_log import log
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_auto_ptg_db as auto_ptg_db)
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_segmentation_label_db as seg_label_db)
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
constants as md_const)
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
mechanism_driver as md)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
@ -36,6 +44,15 @@ LOG = log.getLogger(__name__)
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s" % __name__))
EndpointPtInfo = namedtuple(
'EndpointPtInfo',
['pt_id',
'ptg_id',
'apg_id',
'inject_default_route',
'l3p_project_id',
'is_auto_ptg'])
class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
"""RPC mixin for AIM mapping.
@ -80,13 +97,28 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
return {'l3_policy_id': vrf}
def get_vrf_details(self, context, **kwargs):
    """Handle the get_vrf_details RPC.

    Dispatches either to the new handler in the apic_aim mechanism
    driver or to the legacy implementation in this mixin, depending
    on the enable_new_rpc configuration flag.
    """
    # REVISIT: Eliminate other RPC implementations and move this
    # handler directly to the mechanism driver.
    mech_driver = self.aim_mech_driver
    if not mech_driver.enable_new_rpc:
        return self._get_vrf_details(context, **kwargs)
    return mech_driver.get_vrf_details(context, **kwargs)
def request_vrf_details(self, context, **kwargs):
    """Handle the request_vrf_details RPC.

    Selects the new mechanism-driver handler when enable_new_rpc is
    set, and the legacy in-mixin implementation otherwise.
    """
    # REVISIT: Eliminate other RPC implementations and move this
    # handler directly to the mechanism driver.
    if self.aim_mech_driver.enable_new_rpc:
        handler = self.aim_mech_driver.request_vrf_details
    else:
        handler = self._get_vrf_details
    return handler(context, **kwargs)
def get_gbp_details(self, context, **kwargs):
LOG.debug("APIC AIM handling get_gbp_details for: %s", kwargs)
if self.aim_mech_driver.enable_new_rpc:
# REVISIT: Eliminate other RPC implementations and
# move this handler directly to the mechanism driver.
return self.aim_mech_driver.get_gbp_details(
context, **kwargs)
try:
return self._get_gbp_details(context, kwargs, kwargs.get('host'))
except Exception as e:
@ -98,6 +130,11 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
def request_endpoint_details(self, context, **kwargs):
LOG.debug("APIC AIM handling get_endpoint_details for: %s", kwargs)
if self.aim_mech_driver.enable_new_rpc:
# REVISIT: Eliminate other RPC implementations and
# move this handler directly to the mechanism driver.
return self.aim_mech_driver.request_endpoint_details(
context, **kwargs)
request = kwargs.get('request')
try:
return self._request_endpoint_details(context, **kwargs)
@ -857,3 +894,118 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
# What is an "End of the Chain" port for Neutron?
pass
# The query_endpoint_rpc_info and update_endpoint_rpc_details
# methods below are called by the apic_aim mechanism driver while
# handling the request_endpoint_details (aka get_gbp_details) RPC
# from the agent.
def query_endpoint_rpc_info(self, session, info):
    """Collect GBP state needed for the endpoint RPC response.

    Called within a transaction from the apic_aim MD's
    request_endpoint_details RPC handler, after info has already
    been populated with the data available within Neutron itself.
    Adds 'gbp_pt_info' and 'gbp_segmentation_labels' to info when
    the port is owned by a policy target.
    """
    # Query for all needed scalar (non-list) state for the policies
    # associated with the port.
    rows = self._query_pt_info(
        session, info['port_info'].port_id)
    if not rows:
        # Port is not owned by a policy target, so there is no GBP
        # state to gather.
        return

    # A list was returned by the PT info query, like all the other
    # endpoint RPC queries, here and in the mechanism
    # driver. Currently, there will be at most a single item in
    # this list, but a join may later be added to this query in
    # order to eliminate another query's round-trip to the DB
    # server, resulting in multiple rows being returned. For now,
    # we just need that single row.
    pt_row = rows[0]
    info['gbp_pt_info'] = pt_row

    # Query for the policy target's segmentation labels.
    info['gbp_segmentation_labels'] = self._query_segmentation_labels(
        session, pt_row.pt_id)
def _query_pt_info(self, session, port_id):
    # Baked query returning one row of scalar PT/PTG/L2P/L3P state
    # for the PT owning the given port. The selected columns are
    # positional and must stay in sync with the EndpointPtInfo
    # namedtuple's fields.
    query = BAKERY(lambda s: s.query(
        gpmdb.PolicyTargetMapping.id,
        gpmdb.PolicyTargetMapping.policy_target_group_id,
        gpmdb.PolicyTargetGroupMapping.application_policy_group_id,
        gpmdb.L2PolicyMapping.inject_default_route,
        gpmdb.L3PolicyMapping.project_id,
        auto_ptg_db.ApicAutoPtgDB.is_auto_ptg,
    ))
    # Walk up the policy hierarchy: PT -> PTG -> L2P -> L3P.
    query += lambda q: q.join(
        gpmdb.PolicyTargetGroupMapping,
        gpmdb.PolicyTargetGroupMapping.id ==
        gpmdb.PolicyTargetMapping.policy_target_group_id)
    query += lambda q: q.join(
        gpmdb.L2PolicyMapping,
        gpmdb.L2PolicyMapping.id ==
        gpmdb.PolicyTargetGroupMapping.l2_policy_id)
    query += lambda q: q.join(
        gpmdb.L3PolicyMapping,
        gpmdb.L3PolicyMapping.id ==
        gpmdb.L2PolicyMapping.l3_policy_id)
    # Outer join, since the auto-PTG extension row may not exist
    # for this PTG; is_auto_ptg is then NULL/None.
    query += lambda q: q.outerjoin(
        auto_ptg_db.ApicAutoPtgDB,
        auto_ptg_db.ApicAutoPtgDB.policy_target_group_id ==
        gpmdb.PolicyTargetMapping.policy_target_group_id)
    query += lambda q: q.filter(
        gpmdb.PolicyTargetMapping.port_id == sa.bindparam('port_id'))
    # Empty list if the port is not owned by any policy target.
    return [EndpointPtInfo._make(row) for row in
            query(session).params(
                port_id=port_id)]
def _query_segmentation_labels(self, session, pt_id):
    # Baked query selecting just the segmentation_label column for
    # all labels associated with the given policy target.
    query = BAKERY(lambda s: s.query(
        seg_label_db.ApicSegmentationLabelDB.segmentation_label))
    query += lambda q: q.filter(
        seg_label_db.ApicSegmentationLabelDB.policy_target_id ==
        sa.bindparam('pt_id'))
    # Each result row is a 1-tuple; unpack into a flat list of
    # label values.
    return [x for x, in query(session).params(
        pt_id=pt_id)]
def update_endpoint_rpc_details(self, info, details):
    """Add or update GBP details within the endpoint RPC response.

    Called outside a transaction from the apic_aim MD's
    request_endpoint_details RPC handler, using the data stored in
    info by query_endpoint_rpc_info.
    """
    # Nothing to do unless the port is owned by a PolicyTarget.
    pt = info.get('gbp_pt_info')
    if not pt:
        return
    gbp = details['gbp_details']

    # Replace the EPG identity unless this is an auto PTG.
    if not pt.is_auto_ptg:
        if pt.apg_id:
            gbp['app_profile_name'] = (
                self.name_mapper.application_policy_group(
                    None, pt.apg_id))
        else:
            gbp['app_profile_name'] = self.aim_mech_driver.ap_name
        gbp['endpoint_group_name'] = pt.ptg_id
        gbp['ptg_tenant'] = self.name_mapper.project(
            None, pt.l3p_project_id)

    # Strip the subnet gateway plus any default and metadata routes
    # when the L2 policy does not inject a default route.
    if not pt.inject_default_route:
        stripped = (md_const.IPV4_ANY_CIDR, md_const.IPV4_METADATA_CIDR)
        for subnet in gbp['subnets']:
            del subnet['gateway_ip']
            subnet['host_routes'] = [
                route for route in subnet['host_routes']
                if route['destination'] not in stripped]

    # Add segmentation labels.
    gbp['segmentation_labels'] = (
        info.get('gbp_segmentation_labels'))

    # REVISIT: If/when support for the proxy_group extension is
    # added to the aim_mapping PD, update promiscuous_mode to True
    # if this PT has a cluster_id that identifies a different PT
    # whose group_default_gateway set.

View File

@ -0,0 +1,59 @@
# Copyright (c) 2019 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

# Configuration options for the aim_mapping policy driver, factored
# out of the driver module so they can be shared.
#
# NOTE(review): the help strings below are wrapped in _() for
# translation, but this module does not import _ -- presumably it is
# expected from the project's i18n module; verify before release.
#
# REVISIT: Auto-PTG is currently config driven to align with the
# config driven behavior of the older driver but is slated for
# removal.
opts = [
    cfg.BoolOpt('create_auto_ptg',
                default=True,
                help=_("Automatically create a PTG when a L2 Policy "
                       "gets created. This is currently an aim_mapping "
                       "policy driver specific feature.")),
    cfg.BoolOpt('create_per_l3p_implicit_contracts',
                default=True,
                help=_("This configuration is set to True to migrate a "
                       "deployment that has l3_policies without implicit "
                       "AIM contracts (these are deployments which have "
                       "AIM implicit contracts per tenant). A Neutron server "
                       "restart is required for this configuration to take "
                       "effect. The creation of the implicit contracts "
                       "happens at the time of the AIM policy driver "
                       "initialization. The configuration can be set to "
                       "False to avoid recreating the implicit contracts "
                       "on subsequent Neutron server restarts. This "
                       "option will be removed in the O release")),
    cfg.BoolOpt('advertise_mtu',  # REVISIT: Move to apic_aim MD.
                default=True,
                help=_('If True, advertise network MTU values if core plugin '
                       'calculates them. MTU is advertised to running '
                       'instances via DHCP and RA MTU options.')),
    cfg.IntOpt('nested_host_vlan',  # REVISIT: Move to apic_aim MD.
               default=4094,
               help=_("This is a locally significant VLAN used to provide "
                      "connectivity to the OpenStack VM when configured "
                      "to host the nested domain (Kubernetes/OpenShift). "
                      "Any traffic originating from the VM and intended "
                      "to go on the Neutron network, is tagged with this "
                      "VLAN. The VLAN is stripped by the Opflex installed "
                      "flows on the integration bridge and the traffic is "
                      "forwarded on the Neutron network.")),
]
cfg.CONF.register_opts(opts, "aim_mapping")

View File

@ -38,6 +38,7 @@ from neutron.db import api as db_api
from neutron.db import provisioning_blocks
from neutron.db import segments_db
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import models as ml2_models
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_address_scope
@ -45,6 +46,7 @@ from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.extensions import test_securitygroup
from neutron.tests.unit.plugins.ml2 import test_tracked_resources as tr_res
from neutron.tests.unit import testlib_api
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import registry
from neutron_lib import constants as n_constants
from neutron_lib import context as n_context
@ -8280,3 +8282,285 @@ class TestPortOnPhysicalNodeSingleDriver(TestPortOnPhysicalNode):
mechanism_drivers=['logger', 'apic_aim'])
self.expected_binding_info = [('apic_aim', 'opflex'),
('apic_aim', 'vlan')]
class TestOpflexRpc(ApicAimTestCase):
def setUp(self, *args, **kwargs):
    # No RPC-specific fixture state is needed yet; this override
    # only delegates to the base class and exists as a hook for
    # future setup.
    super(TestOpflexRpc, self).setUp(*args, **kwargs)
def _check_response(self, request, response, port, net, subnets,
                    network_type='opflex', vm_name='someid'):
    """Verify a request_endpoint_details response.

    Checks the echoed request fields, the neutron_details section,
    and the gbp_details section against the given port, network,
    and subnets. Details covered by other test classes are noted
    inline and not re-verified here.
    """
    dns = net['apic:distinguished_names']
    epg = aim_resource.EndpointGroup.from_dn(dns['EndpointGroup'])
    vrf = aim_resource.VRF.from_dn(dns['VRF'])

    # The request's identifying fields must be echoed back.
    for key in ('device', 'request_id', 'timestamp'):
        self.assertEqual(request[key], response[key])

    neutron = response['neutron_details']
    self.assertEqual(port['admin_state_up'], neutron['admin_state_up'])
    self.assertEqual(port['device_owner'], neutron['device_owner'])
    by_ip = lambda x: x['ip_address']
    self.assertEqual(sorted(port['fixed_ips'], key=by_ip),
                     sorted(neutron['fixed_ips'], key=by_ip))
    self.assertEqual(net['id'], neutron['network_id'])
    self.assertEqual(network_type, neutron['network_type'])
    self.assertEqual('physnet1', neutron['physical_network'])
    self.assertEqual(port['id'], neutron['port_id'])

    gbp = response['gbp_details']
    self.assertEqual(epg.app_profile_name, gbp['app_profile_name'])
    self.assertEqual(request['device'], gbp['device'])
    lease_time = self.driver.apic_optimized_dhcp_lease_time
    if lease_time > 0:
        self.assertEqual(lease_time, gbp['dhcp_lease_time'])
    else:
        self.assertNotIn('dhcp_lease_time', gbp)
    self.assertEqual(net['dns_domain'], gbp['dns_domain'])
    self.assertEqual(self.driver.enable_dhcp_opt,
                     gbp['enable_dhcp_optimization'])
    self.assertEqual(self.driver.enable_metadata_opt,
                     gbp['enable_metadata_optimization'])
    self.assertEqual(epg.name, gbp['endpoint_group_name'])
    # floating_ip tested in TestGbpDetailsForML2
    self.assertEqual(port['binding:host_id'], gbp['host'])
    self.assertEqual(vrf.tenant_name + ' ' + vrf.name,
                     gbp['l3_policy_id'])
    # host_snat_ips tested in TestGbpDetailsForML2
    self.assertEqual(net['mtu'], gbp['interface_mtu'])
    # ip_mapping tested in TestGbpDetailsForML2
    self.assertEqual(port['mac_address'], gbp['mac_address'])
    # nested_domain_* and nested_host_vlan tested in TestNestedDomain
    self.assertEqual(port['id'], gbp['port_id'])
    self.assertEqual(not port['port_security_enabled'],
                     gbp['promiscuous_mode'])
    self.assertEqual(epg.tenant_name, gbp['ptg_tenant'])
    # security_group tested in TestGbpDetailsForML2
    # segmentation_labels tested in TestPolicyTarget
    self._check_response_subnets(gbp['subnets'], subnets)
    self.assertEqual(vm_name, gbp['vm-name'])
    self.assertEqual(vrf.name, gbp['vrf_name'])
    self.assertEqual(sorted(sn['cidr'] for sn in subnets),
                     sorted(gbp['vrf_subnets']))
    self.assertEqual(vrf.tenant_name, gbp['vrf_tenant'])
    # trunk_details tested in TestVlanAwareVM
def _check_response_subnets(self, subnet_details, subnets):
    """Verify the subnets list within gbp_details.

    For each expected subnet, computes the DHCP server IPs/ports
    and the effective DNS nameservers and host routes (including
    the default and metadata routes the RPC handler synthesizes),
    and compares them to the corresponding details entry. Expected
    subnets may carry a '_dhcp_ports' key listing their DHCP ports.
    """
    self.assertEqual(len(subnets), len(subnet_details))
    for subnet, details in zip(
            sorted(subnets, key=lambda x: x['cidr']),
            sorted(subnet_details, key=lambda x: x['cidr'])):
        dhcp_ports = subnet.get('_dhcp_ports', [])
        dhcp_server_ips = [ip['ip_address'] for port in dhcp_ports
                           for ip in port['fixed_ips']
                           if ip['subnet_id'] == subnet['id']]
        dhcp_server_ports = {port['mac_address']:
                             [ip['ip_address']
                              for ip in port['fixed_ips']
                              if ip['subnet_id'] == subnet['id']]
                             for port in dhcp_ports}
        # Each DHCP port must have a distinct MAC, or the dict
        # comprehension above silently collapsed entries.
        self.assertEqual(len(dhcp_ports), len(dhcp_server_ports))
        dns_nameservers = subnet['dns_nameservers'] or dhcp_server_ips
        host_routes = subnet['host_routes']
        gateway_ip = subnet['gateway_ip']

        default_routes = []
        metadata_routes = []
        for route in host_routes:
            if route['destination'] == '0.0.0.0/0':
                default_routes.append(route)
            elif route['destination'] == '169.254.169.254/16':
                metadata_routes.append(route)

        # The RPC handler adds a default route via the gateway when
        # none was configured.
        if not default_routes and gateway_ip:
            host_routes.append(
                {'destination': '0.0.0.0/0', 'nexthop': gateway_ip})
        if (not metadata_routes
            and dhcp_server_ports and not default_routes):
            # This test may not work if there are multiple DHCP
            # ports for the subnet, since which DHCP port's IPs
            # will be used for the metadata routes is not
            # deterministic. Therefore, be sure to specify
            # metadata routes or default routes for subnets with
            # multiple DHCP ports.
            #
            # list() is needed because dict views are not
            # subscriptable under Python 3.
            for ip in list(dhcp_server_ports.values())[0]:
                host_routes.append(
                    {'destination': '169.254.169.254/16',
                     'nexthop': ip})

        self.assertEqual(subnet['cidr'], details['cidr'])
        self.assertEqual(sorted(dhcp_server_ips),
                         sorted(details['dhcp_server_ips']))
        self.assertEqual(dhcp_server_ports, details['dhcp_server_ports'])
        self.assertEqual(sorted(dns_nameservers),
                         sorted(details['dns_nameservers']))
        self.assertEqual(gateway_ip, details['gateway_ip'])
        self.assertEqual(subnet['enable_dhcp'], details['enable_dhcp'])
        # Sort routes by an explicit key; dicts themselves are not
        # orderable under Python 3. Any consistent key preserves the
        # multiset-equality semantics of the original comparison.
        route_key = lambda r: (r['destination'], r['nexthop'])
        self.assertEqual(sorted(host_routes, key=route_key),
                         sorted(details['host_routes'], key=route_key))
        self.assertEqual(subnet['id'], details['id'])
        self.assertEqual(subnet['ip_version'], details['ip_version'])
def _check_fail_response(self, request, response):
    """Verify the failure form of an endpoint-details response.

    A failed request must echo the identifying request fields and
    omit all of the detail sections.
    """
    for key in ('device', 'request_id', 'timestamp'):
        self.assertEqual(request[key], response[key])
    for section in ('neutron_details', 'gbp_details', 'trunk_details'):
        self.assertNotIn(section, response)
def test_endpoint_details_bound(self):
    """Exercise request_endpoint_details for a bound opflex port.

    Builds a network with three subnets and multiple DHCP ports,
    binds a port on an opflex host, and verifies both the
    request_endpoint_details and get_vrf_details RPC responses.
    """
    # Non-zero lease time so _check_response takes the
    # dhcp_lease_time branch.
    self.driver.apic_optimized_dhcp_lease_time = 100
    host = 'host1'
    self._register_agent('host1', AGENT_CONF_OPFLEX)
    net = self._make_network(
        self.fmt, 'net1', True,
        arg_list=('dns_domain',), dns_domain='example.com.')
    net_id = net['network']['id']

    # Subnet 1: explicit DNS servers and host routes.
    dns_nameservers1 = ['192.168.1.201', '172.16.1.200']
    host_routes1 = [
        {'destination': '172.16.0.0/24', 'nexthop': '10.0.1.2'},
        {'destination': '192.168.0.0/24', 'nexthop': '10.0.1.3'},
    ]
    subnet1 = self._make_subnet(
        self.fmt, net, '10.0.1.1', '10.0.1.0/24',
        dns_nameservers=dns_nameservers1,
        host_routes=host_routes1)['subnet']
    subnet1_id = subnet1['id']

    # Subnet 2: an explicit metadata route only.
    host_routes2 = [
        {'destination': '169.254.169.254/16', 'nexthop': '10.0.1.2'},
    ]
    subnet2 = self._make_subnet(
        self.fmt, net, '10.0.2.1', '10.0.2.0/24',
        host_routes=host_routes2)['subnet']
    subnet2_id = subnet2['id']

    # Subnet 3: no DNS servers or host routes configured.
    subnet3 = self._make_subnet(
        self.fmt, net, '10.0.3.1', '10.0.3.0/24')['subnet']
    subnet3_id = subnet3['id']

    # Create multiple DHCP ports and multiple subnets to exercise
    # various combinations building dhcp_server_ids and
    # dhcp_server_ports in subnet details. One subnet has two DHCP
    # IPs on different DHCP ports. Another has two DHCP IPs on the
    # same DHCP port, which does not seem very useful, but is
    # allowed by Neutron.
    dhcp1 = self._make_port(
        self.fmt, net_id, fixed_ips=[
            {'subnet_id': subnet1_id},
            {'subnet_id': subnet2_id}],
        device_owner='network:dhcp')['port']
    dhcp2 = self._make_port(
        self.fmt, net_id, fixed_ips=[
            {'subnet_id': subnet2_id}],
        device_owner='network:dhcp')['port']
    dhcp3 = self._make_port(
        self.fmt, net_id, fixed_ips=[
            {'subnet_id': subnet3_id},
            {'subnet_id': subnet3_id}],
        device_owner='network:dhcp')['port']
    # Annotate the expected subnets with their DHCP ports for
    # _check_response_subnets.
    subnet1['_dhcp_ports'] = [dhcp1]
    subnet2['_dhcp_ports'] = [dhcp1, dhcp2]
    subnet3['_dhcp_ports'] = [dhcp3]
    subnets = [subnet1, subnet2, subnet3]

    # The endpoint port itself, with one fixed IP on each subnet.
    fixed_ips = [{'subnet_id': subnet1_id, 'ip_address': '10.0.1.10'},
                 {'subnet_id': subnet2_id, 'ip_address': '10.0.2.20'},
                 {'subnet_id': subnet3_id, 'ip_address': '10.0.3.30'}]
    port = self._make_port(self.fmt, net_id, fixed_ips=fixed_ips)['port']
    port_id = port['id']
    # Prime the VM-name cache so the response's vm-name comes from
    # the cache rather than the device_id.
    self.driver._set_vm_name(self.db_session, 'someid', 'a name')
    port = self._bind_port_to_host(port_id, host)['port']
    self.assertEqual('ovs', port['binding:vif_type'])

    # Call the request_endpoint_details RPC handler.
    request = {
        'device': 'tap' + port_id,
        'timestamp': 12345,
        'request_id': 'a_request'
    }
    response = self.driver.request_endpoint_details(
        n_context.get_admin_context(), request=request, host=host)

    self._check_response(
        request, response, port, net['network'], subnets, vm_name='a name')

    # Call the get_vrf_details RPC handler and check its response.
    vrf = aim_resource.VRF.from_dn(
        net['network']['apic:distinguished_names']['VRF'])
    vrf_id = vrf.tenant_name + ' ' + vrf.name
    response = self.driver.get_vrf_details(
        n_context.get_admin_context(), vrf_id=vrf_id)
    self.assertEqual(vrf_id, response['l3_policy_id'])
    self.assertEqual(vrf.tenant_name, response['vrf_tenant'])
    self.assertEqual(vrf.name, response['vrf_name'])
    self.assertEqual(sorted([sn['cidr'] for sn in subnets]),
                     sorted(response['vrf_subnets']))
def test_endpoint_details_unbound(self):
    """Exercise request_endpoint_details for a bindable unbound port.

    The port is bound hierarchically (no opflex agent registered),
    then its binding is marked failed, so the RPC must re-bind and
    return the bottom-level (vlan) segment details.
    """
    host = 'host1'
    net = self._make_network(self.fmt, 'net1', True)
    net_id = net['network']['id']

    subnet = self._make_subnet(
        self.fmt, net, '10.0.1.1', '10.0.1.0/24')['subnet']
    subnets = [subnet]

    port = self._make_port(self.fmt, net_id)['port']
    port_id = port['id']

    # Not calling self._register_agent('host1', AGENT_CONF_OPFLEX)
    # in order to force a hierarchical binding to ensure the
    # bottom level segment info is returned from the RPC. Also,
    # not calling self.driver._set_vm_name() to test use of
    # device_id when name not in cache.
    port = self._bind_port_to_host(port_id, host)['port']
    self.assertEqual('ovs', port['binding:vif_type'])

    # Unbind the port, as if binding failed, leaving it bindable.
    self.db_session.query(ml2_models.PortBinding).filter_by(
        port_id=port['id']).update(
            {'vif_type': portbindings.VIF_TYPE_BINDING_FAILED})

    # Call the RPC handler.
    request = {
        'device': 'tap' + port_id,
        'timestamp': 12345,
        'request_id': 'a_request'
    }
    response = self.driver.request_endpoint_details(
        n_context.get_admin_context(), request=request, host=host)

    # vlan, not opflex, since the binding is hierarchical.
    self._check_response(
        request, response, port, net['network'], subnets,
        network_type='vlan')
def test_endpoint_details_nonexistent_port(self):
    """The RPC must return the failure form for an unknown device."""
    host = 'host1'

    # Device name whose UUID suffix matches no port in the DB.
    request = dict(
        device='tapa9d98938-7bbe-4eae-ba2e-375f9bc3ab45',
        timestamp=12345,
        request_id='a_request',
    )
    context = n_context.get_admin_context()
    response = self.driver.request_endpoint_details(
        context, request=request, host=host)

    self._check_fail_response(request, response)

    # REVISIT: Test with missing request, missing device, invalid
    # device prefix, unbindable port, port bound to wrong host.

View File

@ -2629,6 +2629,11 @@ class TestPolicyTargetGroupRollback(AIMBaseTestCase):
class TestGbpDetailsForML2(AIMBaseTestCase,
test_securitygroup.SecurityGroupsTestCase):
# REVISIT: Once the new RPC handler implementation in the apic_aim
# mechanism driver is complete and tested, move this unit test
# class to test_apic_aim (or a new module) and remove the
# enable_raw_sql and enable_new_rpc flags.
def setUp(self, *args, **kwargs):
super(TestGbpDetailsForML2, self).setUp(*args, **kwargs)
cfg.CONF.set_override('path_mtu', 1000, group='ml2')
@ -2642,6 +2647,8 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
self.assertEqual(mapping, req_mapping['gbp_details'])
self.assertEqual(port_id, mapping['port_id'])
self.assertEqual(expected_epg_name, mapping['endpoint_group_name'])
expected_epg_ap_name = self.driver.aim_mech_driver.ap_name
self.assertEqual(expected_epg_ap_name, mapping['app_profile_name'])
exp_tenant = (self.name_mapper.project(None, expected_epg_tenant)
if map_tenant_name else expected_epg_tenant)
self.assertEqual(exp_tenant, mapping['ptg_tenant'])
@ -2658,6 +2665,8 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
dhcp_server_port = dhcp_server_ports[dhcp_port['mac_address']]
self.assertEqual(dhcp_server_port[0],
dhcp_port['fixed_ips'][0]['ip_address'])
self.assertEqual([dhcp_port['fixed_ips'][0]['ip_address']],
mapping['subnets'][0]['dhcp_server_ips'])
if default_route:
self.assertTrue(
{'destination': '0.0.0.0/0', 'nexthop': default_route} in
@ -2679,21 +2688,24 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
def _verify_fip_details(self, mapping, fip, ext_epg_tenant,
ext_epg_name, ext_epg_app_profile='OpenStack'):
self.assertEqual(1, len(mapping['floating_ip']))
fip = copy.deepcopy(fip)
fip['nat_epg_name'] = ext_epg_name
fip['nat_epg_tenant'] = ext_epg_tenant
fip['nat_epg_app_profile'] = ext_epg_app_profile
fip_mapping = mapping['floating_ip'][0]
# REVISIT: The port_id, project_id, and floating_network_id
# are not used by the agent, and the new RPC implementation
# doesn't provide them, so these assertions are commented out
# until the RPC implementations are cleaned up.
self.assertEqual(fip['id'], fip_mapping['id'])
self.assertEqual(fip['port_id'], fip_mapping['port_id'])
self.assertEqual(fip['project_id'], fip_mapping['project_id'])
# self.assertEqual(fip['port_id'], fip_mapping['port_id'])
# self.assertEqual(fip['project_id'], fip_mapping['project_id'])
self.assertEqual(fip['fixed_ip_address'],
fip_mapping['fixed_ip_address'])
self.assertEqual(fip['floating_ip_address'],
fip_mapping['floating_ip_address'])
self.assertEqual(fip['floating_network_id'],
fip_mapping['floating_network_id'])
# self.assertEqual(fip['floating_network_id'],
# fip_mapping['floating_network_id'])
self.assertEqual(ext_epg_name, fip_mapping['nat_epg_name'])
self.assertEqual(ext_epg_tenant, fip_mapping['nat_epg_tenant'])
self.assertEqual(ext_epg_app_profile,
fip_mapping['nat_epg_app_profile'])
def _verify_ip_mapping_details(self, mapping, ext_segment_name,
ext_epg_tenant, ext_epg_name,
@ -2715,9 +2727,11 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
mapping['host_snat_ips'][0])
def _do_test_get_gbp_details(self, pre_vrf=None,
enable_raw_sql=False):
enable_raw_sql=False,
enable_new_rpc=False):
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = (
enable_raw_sql)
self.driver.aim_mech_driver.enable_new_rpc = enable_new_rpc
self.driver.aim_mech_driver.apic_optimized_dhcp_lease_time = 100
ext_net1, rtr1, ext_net1_sub = self._setup_external_network(
'es1', dn='uni/tn-t1/out-l1/instP-n1')
@ -2858,6 +2872,9 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
'uni:tn-t1:out-l2:instP-n2', 't1', 'EXT-l2')
self._verify_host_snat_ip_details(mapping,
'uni:tn-t1:out-l2:instP-n2', '200.200.0.3', '200.200.0.1/16')
# Make sure 2nd RPC returned SNAT IP allocated in 1st RPC.
self._verify_host_snat_ip_details(req_mapping['gbp_details'],
'uni:tn-t1:out-l2:instP-n2', '200.200.0.3', '200.200.0.1/16')
self.assertEqual(1000, mapping['interface_mtu'])
self.assertEqual(100, mapping['dhcp_lease_time'])
@ -2892,6 +2909,9 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
    def test_get_gbp_details_with_raw_sql(self):
        """Run the GBP details scenario with the raw-SQL RPC variant."""
        self._do_test_get_gbp_details(enable_raw_sql=True)
    def test_get_gbp_details_with_new_rpc(self):
        """Run the GBP details scenario with the new RPC handler."""
        self._do_test_get_gbp_details(enable_new_rpc=True)
def test_get_gbp_details_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
@ -2907,6 +2927,14 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
self._do_test_get_gbp_details(pre_vrf=vrf,
enable_raw_sql=True)
def test_get_gbp_details_pre_existing_vrf_with_new_rpc(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf,
enable_new_rpc=True)
class TestPolicyTarget(AIMBaseTestCase,
test_securitygroup.SecurityGroupsTestCase):
@ -3248,10 +3276,18 @@ class TestPolicyTarget(AIMBaseTestCase,
def _verify_gbp_details_assertions(self, mapping, req_mapping, port_id,
expected_epg_name, expected_epg_tenant,
subnet, default_route=None,
map_tenant_name=True):
map_tenant_name=True,
prefix_ap_name=False):
self.assertEqual(mapping, req_mapping['gbp_details'])
self.assertEqual(port_id, mapping['port_id'])
self.assertEqual(expected_epg_name, mapping['endpoint_group_name'])
# This method is not used with APGs, but it is used with
# external network in common tenant.
expected_epg_ap_name = (
self.driver.aim_mech_driver.ap_name if not prefix_ap_name else
self.driver.aim_mech_driver.apic_system_id + '_' +
self.driver.aim_mech_driver.ap_name)
self.assertEqual(expected_epg_ap_name, mapping['app_profile_name'])
exp_tenant = (self.name_mapper.project(None, expected_epg_tenant)
if map_tenant_name else expected_epg_tenant)
self.assertEqual(exp_tenant, mapping['ptg_tenant'])
@ -3298,21 +3334,24 @@ class TestPolicyTarget(AIMBaseTestCase,
def _verify_fip_details(self, mapping, fip, ext_epg_tenant,
ext_epg_name, ext_epg_app_profile='OpenStack'):
self.assertEqual(1, len(mapping['floating_ip']))
fip = copy.deepcopy(fip)
fip['nat_epg_name'] = ext_epg_name
fip['nat_epg_tenant'] = ext_epg_tenant
fip['nat_epg_app_profile'] = ext_epg_app_profile
fip_mapping = mapping['floating_ip'][0]
# REVISIT: The port_id, project_id, and floating_network_id
# are not used by the agent, and the new RPC implementation
# doesn't provide them, so these assertions are commented out
# until the RPC implementations are cleaned up.
self.assertEqual(fip['id'], fip_mapping['id'])
self.assertEqual(fip['port_id'], fip_mapping['port_id'])
self.assertEqual(fip['project_id'], fip_mapping['project_id'])
# self.assertEqual(fip['port_id'], fip_mapping['port_id'])
# self.assertEqual(fip['project_id'], fip_mapping['project_id'])
self.assertEqual(fip['fixed_ip_address'],
fip_mapping['fixed_ip_address'])
self.assertEqual(fip['floating_ip_address'],
fip_mapping['floating_ip_address'])
self.assertEqual(fip['floating_network_id'],
fip_mapping['floating_network_id'])
# self.assertEqual(fip['floating_network_id'],
# fip_mapping['floating_network_id'])
self.assertEqual(ext_epg_name, fip_mapping['nat_epg_name'])
self.assertEqual(ext_epg_tenant, fip_mapping['nat_epg_tenant'])
self.assertEqual(ext_epg_app_profile,
fip_mapping['nat_epg_app_profile'])
def _verify_ip_mapping_details(self, mapping, ext_segment_name,
ext_epg_tenant, ext_epg_name,
@ -3333,9 +3372,11 @@ class TestPolicyTarget(AIMBaseTestCase,
'prefixlen': int(prefix)},
mapping['host_snat_ips'][0])
def _do_test_get_gbp_details(self, pre_vrf=None, enable_raw_sql=False):
def _do_test_get_gbp_details(self, pre_vrf=None, enable_raw_sql=False,
enable_new_rpc=False):
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = (
enable_raw_sql)
self.driver.aim_mech_driver.enable_new_rpc = enable_new_rpc
self.driver.aim_mech_driver.apic_optimized_dhcp_lease_time = 100
es1, es1_sub = self._setup_external_segment(
'es1', dn='uni/tn-t1/out-l1/instP-n1')
@ -3428,7 +3469,7 @@ class TestPolicyTarget(AIMBaseTestCase,
port = self._update('ports', port['id'], data)['port']
mapping = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % pt2['port_id'],
host='h2')
host='h1')
self.assertEqual(pt2['port_id'], mapping['port_id'])
self._verify_ip_mapping_details(mapping,
'uni:tn-t1:out-l1:instP-n1', 't1', 'EXT-l1')
@ -3460,13 +3501,15 @@ class TestPolicyTarget(AIMBaseTestCase,
port = self._update('ports', port['id'], data)['port']
mapping = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % pt2['port_id'],
host='h2')
host='h1')
self.assertEqual(2000, mapping['interface_mtu'])
def _do_test_gbp_details_no_pt(self, use_as=True, routed=True,
pre_vrf=None, enable_raw_sql=False):
pre_vrf=None, enable_raw_sql=False,
enable_new_rpc=False):
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = (
enable_raw_sql)
self.driver.aim_mech_driver.enable_new_rpc = enable_new_rpc
# Create port and bind it
address_scope = self._make_address_scope_for_vrf(
pre_vrf.dn if pre_vrf else None,
@ -3587,6 +3630,9 @@ class TestPolicyTarget(AIMBaseTestCase,
    def test_get_gbp_details_with_raw_sql(self):
        """Run the GBP details scenario with the raw-SQL RPC variant."""
        self._do_test_get_gbp_details(enable_raw_sql=True)
    def test_get_gbp_details_with_new_rpc(self):
        """Run the GBP details scenario with the new RPC handler."""
        self._do_test_get_gbp_details(enable_new_rpc=True)
def test_get_gbp_details_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
@ -3601,6 +3647,13 @@ class TestPolicyTarget(AIMBaseTestCase,
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf, enable_raw_sql=True)
def test_get_gbp_details_pre_existing_vrf_with_new_rpc(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf, enable_new_rpc=True)
def test_get_gbp_details_no_pt(self):
# Test that traditional Neutron ports behave correctly from the
# RPC perspective
@ -3611,6 +3664,11 @@ class TestPolicyTarget(AIMBaseTestCase,
# RPC perspective
self._do_test_gbp_details_no_pt(enable_raw_sql=True)
def test_get_gbp_details_no_pt_with_new_rpc(self):
# Test that traditional Neutron ports behave correctly from the
# RPC perspective
self._do_test_gbp_details_no_pt(enable_new_rpc=True)
def test_get_gbp_details_no_pt_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
@ -3625,12 +3683,22 @@ class TestPolicyTarget(AIMBaseTestCase,
monitored=True))
self._do_test_gbp_details_no_pt(pre_vrf=vrf, enable_raw_sql=True)
def test_get_gbp_details_no_pt_pre_existing_vrf_with_new_rpc(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_gbp_details_no_pt(pre_vrf=vrf, enable_new_rpc=True)
    def test_get_gbp_details_no_pt_no_as(self):
        """No-policy-target scenario without an address scope."""
        self._do_test_gbp_details_no_pt(use_as=False)
    def test_get_gbp_details_no_pt_no_as_with_raw_sql(self):
        """No-PT, no-address-scope scenario via the raw-SQL RPC variant."""
        self._do_test_gbp_details_no_pt(use_as=False, enable_raw_sql=True)
    def test_get_gbp_details_no_pt_no_as_with_new_rpc(self):
        """No-PT, no-address-scope scenario via the new RPC handler."""
        self._do_test_gbp_details_no_pt(use_as=False, enable_new_rpc=True)
def test_get_gbp_details_no_pt_no_as_unrouted(self):
self._do_test_gbp_details_no_pt(use_as=False, routed=False)
@ -3638,9 +3706,15 @@ class TestPolicyTarget(AIMBaseTestCase,
self._do_test_gbp_details_no_pt(use_as=False, routed=False,
enable_raw_sql=True)
def _test_gbp_details_ext_net_no_pt(self, enable_raw_sql=False):
    def test_get_gbp_details_no_pt_no_as_unrouted_with_new_rpc(self):
        """No-PT, no-address-scope, unrouted scenario via the new RPC."""
        self._do_test_gbp_details_no_pt(use_as=False, routed=False,
                                        enable_new_rpc=True)
def _test_gbp_details_ext_net_no_pt(self, enable_raw_sql=False,
enable_new_rpc=False):
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = (
enable_raw_sql)
self.driver.aim_mech_driver.enable_new_rpc = enable_new_rpc
# Test ports created on Neutron external networks
ext_net1, _, sn1 = self._setup_external_network(
'l1', dn='uni/tn-common/out-l1/instP-n1')
@ -3679,7 +3753,7 @@ class TestPolicyTarget(AIMBaseTestCase,
host='h1')
self._verify_gbp_details_assertions(
mapping, req_mapping, port_id, "EXT-l1", "common", sn1,
map_tenant_name=False)
map_tenant_name=False, prefix_ap_name=True)
vrf_id = '%s %s' % ("common", "openstack_EXT-l1")
vrf_mapping = self.driver.get_vrf_details(
@ -3725,6 +3799,9 @@ class TestPolicyTarget(AIMBaseTestCase,
    def test_gbp_details_ext_net_no_pt_with_raw_sql(self):
        """External-network, no-PT scenario via the raw-SQL RPC variant."""
        self._test_gbp_details_ext_net_no_pt(enable_raw_sql=True)
    def test_gbp_details_ext_net_no_pt_with_new_rpc(self):
        """External-network, no-PT scenario via the new RPC handler."""
        self._test_gbp_details_ext_net_no_pt(enable_new_rpc=True)
def test_ip_address_owner_update(self):
l3p = self.create_l3_policy(name='myl3')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
@ -5633,7 +5710,7 @@ class TestNestedDomain(AIMBaseTestCase):
p1 = self._bind_port_to_host(p1['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
host='host1')
self.assertEqual('myk8s', details['nested_domain_name'])
self.assertEqual('k8s', details['nested_domain_type'])
self.assertEqual(4093, details['nested_domain_infra_vlan'])
@ -5665,7 +5742,7 @@ class TestNestedDomain(AIMBaseTestCase):
p1 = self._bind_port_to_host(p1['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
host='host1')
self.assertEqual('', details['nested_domain_name'])
self.assertEqual('', details['nested_domain_type'])
self.assertIsNone(details['nested_domain_infra_vlan'])
@ -5682,6 +5759,13 @@ class TestNestedDomainWithRawSql(TestNestedDomain):
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = True
class TestNestedDomainWithNewRpc(TestNestedDomain):
    """Repeats the TestNestedDomain cases with the new RPC handler."""

    def setUp(self, **kwargs):
        super(TestNestedDomainWithNewRpc, self).setUp(**kwargs)
        # Route device RPCs through the new streamlined handler.
        self.driver.aim_mech_driver.enable_new_rpc = True
class TestNeutronPortOperation(AIMBaseTestCase):
def setUp(self, **kwargs):
@ -5705,7 +5789,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p1 = self._bind_port_to_host(p1['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
host='host1')
self.assertFalse(details['promiscuous_mode'])
p2 = self._make_port(self.fmt, net['network']['id'],
@ -5715,7 +5799,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p2 = self._bind_port_to_host(p2['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2['id'],
host='h1')
host='host1')
self.assertFalse(details['promiscuous_mode'])
p3 = self._make_port(self.fmt, net['network']['id'],
@ -5725,16 +5809,19 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p3 = self._bind_port_to_host(p3['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p3['id'],
host='h1')
host='host1')
self.assertTrue(details['promiscuous_mode'])
# REVISIT: Test port name ending with PROMISCUOUS_SUFFIX, or
# is that deprecated?
# test DHCP port
p1_dhcp = self._make_port(self.fmt, net['network']['id'],
device_owner=n_constants.DEVICE_OWNER_DHCP)['port']
p1_dhcp = self._bind_port_to_host(p1_dhcp['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1_dhcp['id'],
host='h1')
host='host1')
self.assertTrue(details['promiscuous_mode'])
p2_dhcp = self._make_port(self.fmt, net['network']['id'],
@ -5743,7 +5830,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p2_dhcp = self._bind_port_to_host(p2_dhcp['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2_dhcp['id'],
host='h1')
host='host1')
self.assertTrue(details['promiscuous_mode'])
p3_dhcp = self._make_port(self.fmt, net['network']['id'],
@ -5752,9 +5839,14 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p3_dhcp = self._bind_port_to_host(p3_dhcp['id'], 'host1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p3_dhcp['id'],
host='h1')
host='host1')
self.assertTrue(details['promiscuous_mode'])
# REVISIT: If we support proxy groups, we also need to test
# that promiscuous_mode is True when the port belongs to a PT
# that has a cluster_id, is not the master, and the master has
# a group_default_gateway.
def _aap_is_cidr(self, aap):
cidr = netaddr.IPNetwork(aap['ip_address'])
if cidr.prefixlen != 32:
@ -5791,11 +5883,13 @@ class TestNeutronPortOperation(AIMBaseTestCase):
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
self.assertEqual(allow_addr, details['allowed_address_pairs'])
self.assertEqual(sorted(allow_addr),
sorted(details['allowed_address_pairs']))
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2['id'],
host='h2')
self.assertEqual(allow_addr, details['allowed_address_pairs'])
self.assertEqual(sorted(allow_addr),
sorted(details['allowed_address_pairs']))
# Call agent => plugin RPC, requesting ownership of a /32 IP
ip_owner_info = {'port': p1['id'],
@ -5827,7 +5921,8 @@ class TestNeutronPortOperation(AIMBaseTestCase):
return expected_aaps
expected_aaps1 = _get_expected_aaps(allow_addr, owned_addr[0])
self.assertEqual(expected_aaps1, details['allowed_address_pairs'])
self.assertEqual(sorted(expected_aaps1),
sorted(details['allowed_address_pairs']))
# Call RPC sent by the agent, requesting ownership of a /32 IP
ip_owner_info = {'port': p2['id'],
@ -5840,7 +5935,8 @@ class TestNeutronPortOperation(AIMBaseTestCase):
host='h2')
expected_aaps2 = _get_expected_aaps(allow_addr, owned_addr[1])
self.assertEqual(expected_aaps2, details['allowed_address_pairs'])
self.assertEqual(sorted(expected_aaps2),
sorted(details['allowed_address_pairs']))
# set allowed-address as fixed-IP of ports p3 and p4, which also have
# floating-IPs. Verify that FIP is "stolen" by p1 and p2
@ -5914,7 +6010,8 @@ class TestNeutronPortOperation(AIMBaseTestCase):
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
expected_aaps3 = _get_expected_aaps(update_addr, update_owned_addr[0])
self.assertEqual(expected_aaps3, details['allowed_address_pairs'])
self.assertEqual(sorted(expected_aaps3),
sorted(details['allowed_address_pairs']))
p2 = self._update('ports', p2['id'],
{'port': {'allowed_address_pairs': update_addr}},
@ -5930,7 +6027,8 @@ class TestNeutronPortOperation(AIMBaseTestCase):
self._neutron_admin_context, device='tap%s' % p2['id'],
host='h2')
expected_aaps4 = _get_expected_aaps(update_addr, update_owned_addr[1])
self.assertEqual(expected_aaps4, details['allowed_address_pairs'])
self.assertEqual(sorted(expected_aaps4),
sorted(details['allowed_address_pairs']))
def test_gbp_details_for_allowed_address_pair(self):
# 'aap' is configured, 'owned' is IP requested from agent
@ -5959,6 +6057,15 @@ class TestNeutronPortOperation(AIMBaseTestCase):
owned_addr, update_addr, update_owned_addr)
def test_port_bound_other_agent(self):
# REVISIT: This test should call request_endpoint_details
# rather than get_gbp_details, since the Opflex agent no
# longer calls get_gbp_details. The new
# request_endpoint_details implementation returns a response
# without a gbp_details key to indicate that the port either
# isn't bound or is bound to a different host. For now, we
        # accept either RPC implementation's response from
# get_gbp_details.
self._register_agent('h1', test_aim_md.AGENT_CONF_OPFLEX)
self._register_agent('h2', test_aim_md.AGENT_CONF_OPFLEX)
net = self._make_network(self.fmt, 'net1', True)
@ -5970,24 +6077,24 @@ class TestNeutronPortOperation(AIMBaseTestCase):
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
self.assertEqual('', details['host'])
self.assertEqual('', details.get('host', ''))
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h2')
self.assertEqual('', details['host'])
self.assertEqual('', details.get('host', ''))
# Test port bound to h1, queries from h1 and h2
# Test port bound to h2, queries from h1 and h2
p1 = self._bind_port_to_host(p1['id'], 'h2')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
self.assertEqual('h2', details['host'])
self.assertEqual('h2', details.get('host', 'h2'))
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h2')
self.assertEqual('h2', details['host'])
# Test rebind of port to h2, queries from h1 and h2
# Test rebind of port to h1, queries from h1 and h2
p1 = self._bind_port_to_host(p1['id'], 'h1')['port']
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
@ -5996,7 +6103,32 @@ class TestNeutronPortOperation(AIMBaseTestCase):
details = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h2')
self.assertEqual('h1', details['host'])
self.assertEqual('h1', details.get('host', 'h1'))
# REVISIT: This test class is disabled because two of its tests fail
# with the following SQL error:
#
# OperationalError: (sqlite3.OperationalError) near "'1.2.3.250'":
# syntax error [SQL: u"SELECT DISTINCT id FROM ports JOIN
# ipallocations AS ipallocations_1 ON ipallocations_1.port_id =
# ports.id WHERE ports.network_id =
# 'e7b26ed0-9b92-47b5-a5ca-fd9b19dd4bc2' AND
# ipallocations_1.ip_address in (u'1.2.3.250')"] (Background on this
# error at: http://sqlalche.me/e/e3q8)
#
# class TestNeutronPortOperationWithRawSql(TestNeutronPortOperation):
#
# def setUp(self, **kwargs):
# super(TestNeutronPortOperationWithRawSql, self).setUp(**kwargs)
# self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = True
class TestNeutronPortOperationWithNewRpc(TestNeutronPortOperation):
    """Repeats the TestNeutronPortOperation cases with the new RPC handler."""

    def setUp(self, **kwargs):
        super(TestNeutronPortOperationWithNewRpc, self).setUp(**kwargs)
        # Route device RPCs through the new streamlined handler.
        self.driver.aim_mech_driver.enable_new_rpc = True
class TestPerL3PImplicitContractsConfig(TestL2PolicyWithAutoPTG):
@ -6073,3 +6205,88 @@ class TestVlanAwareVM(AIMBaseTestCase):
    def test_trunk_master_port(self):
        """Exercise the no-policy-target GBP details scenario.

        NOTE(review): the method name suggests this covers a trunk master
        port; the shared helper appears to set that up — confirm against
        the helper's definition.
        """
        self._do_test_gbp_details_no_pt()
class TestVlanAwareVMWithRawSql(TestVlanAwareVM):
    """Repeats the TestVlanAwareVM cases with the raw-SQL RPC variant."""

    def setUp(self, **kwargs):
        super(TestVlanAwareVMWithRawSql, self).setUp(**kwargs)
        # Route device RPCs through the raw-SQL implementation.
        self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = True
class TestVlanAwareVMWithNewRpc(TestVlanAwareVM):
    """Repeats the TestVlanAwareVM cases with the new RPC handler."""

    def setUp(self, **kwargs):
        super(TestVlanAwareVMWithNewRpc, self).setUp(**kwargs)
        # Route device RPCs through the new streamlined handler.
        self.driver.aim_mech_driver.enable_new_rpc = True
class TestL2PolicyRouteInjection(AIMBaseTestCase):
    """Tests host-route injection in the request_endpoint_details RPC."""

    def _verify_rpc_response(self, port_id, inject, metadata):
        """Invoke the RPC handler and verify injected subnet host routes.

        :param port_id: bound Neutron port to query.
        :param inject: whether a default route is expected.
        :param metadata: whether a metadata host route is expected (a
            DHCP port must exist on the subnet).
        """
        # Invoke request_endpoint_details RPC handler.
        request = {'device': 'tap%s' % port_id, 'timestamp': 0,
                   'request_id': 'a_request_id'}
        response = self.driver.request_endpoint_details(
            nctx.get_admin_context(), request=request, host='host1')

        # Check subnet details.
        subnet_details = response['gbp_details']['subnets'][0]
        expected_host_routes = []
        if inject:
            self.assertIn('gateway_ip', subnet_details)
            expected_host_routes.append(
                {'destination': '0.0.0.0/0',
                 'nexthop': subnet_details['gateway_ip']})
            if metadata:
                expected_host_routes.append(
                    {'destination': '169.254.169.254/16',
                     'nexthop': subnet_details['dns_nameservers'][0]})
        else:
            self.assertNotIn('gateway_ip', subnet_details)

        # Compare host routes order-independently. Sorting needs an
        # explicit key because dicts are unorderable under Python 3,
        # which would make bare sorted() raise TypeError whenever both
        # routes are present.
        def route_key(route):
            return (route['destination'], route['nexthop'])

        self.assertEqual(sorted(expected_host_routes, key=route_key),
                         sorted(subnet_details['host_routes'],
                                key=route_key))

    def _test_route_injection(self, inject):
        """Run the end-to-end scenario with route injection on or off."""
        # Create GBP resources and bind port.
        l2p = self.create_l2_policy(
            inject_default_route=inject)['l2_policy']
        ptg = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        pt = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        port_id = pt['port_id']
        self._bind_port_to_host(port_id, 'host1')

        # Test without metadata route.
        self._verify_rpc_response(port_id, inject, False)

        # Create a DHCP port on the PTG's subnet to enable metadata
        # route injection.
        fixed_ips = [{'subnet_id': ptg['subnets'][0]}]
        self._make_port(
            self.fmt, l2p['network_id'], fixed_ips=fixed_ips,
            device_owner='network:dhcp')

        # Test with metadata route.
        self._verify_rpc_response(port_id, inject, True)

    def test_route_injection_on(self):
        self._test_route_injection(True)

    def test_route_injection_off(self):
        self._test_route_injection(False)
class TestL2PolicyRouteInjectionWithRawSql(TestL2PolicyRouteInjection):
    """Repeats the route-injection cases with the raw-SQL RPC variant."""

    def setUp(self, **kwargs):
        super(TestL2PolicyRouteInjectionWithRawSql, self).setUp(**kwargs)
        # Route device RPCs through the raw-SQL implementation.
        self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = True
class TestL2PolicyRouteInjectionWithNewRpc(TestL2PolicyRouteInjection):
    """Repeats the route-injection cases with the new RPC handler."""

    def setUp(self, **kwargs):
        super(TestL2PolicyRouteInjectionWithNewRpc, self).setUp(**kwargs)
        # Route device RPCs through the new streamlined handler.
        self.driver.aim_mech_driver.enable_new_rpc = True