[AIM] Add support for physical-domain nodes

Openstack hosts (compute node/network node) that do
not have the OpFlex agent running are considered
physical-domain nodes in the ACI fabric. Such hosts
can now be supported assuming the node is capable of
handling VLAN-encapsulated traffic (for instance by
running an agent like the openvswitch agent). The two
scenarios supported are:
* Tenant network is of type VLAN
* Tenant network is of type OpFlex with some ports
  on physical nodes. Hierarchical binding is used
  here with a dynamic VLAN segment.

Change-Id: I25fdd31b8ca98119e7f94d40808c001b112140e5
Signed-off-by: Amit Bose <amitbose@gmail.com>
This commit is contained in:
Amit Bose
2016-12-07 15:02:46 -08:00
parent 21b97bd737
commit bdf51685d7
2 changed files with 468 additions and 17 deletions

View File

@@ -17,6 +17,7 @@ import sqlalchemy as sa
from aim.aim_lib import nat_strategy from aim.aim_lib import nat_strategy
from aim import aim_manager from aim import aim_manager
from aim.api import infra as aim_infra
from aim.api import resource as aim_resource from aim.api import resource as aim_resource
from aim.common import utils from aim.common import utils
from aim import config as aim_cfg from aim import config as aim_cfg
@@ -36,6 +37,7 @@ from neutron.extensions import portbindings
from neutron import manager from neutron import manager
from neutron.plugins.common import constants as pconst from neutron.plugins.common import constants as pconst
from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import models
from opflexagent import constants as ofcst from opflexagent import constants as ofcst
from opflexagent import rpc as ofrpc from opflexagent import rpc as ofrpc
from oslo_log import log from oslo_log import log
@@ -887,9 +889,6 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
{'port': current['id'], {'port': current['id'],
'net': context.network.current['id']}) 'net': context.network.current['id']})
# TODO(rkukura): Add support for baremetal hosts, SR-IOV and
# other situations requiring dynamic segments.
# Check the VNIC type. # Check the VNIC type.
vnic_type = current.get(portbindings.VNIC_TYPE, vnic_type = current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL) portbindings.VNIC_NORMAL)
@@ -905,8 +904,36 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
return return
# Try to bind OpFlex agent. # Try to bind OpFlex agent.
self._agent_bind_port(context, ofcst.AGENT_TYPE_OPFLEX_OVS, if self._agent_bind_port(context, ofcst.AGENT_TYPE_OPFLEX_OVS,
self._opflex_bind_port) self._opflex_bind_port):
return
# If we reached here, it means that either there is no active opflex
# agent running on the host, or the agent on the host is not
# configured for this physical network. Treat the host as a physical
# node (i.e. has no OpFlex agent running) and try binding
# hierarchically if the network-type is OpFlex.
self._bind_physical_node(context)
def update_port_precommit(self, context):
    """Maintain ACI static-path state when a bound port is updated.

    If the port was bound via a static path (supported non-OpFlex
    segment) and is moving to a different host, first remove the static
    path programmed for the old host and release the dynamic segment
    allocated there (if unused).  Then, if the port is bound on the new
    host through a static-path segment, (re-)program its static path.
    """
    port = context.current
    if (self._use_static_path(context.original_bottom_bound_segment) and
            context.original_host != context.host):
        # remove static binding for old host
        self._update_static_path(context, host=context.original_host,
            segment=context.original_bottom_bound_segment, remove=True)
        # Free the old host's dynamic VLAN segment if no other port
        # on that host still needs it.
        self._release_dynamic_segment(context, use_original=True)
    if (self._is_port_bound(port) and
            self._use_static_path(context.bottom_bound_segment)):
        # Program the static path for the current (new) binding.
        self._update_static_path(context)
def delete_port_precommit(self, context):
    """Tear down ACI static-path state when a bound port is deleted.

    Removes the port's static path from the EPG (unless other ports on
    the same host/segment still need it) and releases the dynamic VLAN
    segment if this was the last port bound to it.
    """
    port = context.current
    if (self._is_port_bound(port) and
            self._use_static_path(context.bottom_bound_segment)):
        self._update_static_path(context, remove=True)
        self._release_dynamic_segment(context)
def create_floatingip(self, context, current): def create_floatingip(self, context, current):
if current['port_id']: if current['port_id']:
@@ -937,6 +964,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
for segment in context.segments_to_bind: for segment in context.segments_to_bind:
if bind_strategy(context, segment, agent): if bind_strategy(context, segment, agent):
LOG.debug("Bound using segment: %s", segment) LOG.debug("Bound using segment: %s", segment)
return True
else: else:
LOG.warning(_LW("Refusing to bind port %(port)s to dead " LOG.warning(_LW("Refusing to bind port %(port)s to dead "
"agent: %(agent)s"), "agent: %(agent)s"),
@@ -944,7 +972,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
def _opflex_bind_port(self, context, segment, agent): def _opflex_bind_port(self, context, segment, agent):
network_type = segment[api.NETWORK_TYPE] network_type = segment[api.NETWORK_TYPE]
if network_type == ofcst.TYPE_OPFLEX: if self._is_opflex_type(network_type):
opflex_mappings = agent['configurations'].get('opflex_networks') opflex_mappings = agent['configurations'].get('opflex_networks')
LOG.debug("Checking segment: %(segment)s " LOG.debug("Checking segment: %(segment)s "
"for physical network: %(mappings)s ", "for physical network: %(mappings)s ",
@@ -955,15 +983,42 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
elif network_type != 'local': elif network_type != 'local':
return False return False
context.set_binding(segment[api.ID], self._complete_binding(context, segment)
portbindings.VIF_TYPE_OVS, return True
{portbindings.CAP_PORT_FILTER: False,
portbindings.OVS_HYBRID_PLUG: False})
def _dvs_bind_port(self, context, segment, agent): def _dvs_bind_port(self, context, segment, agent):
# TODO(rkukura): Implement DVS port binding # TODO(rkukura): Implement DVS port binding
return False return False
def _bind_physical_node(self, context):
    """Try to bind a port on a host with no OpFlex agent.

    Binds physical nodes hierarchically: for an OpFlex-type segment a
    dynamic VLAN segment is allocated and binding continues at the next
    level; if we later see a dynamic segment we created ourselves that
    no other driver bound, we complete the binding with an OVS VIF.

    Returns True when binding was continued or completed, None otherwise.
    """
    # Bind physical nodes hierarchically by creating a dynamic segment.
    for segment in context.segments_to_bind:
        net_type = segment[api.NETWORK_TYPE]
        # TODO(amitbose) For ports on baremetal (Ironic) hosts, use
        # binding:profile to decide if dynamic segment should be created.
        if self._is_opflex_type(net_type):
            # TODO(amitbose) Consider providing configuration options
            # for picking network-type and physical-network name
            # for the dynamic segment
            dyn_seg = context.allocate_dynamic_segment(
                {api.NETWORK_TYPE: pconst.TYPE_VLAN})
            LOG.info(_LI('Allocated dynamic-segment %(s)s for port %(p)s'),
                     {'s': dyn_seg, 'p': context.current['id']})
            # Tag the segment so we can recognize it on the next binding
            # level as one of ours (see elif branch below).
            dyn_seg['aim_ml2_created'] = True
            context.continue_binding(segment[api.ID], [dyn_seg])
            return True
        elif segment.get('aim_ml2_created'):
            # Complete binding if another driver did not bind the
            # dynamic segment that we created.
            self._complete_binding(context, segment)
            return True
def _complete_binding(self, context, segment):
    """Finish binding the port on *segment* using the OVS VIF type.

    Port filtering and hybrid plug are both disabled in the VIF details.
    """
    vif_details = {
        portbindings.CAP_PORT_FILTER: False,
        portbindings.OVS_HYBRID_PLUG: False,
    }
    context.set_binding(segment[api.ID], portbindings.VIF_TYPE_OVS,
                        vif_details)
@property @property
def plugin(self): def plugin(self):
if not self._core_plugin: if not self._core_plugin:
@@ -1042,6 +1097,19 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
name=aname) name=aname)
return bd, epg return bd, epg
def _map_external_network(self, session, network):
    """Return the AIM EndpointGroup backing an external *network*.

    Looks up the NAT-strategy resources of the network's L3Outside and
    returns the first EndpointGroup among them.  Returns None (implicitly)
    when there is no external network mapping or no EPG resource.
    """
    l3out, ext_net, ns = self._get_aim_nat_strategy(network)
    if ext_net:
        aim_ctx = aim_context.AimContext(db_session=session)
        for o in (ns.get_l3outside_resources(aim_ctx, l3out) or []):
            if isinstance(o, aim_resource.EndpointGroup):
                return o
def _map_network_to_epg(self, session, network):
    """Return the AIM EndpointGroup that *network* maps to.

    External networks resolve through their L3Outside's NAT strategy;
    regular networks use the standard BD/EPG mapping.
    """
    if not self._is_external(network):
        return self._map_network(session, network)[1]
    return self._map_external_network(session, network)
def _map_subnet(self, subnet, gw_ip, bd): def _map_subnet(self, subnet, gw_ip, bd):
prefix_len = subnet['cidr'].split('/')[1] prefix_len = subnet['cidr'].split('/')[1]
gw_ip_mask = gw_ip + '/' + prefix_len gw_ip_mask = gw_ip + '/' + prefix_len
@@ -1480,3 +1548,83 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
extn_db_sn.snat_host_pool.is_(None))) extn_db_sn.snat_host_pool.is_(None)))
.all()) .all())
return [s[0] for s in other_sn] return [s[0] for s in other_sn]
def _is_opflex_type(self, net_type):
    """Return True when *net_type* is the OpFlex network type."""
    return ofcst.TYPE_OPFLEX == net_type
def _is_supported_non_opflex_type(self, net_type):
    """Return True when *net_type* is a non-OpFlex type we can bind.

    Currently only VLAN segments support static-path binding.
    """
    return net_type == pconst.TYPE_VLAN
def _use_static_path(self, bound_segment):
    """Whether *bound_segment* should be realized as an ACI static path.

    Falsy (unbound) segments short-circuit, matching the original
    ``and`` expression's return value.
    """
    if not bound_segment:
        return bound_segment
    return self._is_supported_non_opflex_type(
        bound_segment[api.NETWORK_TYPE])
def _update_static_path(self, port_context, host=None, segment=None,
                        remove=False):
    """Add or remove the EPG static path for a port's host.

    :param port_context: ML2 port context of the port being processed.
    :param host: host whose path is updated; defaults to the context's
        current host (pass the original host when unbinding a move).
    :param segment: bound segment supplying the VLAN encap; defaults to
        the context's bottom bound segment.
    :param remove: when True, delete the host's static path instead of
        adding one.
    """
    host = host or port_context.host
    segment = segment or port_context.bottom_bound_segment
    session = port_context._plugin_context.session
    if not segment:
        LOG.debug('Port %s is not bound to any segment',
                  port_context.current['id'])
        return
    if remove:
        # check if there are any other ports from this network on the host
        exist = (session.query(models.PortBindingLevel)
                 .filter_by(host=host, segment_id=segment['id'])
                 .filter(models.PortBindingLevel.port_id !=
                         port_context.current['id'])
                 .first())
        if exist:
            # Another port still needs this path; leave it in place.
            return
    else:
        # Only VLAN segments carry an encap we can program.
        if (segment.get(api.NETWORK_TYPE) in [pconst.TYPE_VLAN]):
            seg = segment[api.SEGMENTATION_ID]
        else:
            LOG.info(_LI('Unsupported segmentation type for static path '
                         'binding: %s'),
                     segment.get(api.NETWORK_TYPE))
            return
    aim_ctx = aim_context.AimContext(db_session=session)
    # Resolve the host to its fabric interface path (HostLink).
    host_link = self.aim.find(aim_ctx, aim_infra.HostLink, host_name=host)
    if not host_link or not host_link[0].path:
        LOG.warning(_LW('No host link information found for host %s'),
                    host)
        return
    host_link = host_link[0].path
    epg = self._map_network_to_epg(session, port_context.network.current)
    if not epg:
        LOG.info(_LI('Network %s does not map to any EPG'),
                 port_context.network.current['id'])
        return
    epg = self.aim.get(aim_ctx, epg)
    # Drop any existing entry for this path, then re-add with the
    # (possibly new) encap unless we are removing it.
    static_paths = [p for p in epg.static_paths
                    if p.get('path') != host_link]
    if not remove:
        static_paths.append({'path': host_link, 'encap': 'vlan-%s' % seg})
    LOG.debug('Setting static paths for EPG %s to %s', epg, static_paths)
    self.aim.update(aim_ctx, epg, static_paths=static_paths)
def _release_dynamic_segment(self, port_context, use_original=False):
    """Release the dynamic VLAN segment of a hierarchical binding.

    Only applies when the top bound segment is OpFlex-type and the
    bottom one is a supported non-OpFlex (VLAN) segment, i.e. the shape
    produced by _bind_physical_node.  The segment is released only when
    no other port remains bound to it.

    :param use_original: inspect the original (pre-update) binding,
        used when a port moves between hosts.
    """
    top = (port_context.original_top_bound_segment if use_original
           else port_context.top_bound_segment)
    btm = (port_context.original_bottom_bound_segment if use_original
           else port_context.bottom_bound_segment)
    if (top and btm and
            self._is_opflex_type(top[api.NETWORK_TYPE]) and
            self._is_supported_non_opflex_type(btm[api.NETWORK_TYPE])):
        # if there are no other ports bound to segment, release the segment
        ports = (port_context._plugin_context.session
                 .query(models.PortBindingLevel)
                 .filter_by(segment_id=btm[api.ID])
                 .filter(models.PortBindingLevel.port_id !=
                         port_context.current['id'])
                 .first())
        if not ports:
            LOG.info(_LI('Releasing dynamic-segment %(s)s for port %(p)s'),
                     {'s': btm, 'p': port_context.current['id']})
            port_context.release_dynamic_segment(btm[api.ID])

View File

@@ -18,6 +18,7 @@ import netaddr
from aim.aim_lib import nat_strategy from aim.aim_lib import nat_strategy
from aim import aim_manager from aim import aim_manager
from aim.api import infra as aim_infra
from aim.api import resource as aim_resource from aim.api import resource as aim_resource
from aim.api import status as aim_status from aim.api import status as aim_status
from aim import config as aim_cfg from aim import config as aim_cfg
@@ -28,11 +29,13 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
extension_db as extn_db) extension_db as extn_db)
from keystoneclient.v3 import client as ksc_client from keystoneclient.v3 import client as ksc_client
from neutron.api import extensions from neutron.api import extensions
from neutron.common import constants as n_constants
from neutron import context from neutron import context
from neutron.db import api as db_api from neutron.db import api as db_api
from neutron import manager from neutron import manager
from neutron.plugins.common import constants as service_constants from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import config from neutron.plugins.ml2 import config
from neutron.plugins.ml2 import db as ml2_db
from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_address_scope
@@ -51,6 +54,12 @@ AGENT_CONF_OPFLEX = {'alive': True, 'binary': 'somebinary',
'opflex_networks': None, 'opflex_networks': None,
'bridge_mappings': {'physnet1': 'br-eth1'}}} 'bridge_mappings': {'physnet1': 'br-eth1'}}}
AGENT_CONF_OVS = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic',
'agent_type': n_constants.AGENT_TYPE_OVS,
'configurations': {
'bridge_mappings': {'physnet1': 'br-eth1'}}}
DN = 'apic:distinguished_names' DN = 'apic:distinguished_names'
CIDR = 'apic:external_cidrs' CIDR = 'apic:external_cidrs'
PROV = 'apic:external_provided_contracts' PROV = 'apic:external_provided_contracts'
@@ -111,22 +120,20 @@ class ApicAimTestMixin(object):
class ApicAimTestCase(test_address_scope.AddressScopeTestCase, class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
test_l3.L3NatTestCaseMixin, ApicAimTestMixin): test_l3.L3NatTestCaseMixin, ApicAimTestMixin):
def setUp(self): def setUp(self, mechanism_drivers=None, tenant_network_types=None):
# Enable the test mechanism driver to ensure that # Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism # we can successfully call through to all mechanism
# driver apis. # driver apis.
config.cfg.CONF.set_override('mechanism_drivers', mech = mechanism_drivers or ['logger', 'apic_aim']
['logger', 'apic_aim'], config.cfg.CONF.set_override('mechanism_drivers', mech, 'ml2')
'ml2')
config.cfg.CONF.set_override('extension_drivers', config.cfg.CONF.set_override('extension_drivers',
['apic_aim'], ['apic_aim'],
'ml2') 'ml2')
config.cfg.CONF.set_override('type_drivers', config.cfg.CONF.set_override('type_drivers',
['opflex', 'local', 'vlan'], ['opflex', 'local', 'vlan'],
'ml2') 'ml2')
config.cfg.CONF.set_override('tenant_network_types', net_type = tenant_network_types or ['opflex']
['opflex'], config.cfg.CONF.set_override('tenant_network_types', net_type, 'ml2')
'ml2')
config.cfg.CONF.set_override('network_vlan_ranges', config.cfg.CONF.set_override('network_vlan_ranges',
['physnet1:1000:1099'], ['physnet1:1000:1099'],
group='ml2_type_vlan') group='ml2_type_vlan')
@@ -2304,3 +2311,299 @@ class TestSnatIpAllocation(ApicAimTestCase):
for x in range(0, 8): for x in range(0, 8):
fip = self._make_floatingip(self.fmt, ext_net['id'])['floatingip'] fip = self._make_floatingip(self.fmt, ext_net['id'])['floatingip']
self.assertTrue(fip['floating_ip_address'] in ips) self.assertTrue(fip['floating_ip_address'] in ips)
class TestPortVlanNetwork(ApicAimTestCase):
    # Tests static-path programming for ports bound on physical
    # (non-OpFlex) hosts with VLAN tenant networks.

    def setUp(self, **kwargs):
        """Set up ML2 with openvswitch+apic_aim drivers, VLAN tenant nets,
        one registered OVS agent on host h1, and its AIM HostLink."""
        if kwargs.get('mechanism_drivers') is None:
            kwargs['mechanism_drivers'] = ['logger', 'openvswitch', 'apic_aim']
        if kwargs.get('tenant_network_types') is None:
            kwargs['tenant_network_types'] = ['vlan']
        super(TestPortVlanNetwork, self).setUp(**kwargs)
        aim_ctx = aim_context.AimContext(self.db_session)
        # Fabric interface path for host h1, used by static-path checks.
        self.hlink1 = aim_infra.HostLink(
            host_name='h1',
            interface_name='eth0',
            path='topology/pod-1/paths-102/pathep-[eth1/7]')
        self._register_agent('h1', AGENT_CONF_OVS)
        self.aim_mgr.create(aim_ctx, self.hlink1)
        # Expected (driver, network_type) binding levels for this class.
        self.expected_binding_info = [('openvswitch', 'vlan')]
def _net_2_epg(self, network):
if network['router:external']:
epg = aim_resource.EndpointGroup.from_dn(
network['apic:distinguished_names']['EndpointGroup'])
else:
epg = aim_resource.EndpointGroup(
tenant_name=network['tenant_id'],
app_profile_name=self._app_profile_name,
name=network['id'])
return epg
    def _check_binding(self, port_id, expected_binding_info=None):
        """Assert the port's binding levels match expectations.

        Compares (bound_driver, network_type) per binding level against
        *expected_binding_info* (or the class default) and returns the
        bottom segment's segmentation_id (the VLAN tag).
        """
        port_context = self.plugin.get_bound_port_context(
            context.get_admin_context(), port_id)
        self.assertIsNotNone(port_context)
        binding_info = [(bl['bound_driver'],
                         bl['bound_segment']['network_type'])
                        for bl in port_context.binding_levels]
        self.assertEqual(expected_binding_info or self.expected_binding_info,
                         binding_info)
        return port_context.bottom_bound_segment['segmentation_id']
def _check_no_dynamic_segment(self, network_id):
dyn_segments = ml2_db.get_network_segments(
context.get_admin_context().session, network_id,
filter_dynamic=True)
self.assertEqual(0, len(dyn_segments))
    def _do_test_port_lifecycle(self, external_net=False):
        """Exercise bind -> move -> delete for a port on physical hosts.

        Verifies the EPG's static_paths track the port's host: empty when
        unbound, h1's path after first binding, h2's path after the move,
        and empty again (with no dynamic segment left) after deletion.
        """
        aim_ctx = aim_context.AimContext(self.db_session)
        if external_net:
            net1 = self._make_ext_network('net1',
                                          dn='uni/tn-t1/out-l1/instP-n1')
        else:
            net1 = self._make_network(self.fmt, 'net1', True)['network']
        # Second physical host with its own fabric path.
        hlink2 = aim_infra.HostLink(
            host_name='h2',
            interface_name='eth0',
            path='topology/pod-1/paths-201/pathep-[eth1/19]')
        self.aim_mgr.create(aim_ctx, hlink2)
        self._register_agent('h2', AGENT_CONF_OVS)
        epg = self._net_2_epg(net1)
        with self.subnet(network={'network': net1}) as sub1:
            with self.port(subnet=sub1) as p1:
                # unbound port -> no static paths expected
                epg = self.aim_mgr.get(aim_ctx, epg)
                self.assertEqual([], epg.static_paths)
                # bind to host h1
                p1 = self._bind_port_to_host(p1['port']['id'], 'h1')
                vlan_h1 = self._check_binding(p1['port']['id'])
                epg = self.aim_mgr.get(aim_ctx, epg)
                self.assertEqual(
                    [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_h1}],
                    epg.static_paths)
                # move port to host h2
                p1 = self._bind_port_to_host(p1['port']['id'], 'h2')
                vlan_h2 = self._check_binding(p1['port']['id'])
                epg = self.aim_mgr.get(aim_ctx, epg)
                self.assertEqual(
                    [{'path': hlink2.path, 'encap': 'vlan-%s' % vlan_h2}],
                    epg.static_paths)
                # delete port
                self._delete('ports', p1['port']['id'])
                self._check_no_dynamic_segment(net1['id'])
                epg = self.aim_mgr.get(aim_ctx, epg)
                self.assertEqual([], epg.static_paths)
def test_port_lifecycle_internal_network(self):
self._do_test_port_lifecycle()
def test_port_lifecycle_external_network(self):
self._do_test_port_lifecycle(external_net=True)
    def test_multiple_ports_on_host(self):
        """Two ports of one network on one host share a VLAN/static path.

        The static path must survive deleting one port and disappear
        (with the dynamic segment) only when the last port is deleted.
        """
        aim_ctx = aim_context.AimContext(self.db_session)
        net1 = self._make_network(self.fmt, 'net1', True)['network']
        epg = self._net_2_epg(net1)
        with self.subnet(network={'network': net1}) as sub1:
            with self.port(subnet=sub1) as p1:
                # bind p1 to host h1
                p1 = self._bind_port_to_host(p1['port']['id'], 'h1')
                vlan_p1 = self._check_binding(p1['port']['id'])
                epg = self.aim_mgr.get(aim_ctx, epg)
                self.assertEqual(
                    [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p1}],
                    epg.static_paths)
                with self.port(subnet=sub1) as p2:
                    # bind p2 to host h1
                    p2 = self._bind_port_to_host(p2['port']['id'], 'h1')
                    vlan_p2 = self._check_binding(p2['port']['id'])
                    # Same network + host -> same dynamic VLAN segment.
                    self.assertEqual(vlan_p1, vlan_p2)
                    epg = self.aim_mgr.get(aim_ctx, epg)
                    self.assertEqual(
                        [{'path': self.hlink1.path,
                          'encap': 'vlan-%s' % vlan_p2}],
                        epg.static_paths)
                    self._delete('ports', p2['port']['id'])
                    # p1 still bound -> path must remain.
                    self._check_binding(p1['port']['id'])
                    epg = self.aim_mgr.get(aim_ctx, epg)
                    self.assertEqual(
                        [{'path': self.hlink1.path,
                          'encap': 'vlan-%s' % vlan_p1}],
                        epg.static_paths)
                self._delete('ports', p1['port']['id'])
                self._check_no_dynamic_segment(net1['id'])
                epg = self.aim_mgr.get(aim_ctx, epg)
                self.assertEqual([], epg.static_paths)
    def test_multiple_networks_on_host(self):
        """Two networks on one host get distinct VLANs and independent paths.

        Deleting net2's port must clear only net2's static path; net1's
        path stays intact.
        """
        aim_ctx = aim_context.AimContext(self.db_session)
        net1 = self._make_network(self.fmt, 'net1', True)['network']
        epg1 = self._net_2_epg(net1)
        with self.subnet(network={'network': net1}) as sub1:
            with self.port(subnet=sub1) as p1:
                # bind p1 to host h1
                p1 = self._bind_port_to_host(p1['port']['id'], 'h1')
                vlan_p1 = self._check_binding(p1['port']['id'])
                epg1 = self.aim_mgr.get(aim_ctx, epg1)
                self.assertEqual(
                    [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p1}],
                    epg1.static_paths)
        net2 = self._make_network(self.fmt, 'net2', True)['network']
        epg2 = self._net_2_epg(net2)
        with self.subnet(network={'network': net2}) as sub2:
            with self.port(subnet=sub2) as p2:
                # bind p2 to host h1
                p2 = self._bind_port_to_host(p2['port']['id'], 'h1')
                vlan_p2 = self._check_binding(p2['port']['id'])
                # Different networks -> different dynamic VLANs.
                self.assertNotEqual(vlan_p1, vlan_p2)
                epg2 = self.aim_mgr.get(aim_ctx, epg2)
                self.assertEqual(
                    [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p2}],
                    epg2.static_paths)
                self._delete('ports', p2['port']['id'])
                epg2 = self.aim_mgr.get(aim_ctx, epg2)
                self._check_no_dynamic_segment(net2['id'])
                self.assertEqual([], epg2.static_paths)
                # net1's path must be unaffected by net2's teardown.
                epg1 = self.aim_mgr.get(aim_ctx, epg1)
                self.assertEqual(
                    [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p1}],
                    epg1.static_paths)
    def test_network_on_multiple_hosts(self):
        """One network spanning two hosts gets one static path per host.

        Both hosts share the same dynamic VLAN; each host's path is
        removed independently as its last port goes away.
        """
        aim_ctx = aim_context.AimContext(self.db_session)
        net1 = self._make_network(self.fmt, 'net1', True)['network']
        epg1 = self._net_2_epg(net1)
        hlink2 = aim_infra.HostLink(
            host_name='h2',
            interface_name='eth0',
            path='topology/pod-1/paths-201/pathep-[eth1/19]')
        self.aim_mgr.create(aim_ctx, hlink2)
        self._register_agent('h2', AGENT_CONF_OVS)
        with self.subnet(network={'network': net1}) as sub1:
            with self.port(subnet=sub1) as p1:
                p1 = self._bind_port_to_host(p1['port']['id'], 'h1')
                vlan_p1 = self._check_binding(p1['port']['id'])
                with self.port(subnet=sub1) as p2:
                    p2 = self._bind_port_to_host(p2['port']['id'], 'h2')
                    vlan_p2 = self._check_binding(p2['port']['id'])
                    # Same network -> same dynamic VLAN on both hosts.
                    self.assertEqual(vlan_p1, vlan_p2)
                    epg1 = self.aim_mgr.get(aim_ctx, epg1)
                    self.assertEqual(
                        [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p1},
                         {'path': hlink2.path, 'encap': 'vlan-%s' % vlan_p2}],
                        sorted(epg1.static_paths, key=lambda x: x['path']))
                    self._delete('ports', p2['port']['id'])
                    # h2's path gone; h1's remains.
                    epg1 = self.aim_mgr.get(aim_ctx, epg1)
                    self.assertEqual(
                        [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p1}],
                        epg1.static_paths)
                self._delete('ports', p1['port']['id'])
                epg1 = self.aim_mgr.get(aim_ctx, epg1)
                self._check_no_dynamic_segment(net1['id'])
                self.assertEqual([], epg1.static_paths)
def test_port_binding_missing_hostlink(self):
aim_ctx = aim_context.AimContext(self.db_session)
net1 = self._make_network(self.fmt, 'net1', True)['network']
epg1 = self._net_2_epg(net1)
self._register_agent('h-42', AGENT_CONF_OVS)
with self.subnet(network={'network': net1}) as sub1:
with self.port(subnet=sub1) as p1:
p1 = self._bind_port_to_host(p1['port']['id'], 'h-42')
self._check_binding(p1['port']['id'])
epg1 = self.aim_mgr.get(aim_ctx, epg1)
self.assertEqual([], epg1.static_paths)
hlink42 = aim_infra.HostLink(host_name='h42',
interface_name='eth0')
self.aim_mgr.create(aim_ctx, hlink42)
with self.port(subnet=sub1) as p2:
p2 = self._bind_port_to_host(p2['port']['id'], 'h-42')
self._check_binding(p2['port']['id'])
epg1 = self.aim_mgr.get(aim_ctx, epg1)
self.assertEqual([], epg1.static_paths)
class TestPortOnPhysicalNode(TestPortVlanNetwork):
    # Tests for binding port on physical node where another ML2 mechanism
    # driver completes port binding.

    def setUp(self, mechanism_drivers=None):
        """Run the parent setup with OpFlex tenant networks so binding
        on physical hosts goes hierarchical (opflex -> vlan)."""
        super(TestPortOnPhysicalNode, self).setUp(
            mechanism_drivers=mechanism_drivers,
            tenant_network_types=['opflex'])
        # Hierarchical binding: apic_aim allocates the dynamic VLAN,
        # openvswitch completes the lower level.
        self.expected_binding_info = [('apic_aim', 'opflex'),
                                      ('openvswitch', 'vlan')]

    def test_mixed_ports_on_network(self):
        """An OpFlex-host port and a physical-host port can coexist on
        one network; only the physical host gets a static path."""
        aim_ctx = aim_context.AimContext(self.db_session)
        self._register_agent('opflex-1', AGENT_CONF_OPFLEX)
        net1 = self._make_network(self.fmt, 'net1', True)['network']
        epg1 = self._net_2_epg(net1)
        with self.subnet(network={'network': net1}) as sub1:
            # "normal" port on opflex host
            with self.port(subnet=sub1) as p1:
                p1 = self._bind_port_to_host(p1['port']['id'], 'opflex-1')
                # OpFlex host binds at a single level, no static path.
                self._check_binding(p1['port']['id'],
                    expected_binding_info=[('apic_aim', 'opflex')])
                epg1 = self.aim_mgr.get(aim_ctx, epg1)
                self.assertEqual([], epg1.static_paths)
            # port on non-opflex host
            with self.port(subnet=sub1) as p2:
                p2 = self._bind_port_to_host(p2['port']['id'], 'h1')
                vlan_p2 = self._check_binding(p2['port']['id'])
                epg1 = self.aim_mgr.get(aim_ctx, epg1)
                self.assertEqual(
                    [{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p2}],
                    epg1.static_paths)
class TestPortOnPhysicalNodeSingleDriver(TestPortOnPhysicalNode):
    # Tests for binding port on physical node where no other ML2 mechanism
    # driver fulfills port binding.

    # NOTE(review): the service_plugins parameter is accepted but never
    # used or forwarded here — presumably kept for subclass signature
    # compatibility; confirm and remove if unneeded.
    def setUp(self, service_plugins=None):
        """Run with only logger + apic_aim so apic_aim must complete both
        binding levels itself (opflex -> vlan)."""
        super(TestPortOnPhysicalNodeSingleDriver, self).setUp(
            mechanism_drivers=['logger', 'apic_aim'])
        self.expected_binding_info = [('apic_aim', 'opflex'),
                                      ('apic_aim', 'vlan')]