[apic_aim] Map neutron resources to AIM, part 2

Neutron address scopes are mapped to AIM VRFs, with the DN and sync
status exposed via extension attributes.
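
For illustration, an address scope returned by the Neutron API is
expected to carry the mapping roughly as sketched below. The
apic:distinguished_names key and its 'VRF' entry match the extension
and tests in the diff; the synchronization-state attribute name, the
tenant name, and the DN layout are assumptions:

    # Hypothetical GET response for an address scope mapped to an AIM VRF.
    # The id, tenant and DN values are illustrative only.
    address_scope = {
        'id': '9a3c1f0e-0000-0000-0000-000000000000',
        'name': 'as1',
        'ip_version': 4,
        'apic:distinguished_names': {
            'VRF': 'uni/tn-prj_9a3c1f0e/ctx-as1_9a3c1',  # assumed DN layout
        },
        # Attribute name assumed; 'synced' is one of the states in the diff.
        'apic:synchronization_state': 'synced',
    }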

Neutron networks are initially created with AIM BridgeDomains (BDs)
that have routing disabled and are associated with a common unrouted
VRF.
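
Condensed from the mechanism driver diff below, the mapping applied
at network creation looks roughly like this sketch (the standalone
helper signature is invented for illustration; the AIM calls and
attribute values follow the diff):

    from aim.api import resource as aim_resource
    from aim import utils as aim_utils

    def map_network(aim, aim_ctx, tenant_aname, aname, neutron_name,
                    unrouted_vrf):
        # Unrouted BD tied to the shared VRF, plus an EPG bound to that BD.
        dname = aim_utils.sanitize_display_name(neutron_name)
        bd = aim_resource.BridgeDomain(
            tenant_name=tenant_aname,
            name=aname,
            display_name=dname,
            vrf_name=unrouted_vrf.name,     # common unrouted VRF
            enable_arp_flood=True,
            enable_routing=False,           # routing comes with a later patch
            limit_ip_learn_to_subnets=True)
        aim.create(aim_ctx, bd)
        epg = aim_resource.EndpointGroup(
            tenant_name=tenant_aname,
            app_profile_name='NeutronAP',   # AP_NAME in the driver
            name=aname,
            display_name=dname,
            bd_name=aname)
        aim.create(aim_ctx, epg)
        return bd, epg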

The AIM display_name attribute has been implemented for the AIM
objects mapped from networks and address scopes.

New unit tests for the Neutron lifecycle operations have been added
that validate the state of AIM objects after each operation. These
also validate the apic:distinguished_names extension
attributes. Redundant unit tests have been eliminated.

Tracking the relationship between address scopes, subnetpools,
subnets, networks and routers is not yet implemented.

The original code mapping Neutron subnets to AIM Subnets has been
removed. A subsequent patch implementing routers will create/delete
AIM Subnets as Neutron subnets are added/deleted as router interfaces.

Neutron address scopes are mapped to AIM VRFs, but these VRFs are not
yet associated with networks' BDs. This will also be done in a
subsequent patch, as Neutron subnets on those networks that are
associated with address scopes are added as router interfaces.

An obvious bug in sync_state handling is also fixed, though unit tests
to prevent regressions are still needed. The devstack plugin is updated
to include the proxy_group extension driver.
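
For context, the sync_state bug was that callers discarded the value
returned by the status-merging helper, so the reported state never
changed; the reworked helper in the diff below fetches the AIM status
itself and returns the merged state for the caller to re-assign. A
minimal sketch, with placeholder state constants (the real values
live in the cisco_apic extension module):

    SYNC_SYNCED, SYNC_BUILD, SYNC_ERROR = 'synced', 'build', 'error'

    def merge_status(aim, aim_ctx, sync_state, resource):
        """Fold one AIM resource's status into the running sync_state."""
        status = aim.get_status(aim_ctx, resource)
        if not status:
            return sync_state           # AIM resource not created yet
        if status.is_error():
            return SYNC_ERROR
        if status.is_build() and sync_state != SYNC_ERROR:
            return SYNC_BUILD
        return sync_state

    # The fix is that callers keep the returned value:
    #   sync_state = SYNC_SYNCED
    #   sync_state = merge_status(aim, aim_ctx, sync_state, bd)
    #   sync_state = merge_status(aim, aim_ctx, sync_state, epg)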

Change-Id: I8fef7809654924d4c3ff50bdf49188faba0684e0
Author: Robert Kukura
Date:   2016-08-09 16:42:01 -04:00
Commit: 5fae44574c
Parent: 0696748490
7 changed files with 508 additions and 321 deletions


@@ -26,8 +26,7 @@ function configure_apic_aim {
# GBP Configuration for AIM
# Policy drivers (REVISIT: chain_mapping might needed to be added later)
iniset $NEUTRON_CONF group_policy policy_drivers "aim_mapping"
# Extension drivers (REVISIT: proxy_group might needed to be added later)
iniset $NEUTRON_CONF group_policy extension_drivers "aim_extension"
iniset $NEUTRON_CONF group_policy extension_drivers "aim_extension,proxy_group"
# Service Chain (REVISIT: not overriding any defaults yet)
init_aim


@@ -22,6 +22,7 @@ LOG = None
NAME_TYPE_TENANT = 'tenant'
NAME_TYPE_NETWORK = 'network'
NAME_TYPE_ADDRESS_SCOPE = 'address_scope'
NAME_TYPE_POLICY_TARGET_GROUP = 'policy_target_group'
NAME_TYPE_L3_POLICY = 'l3_policy'
NAME_TYPE_L2_POLICY = 'l2_policy'
@@ -145,6 +146,11 @@ class APICNameMapper(object):
def network(self, session, network_id, network_name=None):
return network_name
@mapper(NAME_TYPE_ADDRESS_SCOPE)
def address_scope(self, session, address_scope_id,
address_scope_name=None):
return address_scope_name
@mapper(NAME_TYPE_POLICY_TARGET_GROUP)
def policy_target_group(self, session, policy_target_group_id,
policy_target_group_name=None):


@@ -51,6 +51,10 @@ class ProjectNameCache(object):
inside a transaction with a project_id not already in the
cache.
"""
# TODO(rkukura): It seems load_from_conf_options() and
# keystoneclient auth plugins have been deprecated, and we
# should use keystoneauth instead.
if project_id not in self.project_names:
if self.keystone is None:
LOG.debug("Getting keystone client")


@@ -16,16 +16,16 @@
from neutron._i18n import _LI
from neutron.api import extensions
from neutron import manager as n_manager
from neutron.plugins.ml2 import driver_api
from oslo_log import log
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
extensions as extensions_pkg)
LOG = log.getLogger(__name__)
class ApicExtensionDriver(driver_api.ExtensionDriver):
class ApicExtensionDriver(api_plus.ExtensionDriver):
def __init__(self):
LOG.info(_LI("APIC AIM ED __init__"))
@@ -55,3 +55,6 @@ class ApicExtensionDriver(driver_api.ExtensionDriver):
def extend_subnet_dict(self, session, base_model, result):
self._md.extend_subnet_dict(session, base_model, result)
def extend_address_scope_dict(self, session, base_model, result):
self._md.extend_address_scope_dict(session, base_model, result)


@@ -26,6 +26,7 @@ BD = 'BridgeDomain'
CTX = 'Context'
EPG = 'EndpointGroup'
SUBNET = 'Subnet'
VRF = 'VRF'
SYNC_SYNCED = 'synced'
SYNC_PENDING = 'pending'


@@ -16,13 +16,13 @@
from aim import aim_manager
from aim.api import resource as aim_resource
from aim import context as aim_context
from aim import utils as aim_utils
from neutron._i18n import _LE
from neutron._i18n import _LI
from neutron._i18n import _LW
from neutron.agent.linux import dhcp
from neutron.common import constants as n_constants
from neutron.common import rpc as n_rpc
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.ml2 import driver_api as api
@@ -40,6 +40,8 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import model
LOG = log.getLogger(__name__)
AP_NAME = 'NeutronAP'
UNROUTED_VRF_NAME = 'UnroutedVRF'
COMMON_TENANT_NAME = 'common'
AGENT_TYPE_DVS = 'DVS agent'
VIF_TYPE_DVS = 'dvs'
PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP,
@@ -73,7 +75,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
self.opflex_conn.consume_in_threads()
def ensure_tenant(self, plugin_context, tenant_id):
LOG.info(_LI("APIC AIM MD ensuring tenant_id: %s"), tenant_id)
LOG.debug("APIC AIM MD ensuring tenant_id: %s", tenant_id)
self.project_name_cache.ensure_project(tenant_id)
@@ -83,292 +85,277 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
session = plugin_context.session
with session.begin(subtransactions=True):
project_name = self.project_name_cache.get_project_name(tenant_id)
tenant_name = self.name_mapper.tenant(session, tenant_id,
project_name)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
tenant_aname = self.name_mapper.tenant(session, tenant_id,
project_name)
LOG.debug("Mapped tenant_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': tenant_id, 'name': project_name,
'aname': tenant_aname})
aim_ctx = aim_context.AimContext(session)
tenant = aim_resource.Tenant(name=tenant_name)
tenant = aim_resource.Tenant(name=tenant_aname)
if not self.aim.get(aim_ctx, tenant):
self.aim.create(aim_ctx, tenant)
ap = aim_resource.ApplicationProfile(tenant_name=tenant_name,
ap = aim_resource.ApplicationProfile(tenant_name=tenant_aname,
name=AP_NAME)
if not self.aim.get(aim_ctx, ap):
self.aim.create(aim_ctx, ap)
def create_network_precommit(self, context):
LOG.info(_LI("APIC AIM MD creating network: %s"), context.current)
LOG.debug("APIC AIM MD creating network: %s", context.current)
session = context._plugin_context.session
tenant_id = context.current['tenant_id']
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = context.current['id']
name = context.current['name']
bd_name = self.name_mapper.network(session, id, name)
LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
"%(apic_name)s"),
{'id': id, 'name': name, 'apic_name': bd_name})
aname = self.name_mapper.network(session, id, name)
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
{'id': id, 'name': name, 'aname': aname})
dname = aim_utils.sanitize_display_name(name)
aim_ctx = aim_context.AimContext(session)
bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
name=bd_name)
vrf = self._get_unrouted_vrf(aim_ctx)
bd = aim_resource.BridgeDomain(
tenant_name=tenant_aname,
name=aname,
display_name=dname,
vrf_name=vrf.name,
enable_arp_flood=True,
enable_routing=False,
limit_ip_learn_to_subnets=True)
self.aim.create(aim_ctx, bd)
epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
app_profile_name=AP_NAME,
name=bd_name,
bd_name=bd_name)
epg = aim_resource.EndpointGroup(
tenant_name=tenant_aname,
app_profile_name=AP_NAME,
name=aname,
display_name=dname,
bd_name=aname)
self.aim.create(aim_ctx, epg)
def update_network_precommit(self, context):
LOG.debug("APIC AIM MD updating network: %s", context.current)
if context.current['name'] != context.original['name']:
session = context._plugin_context.session
tenant_id = context.current['tenant_id']
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = context.current['id']
name = context.current['name']
aname = self.name_mapper.network(session, id, name)
LOG.debug("Mapped network_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
dname = aim_utils.sanitize_display_name(context.current['name'])
aim_ctx = aim_context.AimContext(session)
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
name=aname)
bd = self.aim.update(aim_ctx, bd, display_name=dname)
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
app_profile_name=AP_NAME,
name=aname)
epg = self.aim.update(aim_ctx, epg, display_name=dname)
def delete_network_precommit(self, context):
LOG.info(_LI("APIC AIM MD deleting network: %s"), context.current)
LOG.debug("APIC AIM MD deleting network: %s", context.current)
session = context._plugin_context.session
tenant_id = context.current['tenant_id']
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = context.current['id']
bd_name = self.name_mapper.network(session, id)
LOG.info(_LI("Mapped network_id %(id)s to %(apic_name)s"),
{'id': id, 'apic_name': bd_name})
name = context.current['name']
aname = self.name_mapper.network(session, id, name)
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
{'id': id, 'name': name, 'aname': aname})
aim_ctx = aim_context.AimContext(session)
epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
app_profile_name=AP_NAME,
name=bd_name)
name=aname)
self.aim.delete(aim_ctx, epg)
bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
name=bd_name)
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
name=aname)
self.aim.delete(aim_ctx, bd)
self.name_mapper.delete_apic_name(session, id)
def extend_network_dict(self, session, base_model, result):
LOG.info(_LI("APIC AIM MD extending dict for network: %s"), result)
sync_state = cisco_apic.SYNC_SYNCED
LOG.debug("APIC AIM MD extending dict for network: %s", result)
tenant_id = result['tenant_id']
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = result['id']
name = result['name']
bd_name = self.name_mapper.network(session, id, name)
LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
"%(apic_name)s"),
{'id': id, 'name': name, 'apic_name': bd_name})
aname = self.name_mapper.network(session, id, name)
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
{'id': id, 'name': name, 'aname': aname})
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
name=aname)
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
app_profile_name=AP_NAME,
name=aname)
aim_ctx = aim_context.AimContext(session)
bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
name=bd_name)
bd = self.aim.get(aim_ctx, bd)
LOG.debug("got BD with DN: %s", bd.dn)
epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
app_profile_name=AP_NAME,
name=bd_name)
epg = self.aim.get(aim_ctx, epg)
LOG.debug("got EPG with DN: %s", epg.dn)
sync_state = cisco_apic.SYNC_SYNCED
sync_state = self._merge_status(aim_ctx, sync_state, bd)
sync_state = self._merge_status(aim_ctx, sync_state, epg)
result[cisco_apic.DIST_NAMES] = {cisco_apic.BD: bd.dn,
cisco_apic.EPG: epg.dn}
bd_status = self.aim.get_status(aim_ctx, bd)
self._merge_status(sync_state, bd_status)
epg_status = self.aim.get_status(aim_ctx, epg)
self._merge_status(sync_state, epg_status)
result[cisco_apic.SYNC_STATE] = sync_state
def create_subnet_precommit(self, context):
LOG.info(_LI("APIC AIM MD creating subnet: %s"), context.current)
# REVISIT(rkukura): Do we need to do any of the
# constraints/scope stuff?
gateway_ip_mask = self._gateway_ip_mask(context.current)
if gateway_ip_mask:
session = context._plugin_context.session
network_id = context.current['network_id']
# REVISIT(rkukura): Should Ml2Plus extend SubnetContext
# with network?
network = (session.query(models_v2.Network).
filter_by(id=network_id).
one())
tenant_id = network.tenant_id
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
network_name = network.name
bd_name = self.name_mapper.network(session, network_id,
network_name)
LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
"%(apic_name)s"),
{'id': network_id, 'name': network_name,
'apic_name': bd_name})
aim_ctx = aim_context.AimContext(session)
subnet = aim_resource.Subnet(tenant_name=tenant_name,
bd_name=bd_name,
gw_ip_mask=gateway_ip_mask)
subnet = self.aim.create(aim_ctx, subnet)
subnet_dn = subnet.dn
subnet_status = self.aim.get_status(aim_ctx, subnet)
sync_state = cisco_apic.SYNC_SYNCED
self._merge_status(sync_state, subnet_status)
# ML2 does not extend subnet dict after precommit.
context.current[cisco_apic.DIST_NAMES] = {cisco_apic.SUBNET:
subnet_dn}
context.current[cisco_apic.SYNC_STATE] = sync_state
LOG.debug("APIC AIM MD creating subnet: %s", context.current)
# TODO(rkukura): Implement.
def update_subnet_precommit(self, context):
LOG.info(_LI("APIC AIM MD updating subnet: %s"), context.current)
if context.current['gateway_ip'] != context.original['gateway_ip']:
session = context._plugin_context.session
network_id = context.current['network_id']
# REVISIT(rkukura): Should Ml2Plus extend SubnetContext
# with network?
network = (session.query(models_v2.Network).
filter_by(id=network_id).
one())
tenant_id = network.tenant_id
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
network_name = network.name
bd_name = self.name_mapper.network(session, network_id,
network_name)
LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
"%(apic_name)s"),
{'id': network_id, 'name': network_name,
'apic_name': bd_name})
aim_ctx = aim_context.AimContext(session)
gateway_ip_mask = self._gateway_ip_mask(context.original)
if gateway_ip_mask:
subnet = aim_resource.Subnet(tenant_name=tenant_name,
bd_name=bd_name,
gw_ip_mask=gateway_ip_mask)
self.aim.delete(aim_ctx, subnet)
gateway_ip_mask = self._gateway_ip_mask(context.current)
if gateway_ip_mask:
subnet = aim_resource.Subnet(tenant_name=tenant_name,
bd_name=bd_name,
gw_ip_mask=gateway_ip_mask)
subnet = self.aim.create(aim_ctx, subnet)
subnet_dn = subnet.dn
subnet_status = self.aim.get_status(aim_ctx, subnet)
sync_state = cisco_apic.SYNC_SYNCED
self._merge_status(sync_state, subnet_status)
# ML2 does not extend subnet dict after precommit.
context.current[cisco_apic.DIST_NAMES] = {cisco_apic.SUBNET:
subnet_dn}
context.current[cisco_apic.SYNC_STATE] = sync_state
LOG.debug("APIC AIM MD updating subnet: %s", context.current)
# TODO(rkukura): Implement.
def delete_subnet_precommit(self, context):
LOG.info(_LI("APIC AIM MD deleting subnet: %s"), context.current)
gateway_ip_mask = self._gateway_ip_mask(context.current)
if gateway_ip_mask:
session = context._plugin_context.session
network_id = context.current['network_id']
# REVISIT(rkukura): Should Ml2Plus extend SubnetContext
# with network?
network = (session.query(models_v2.Network).
filter_by(id=network_id).
one())
tenant_id = network.tenant_id
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
network_name = network.name
bd_name = self.name_mapper.network(session, network_id,
network_name)
LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
"%(apic_name)s"),
{'id': network_id, 'name': network_name,
'apic_name': bd_name})
aim_ctx = aim_context.AimContext(session)
subnet = aim_resource.Subnet(tenant_name=tenant_name,
bd_name=bd_name,
gw_ip_mask=gateway_ip_mask)
self.aim.delete(aim_ctx, subnet)
LOG.debug("APIC AIM MD deleting subnet: %s", context.current)
# TODO(rkukura): Implement.
def extend_subnet_dict(self, session, base_model, result):
LOG.info(_LI("APIC AIM MD extending dict for subnet: %s"), result)
LOG.debug("APIC AIM MD extending dict for subnet: %s", result)
subnet_dn = None
sync_state = cisco_apic.SYNC_SYNCED
gateway_ip_mask = self._gateway_ip_mask(result)
if gateway_ip_mask:
network_id = result['network_id']
network = (session.query(models_v2.Network).
filter_by(id=network_id).
one())
# TODO(rkukura): Implement.
tenant_id = network.tenant_id
tenant_name = self.name_mapper.tenant(session, tenant_id)
LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
{'id': tenant_id, 'apic_name': tenant_name})
result[cisco_apic.DIST_NAMES] = {}
result[cisco_apic.SYNC_STATE] = sync_state
network_name = network.name
bd_name = self.name_mapper.network(session, network_id,
network_name)
LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
"%(apic_name)s"),
{'id': network_id, 'name': network_name,
'apic_name': bd_name})
def create_address_scope_precommit(self, context):
LOG.debug("APIC AIM MD creating address scope: %s", context.current)
session = context._plugin_context.session
tenant_id = context.current['tenant_id']
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = context.current['id']
name = context.current['name']
aname = self.name_mapper.address_scope(session, id, name)
LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
aim_ctx = aim_context.AimContext(session)
vrf = aim_resource.VRF(
tenant_name=tenant_aname,
name=aname,
display_name=aim_utils.sanitize_display_name(name))
self.aim.create(aim_ctx, vrf)
# ML2Plus does not extend address scope dict after precommit.
sync_state = cisco_apic.SYNC_SYNCED
sync_state = self._merge_status(aim_ctx, sync_state, vrf)
context.current[cisco_apic.DIST_NAMES] = {cisco_apic.VRF:
vrf.dn}
context.current[cisco_apic.SYNC_STATE] = sync_state
def update_address_scope_precommit(self, context):
LOG.debug("APIC AIM MD updating address_scope: %s", context.current)
if context.current['name'] != context.original['name']:
session = context._plugin_context.session
tenant_id = context.current['tenant_id']
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = context.current['id']
name = context.current['name']
aname = self.name_mapper.address_scope(session, id, name)
LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
dname = aim_utils.sanitize_display_name(name)
aim_ctx = aim_context.AimContext(session)
subnet = aim_resource.Subnet(tenant_name=tenant_name,
bd_name=bd_name,
gw_ip_mask=gateway_ip_mask)
subnet = self.aim.get(aim_ctx, subnet)
if subnet:
LOG.debug("got Subnet with DN: %s", subnet.dn)
subnet_dn = subnet.dn
subnet_status = self.aim.get_status(aim_ctx, subnet)
self._merge_status(sync_state, subnet_status)
else:
# This should always get replaced with the real DN
# during precommit.
subnet_dn = "AIM Subnet not yet created"
vrf = aim_resource.VRF(tenant_name=tenant_aname,
name=aname)
vrf = self.aim.update(aim_ctx, vrf, display_name=dname)
result[cisco_apic.DIST_NAMES] = {cisco_apic.SUBNET: subnet_dn}
def delete_address_scope_precommit(self, context):
LOG.debug("APIC AIM MD deleting address scope: %s", context.current)
session = context._plugin_context.session
tenant_id = context.current['tenant_id']
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = context.current['id']
name = context.current['name']
aname = self.name_mapper.address_scope(session, id, name)
LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
aim_ctx = aim_context.AimContext(session)
vrf = aim_resource.VRF(tenant_name=tenant_aname,
name=aname)
self.aim.delete(aim_ctx, vrf)
self.name_mapper.delete_apic_name(session, id)
def extend_address_scope_dict(self, session, base_model, result):
LOG.debug("APIC AIM MD extending dict for address scope: %s", result)
tenant_id = result['tenant_id']
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = result['id']
name = result['name']
aname = self.name_mapper.address_scope(session, id, name)
LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
vrf = aim_resource.VRF(tenant_name=tenant_aname,
name=aname)
aim_ctx = aim_context.AimContext(session)
sync_state = cisco_apic.SYNC_SYNCED
sync_state = self._merge_status(aim_ctx, sync_state, vrf)
result[cisco_apic.DIST_NAMES] = {cisco_apic.VRF: vrf.dn}
result[cisco_apic.SYNC_STATE] = sync_state
def bind_port(self, context):
@@ -501,9 +488,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
# TODO(rkukura): Scope the tenant's AIM name.
network = port_context.network.current
epg_tenant_name = self.name_mapper.tenant(session,
network['tenant_id'])
epg_name = self.name_mapper.network(session, network['id'], None)
epg_tenant_aname = self.name_mapper.tenant(session,
network['tenant_id'])
epg_aname = self.name_mapper.network(session, network['id'],
network['name'])
promiscuous_mode = port['device_owner'] in PROMISCUOUS_TYPES
@@ -512,13 +500,13 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
'device': device,
'enable_dhcp_optimization': self.enable_dhcp_opt,
'enable_metadata_optimization': self.enable_metadata_opt,
'endpoint_group_name': epg_name,
'endpoint_group_name': epg_aname,
'host': host,
'l3_policy_id': network['tenant_id'], # TODO(rkukura)
'mac_address': port['mac_address'],
'port_id': port_id,
'promiscuous_mode': promiscuous_mode,
'ptg_tenant': epg_tenant_name,
'ptg_tenant': epg_tenant_aname,
'subnets': self._get_subnet_details(core_plugin, context,
port)}
@@ -577,14 +565,52 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
subnet['dhcp_server_ips'] = dhcp_ips
return subnets
def _merge_status(self, sync_state, status):
def _merge_status(self, aim_ctx, sync_state, resource):
status = self.aim.get_status(aim_ctx, resource)
if not status:
# REVISIT(rkukura): This should only occur if the AIM
# resource has not yet been created when
# extend_<resource>_dict() runs at the begining of a
# create operation. In this case, the real sync_state
# value will be generated, either in
# create_<resource>_precommit() or in a 2nd call to
# extend_<resource>_dict() after the precommit phase,
# depending on the resource. It might be safer to force
# sync_state to a SYNC_MISSING value here that is not
# overwritten on subsequent calls to _merge_status(), in
# case the real sync_state value somehow does not get
# generated. But sync_state handling in general needs to
# be revisited (and properly tested), so we can deal with
# this at that time.
return sync_state
if status.is_error():
sync_state = cisco_apic.SYNC_ERROR
elif status.is_build() and sync_state is not cisco_apic.SYNC_ERROR:
sync_state = cisco_apic.SYNC_BUILD
return sync_state
def _gateway_ip_mask(self, subnet):
gateway_ip = subnet['gateway_ip']
if gateway_ip:
prefix_len = subnet['cidr'].split('/')[1]
return gateway_ip + '/' + prefix_len
def _get_common_tenant(self, aim_ctx):
attrs = aim_resource.Tenant(name=COMMON_TENANT_NAME,
display_name='Common Tenant')
tenant = self.aim.get(aim_ctx, attrs)
if not tenant:
LOG.info(_LI("Creating common tenant"))
tenant = self.aim.create(aim_ctx, attrs)
return tenant
def _get_unrouted_vrf(self, aim_ctx):
tenant = self._get_common_tenant(aim_ctx)
attrs = aim_resource.VRF(tenant_name=tenant.name,
name=UNROUTED_VRF_NAME,
display_name='Common Unrouted Context')
vrf = self.aim.get(aim_ctx, attrs)
if not vrf:
LOG.info(_LI("Creating common unrouted VRF"))
vrf = self.aim.create(aim_ctx, attrs)
return vrf


@@ -18,11 +18,14 @@ from aim.api import resource as aim_resource
from aim import context as aim_context
from aim.db import model_base as aim_model_base
from keystoneclient.v3 import client as ksc_client
from neutron.api import extensions
from neutron import context
from neutron.db import api as db_api
from neutron import manager
from neutron.plugins.ml2 import config
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_address_scope
from opflexagent import constants as ofcst
PLUGIN_NAME = 'gbpservice.neutron.plugins.ml2plus.plugin.Ml2PlusPlugin'
@@ -44,8 +47,18 @@ class FakeTenant(object):
class FakeProjectManager(object):
def list(self):
return [FakeTenant('test-tenant', 'TestTenantName'),
FakeTenant('bad_tenant_id', 'BadTenantName')]
return [
FakeTenant('another_tenant', 'AnotherTenantName'),
FakeTenant('bad_tenant_id', 'BadTenantIdName'),
FakeTenant('not_admin', 'NotAdminName'),
FakeTenant('some_tenant', 'SomeTenantName'),
FakeTenant('somebody_else', 'SomebodyElseName'),
FakeTenant('t1', 'T1Name'),
FakeTenant('tenant1', 'Tenant1Name'),
FakeTenant('tenant_1', 'Tenant1Name'),
FakeTenant('tenant_2', 'Tenant2Name'),
FakeTenant('test-tenant', 'TestTenantName'),
]
class FakeKeystoneClient(object):
@@ -53,8 +66,7 @@ class FakeKeystoneClient(object):
self.projects = FakeProjectManager()
class ApicAimTestCase(test_plugin.NeutronDbPluginV2TestCase):
class ApicAimTestCase(test_address_scope.AddressScopeTestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
@@ -76,6 +88,8 @@ class ApicAimTestCase(test_plugin.NeutronDbPluginV2TestCase):
group='ml2_type_vlan')
super(ApicAimTestCase, self).setUp(PLUGIN_NAME)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.port_create_status = 'DOWN'
self.saved_keystone_client = ksc_client.Client
@@ -89,126 +103,260 @@ class ApicAimTestCase(test_plugin.NeutronDbPluginV2TestCase):
self.driver = self.plugin.mechanism_manager.mech_drivers[
'apic_aim'].obj
self.aim_mgr = aim_manager.AimManager()
self._app_profile_name = 'NeutronAP'
self._tenant_name = self._map_name({'id': 'test-tenant',
'name': 'TestTenantName'})
self._unrouted_vrf_name = 'UnroutedVRF'
def tearDown(self):
ksc_client.Client = self.saved_keystone_client
super(ApicAimTestCase, self).tearDown()
def _find_by_dn(self, dn, cls):
def _map_name(self, resource):
# Assumes no conflicts and no substition needed.
return resource['name'][:40] + '_' + resource['id'][:5]
class TestAimMapping(ApicAimTestCase):
def _get_tenant(self, tenant_name, should_exist=True):
session = db_api.get_session()
aim_ctx = aim_context.AimContext(session)
resource = cls.from_dn(dn)
return self.aim_mgr.get(aim_ctx, resource)
tenant = aim_resource.Tenant(name=tenant_name)
tenant = self.aim_mgr.get(aim_ctx, tenant)
if should_exist:
self.assertIsNotNone(tenant)
else:
self.assertIsNone(tenant)
return tenant
def _get_vrf(self, vrf_name, tenant_name, should_exist=True):
session = db_api.get_session()
aim_ctx = aim_context.AimContext(session)
vrf = aim_resource.VRF(tenant_name=tenant_name,
name=vrf_name)
vrf = self.aim_mgr.get(aim_ctx, vrf)
if should_exist:
self.assertIsNotNone(vrf)
else:
self.assertIsNone(vrf)
return vrf
class TestApicExtension(ApicAimTestCase):
def _verify_dn(self, dist_names, key, mo_types, id):
def _get_bd(self, bd_name, tenant_name, should_exist=True):
session = db_api.get_session()
aim_ctx = aim_context.AimContext(session)
bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
name=bd_name)
bd = self.aim_mgr.get(aim_ctx, bd)
if should_exist:
self.assertIsNotNone(bd)
else:
self.assertIsNone(bd)
return bd
def _get_epg(self, epg_name, tenant_name, app_profile_name,
should_exist=True):
session = db_api.get_session()
aim_ctx = aim_context.AimContext(session)
epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
app_profile_name=app_profile_name,
name=epg_name)
epg = self.aim_mgr.get(aim_ctx, epg)
if should_exist:
self.assertIsNotNone(epg)
else:
self.assertIsNone(epg)
return epg
def _check_dn(self, resource, aim_resource, key):
dist_names = resource.get('apic:distinguished_names')
self.assertIsInstance(dist_names, dict)
dn = dist_names.get(key)
self.assertIsInstance(dn, basestring)
self.assertEqual('uni/', dn[:4])
for mo_type in mo_types:
self.assertIn('/' + mo_type + '-', dn)
self.assertIn(id, dn)
self.assertEqual(aim_resource.dn, dn)
def _verify_no_dn(self, dist_names, key):
self.assertIn(key, dist_names)
self.assertIsNone(dist_names.get(key))
def _verify_network_dist_names(self, net):
id = net['id']
dist_names = net.get('apic:distinguished_names')
def _check_no_dn(self, resource, key):
dist_names = resource.get('apic:distinguished_names')
self.assertIsInstance(dist_names, dict)
self._verify_dn(dist_names, 'BridgeDomain', ['tn', 'BD'], id[:5])
self._verify_dn(dist_names, 'EndpointGroup', ['tn', 'ap', 'epg'],
id[:5])
self.assertNotIn(key, dist_names)
def test_network(self):
def _check_unrouted_vrf(self):
aim_tenant = self._get_tenant('common')
self.assertEqual('common', aim_tenant.name)
self.assertEqual("Common Tenant", aim_tenant.display_name)
aim_vrf = self._get_vrf(self._unrouted_vrf_name, 'common')
self.assertEqual('common', aim_vrf.tenant_name)
self.assertEqual(self._unrouted_vrf_name, aim_vrf.name)
self.assertEqual('Common Unrouted Context', aim_vrf.display_name)
self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)
def _check_unrouted_network(self, net, orig_net=None):
orig_net = orig_net or net
# REVISIT(rkukura): Check AIM Tenant here?
self.assertEqual('test-tenant', net['tenant_id'])
aname = self._map_name(orig_net)
aim_bd = self._get_bd(aname,
self._tenant_name)
self.assertEqual(self._tenant_name, aim_bd.tenant_name)
self.assertEqual(aname, aim_bd.name)
self.assertEqual(net['name'], aim_bd.display_name)
self.assertEqual(self._unrouted_vrf_name, aim_bd.vrf_name)
self.assertTrue(aim_bd.enable_arp_flood)
self.assertFalse(aim_bd.enable_routing)
self.assertTrue(aim_bd.limit_ip_learn_to_subnets)
self.assertEqual('proxy', aim_bd.l2_unknown_unicast_mode) # REVISIT
self.assertEqual('', aim_bd.ep_move_detect_mode) # REVISIT
self._check_dn(net, aim_bd, 'BridgeDomain')
aim_epg = self._get_epg(aname,
tenant_name=self._tenant_name,
app_profile_name=self._app_profile_name)
self.assertEqual(self._tenant_name, aim_epg.tenant_name)
self.assertEqual(self._app_profile_name, aim_epg.app_profile_name)
self.assertEqual(aname, aim_epg.name)
self.assertEqual(net['name'], aim_epg.display_name)
self.assertEqual(aname, aim_epg.bd_name)
self.assertEqual([], aim_epg.provided_contract_names)
self.assertEqual([], aim_epg.consumed_contract_names)
# REVISIT(rkukura): Check openstack_vmm_domain_names and
# physical_domain_names?
self._check_dn(net, aim_epg, 'EndpointGroup')
self._check_unrouted_vrf()
def _check_network_deleted(self, net):
aname = self._map_name(net)
self._get_bd(aname,
self._tenant_name,
should_exist=False)
self._get_epg(aname,
tenant_name=self._tenant_name,
app_profile_name=self._app_profile_name,
should_exist=False)
def _check_unrouted_subnet(self, subnet):
# REVISIT(rkukura): Check AIM Tenant here?
self.assertEqual('test-tenant', subnet['tenant_id'])
self._check_no_dn(subnet, 'Subnet')
# REVISIT(rkukura): Anything else to check?
def _check_subnet_deleted(self, subnet):
# REVISIT(rkukura): Anything to check?
pass
def _check_address_scope(self, a_s, orig_a_s=None):
orig_a_s = orig_a_s or a_s
# REVISIT(rkukura): Check AIM Tenant here?
self.assertEqual('test-tenant', a_s['tenant_id'])
aname = self._map_name(orig_a_s)
aim_vrf = self._get_vrf(aname,
self._tenant_name)
self.assertEqual(self._tenant_name, aim_vrf.tenant_name)
self.assertEqual(aname, aim_vrf.name)
self.assertEqual(a_s['name'], aim_vrf.display_name)
self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)
self._check_dn(a_s, aim_vrf, 'VRF')
def _check_address_scope_deleted(self, a_s):
aname = self._map_name(a_s)
self._get_vrf(aname,
self._tenant_name,
should_exist=False)
def test_network_lifecycle(self):
# Test create.
net = self._make_network(self.fmt, 'net1', True)['network']
net_id = net['id']
self._verify_network_dist_names(net)
# Verify AIM resources.
aim_bd = self._find_by_dn(
net['apic:distinguished_names']['BridgeDomain'],
aim_resource.BridgeDomain)
aim_epg = self._find_by_dn(
net['apic:distinguished_names']['EndpointGroup'],
aim_resource.EndpointGroup)
self.assertEqual(aim_bd.name, aim_epg.name)
self.assertEqual(aim_bd.name, aim_epg.bd_name)
orig_net = self._make_network(self.fmt, 'net1', True)['network']
net_id = orig_net['id']
self._check_unrouted_network(orig_net)
# Test show.
res = self._show('networks', net_id)['network']
self._verify_network_dist_names(res)
net = self._show('networks', net_id)['network']
self._check_unrouted_network(net)
# Test update.
data = {'network': {'name': 'newnamefornet'}}
res = self._update('networks', net_id, data)['network']
self._verify_network_dist_names(res)
net = self._update('networks', net_id, data)['network']
self._check_unrouted_network(net, orig_net)
def _verify_subnet_dist_names(self, subnet):
dist_names = subnet.get('apic:distinguished_names')
self.assertIsInstance(dist_names, dict)
if subnet['gateway_ip']:
id = subnet['gateway_ip'] + '/' + subnet['cidr'].split('/')[1]
self._verify_dn(dist_names, 'Subnet', ['tn', 'BD', 'subnet'], id)
else:
self._verify_no_dn(dist_names, 'Subnet')
# Test delete.
self._delete('networks', net_id)
self._check_network_deleted(orig_net)
def test_subnet_without_gw(self):
# Test create without gateway.
net = self._make_network(self.fmt, 'net', True)
pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
subnet = self._make_subnet(self.fmt, net, None,
'10.0.0.0/24',
allocation_pools=pools)['subnet']
subnet_id = subnet['id']
self._verify_subnet_dist_names(subnet)
def test_subnet_lifecycle(self):
# Create network.
net = self._make_network(self.fmt, 'net1', True)
# Test show.
res = self._show('subnets', subnet_id)['subnet']
self._verify_subnet_dist_names(res)
# Test update.
data = {'subnet': {'name': 'newnameforsubnet'}}
res = self._update('subnets', subnet_id, data)['subnet']
self._verify_subnet_dist_names(res)
# Test update adding gateay.
data = {'subnet': {'gateway_ip': '10.0.0.1'}}
res = self._update('subnets', subnet_id, data)['subnet']
self._verify_subnet_dist_names(res)
# Test show after adding gateway.
res = self._show('subnets', subnet_id)['subnet']
self._verify_subnet_dist_names(res)
def test_subnet_with_gw(self):
# Test create.
net = self._make_network(self.fmt, 'net', True)
subnet = self._make_subnet(self.fmt, net, '10.0.1.1',
'10.0.1.0/24')['subnet']
subnet = self._make_subnet(
self.fmt, net, '10.0.0.1', '10.0.0.0/24')['subnet']
subnet_id = subnet['id']
self._verify_subnet_dist_names(subnet)
self._check_unrouted_subnet(subnet)
# Test show.
res = self._show('subnets', subnet_id)['subnet']
self._verify_subnet_dist_names(res)
subnet = self._show('subnets', subnet_id)['subnet']
self._check_unrouted_subnet(subnet)
# Test update.
data = {'subnet': {'name': 'newnameforsubnet'}}
res = self._update('subnets', subnet_id, data)['subnet']
self._verify_subnet_dist_names(res)
data = {'subnet': {'name': 'newnamefornet'}}
subnet = self._update('subnets', subnet_id, data)['subnet']
self._check_unrouted_subnet(subnet)
# Test update removing gateway.
data = {'subnet': {'gateway_ip': None}}
res = self._update('subnets', subnet_id, data)['subnet']
self._verify_subnet_dist_names(res)
# Test delete.
self._delete('subnets', subnet_id)
self._check_subnet_deleted(subnet)
# Test show after removing gateway.
res = self._show('subnets', subnet_id)['subnet']
self._verify_subnet_dist_names(res)
def test_address_scope_lifecycle(self):
# Test create.
orig_a_s = self._make_address_scope(
self.fmt, 4, name='as1')['address_scope']
a_s_id = orig_a_s['id']
self._check_address_scope(orig_a_s)
# Test show.
a_s = self._show('address-scopes', a_s_id)['address_scope']
self._check_address_scope(a_s)
# Test update.
data = {'address_scope': {'name': 'newnameforaddressscope'}}
a_s = self._update('address-scopes', a_s_id, data)['address_scope']
self._check_address_scope(a_s, orig_a_s)
# Test delete.
self._delete('address-scopes', a_s_id)
self._check_address_scope_deleted(orig_a_s)
# def test_create_subnet_with_address_scope(self):
# net = self._make_network(self.fmt, 'net1', True)
# name = self._map_name(net['network'])
# self._check(name, vrf_name='UnroutedVRF')
# a_s = self._make_address_scope(self.fmt, 4, name='as1')
# a_s_id = a_s['address_scope']['id']
# # vrf_name = self._map_name(a_s['address_scope'])
# sp = self._make_subnetpool(self.fmt, ['10.0.0.0/8'], name='sp1',
# tenant_id='test-tenant', # REVISIT
# address_scope_id=a_s_id,
# default_prefixlen=24)
# sp_id = sp['subnetpool']['id']
# self._make_subnet(self.fmt, net, None, None, subnetpool_id=sp_id)
# # REVISIT(rkukura): Should the address_scopes VRF be used
# # immediately, or not until connected to a router?
# #
# # self._check(name, vrf_name=vrf_name)
# self._check(name, vrf_name='UnroutedVRF')
class TestPortBinding(ApicAimTestCase):