Decompose Apic ML2 mechanism driver

As part of the vendor decomposition effort, the
APIC ML2 driver code is removed and replaced by
its version in the openstack/networking-cisco
repo.

Change-Id: Iffb5245b4c88b65afe62dd7435ee80489a654fee
Partial-implements: blueprint core-vendor-decomposition
changes/65/193365/18
Ivar Lazzaro 8 years ago committed by Henry Gessau
parent bc6a03e5c3
commit e0ba53a09a

@@ -137,76 +137,6 @@
# mcast_ranges =
# Example: mcast_ranges = 224.0.0.1:224.0.0.3,224.0.1.1:224.0.1.
[ml2_cisco_apic]
# Hostname:port list of APIC controllers
# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
# Username for the APIC controller
# apic_username = user
# Password for the APIC controller
# apic_password = password
# Whether to use SSL for connecting to the APIC controller or not
# apic_use_ssl = True
# How to map names to APIC: use_uuid or use_name
# apic_name_mapping = use_name
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain = openstack
# apic_vlan_ns_name = openstack_ns
# apic_node_profile = openstack_profile
# apic_entity_profile = openstack_entity
# apic_function_profile = openstack_function
# apic_app_profile_name = openstack_app
# Agent timers for State reporting and topology discovery
# apic_sync_interval = 30
# apic_agent_report_interval = 30
# apic_agent_poll_interval = 2
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [apic_switch:<switch_id_from_the_apic>]
# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in OpenStack. e.g.
#
# [apic_switch:17]
# ubuntu,ubuntu1 = 1/10
# ubuntu2,ubuntu3 = 1/11
#
# [apic_switch:18]
# ubuntu5,ubuntu6 = 1/1
# ubuntu7,ubuntu8 = 1/2
# Describe external connectivity.
# In this section you can specify the external network configuration in order
# for the plugin to be able to teach the fabric how to route the internal
# traffic to the outside world. The external connectivity configuration
# format is as follows:
#
# [apic_external_network:<externalNetworkName>]
# switch = <switch_id_from_the_apic>
# port = <switchport_the_external_router_is_connected_to>
# encap = <encapsulation>
# cidr_exposed = <cidr_exposed_to_the_external_router>
# gateway_ip = <ip_of_the_external_gateway>
#
# An example follows:
# [apic_external_network:network_ext]
# switch=203
# port=1/34
# encap=vlan-100
# cidr_exposed=10.10.40.2/16
# gateway_ip=10.10.40.1
[ml2_cisco_ucsm]
# Cisco UCS Manager IP address

@@ -1,17 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# cisco-apic filters
lldpctl: CommandFilter, lldpctl, root
# ip_lib filters
ip: IpFilter, ip, root
find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
ip_exec: IpNetnsExecFilter, ip, root

@@ -24,4 +24,12 @@ LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors',
FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies']
TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES)
DRIVER_TABLES = [
# Models moved to openstack/networking-cisco
'cisco_ml2_apic_contracts',
'cisco_ml2_apic_names',
'cisco_ml2_apic_host_links',
# Add your tables with moved models here^. Please end with a comma.
]
TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + DRIVER_TABLES)

@@ -55,7 +55,6 @@ from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.ml2.drivers.arista import db # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers.cisco.apic import apic_model # noqa
from neutron.plugins.ml2.drivers.cisco.n1kv import n1kv_models # noqa
from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2 as ml2_nexus_models_v2)

@@ -1,193 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm

from neutron.db import api as db_api
from neutron.db import model_base
from neutron.db import models_v2
from neutron.plugins.ml2 import models as models_ml2


class RouterContract(model_base.BASEV2, models_v2.HasTenant):

    """Contracts created on the APIC.

    tenant_id represents the owner (APIC side) of the contract.
    router_id is the UUID of the router (Neutron side) this contract is
    referring to.
    """

    __tablename__ = 'cisco_ml2_apic_contracts'

    router_id = sa.Column(sa.String(64), sa.ForeignKey('routers.id',
                                                       ondelete='CASCADE'),
                          primary_key=True)


class HostLink(model_base.BASEV2):

    """Connectivity of host links."""

    __tablename__ = 'cisco_ml2_apic_host_links'

    host = sa.Column(sa.String(255), nullable=False, primary_key=True)
    ifname = sa.Column(sa.String(64), nullable=False, primary_key=True)
    ifmac = sa.Column(sa.String(32), nullable=True)
    swid = sa.Column(sa.String(32), nullable=False)
    module = sa.Column(sa.String(32), nullable=False)
    port = sa.Column(sa.String(32), nullable=False)


class ApicName(model_base.BASEV2):

    """Mapping of names created on the APIC."""

    __tablename__ = 'cisco_ml2_apic_names'

    neutron_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
    neutron_type = sa.Column(sa.String(32), nullable=False, primary_key=True)
    apic_name = sa.Column(sa.String(255), nullable=False)


class ApicDbModel(object):

    """DB Model to manage all APIC DB interactions."""

    def __init__(self):
        self.session = db_api.get_session()

    def get_contract_for_router(self, router_id):
        """Returns the specified router's contract."""
        return self.session.query(RouterContract).filter_by(
            router_id=router_id).first()

    def write_contract_for_router(self, tenant_id, router_id):
        """Stores a new contract for the given tenant."""
        contract = RouterContract(tenant_id=tenant_id,
                                  router_id=router_id)
        with self.session.begin(subtransactions=True):
            self.session.add(contract)
        return contract

    def update_contract_for_router(self, tenant_id, router_id):
        with self.session.begin(subtransactions=True):
            contract = self.session.query(RouterContract).filter_by(
                router_id=router_id).with_lockmode('update').first()
            if contract:
                contract.tenant_id = tenant_id
                self.session.merge(contract)
            else:
                self.write_contract_for_router(tenant_id, router_id)

    def delete_contract_for_router(self, router_id):
        with self.session.begin(subtransactions=True):
            try:
                self.session.query(RouterContract).filter_by(
                    router_id=router_id).delete()
            except orm.exc.NoResultFound:
                return

    def add_hostlink(self, host, ifname, ifmac, swid, module, port):
        link = HostLink(host=host, ifname=ifname, ifmac=ifmac,
                        swid=swid, module=module, port=port)
        with self.session.begin(subtransactions=True):
            self.session.merge(link)

    def get_hostlinks(self):
        return self.session.query(HostLink).all()

    def get_hostlink(self, host, ifname):
        return self.session.query(HostLink).filter_by(
            host=host, ifname=ifname).first()

    def get_hostlinks_for_host(self, host):
        return self.session.query(HostLink).filter_by(
            host=host).all()

    def get_hostlinks_for_host_switchport(self, host, swid, module, port):
        return self.session.query(HostLink).filter_by(
            host=host, swid=swid, module=module, port=port).all()

    def get_hostlinks_for_switchport(self, swid, module, port):
        return self.session.query(HostLink).filter_by(
            swid=swid, module=module, port=port).all()

    def delete_hostlink(self, host, ifname):
        with self.session.begin(subtransactions=True):
            try:
                self.session.query(HostLink).filter_by(host=host,
                                                       ifname=ifname).delete()
            except orm.exc.NoResultFound:
                return

    def get_switches(self):
        return self.session.query(HostLink.swid).distinct()

    def get_modules_for_switch(self, swid):
        return self.session.query(
            HostLink.module).filter_by(swid=swid).distinct()

    def get_ports_for_switch_module(self, swid, module):
        return self.session.query(
            HostLink.port).filter_by(swid=swid, module=module).distinct()

    def get_switch_and_port_for_host(self, host):
        return self.session.query(
            HostLink.swid, HostLink.module, HostLink.port).filter_by(
                host=host).distinct()

    def get_tenant_network_vlan_for_host(self, host):
        pb = models_ml2.PortBinding
        po = models_v2.Port
        ns = models_ml2.NetworkSegment
        return self.session.query(
            po.tenant_id, ns.network_id, ns.segmentation_id).filter(
                po.id == pb.port_id).filter(pb.host == host).filter(
                    po.network_id == ns.network_id).distinct()

    def add_apic_name(self, neutron_id, neutron_type, apic_name):
        name = ApicName(neutron_id=neutron_id,
                        neutron_type=neutron_type,
                        apic_name=apic_name)
        with self.session.begin(subtransactions=True):
            self.session.add(name)

    def update_apic_name(self, neutron_id, neutron_type, apic_name):
        with self.session.begin(subtransactions=True):
            name = self.session.query(ApicName).filter_by(
                neutron_id=neutron_id,
                neutron_type=neutron_type).with_lockmode('update').first()
            if name:
                name.apic_name = apic_name
                self.session.merge(name)
            else:
                self.add_apic_name(neutron_id, neutron_type, apic_name)

    def get_apic_names(self):
        return self.session.query(ApicName).all()

    def get_apic_name(self, neutron_id, neutron_type):
        return self.session.query(ApicName.apic_name).filter_by(
            neutron_id=neutron_id, neutron_type=neutron_type).first()

    def delete_apic_name(self, neutron_id):
        with self.session.begin(subtransactions=True):
            try:
                self.session.query(ApicName).filter_by(
                    neutron_id=neutron_id).delete()
            except orm.exc.NoResultFound:
                return

@@ -1,111 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_service import loopingcall

from neutron.common import constants as n_constants
from neutron import context
from neutron.i18n import _LW
from neutron import manager
from neutron.plugins.ml2 import db as l2_db
from neutron.plugins.ml2 import driver_context

LOG = log.getLogger(__name__)


class SynchronizerBase(object):

    def __init__(self, driver, interval=None):
        self.core_plugin = manager.NeutronManager.get_plugin()
        self.driver = driver
        self.interval = interval

    def sync(self, f, *args, **kwargs):
        """Fire synchronization based on interval.

        Interval can be 0 for 'sync once' >0 for 'sync periodically' and
        <0 for 'no sync'
        """
        if self.interval:
            if self.interval > 0:
                loop_call = loopingcall.FixedIntervalLoopingCall(f, *args,
                                                                 **kwargs)
                loop_call.start(interval=self.interval)
                return loop_call
            else:
                # Fire once
                f(*args, **kwargs)


class ApicBaseSynchronizer(SynchronizerBase):

    def sync_base(self):
        self.sync(self._sync_base)

    def _sync_base(self):
        ctx = context.get_admin_context()
        # Sync Networks
        for network in self.core_plugin.get_networks(ctx):
            mech_context = driver_context.NetworkContext(self.core_plugin, ctx,
                                                         network)
            try:
                self.driver.create_network_postcommit(mech_context)
            except Exception:
                LOG.warn(_LW("Create network postcommit failed for "
                             "network %s"), network['id'])
        # Sync Subnets
        for subnet in self.core_plugin.get_subnets(ctx):
            mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
                                                        subnet)
            try:
                self.driver.create_subnet_postcommit(mech_context)
            except Exception:
                LOG.warn(_LW("Create subnet postcommit failed for"
                             " subnet %s"), subnet['id'])
        # Sync Ports (compute/gateway/dhcp)
        for port in self.core_plugin.get_ports(ctx):
            _, binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                           port['id'])
            network = self.core_plugin.get_network(ctx, port['network_id'])
            mech_context = driver_context.PortContext(self.core_plugin, ctx,
                                                      port, network, binding,
                                                      [])
            try:
                self.driver.create_port_postcommit(mech_context)
            except Exception:
                LOG.warn(_LW("Create port postcommit failed for"
                             " port %s"), port['id'])


class ApicRouterSynchronizer(SynchronizerBase):

    def sync_router(self):
        self.sync(self._sync_router)

    def _sync_router(self):
        ctx = context.get_admin_context()
        # Sync Router Interfaces
        filters = {'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF]}
        for interface in self.core_plugin.get_ports(ctx, filters=filters):
            try:
                self.driver.add_router_interface_postcommit(
                    ctx, interface['device_id'],
                    {'port_id': interface['id']})
            except Exception:
                LOG.warn(_LW("Add interface postcommit failed for "
                             "port %s"), interface['id'])

@@ -1,343 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys

import eventlet
eventlet.monkey_patch()

from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_service import service as svc

from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config as common_cfg
from neutron.common import rpc
from neutron.common import utils as neutron_utils
from neutron.db import agents_db
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as ma
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron import service

ACI_PORT_DESCR_FORMATS = [
    r'topology/pod-1/node-(\d+)/sys/conng/path-\[eth(\d+)/(\d+)\]',
    r'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+)\]',
]

AGENT_FORCE_UPDATE_COUNT = 100
BINARY_APIC_SERVICE_AGENT = 'neutron-cisco-apic-service-agent'
BINARY_APIC_HOST_AGENT = 'neutron-cisco-apic-host-agent'
TOPIC_APIC_SERVICE = 'apic-service'
TYPE_APIC_SERVICE_AGENT = 'cisco-apic-service-agent'
TYPE_APIC_HOST_AGENT = 'cisco-apic-host-agent'

LOG = logging.getLogger(__name__)


class ApicTopologyService(manager.Manager):

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, host=None):
        if host is None:
            host = neutron_utils.get_hostname()
        super(ApicTopologyService, self).__init__(host=host)

        self.conf = cfg.CONF.ml2_cisco_apic
        self.conn = None
        self.peers = {}
        self.invalid_peers = []
        self.dispatcher = None
        self.state = None
        self.state_agent = None
        self.topic = TOPIC_APIC_SERVICE
        self.apic_manager = ma.APICMechanismDriver.get_apic_manager(False)

    def init_host(self):
        LOG.info(_LI("APIC service agent starting ..."))
        self.state = {
            'binary': BINARY_APIC_SERVICE_AGENT,
            'host': self.host,
            'topic': self.topic,
            'configurations': {},
            'start_flag': True,
            'agent_type': TYPE_APIC_SERVICE_AGENT,
        }

        self.conn = rpc.create_connection(new=True)
        self.dispatcher = [self, agents_db.AgentExtRpcCallback()]
        self.conn.create_consumer(
            self.topic, self.dispatcher, fanout=True)
        self.conn.consume_in_threads()

    def after_start(self):
        LOG.info(_LI("APIC service agent started"))

    def report_send(self, context):
        if not self.state_agent:
            return
        LOG.debug("APIC service agent: sending report state")
        try:
            self.state_agent.report_state(context, self.state)
            self.state.pop('start_flag', None)
        except AttributeError:
            # This means the server does not support report_state
            # ignore it
            return
        except Exception:
            LOG.exception(_LE("APIC service agent: failed in reporting state"))

    @lockutils.synchronized('apic_service')
    def update_link(self, context,
                    host, interface, mac,
                    switch, module, port):
        LOG.debug("APIC service agent: received update_link: %s",
                  ", ".join(map(str,
                                [host, interface, mac, switch, module, port])))

        nlink = (host, interface, mac, switch, module, port)
        clink = self.peers.get((host, interface), None)

        if switch == 0:
            # this is a link delete, remove it
            if clink is not None:
                self.apic_manager.remove_hostlink(*clink)
                self.peers.pop((host, interface))
        else:
            if clink is None:
                # add new link to database
                self.apic_manager.add_hostlink(*nlink)
                self.peers[(host, interface)] = nlink
            elif clink != nlink:
                # delete old link and add new one (don't update in place)
                self.apic_manager.remove_hostlink(*clink)
                self.peers.pop((host, interface))
                self.apic_manager.add_hostlink(*nlink)
                self.peers[(host, interface)] = nlink


class ApicTopologyServiceNotifierApi(object):

    def __init__(self):
        target = oslo_messaging.Target(topic=TOPIC_APIC_SERVICE, version='1.0')
        self.client = rpc.get_client(target)

    def update_link(self, context, host, interface, mac, switch, module, port):
        cctxt = self.client.prepare(version='1.1', fanout=True)
        cctxt.cast(context, 'update_link', host=host, interface=interface,
                   mac=mac, switch=switch, module=module, port=port)

    def delete_link(self, context, host, interface):
        cctxt = self.client.prepare(version='1.1', fanout=True)
        cctxt.cast(context, 'delete_link', host=host, interface=interface,
                   mac=None, switch=0, module=0, port=0)


class ApicTopologyAgent(manager.Manager):

    def __init__(self, host=None):
        if host is None:
            host = neutron_utils.get_hostname()
        super(ApicTopologyAgent, self).__init__(host=host)

        self.conf = cfg.CONF.ml2_cisco_apic
        self.count_current = 0
        self.count_force_send = AGENT_FORCE_UPDATE_COUNT
        self.interfaces = {}
        self.lldpcmd = None
        self.peers = {}
        self.port_desc_re = list(map(re.compile, ACI_PORT_DESCR_FORMATS))
        self.service_agent = ApicTopologyServiceNotifierApi()
        self.state = None
        self.state_agent = None
        self.topic = TOPIC_APIC_SERVICE
        self.uplink_ports = []
        self.invalid_peers = []

    def init_host(self):
        LOG.info(_LI("APIC host agent: agent starting on %s"), self.host)
        self.state = {
            'binary': BINARY_APIC_HOST_AGENT,
            'host': self.host,
            'topic': self.topic,
            'configurations': {},
            'start_flag': True,
            'agent_type': TYPE_APIC_HOST_AGENT,
        }

        self.uplink_ports = []
        for inf in self.conf.apic_host_uplink_ports:
            if ip_lib.device_exists(inf):
                self.uplink_ports.append(inf)
            else:
                # ignore unknown interfaces
                LOG.error(_LE("No such interface (ignored): %s"), inf)
        self.lldpcmd = ['lldpctl', '-f', 'keyvalue'] + self.uplink_ports

    def after_start(self):
        LOG.info(_LI("APIC host agent: started on %s"), self.host)

    @periodic_task.periodic_task
    def _check_for_new_peers(self, context):
        LOG.debug("APIC host agent: _check_for_new_peers")

        if not self.lldpcmd:
            return
        try:
            # Check if we must send update even if there is no change
            force_send = False
            self.count_current += 1
            if self.count_current >= self.count_force_send:
                force_send = True
                self.count_current = 0

            # Check for new peers
            new_peers = self._get_peers()
            new_peers = self._valid_peers(new_peers)

            # Make a copy of current interfaces
            curr_peers = {}
            for interface in self.peers:
                curr_peers[interface] = self.peers[interface]
            # Based curr -> new updates, add the new interfaces
            self.peers = {}
            for interface in new_peers:
                peer = new_peers[interface]
                self.peers[interface] = peer
                if (interface in curr_peers and
                        curr_peers[interface] != peer):
                    self.service_agent.update_link(
                        context, peer[0], peer[1], None, 0, 0, 0)
                if (interface not in curr_peers or
                        curr_peers[interface] != peer or
                        force_send):
                    self.service_agent.update_link(context, *peer)
                if interface in curr_peers:
                    curr_peers.pop(interface)

            # Any interface still in curr_peers need to be deleted
            for peer in curr_peers.values():
                self.service_agent.update_link(
                    context, peer[0], peer[1], None, 0, 0, 0)

        except Exception:
            LOG.exception(_LE("APIC service agent: exception in LLDP parsing"))

    def _get_peers(self):
        peers = {}
        lldpkeys = utils.execute(self.lldpcmd, run_as_root=True)
        for line in lldpkeys.splitlines():
            if '=' not in line:
                continue
            fqkey, value = line.split('=', 1)
            lldp, interface, key = fqkey.split('.', 2)
            if key == 'port.descr':
                for regexp in self.port_desc_re:
                    match = regexp.match(value)
                    if match:
                        mac = self._get_mac(interface)
                        switch, module, port = match.group(1, 2, 3)
                        peer = (self.host, interface, mac,
                                switch, module, port)
                        if interface not in peers:
                            peers[interface] = []
                        peers[interface].append(peer)
        return peers

    def _valid_peers(self, peers):
        # Reduce the peers array to one valid peer per interface
        # NOTE:
        # There is a bug in lldpd daemon that it keeps reporting
        # old peers even after their updates have stopped
        # we keep track of that report remove them from peers
        valid_peers = {}
        invalid_peers = []
        for interface in peers:
            curr_peer = None
            for peer in peers[interface]:
                if peer in self.invalid_peers or curr_peer:
                    invalid_peers.append(peer)
                else:
                    curr_peer = peer
            if curr_peer is not None:
                valid_peers[interface] = curr_peer

        self.invalid_peers = invalid_peers
        return valid_peers

    def _get_mac(self, interface):
        if interface in self.interfaces:
            return self.interfaces[interface]
        try:
            mac = ip_lib.IPDevice(interface).link.address
            self.interfaces[interface] = mac
            return mac
        except Exception:
            # we can safely ignore it, it is only needed for debugging
            LOG.exception(
                _LE("APIC service agent: can not get MACaddr for %s"),
                interface)

    def report_send(self, context):
        if not self.state_agent:
            return
        LOG.debug("APIC host agent: sending report state")
        try:
            self.state_agent.report_state(context, self.state)
            self.state.pop('start_flag', None)
        except AttributeError:
            # This means the server does not support report_state
            # ignore it
            return
        except Exception:
            LOG.exception(_LE("APIC host agent: failed in reporting state"))


def launch(binary, manager, topic=None):
    cfg.CONF(project='neutron')
    common_cfg.init(sys.argv[1:])
    config.setup_logging()
    report_period = cfg.CONF.ml2_cisco_apic.apic_agent_report_interval
    poll_period = cfg.CONF.ml2_cisco_apic.apic_agent_poll_interval
    server = service.Service.create(
        binary=binary, manager=manager, topic=topic,
        report_interval=report_period, periodic_interval=poll_period)
    svc.launch(cfg.CONF, server).wait()


def service_main():
    launch(
        BINARY_APIC_SERVICE_AGENT,
        'neutron.plugins.ml2.drivers.' +
        'cisco.apic.apic_topology.ApicTopologyService',
        TOPIC_APIC_SERVICE)


def agent_main():
    launch(
        BINARY_APIC_HOST_AGENT,
        'neutron.plugins.ml2.drivers.' +
        'cisco.apic.apic_topology.ApicTopologyAgent')
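
Editorial aside, not part of this change: the ACI_PORT_DESCR_FORMATS patterns in the removed apic_topology.py above are what let the host agent derive the switch, module and port from an LLDP port description. A minimal standalone sketch of that matching, using a made-up description string purely for illustration:

import re

# Second pattern from ACI_PORT_DESCR_FORMATS above
pattern = r'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+)\]'

# Hypothetical LLDP port description as reported by an ACI leaf switch
descr = 'topology/pod-1/paths-101/pathep-[eth1/33]'

match = re.match(pattern, descr)
if match:
    # Yields ('101', '1', '33'): the switch, module and port values the
    # host agent would forward to the service agent via update_link().
    switch, module, port = match.group(1, 2, 3)
    print(switch, module, port)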

@@ -1,138 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg


# oslo_config limits ${var} expansion to global variables
# That is why apic_system_id as a global variable
global_opts = [
    cfg.StrOpt('apic_system_id',
               default='openstack',
               help=_("Prefix for APIC domain/names/profiles created")),
]

cfg.CONF.register_opts(global_opts)


apic_opts = [
    cfg.ListOpt('apic_hosts',
                default=[],
                help=_("An ordered list of host names or IP addresses of "
                       "the APIC controller(s).")),
    cfg.StrOpt('apic_username',
               help=_("Username for the APIC controller")),
    cfg.StrOpt('apic_password',
               help=_("Password for the APIC controller"), secret=True),
    cfg.StrOpt('apic_name_mapping',
               default='use_name',
               help=_("Name mapping strategy to use: use_uuid | use_name")),
    cfg.BoolOpt('apic_use_ssl', default=True,
                help=_("Use SSL to connect to the APIC controller")),
    cfg.StrOpt('apic_domain_name',
               default='${apic_system_id}',
               help=_("Name for the domain created on APIC")),
    cfg.StrOpt('apic_app_profile_name',
               default='${apic_system_id}_app',
               help=_("Name for the app profile used for Openstack")),
    cfg.StrOpt('apic_vlan_ns_name',
               default='${apic_system_id}_vlan_ns',
               help=_("Name for the vlan namespace to be used for Openstack")),
    cfg.StrOpt('apic_node_profile',
               default='${apic_system_id}_node_profile',
               help=_("Name of the node profile to be created")),
    cfg.StrOpt('apic_entity_profile',
               default='${apic_system_id}_entity_profile',
               help=_("Name of the entity profile to be created")),
    cfg.StrOpt('apic_function_profile',
               default='${apic_system_id}_function_profile',
               help=_("Name of the function profile to be created")),
    cfg.StrOpt('apic_lacp_profile',
               default='${apic_system_id}_lacp_profile',
               help=_("Name of the LACP profile to be created")),
    cfg.ListOpt('apic_host_uplink_ports',
                default=[],
                help=_('The uplink ports to check for ACI connectivity')),
    cfg.ListOpt('apic_vpc_pairs',
                default=[],
                help=_('The switch pairs for VPC connectivity')),
    cfg.StrOpt('apic_vlan_range',
               default='2:4093',
               help=_("Range of VLAN's to be used for Openstack")),
    cfg.IntOpt('apic_sync_interval',
               default=0,
               help=_("Synchronization interval in seconds")),
    cfg.FloatOpt('apic_agent_report_interval',
                 default=30,
                 help=_('Interval between agent status updates (in sec)')),
    cfg.FloatOpt('apic_agent_poll_interval',
                 default=2,
                 help=_('Interval between agent poll for topology (in sec)')),
]

cfg.CONF.register_opts(apic_opts, "ml2_cisco_apic")


def _get_specific_config(prefix):
    """retrieve config in the format [<prefix>:<value>]."""
    conf_dict = {}
    multi_parser = cfg.MultiConfigParser()
    multi_parser.read(cfg.CONF.config_file)
    for parsed_file in multi_parser.parsed:
        for parsed_item in parsed_file.keys():
            if parsed_item.startswith(prefix):
                switch, switch_id = parsed_item.split(':')
                if switch.lower() == prefix:
                    conf_dict[switch_id] = parsed_file[parsed_item].items()
    return conf_dict


def create_switch_dictionary():
    switch_dict = {}
    conf = _get_specific_config('apic_switch')
    for switch_id in conf:
        switch_dict[switch_id] = switch_dict.get(switch_id, {})
        for host_list, port in conf[switch_id]:
            hosts = host_list.split(',')
            port = port[0]
            switch_dict[switch_id][port] = (
                switch_dict[switch_id].get(port, []) + hosts)
    return switch_dict


def create_vpc_dictionary():
    vpc_dict = {}
    for pair in cfg.CONF.ml2_cisco_apic.apic_vpc_pairs:
        pair_tuple = pair.split(':')
        if (len(pair_tuple) != 2 or
                any(map(lambda x: not x.isdigit(), pair_tuple))):
            # Validation error, ignore this item
            continue
        vpc_dict[pair_tuple[0]] = pair_tuple[1]
        vpc_dict[pair_tuple[1]] = pair_tuple[0]
    return vpc_dict


def create_external_network_dictionary():
    router_dict = {}
    conf = _get_specific_config('apic_external_network')
    for net_id in conf:
        router_dict[net_id] = router_dict.get(net_id, {})
        for key, value in conf[net_id]:
            router_dict[net_id][key] = value[0] if value else None

    return router_dict
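
Illustrative aside, not part of the diff: given the sample [apic_switch:17] and [apic_switch:18] sections shown in the ml2_conf_cisco.ini excerpt earlier in this change, create_switch_dictionary() above builds a nested switch -> port -> hosts mapping, and create_vpc_dictionary() a symmetric pair mapping. The values below are a sketch based only on those sample comments:

# Expected shape of create_switch_dictionary() output for the sample config
switch_dict = {
    '17': {'1/10': ['ubuntu', 'ubuntu1'],
           '1/11': ['ubuntu2', 'ubuntu3']},
    '18': {'1/1': ['ubuntu5', 'ubuntu6'],
           '1/2': ['ubuntu7', 'ubuntu8']},
}

# With a hypothetical apic_vpc_pairs = ['201:202'], create_vpc_dictionary()
# would yield the symmetric mapping below; malformed pairs are skipped.
vpc_dict = {'201': '202', '202': '201'}

Both dictionaries are consumed by APICManager through the network_config built in get_apic_manager() in the mechanism driver shown next.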

@@ -1,287 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from apicapi import apic_manager
from keystoneclient.v2_0 import client as keyclient
import netaddr
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log

from neutron.common import constants as n_constants
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.cisco.apic import apic_model
from neutron.plugins.ml2.drivers.cisco.apic import apic_sync
from neutron.plugins.ml2.drivers.cisco.apic import config
from neutron.plugins.ml2 import models


LOG = log.getLogger(__name__)


class APICMechanismDriver(api.MechanismDriver):

    @staticmethod
    def get_apic_manager(client=True):
        apic_config = cfg.CONF.ml2_cisco_apic
        network_config = {
            'vlan_ranges': cfg.CONF.ml2_type_vlan.network_vlan_ranges,
            'switch_dict': config.create_switch_dictionary(),
            'vpc_dict': config.create_vpc_dictionary(),
            'external_network_dict':
                config.create_external_network_dictionary(),
        }
        apic_system_id = cfg.CONF.apic_system_id
        keyclient_param = keyclient if client else None
        keystone_authtoken = cfg.CONF.keystone_authtoken if client else None
        return apic_manager.APICManager(apic_model.ApicDbModel(), log,
                                        network_config, apic_config,
                                        keyclient_param, keystone_authtoken,
                                        apic_system_id)

    @staticmethod
    def get_base_synchronizer(inst):
        apic_config = cfg.CONF.ml2_cisco_apic
        return apic_sync.ApicBaseSynchronizer(inst,
                                              apic_config.apic_sync_interval)

    @staticmethod
    def get_router_synchronizer(inst):
        apic_config = cfg.CONF.ml2_cisco_apic
        return apic_sync.ApicRouterSynchronizer(inst,
                                                apic_config.apic_sync_interval)

    def initialize(self):
        # initialize apic
        self.apic_manager = APICMechanismDriver.get_apic_manager()
        self.name_mapper = self.apic_manager.apic_mapper
        self.synchronizer = None
        self.apic_manager.ensure_infra_created_on_apic()
        self.apic_manager.ensure_bgp_pod_policy_created_on_apic()

    def sync_init(f):
        def inner(inst, *args, **kwargs):
            if not inst.synchronizer:
                inst.synchronizer = (
                    APICMechanismDriver.get_base_synchronizer(inst))
                inst.synchronizer.sync_base()
            # pylint: disable=not-callable
            return f(inst, *args, **kwargs)
        return inner

    @lockutils.synchronized('apic-portlock')
    def _perform_path_port_operations(self, context, port):
        # Get network
        network_id = context.network.current['id']
        anetwork_id = self.name_mapper.network(context, network_id)
        # Get tenant details from port context
        tenant_id = context.current['tenant_id']
        tenant_id = self.name_mapper.tenant(context, tenant_id)

        # Get segmentation id
        segment = context.top_bound_segment
        if not segment:
            LOG.debug("Port %s is not bound to a segment", port)
            return
        seg = None
        if (segment.get(api.NETWORK_TYPE) in [constants.TYPE_VLAN]):
            seg = segment.get(api.SEGMENTATION_ID)
        # hosts on which this vlan is provisioned
        host = context.host
        # Create a static path attachment for the host/epg/switchport combo
        with self.apic_manager.apic.transaction() as trs:
            self.apic_manager.ensure_path_created_for_port(
                tenant_id, anetwork_id, host, seg, transaction=trs)

    def _perform_gw_port_operations(self, context, port):
        router_id = port.get('device_id')
        network = context.network.current
        anetwork_id = self.name_mapper.network(context, network['id'])
        router_info = self.apic_manager.ext_net_dict.get(network['name'])

        if router_id and router_info:
            address = router_info['cidr_exposed']
            next_hop = router_info['gateway_ip']
            encap = router_info.get('encap')  # No encap if None
            switch = router_info['switch']
            module, sport = router_info['port'].split('/')
            with self.apic_manager.apic.transaction() as trs:
                # Get/Create contract
                arouter_id = self.name_mapper.router(context, router_id)
                cid = self.apic_manager.get_router_contract(arouter_id)
                # Ensure that the external ctx exists
                self.apic_manager.ensure_context_enforced()
                # Create External Routed Network and configure it
                self.apic_manager.ensure_external_routed_network_created(
                    anetwork_id, transaction=trs)
                self.apic_manager.ensure_logical_node_profile_created(
                    anetwork_id, switch, module, sport, encap,
                    address, transaction=trs)
                self.apic_manager.ensure_static_route_created(
                    anetwork_id, switch, next_hop, transaction=trs)
                self.apic_manager.ensure_external_epg_created(
                    anetwork_id, transaction=trs)
                self.apic_manager.ensure_external_epg_consumed_contract(
                    anetwork_id, cid, transaction=trs)
                self.apic_manager.ensure_external_epg_provided_contract(
                    anetwork_id, cid, transaction=trs)

    def _perform_port_operations(self, context):
        # Get port
        port = context.current
        # Check if a compute port
        if context.host:
            self._perform_path_port_operations(context, port)
        if port.get('device_owner') == n_constants.DEVICE_OWNER_ROUTER_GW:
            self._perform_gw_port_operations(context, port)

    def _delete_contract(self, context):
        port = context.current
        network_id = self.name_mapper.network(
            context, context.network.current['id'])
        arouter_id = self.name_mapper.router(context,
                                             port.get('device_id'))
        self.apic_manager.delete_external_epg_contract(arouter_id,
                                                       network_id)

    def _get_active_path_count(self, context):
        return context._plugin_context.session.query(
            models.PortBinding).filter_by(
                host=context.host, segment=context._binding.segment).count()

    @lockutils.synchronized('apic-portlock')
    def _delete_port_path(self, context, atenant_id, anetwork_id):
        if not self._get_active_path_count(context):
            self.apic_manager.ensure_path_deleted_for_port(
                atenant_id, anetwork_id,
                context.host)

    def _delete_path_if_last(self, context):
        if not self._get_active_path_count(context):
            tenant_id = context.current['tenant_id']
            atenant_id = self.name_mapper.tenant(context, tenant_id)
            network_id = context.network.current['id']
            anetwork_id = self.name_mapper.network(context, network_id)
            self._delete_port_path(context, atenant_id, anetwork_id)

    def _get_subnet_info(self, context, subnet):
        if subnet['gateway_ip']:
            tenant_id = subnet['tenant_id']
            network_id = subnet['network_id']
            network = context._plugin.get_network(context._plugin_context,
                                                  network_id)
            if not network.get('router:external'):
                cidr = netaddr.IPNetwork(subnet['cidr'])
                gateway_ip = '%s/%s' % (subnet['gateway_ip'],
                                        str(cidr.prefixlen))

                # Convert to APIC IDs
                tenant_id = self.name_mapper.tenant(context, tenant_id)
                network_id = self.name_mapper.network(context, network_id)
                return tenant_id, network_id, gateway_ip

    @sync_init
    def create_port_postcommit(self, context):
        self._perform_port_operations(context)

    @sync_init
    def update_port_postcommit(self, context):
        self._perform_port_operations(context)

    def delete_port_postcommit(self, context):
        port = context.current
        # Check if a compute port
        if context.host:
            self._delete_path_if_last(context)
        if port.get('device_owner') == n_constants.DEVICE_OWNER_ROUTER_GW:
            self._delete_contract(context)

    @sync_init
    def create_network_postcommit(self, context):
        if not context.current.get('router:external'):
            tenant_id = context.current['tenant_id']
            network_id = context.current['id']

            # Convert to APIC IDs
            tenant_id = self.name_mapper.tenant(context, tenant_id)
            network_id = self.name_mapper.network(context, network_id)

            # Create BD and EPG for this network
            with self.apic_manager.apic.transaction() as trs:
                self.apic_manager.ensure_bd_created_on_apic(tenant_id,
                                                            network_id,
                                                            transaction=trs)
                self.apic_manager.ensure_epg_created(
                    tenant_id, network_id, transaction=trs)

    @sync_init
    def update_network_postcommit(self, context):
        super(APICMechanismDriver, self).update_network_postcommit(context)

    def delete_network_postcommit(self, context):
        if not context.current.get('router:external'):
            tenant_id = context.current['tenant_id']
            network_id = context.current['id']

            # Convert to APIC IDs
            tenant_id = self.name_mapper.tenant(context, tenant_id)
            network_id = self.name_mapper.network(context, network_id)

            # Delete BD and EPG for this network
            with self.apic_manager.apic.transaction() as trs:
                self.apic_manager.delete_epg_for_network(tenant_id, network_id,
                                                         transaction=trs)
                self.apic_manager.delete_bd_on_apic(tenant_id, network_id,
                                                    transaction=trs)
        else:
            network_name = context.current['name']
            if self.apic_manager.ext_net_dict.get(network_name):
                network_id = self.name_mapper.network(context,
                                                      context.current['id'])
                self.apic_manager.delete_external_routed_network(network_id)

    @sync_init
    def create_subnet_postcommit(self, context):
        info = self._get_subnet_info(context, context.current)
        if info:
            tenant_id, network_id, gateway_ip = info
            # Create subnet on BD
            self.apic_manager.ensure_subnet_created_on_apic(
                tenant_id, network_id, gateway_ip)

    @sync_init
    def update_subnet_postcommit(self, context):
        if context.current['gateway_ip'] != context.original['gateway_ip']:
            with self.apic_manager.apic.transaction() as trs:
                info = self._get_subnet_info(context, context.original)
                if info:
                    tenant_id, network_id, gateway_ip = info
                    # Delete subnet
                    self.apic_manager.ensure_subnet_deleted_on_apic(
                        tenant_id, network_id, gateway_ip, transaction=trs)
                info = self._get_subnet_info(context, context.current)
                if info:
                    tenant_id, network_id, gateway_ip = info
                    # Create subnet
                    self.apic_manager.ensure_subnet_created_on_apic(
                        tenant_id, network_id, gateway_ip, transaction=trs)

    def delete_subnet_postcommit(self, context):
        info = self._get_subnet_info(context, context.current)
        if info:
            tenant_id, network_id, gateway_ip = info
            self.apic_manager.ensure_subnet_deleted_on_apic(
                tenant_id, network_id, gateway_ip)
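
Brief aside, not part of the change: _get_subnet_info() above reduces a subnet to the gateway/prefix string programmed on the APIC bridge domain. A minimal sketch of that computation with made-up subnet values (assumes the netaddr package is available):

import netaddr

# Hypothetical subnet, mirroring the fields _get_subnet_info() reads
subnet = {'gateway_ip': '10.3.2.1', 'cidr': '10.3.2.0/24'}

cidr = netaddr.IPNetwork(subnet['cidr'])
gateway_ip = '%s/%s' % (subnet['gateway_ip'], str(cidr.prefixlen))
# gateway_ip == '10.3.2.1/24', the value passed to
# ensure_subnet_created_on_apic() / ensure_subnet_deleted_on_apic()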

@@ -1,184 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from apicapi import apic_mapper
from oslo_utils import excutils

from neutron.db import db_base_plugin_v2
from neutron.db import extraroute_db
from neutron.db import l3_dvr_db
from neutron.plugins.common import constants
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic


class ApicL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                          l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                          extraroute_db.ExtraRoute_db_mixin):
    supported_extension_aliases = ["router", "ext-gw-mode", "extraroute"]

    def __init__(self):
        super(ApicL3ServicePlugin, self).__init__()
        self.manager = mechanism_apic.APICMechanismDriver.get_apic_manager()
        self.name_mapper = self.manager.apic_mapper
        self.synchronizer = None
        self.manager.ensure_infra_created_on_apic()
        self.manager.ensure_bgp_pod_policy_created_on_apic()

    def _map_names(self, context,
                   tenant_id, router_id, net_id, subnet_id):
        context._plugin = self
        with apic_mapper.mapper_context(context) as ctx:
            atenant_id = tenant_id and self.name_mapper.tenant(ctx, tenant_id)
            arouter_id = router_id and self.name_mapper.router(ctx, router_id)
            anet_id = net_id and self.name_mapper.network(ctx, net_id)
            asubnet_id = subnet_id and self.name_mapper.subnet(ctx, subnet_id)
        return atenant_id, arouter_id, anet_id, asubnet_id

    @staticmethod
    def get_plugin_type():
        return constants.L3_ROUTER_NAT

    @staticmethod
    def get_plugin_description():
        """Returns string description of the plugin."""
        return _("L3 Router Service Plugin for basic L3 using the APIC")

    def sync_init(f):
        def inner(inst, *args, **kwargs):
            if not inst.synchronizer:
                inst.synchronizer = (
                    mechanism_apic.APICMechanismDriver.
                    get_router_synchronizer(inst))
                inst.synchronizer.sync_router()
            # pylint: disable=not-callable
            return f(inst, *args, **kwargs)
        return inner

    def add_router_interface_postcommit(self, context, router_id,
                                        interface_info):
        # Update router's state first
        router = self.get_router(context, router_id)
        self.update_router_postcommit(context, router)

        # Add router interface
        if 'subnet_id' in interface_info:
            subnet = self.get_subnet(context, interface_info['subnet_id'])
            network_id = subnet['network_id']
            tenant_id = subnet['tenant_id']
        else:
            port = self.get_port(context, interface_info['port_id'])
            network_id = port['network_id']
            tenant_id = port['tenant_id']

        # Map openstack IDs to APIC IDs
        atenant_id, arouter_id, anetwork_id, _ = self._map_names(
            context, tenant_id, router_id, network_id, None)

        # Program APIC
        self.manager.add_router_interface(atenant_id, arouter_id,
                                          anetwork_id)

    def remove_router_interface_precommit(self, context, router_id,
                                          interface_info):
        if 'subnet_id' in interface_info:
            subnet = self.get_subnet(context, interface_info['subnet_id'])
            network_id = subnet['network_id']
            tenant_id = subnet['tenant_id']
        else:
            port = self.get_port(context, interface_info['port_id'])
            network_id = port['network_id']
            tenant_id = port['tenant_id']

        # Map openstack IDs to APIC IDs
        atenant_id, arouter_id, anetwork_id, _ = self._map_names(
            context, tenant_id, router_id, network_id, None)

        # Program APIC
        self.manager.remove_router_interface(atenant_id, arouter_id,
                                             anetwork_id)

    def delete_router_precommit(self, context, router_id):
        context._plugin = self
        with apic_mapper.mapper_context(context) as ctx:
            arouter_id = router_id and self.name_mapper.router(ctx, router_id)
        self.manager.delete_router(arouter_id)

    def update_router_postcommit(self, context, router):
        context._plugin = self
        with apic_mapper.mapper_context(context) as ctx:
            arouter_id = router['id'] and self.name_mapper.router(ctx,
                                                                  router['id'])
        with self.manager.apic.transaction() as trs:
            self.manager.create_router(arouter_id, transaction=trs)
            if router['admin_state_up']:
                self.manager.enable_router(arouter_id, transaction=trs)
            else:
                self.manager.disable_router(arouter_id, transaction=trs)

    # Router API

    @sync_init
    def create_router(self, *args, **kwargs):
        return super(ApicL3ServicePlugin, self).create_router(*args, **kwargs)

    @sync_init
    def update_router(self, context, id, router):
        result = super(ApicL3ServicePlugin, self).update_router(context,
                                                                id, router)
        self.update_router_postcommit(context, result)
        return result

    @sync_init
    def get_router(self, *args, **kwargs):
        return super(ApicL3ServicePlugin, self).get_router(*args, **kwargs)

    @sync_init
    def get_routers(self, *args, **kwargs):
        return super(ApicL3ServicePlugin, self).get_routers(*args, **kwargs)

    @sync_init
    def get_routers_count(self, *args, **kwargs):
        return super(ApicL3ServicePlugin, self).get_routers_count(*args,
                                                                  **kwargs)

    def delete_router(self, context, router_id):
        self.delete_router_precommit(context, router_id)
        result = super(ApicL3ServicePlugin, self).delete_router(context,
                                                                router_id)
        return result

    # Router Interface API

    @sync_init
    def add_router_interface(self, context, router_id, interface_info):
        # Create interface in parent
        result = super(ApicL3ServicePlugin, self).add_router_interface(
            context, router_id, interface_info)
        try:
            self.add_router_interface_postcommit(context, router_id,
                                                 interface_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Rollback db operation
                super(ApicL3ServicePlugin, self).remove_router_interface(
                    context, router_id, interface_info)
        return result

    def remove_router_interface(self, context, router_id, interface_info):
        self.remove_router_interface_precommit(context, router_id,
                                               interface_info)
        return super(ApicL3ServicePlugin, self).remove_router_interface(
            context, router_id, interface_info)

@@ -1,234 +0,0 @@
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import requests
import mock
from oslo_config import cfg
from neutron.tests import base
OK = requests.codes.ok
APIC_HOSTS = ['fake.controller.local']
APIC_PORT = 7580
APIC_USR = 'notadmin'
APIC_PWD = 'topsecret'
APIC_TENANT = 'citizen14'
APIC_NETWORK = 'network99'
APIC_NETNAME = 'net99name'
APIC_SUBNET = '10.3.2.1/24'
APIC_L3CTX = 'layer3context'
APIC_AP = 'appProfile001'
APIC_EPG = 'endPointGroup001'
APIC_CONTRACT = 'signedContract'