Decompose APIC ML2 mechanism driver

As part of the vendor decomposition effort, the
APIC ML2 driver code is removed and replaced by
its version in the openstack/networking-cisco
repo.

Change-Id: Iffb5245b4c88b65afe62dd7435ee80489a654fee
Partial-implements: blueprint core-vendor-decomposition
Ivar Lazzaro 2015-06-18 16:02:11 -07:00 committed by Henry Gessau
parent bc6a03e5c3
commit e0ba53a09a
19 changed files with 9 additions and 2340 deletions
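
For deployers, the practical effect of this commit is that the APIC driver now has to be installed from the out-of-tree package named in the commit message. A minimal sketch of the post-decomposition setup follows; the pip package name and the mechanism-driver alias shown here are assumptions and should be verified against the networking-cisco documentation:

    # Assumed: install the decomposed driver alongside neutron
    #   pip install networking-cisco
    #
    # /etc/neutron/plugins/ml2/ml2_conf.ini
    [ml2]
    # alias exported by networking-cisco (assumed name)
    mechanism_drivers = cisco_apic_ml2

    [ml2_cisco_apic]
    # the options documented in ml2_conf_cisco.ini below continue to apply
    apic_hosts = 1.1.1.1:80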

View File

@@ -137,76 +137,6 @@
# mcast_ranges =
# Example: mcast_ranges = 224.0.0.1:224.0.0.3,224.0.1.1:224.0.1.
[ml2_cisco_apic]
# Hostname:port list of APIC controllers
# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
# Username for the APIC controller
# apic_username = user
# Password for the APIC controller
# apic_password = password
# Whether to use SSL for connecting to the APIC controller or not
# apic_use_ssl = True
# How to map names to APIC: use_uuid or use_name
# apic_name_mapping = use_name
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain = openstack
# apic_vlan_ns_name = openstack_ns
# apic_node_profile = openstack_profile
# apic_entity_profile = openstack_entity
# apic_function_profile = openstack_function
# apic_app_profile_name = openstack_app
# Agent timers for State reporting and topology discovery
# apic_sync_interval = 30
# apic_agent_report_interval = 30
# apic_agent_poll_interval = 2
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [apic_switch:<switch_id_from_the_apic>]
# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in OpenStack. e.g.
#
# [apic_switch:17]
# ubuntu,ubuntu1 = 1/10
# ubuntu2,ubuntu3 = 1/11
#
# [apic_switch:18]
# ubuntu5,ubuntu6 = 1/1
# ubuntu7,ubuntu8 = 1/2
# Describe external connectivity.
# In this section you can specify the external network configuration in order
# for the plugin to be able to teach the fabric how to route the internal
# traffic to the outside world. The external connectivity configuration
# format is as follows:
#
# [apic_external_network:<externalNetworkName>]
# switch = <switch_id_from_the_apic>
# port = <switchport_the_external_router_is_connected_to>
# encap = <encapsulation>
# cidr_exposed = <cidr_exposed_to_the_external_router>
# gateway_ip = <ip_of_the_external_gateway>
#
# An example follows:
# [apic_external_network:network_ext]
# switch=203
# port=1/34
# encap=vlan-100
# cidr_exposed=10.10.40.2/16
# gateway_ip=10.10.40.1
[ml2_cisco_ucsm]
# Cisco UCS Manager IP address

View File

@@ -1,17 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# cisco-apic filters
lldpctl: CommandFilter, lldpctl, root
# ip_lib filters
ip: IpFilter, ip, root
find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
ip_exec: IpNetnsExecFilter, ip, root

View File

@@ -24,4 +24,12 @@ LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors',
FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies']
TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES)
DRIVER_TABLES = [
# Models moved to openstack/networking-cisco
'cisco_ml2_apic_contracts',
'cisco_ml2_apic_names',
'cisco_ml2_apic_host_links',
# Add your tables with moved models here^. Please end with a comma.
]
TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + DRIVER_TABLES)

View File

@@ -55,7 +55,6 @@ from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.ml2.drivers.arista import db # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers.cisco.apic import apic_model # noqa
from neutron.plugins.ml2.drivers.cisco.n1kv import n1kv_models # noqa
from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2 as ml2_nexus_models_v2)

View File

@@ -1,193 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.db import models_v2
from neutron.plugins.ml2 import models as models_ml2
class RouterContract(model_base.BASEV2, models_v2.HasTenant):
"""Contracts created on the APIC.
tenant_id represents the owner (APIC side) of the contract.
router_id is the UUID of the router (Neutron side) this contract is
referring to.
"""
__tablename__ = 'cisco_ml2_apic_contracts'
router_id = sa.Column(sa.String(64), sa.ForeignKey('routers.id',
ondelete='CASCADE'),
primary_key=True)
class HostLink(model_base.BASEV2):
"""Connectivity of host links."""
__tablename__ = 'cisco_ml2_apic_host_links'
host = sa.Column(sa.String(255), nullable=False, primary_key=True)
ifname = sa.Column(sa.String(64), nullable=False, primary_key=True)
ifmac = sa.Column(sa.String(32), nullable=True)
swid = sa.Column(sa.String(32), nullable=False)
module = sa.Column(sa.String(32), nullable=False)
port = sa.Column(sa.String(32), nullable=False)
class ApicName(model_base.BASEV2):
"""Mapping of names created on the APIC."""
__tablename__ = 'cisco_ml2_apic_names'
neutron_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
neutron_type = sa.Column(sa.String(32), nullable=False, primary_key=True)
apic_name = sa.Column(sa.String(255), nullable=False)
class ApicDbModel(object):
"""DB Model to manage all APIC DB interactions."""
def __init__(self):
self.session = db_api.get_session()
def get_contract_for_router(self, router_id):
"""Returns the specified router's contract."""
return self.session.query(RouterContract).filter_by(
router_id=router_id).first()
def write_contract_for_router(self, tenant_id, router_id):
"""Stores a new contract for the given tenant."""
contract = RouterContract(tenant_id=tenant_id,
router_id=router_id)
with self.session.begin(subtransactions=True):
self.session.add(contract)
return contract
def update_contract_for_router(self, tenant_id, router_id):
with self.session.begin(subtransactions=True):
contract = self.session.query(RouterContract).filter_by(
router_id=router_id).with_lockmode('update').first()
if contract:
contract.tenant_id = tenant_id
self.session.merge(contract)
else:
self.write_contract_for_router(tenant_id, router_id)
def delete_contract_for_router(self, router_id):
with self.session.begin(subtransactions=True):
try:
self.session.query(RouterContract).filter_by(
router_id=router_id).delete()
except orm.exc.NoResultFound:
return
def add_hostlink(self, host, ifname, ifmac, swid, module, port):
link = HostLink(host=host, ifname=ifname, ifmac=ifmac,
swid=swid, module=module, port=port)
with self.session.begin(subtransactions=True):
self.session.merge(link)
def get_hostlinks(self):
return self.session.query(HostLink).all()
def get_hostlink(self, host, ifname):
return self.session.query(HostLink).filter_by(
host=host, ifname=ifname).first()
def get_hostlinks_for_host(self, host):
return self.session.query(HostLink).filter_by(
host=host).all()
def get_hostlinks_for_host_switchport(self, host, swid, module, port):
return self.session.query(HostLink).filter_by(
host=host, swid=swid, module=module, port=port).all()
def get_hostlinks_for_switchport(self, swid, module, port):
return self.session.query(HostLink).filter_by(
swid=swid, module=module, port=port).all()
def delete_hostlink(self, host, ifname):
with self.session.begin(subtransactions=True):
try:
self.session.query(HostLink).filter_by(host=host,
ifname=ifname).delete()
except orm.exc.NoResultFound:
return
def get_switches(self):
return self.session.query(HostLink.swid).distinct()
def get_modules_for_switch(self, swid):
return self.session.query(
HostLink.module).filter_by(swid=swid).distinct()
def get_ports_for_switch_module(self, swid, module):
return self.session.query(
HostLink.port).filter_by(swid=swid, module=module).distinct()
def get_switch_and_port_for_host(self, host):
return self.session.query(
HostLink.swid, HostLink.module, HostLink.port).filter_by(
host=host).distinct()
def get_tenant_network_vlan_for_host(self, host):
pb = models_ml2.PortBinding
po = models_v2.Port
ns = models_ml2.NetworkSegment
return self.session.query(
po.tenant_id, ns.network_id, ns.segmentation_id).filter(
po.id == pb.port_id).filter(pb.host == host).filter(
po.network_id == ns.network_id).distinct()
def add_apic_name(self, neutron_id, neutron_type, apic_name):
name = ApicName(neutron_id=neutron_id,
neutron_type=neutron_type,
apic_name=apic_name)
with self.session.begin(subtransactions=True):
self.session.add(name)
def update_apic_name(self, neutron_id, neutron_type, apic_name):
with self.session.begin(subtransactions=True):
name = self.session.query(ApicName).filter_by(
neutron_id=neutron_id,
neutron_type=neutron_type).with_lockmode('update').first()
if name:
name.apic_name = apic_name
self.session.merge(name)
else:
self.add_apic_name(neutron_id, neutron_type, apic_name)
def get_apic_names(self):
return self.session.query(ApicName).all()
def get_apic_name(self, neutron_id, neutron_type):
return self.session.query(ApicName.apic_name).filter_by(
neutron_id=neutron_id, neutron_type=neutron_type).first()
def delete_apic_name(self, neutron_id):
with self.session.begin(subtransactions=True):
try:
self.session.query(ApicName).filter_by(
neutron_id=neutron_id).delete()
except orm.exc.NoResultFound:
return
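
The ApicDbModel class above is the driver's whole persistence layer. A minimal usage sketch of how a caller records a host link and reads it back; the host, interface and switch values are made up for illustration:

    # Sketch only; assumes a configured neutron DB session.
    db = ApicDbModel()
    db.add_hostlink('compute-1', 'eth1', 'aa:bb:cc:dd:ee:01', '101', '1', '10')
    link = db.get_hostlink('compute-1', 'eth1')              # HostLink row or None
    links = db.get_switch_and_port_for_host('compute-1').all()
    # -> [('101', '1', '10')]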

View File

@@ -1,111 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_service import loopingcall
from neutron.common import constants as n_constants
from neutron import context
from neutron.i18n import _LW
from neutron import manager
from neutron.plugins.ml2 import db as l2_db
from neutron.plugins.ml2 import driver_context
LOG = log.getLogger(__name__)
class SynchronizerBase(object):
def __init__(self, driver, interval=None):
self.core_plugin = manager.NeutronManager.get_plugin()
self.driver = driver
self.interval = interval
def sync(self, f, *args, **kwargs):
"""Fire synchronization based on interval.
Interval can be 0 for 'sync once' >0 for 'sync periodically' and
<0 for 'no sync'
"""
if self.interval:
if self.interval > 0:
loop_call = loopingcall.FixedIntervalLoopingCall(f, *args,
**kwargs)
loop_call.start(interval=self.interval)
return loop_call
else:
# Fire once
f(*args, **kwargs)
class ApicBaseSynchronizer(SynchronizerBase):
def sync_base(self):
self.sync(self._sync_base)
def _sync_base(self):
ctx = context.get_admin_context()
# Sync Networks
for network in self.core_plugin.get_networks(ctx):
mech_context = driver_context.NetworkContext(self.core_plugin, ctx,
network)
try:
self.driver.create_network_postcommit(mech_context)
except Exception:
LOG.warn(_LW("Create network postcommit failed for "
"network %s"), network['id'])
# Sync Subnets
for subnet in self.core_plugin.get_subnets(ctx):
mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
subnet)
try:
self.driver.create_subnet_postcommit(mech_context)
except Exception:
LOG.warn(_LW("Create subnet postcommit failed for"
" subnet %s"), subnet['id'])
# Sync Ports (compute/gateway/dhcp)
for port in self.core_plugin.get_ports(ctx):
_, binding = l2_db.get_locked_port_and_binding(ctx.session,
port['id'])
network = self.core_plugin.get_network(ctx, port['network_id'])
mech_context = driver_context.PortContext(self.core_plugin, ctx,
port, network, binding,
[])
try:
self.driver.create_port_postcommit(mech_context)
except Exception:
LOG.warn(_LW("Create port postcommit failed for"
" port %s"), port['id'])
class ApicRouterSynchronizer(SynchronizerBase):
def sync_router(self):
self.sync(self._sync_router)
def _sync_router(self):
ctx = context.get_admin_context()
# Sync Router Interfaces
filters = {'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF]}
for interface in self.core_plugin.get_ports(ctx, filters=filters):
try:
self.driver.add_router_interface_postcommit(
ctx, interface['device_id'],
{'port_id': interface['id']})
except Exception:
LOG.warn(_LW("Add interface postcommit failed for "
"port %s"), interface['id'])

View File

@@ -1,343 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
import eventlet
eventlet.monkey_patch()
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_service import service as svc
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config as common_cfg
from neutron.common import rpc
from neutron.common import utils as neutron_utils
from neutron.db import agents_db
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as ma
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron import service
ACI_PORT_DESCR_FORMATS = [
r'topology/pod-1/node-(\d+)/sys/conng/path-\[eth(\d+)/(\d+)\]',
r'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+)\]',
]
AGENT_FORCE_UPDATE_COUNT = 100
BINARY_APIC_SERVICE_AGENT = 'neutron-cisco-apic-service-agent'
BINARY_APIC_HOST_AGENT = 'neutron-cisco-apic-host-agent'
TOPIC_APIC_SERVICE = 'apic-service'
TYPE_APIC_SERVICE_AGENT = 'cisco-apic-service-agent'
TYPE_APIC_HOST_AGENT = 'cisco-apic-host-agent'
LOG = logging.getLogger(__name__)
class ApicTopologyService(manager.Manager):
target = oslo_messaging.Target(version='1.1')
def __init__(self, host=None):
if host is None:
host = neutron_utils.get_hostname()
super(ApicTopologyService, self).__init__(host=host)
self.conf = cfg.CONF.ml2_cisco_apic
self.conn = None
self.peers = {}
self.invalid_peers = []
self.dispatcher = None
self.state = None
self.state_agent = None
self.topic = TOPIC_APIC_SERVICE
self.apic_manager = ma.APICMechanismDriver.get_apic_manager(False)
def init_host(self):
LOG.info(_LI("APIC service agent starting ..."))
self.state = {
'binary': BINARY_APIC_SERVICE_AGENT,
'host': self.host,
'topic': self.topic,
'configurations': {},
'start_flag': True,
'agent_type': TYPE_APIC_SERVICE_AGENT,
}
self.conn = rpc.create_connection(new=True)
self.dispatcher = [self, agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(
self.topic, self.dispatcher, fanout=True)
self.conn.consume_in_threads()
def after_start(self):
LOG.info(_LI("APIC service agent started"))
def report_send(self, context):
if not self.state_agent:
return
LOG.debug("APIC service agent: sending report state")
try:
self.state_agent.report_state(context, self.state)
self.state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
# ignore it
return
except Exception:
LOG.exception(_LE("APIC service agent: failed in reporting state"))
@lockutils.synchronized('apic_service')
def update_link(self, context,
host, interface, mac,
switch, module, port):
LOG.debug("APIC service agent: received update_link: %s",
", ".join(map(str,
[host, interface, mac, switch, module, port])))
nlink = (host, interface, mac, switch, module, port)
clink = self.peers.get((host, interface), None)
if switch == 0:
# this is a link delete, remove it
if clink is not None:
self.apic_manager.remove_hostlink(*clink)
self.peers.pop((host, interface))
else:
if clink is None:
# add new link to database
self.apic_manager.add_hostlink(*nlink)
self.peers[(host, interface)] = nlink
elif clink != nlink:
# delete old link and add new one (don't update in place)
self.apic_manager.remove_hostlink(*clink)
self.peers.pop((host, interface))
self.apic_manager.add_hostlink(*nlink)
self.peers[(host, interface)] = nlink
class ApicTopologyServiceNotifierApi(object):
def __init__(self):
target = oslo_messaging.Target(topic=TOPIC_APIC_SERVICE, version='1.0')
self.client = rpc.get_client(target)
def update_link(self, context, host, interface, mac, switch, module, port):
cctxt = self.client.prepare(version='1.1', fanout=True)
cctxt.cast(context, 'update_link', host=host, interface=interface,
mac=mac, switch=switch, module=module, port=port)
def delete_link(self, context, host, interface):
cctxt = self.client.prepare(version='1.1', fanout=True)
cctxt.cast(context, 'delete_link', host=host, interface=interface,
mac=None, switch=0, module=0, port=0)
class ApicTopologyAgent(manager.Manager):
def __init__(self, host=None):
if host is None:
host = neutron_utils.get_hostname()
super(ApicTopologyAgent, self).__init__(host=host)
self.conf = cfg.CONF.ml2_cisco_apic
self.count_current = 0
self.count_force_send = AGENT_FORCE_UPDATE_COUNT
self.interfaces = {}
self.lldpcmd = None
self.peers = {}
self.port_desc_re = list(map(re.compile, ACI_PORT_DESCR_FORMATS))
self.service_agent = ApicTopologyServiceNotifierApi()
self.state = None
self.state_agent = None
self.topic = TOPIC_APIC_SERVICE
self.uplink_ports = []
self.invalid_peers = []
def init_host(self):
LOG.info(_LI("APIC host agent: agent starting on %s"), self.host)
self.state = {
'binary': BINARY_APIC_HOST_AGENT,
'host': self.host,
'topic': self.topic,
'configurations': {},
'start_flag': True,
'agent_type': TYPE_APIC_HOST_AGENT,
}
self.uplink_ports = []
for inf in self.conf.apic_host_uplink_ports:
if ip_lib.device_exists(inf):
self.uplink_ports.append(inf)
else:
# ignore unknown interfaces
LOG.error(_LE("No such interface (ignored): %s"), inf)
self.lldpcmd = ['lldpctl', '-f', 'keyvalue'] + self.uplink_ports
def after_start(self):
LOG.info(_LI("APIC host agent: started on %s"), self.host)
@periodic_task.periodic_task
def _check_for_new_peers(self, context):
LOG.debug("APIC host agent: _check_for_new_peers")
if not self.lldpcmd:
return
try:
# Check if we must send update even if there is no change
force_send = False
self.count_current += 1
if self.count_current >= self.count_force_send:
force_send = True
self.count_current = 0
# Check for new peers
new_peers = self._get_peers()
new_peers = self._valid_peers(new_peers)
# Make a copy of current interfaces
curr_peers = {}
for interface in self.peers:
curr_peers[interface] = self.peers[interface]
# Based on curr -> new updates, add the new interfaces
self.peers = {}
for interface in new_peers:
peer = new_peers[interface]
self.peers[interface] = peer
if (interface in curr_peers and
curr_peers[interface] != peer):
self.service_agent.update_link(
context, peer[0], peer[1], None, 0, 0, 0)
if (interface not in curr_peers or
curr_peers[interface] != peer or
force_send):
self.service_agent.update_link(context, *peer)
if interface in curr_peers:
curr_peers.pop(interface)
# Any interface still in curr_peers need to be deleted
for peer in curr_peers.values():
self.service_agent.update_link(
context, peer[0], peer[1], None, 0, 0, 0)
except Exception:
LOG.exception(_LE("APIC service agent: exception in LLDP parsing"))
def _get_peers(self):
peers = {}
lldpkeys = utils.execute(self.lldpcmd, run_as_root=True)
for line in lldpkeys.splitlines():
if '=' not in line:
continue
fqkey, value = line.split('=', 1)
lldp, interface, key = fqkey.split('.', 2)
if key == 'port.descr':
for regexp in self.port_desc_re:
match = regexp.match(value)
if match:
mac = self._get_mac(interface)
switch, module, port = match.group(1, 2, 3)
peer = (self.host, interface, mac,
switch, module, port)
if interface not in peers:
peers[interface] = []
peers[interface].append(peer)
return peers
def _valid_peers(self, peers):
# Reduce the peers array to one valid peer per interface
# NOTE:
# There is a bug in the lldpd daemon where it keeps reporting
# old peers even after their updates have stopped;
# we keep track of such reports and remove them from peers
valid_peers = {}
invalid_peers = []
for interface in peers:
curr_peer = None
for peer in peers[interface]:
if peer in self.invalid_peers or curr_peer:
invalid_peers.append(peer)
else:
curr_peer = peer
if curr_peer is not None:
valid_peers[interface] = curr_peer
self.invalid_peers = invalid_peers
return valid_peers
def _get_mac(self, interface):
if interface in self.interfaces:
return self.interfaces[interface]
try:
mac = ip_lib.IPDevice(interface).link.address
self.interfaces[interface] = mac
return mac
except Exception:
# we can safely ignore it, it is only needed for debugging
LOG.exception(
_LE("APIC service agent: can not get MACaddr for %s"),
interface)
def report_send(self, context):
if not self.state_agent:
return
LOG.debug("APIC host agent: sending report state")
try:
self.state_agent.report_state(context, self.state)
self.state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
# ignore it
return
except Exception:
LOG.exception(_LE("APIC host agent: failed in reporting state"))
def launch(binary, manager, topic=None):
cfg.CONF(project='neutron')
common_cfg.init(sys.argv[1:])
config.setup_logging()
report_period = cfg.CONF.ml2_cisco_apic.apic_agent_report_interval
poll_period = cfg.CONF.ml2_cisco_apic.apic_agent_poll_interval
server = service.Service.create(
binary=binary, manager=manager, topic=topic,
report_interval=report_period, periodic_interval=poll_period)
svc.launch(cfg.CONF, server).wait()
def service_main():
launch(
BINARY_APIC_SERVICE_AGENT,
'neutron.plugins.ml2.drivers.' +
'cisco.apic.apic_topology.ApicTopologyService',
TOPIC_APIC_SERVICE)
def agent_main():
launch(
BINARY_APIC_HOST_AGENT,
'neutron.plugins.ml2.drivers.' +
'cisco.apic.apic_topology.ApicTopologyAgent')
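
The regular expressions at the top of this file are what turn an lldpctl port description into the (switch, module, port) triple pushed to the service agent. A worked sketch using the same description string as SERVICE_PEER_PORT_DESC in the unit tests later in this commit:

    # Sketch only; mirrors the matching done in ApicTopologyAgent._get_peers().
    import re
    descr = 'topology/pod-1/paths-203/pathep-[eth1/34]'
    for regexp in map(re.compile, ACI_PORT_DESCR_FORMATS):
        match = regexp.match(descr)
        if match:
            switch, module, port = match.group(1, 2, 3)   # '203', '1', '34'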

View File

@@ -1,138 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# oslo_config limits ${var} expansion to global variables
# That is why apic_system_id is a global variable
global_opts = [
cfg.StrOpt('apic_system_id',
default='openstack',
help=_("Prefix for APIC domain/names/profiles created")),
]
cfg.CONF.register_opts(global_opts)
apic_opts = [
cfg.ListOpt('apic_hosts',
default=[],
help=_("An ordered list of host names or IP addresses of "
"the APIC controller(s).")),
cfg.StrOpt('apic_username',
help=_("Username for the APIC controller")),
cfg.StrOpt('apic_password',
help=_("Password for the APIC controller"), secret=True),
cfg.StrOpt('apic_name_mapping',
default='use_name',
help=_("Name mapping strategy to use: use_uuid | use_name")),
cfg.BoolOpt('apic_use_ssl', default=True,
help=_("Use SSL to connect to the APIC controller")),
cfg.StrOpt('apic_domain_name',
default='${apic_system_id}',
help=_("Name for the domain created on APIC")),
cfg.StrOpt('apic_app_profile_name',
default='${apic_system_id}_app',
help=_("Name for the app profile used for Openstack")),
cfg.StrOpt('apic_vlan_ns_name',
default='${apic_system_id}_vlan_ns',
help=_("Name for the vlan namespace to be used for Openstack")),
cfg.StrOpt('apic_node_profile',
default='${apic_system_id}_node_profile',
help=_("Name of the node profile to be created")),
cfg.StrOpt('apic_entity_profile',
default='${apic_system_id}_entity_profile',
help=_("Name of the entity profile to be created")),
cfg.StrOpt('apic_function_profile',
default='${apic_system_id}_function_profile',
help=_("Name of the function profile to be created")),
cfg.StrOpt('apic_lacp_profile',
default='${apic_system_id}_lacp_profile',
help=_("Name of the LACP profile to be created")),
cfg.ListOpt('apic_host_uplink_ports',
default=[],
help=_('The uplink ports to check for ACI connectivity')),
cfg.ListOpt('apic_vpc_pairs',
default=[],
help=_('The switch pairs for VPC connectivity')),
cfg.StrOpt('apic_vlan_range',
default='2:4093',
help=_("Range of VLAN's to be used for Openstack")),
cfg.IntOpt('apic_sync_interval',
default=0,
help=_("Synchronization interval in seconds")),
cfg.FloatOpt('apic_agent_report_interval',
default=30,
help=_('Interval between agent status updates (in sec)')),
cfg.FloatOpt('apic_agent_poll_interval',
default=2,
help=_('Interval between agent poll for topology (in sec)')),
]
cfg.CONF.register_opts(apic_opts, "ml2_cisco_apic")
def _get_specific_config(prefix):
"""retrieve config in the format [<prefix>:<value>]."""
conf_dict = {}
multi_parser = cfg.MultiConfigParser()
multi_parser.read(cfg.CONF.config_file)
for parsed_file in multi_parser.parsed:
for parsed_item in parsed_file.keys():
if parsed_item.startswith(prefix):
switch, switch_id = parsed_item.split(':')
if switch.lower() == prefix:
conf_dict[switch_id] = parsed_file[parsed_item].items()
return conf_dict
def create_switch_dictionary():
switch_dict = {}
conf = _get_specific_config('apic_switch')
for switch_id in conf:
switch_dict[switch_id] = switch_dict.get(switch_id, {})
for host_list, port in conf[switch_id]:
hosts = host_list.split(',')
port = port[0]
switch_dict[switch_id][port] = (
switch_dict[switch_id].get(port, []) + hosts)
return switch_dict
def create_vpc_dictionary():
vpc_dict = {}
for pair in cfg.CONF.ml2_cisco_apic.apic_vpc_pairs:
pair_tuple = pair.split(':')
if (len(pair_tuple) != 2 or
any(map(lambda x: not x.isdigit(), pair_tuple))):
# Validation error, ignore this item
continue
vpc_dict[pair_tuple[0]] = pair_tuple[1]
vpc_dict[pair_tuple[1]] = pair_tuple[0]
return vpc_dict
def create_external_network_dictionary():
router_dict = {}
conf = _get_specific_config('apic_external_network')
for net_id in conf:
router_dict[net_id] = router_dict.get(net_id, {})
for key, value in conf[net_id]:
router_dict[net_id][key] = value[0] if value else None
return router_dict
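
The create_*_dictionary() helpers above turn the [apic_switch:*]/[apic_external_network:*] sections and the apic_vpc_pairs option documented in ml2_conf_cisco.ini (top of this commit) into plain dicts consumed by apic_manager via get_apic_manager() in the mechanism driver below. A small sketch of the resulting shapes, with values mirroring the ConfigMixin fixtures in the unit tests further down:

    # Sketch only. Given a configuration containing:
    #     [ml2_cisco_apic]
    #     apic_vpc_pairs = 201:202
    #     [apic_switch:101]
    #     ubuntu1,ubuntu2 = 3/11
    switch_dict = create_switch_dictionary()  # {'101': {'3/11': ['ubuntu1', 'ubuntu2']}}
    vpc_dict = create_vpc_dictionary()        # {'201': '202', '202': '201'}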

View File

@@ -1,287 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from apicapi import apic_manager
from keystoneclient.v2_0 import client as keyclient
import netaddr
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log
from neutron.common import constants as n_constants
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.cisco.apic import apic_model
from neutron.plugins.ml2.drivers.cisco.apic import apic_sync
from neutron.plugins.ml2.drivers.cisco.apic import config
from neutron.plugins.ml2 import models
LOG = log.getLogger(__name__)
class APICMechanismDriver(api.MechanismDriver):
@staticmethod
def get_apic_manager(client=True):
apic_config = cfg.CONF.ml2_cisco_apic
network_config = {
'vlan_ranges': cfg.CONF.ml2_type_vlan.network_vlan_ranges,
'switch_dict': config.create_switch_dictionary(),
'vpc_dict': config.create_vpc_dictionary(),
'external_network_dict':
config.create_external_network_dictionary(),
}
apic_system_id = cfg.CONF.apic_system_id
keyclient_param = keyclient if client else None
keystone_authtoken = cfg.CONF.keystone_authtoken if client else None
return apic_manager.APICManager(apic_model.ApicDbModel(), log,
network_config, apic_config,
keyclient_param, keystone_authtoken,
apic_system_id)
@staticmethod
def get_base_synchronizer(inst):
apic_config = cfg.CONF.ml2_cisco_apic
return apic_sync.ApicBaseSynchronizer(inst,
apic_config.apic_sync_interval)
@staticmethod
def get_router_synchronizer(inst):
apic_config = cfg.CONF.ml2_cisco_apic
return apic_sync.ApicRouterSynchronizer(inst,
apic_config.apic_sync_interval)
def initialize(self):
# initialize apic
self.apic_manager = APICMechanismDriver.get_apic_manager()
self.name_mapper = self.apic_manager.apic_mapper
self.synchronizer = None
self.apic_manager.ensure_infra_created_on_apic()
self.apic_manager.ensure_bgp_pod_policy_created_on_apic()
def sync_init(f):
def inner(inst, *args, **kwargs):
if not inst.synchronizer:
inst.synchronizer = (
APICMechanismDriver.get_base_synchronizer(inst))
inst.synchronizer.sync_base()
# pylint: disable=not-callable
return f(inst, *args, **kwargs)
return inner
@lockutils.synchronized('apic-portlock')
def _perform_path_port_operations(self, context, port):
# Get network
network_id = context.network.current['id']
anetwork_id = self.name_mapper.network(context, network_id)
# Get tenant details from port context
tenant_id = context.current['tenant_id']
tenant_id = self.name_mapper.tenant(context, tenant_id)
# Get segmentation id
segment = context.top_bound_segment
if not segment:
LOG.debug("Port %s is not bound to a segment", port)
return
seg = None
if (segment.get(api.NETWORK_TYPE) in [constants.TYPE_VLAN]):
seg = segment.get(api.SEGMENTATION_ID)
# hosts on which this vlan is provisioned
host = context.host
# Create a static path attachment for the host/epg/switchport combo
with self.apic_manager.apic.transaction() as trs:
self.apic_manager.ensure_path_created_for_port(
tenant_id, anetwork_id, host, seg, transaction=trs)
def _perform_gw_port_operations(self, context, port):
router_id = port.get('device_id')
network = context.network.current
anetwork_id = self.name_mapper.network(context, network['id'])
router_info = self.apic_manager.ext_net_dict.get(network['name'])
if router_id and router_info:
address = router_info['cidr_exposed']
next_hop = router_info['gateway_ip']
encap = router_info.get('encap') # No encap if None
switch = router_info['switch']
module, sport = router_info['port'].split('/')
with self.apic_manager.apic.transaction() as trs:
# Get/Create contract
arouter_id = self.name_mapper.router(context, router_id)
cid = self.apic_manager.get_router_contract(arouter_id)
# Ensure that the external ctx exists
self.apic_manager.ensure_context_enforced()
# Create External Routed Network and configure it
self.apic_manager.ensure_external_routed_network_created(
anetwork_id, transaction=trs)
self.apic_manager.ensure_logical_node_profile_created(
anetwork_id, switch, module, sport, encap,
address, transaction=trs)
self.apic_manager.ensure_static_route_created(
anetwork_id, switch, next_hop, transaction=trs)
self.apic_manager.ensure_external_epg_created(
anetwork_id, transaction=trs)
self.apic_manager.ensure_external_epg_consumed_contract(
anetwork_id, cid, transaction=trs)
self.apic_manager.ensure_external_epg_provided_contract(
anetwork_id, cid, transaction=trs)
def _perform_port_operations(self, context):
# Get port
port = context.current
# Check if a compute port
if context.host:
self._perform_path_port_operations(context, port)
if port.get('device_owner') == n_constants.DEVICE_OWNER_ROUTER_GW:
self._perform_gw_port_operations(context, port)
def _delete_contract(self, context):
port = context.current
network_id = self.name_mapper.network(
context, context.network.current['id'])
arouter_id = self.name_mapper.router(context,
port.get('device_id'))
self.apic_manager.delete_external_epg_contract(arouter_id,
network_id)
def _get_active_path_count(self, context):
return context._plugin_context.session.query(
models.PortBinding).filter_by(
host=context.host, segment=context._binding.segment).count()
@lockutils.synchronized('apic-portlock')
def _delete_port_path(self, context, atenant_id, anetwork_id):
if not self._get_active_path_count(context):
self.apic_manager.ensure_path_deleted_for_port(
atenant_id, anetwork_id,
context.host)
def _delete_path_if_last(self, context):
if not self._get_active_path_count(context):
tenant_id = context.current['tenant_id']
atenant_id = self.name_mapper.tenant(context, tenant_id)
network_id = context.network.current['id']
anetwork_id = self.name_mapper.network(context, network_id)
self._delete_port_path(context, atenant_id, anetwork_id)
def _get_subnet_info(self, context, subnet):
if subnet['gateway_ip']:
tenant_id = subnet['tenant_id']
network_id = subnet['network_id']
network = context._plugin.get_network(context._plugin_context,
network_id)
if not network.get('router:external'):
cidr = netaddr.IPNetwork(subnet['cidr'])
gateway_ip = '%s/%s' % (subnet['gateway_ip'],
str(cidr.prefixlen))
# Convert to APIC IDs
tenant_id = self.name_mapper.tenant(context, tenant_id)
network_id = self.name_mapper.network(context, network_id)
return tenant_id, network_id, gateway_ip
@sync_init
def create_port_postcommit(self, context):
self._perform_port_operations(context)
@sync_init
def update_port_postcommit(self, context):
self._perform_port_operations(context)
def delete_port_postcommit(self, context):
port = context.current
# Check if a compute port
if context.host:
self._delete_path_if_last(context)
if port.get('device_owner') == n_constants.DEVICE_OWNER_ROUTER_GW:
self._delete_contract(context)
@sync_init
def create_network_postcommit(self, context):
if not context.current.get('router:external'):
tenant_id = context.current['tenant_id']
network_id = context.current['id']
# Convert to APIC IDs
tenant_id = self.name_mapper.tenant(context, tenant_id)
network_id = self.name_mapper.network(context, network_id)
# Create BD and EPG for this network
with self.apic_manager.apic.transaction() as trs:
self.apic_manager.ensure_bd_created_on_apic(tenant_id,
network_id,
transaction=trs)
self.apic_manager.ensure_epg_created(
tenant_id, network_id, transaction=trs)
@sync_init
def update_network_postcommit(self, context):
super(APICMechanismDriver, self).update_network_postcommit(context)
def delete_network_postcommit(self, context):
if not context.current.get('router:external'):
tenant_id = context.current['tenant_id']
network_id = context.current['id']
# Convert to APIC IDs
tenant_id = self.name_mapper.tenant(context, tenant_id)
network_id = self.name_mapper.network(context, network_id)
# Delete BD and EPG for this network
with self.apic_manager.apic.transaction() as trs:
self.apic_manager.delete_epg_for_network(tenant_id, network_id,
transaction=trs)
self.apic_manager.delete_bd_on_apic(tenant_id, network_id,
transaction=trs)
else:
network_name = context.current['name']
if self.apic_manager.ext_net_dict.get(network_name):
network_id = self.name_mapper.network(context,
context.current['id'])
self.apic_manager.delete_external_routed_network(network_id)
@sync_init
def create_subnet_postcommit(self, context):
info = self._get_subnet_info(context, context.current)
if info:
tenant_id, network_id, gateway_ip = info
# Create subnet on BD
self.apic_manager.ensure_subnet_created_on_apic(
tenant_id, network_id, gateway_ip)
@sync_init
def update_subnet_postcommit(self, context):
if context.current['gateway_ip'] != context.original['gateway_ip']:
with self.apic_manager.apic.transaction() as trs:
info = self._get_subnet_info(context, context.original)
if info:
tenant_id, network_id, gateway_ip = info
# Delete subnet
self.apic_manager.ensure_subnet_deleted_on_apic(
tenant_id, network_id, gateway_ip, transaction=trs)
info = self._get_subnet_info(context, context.current)
if info:
tenant_id, network_id, gateway_ip = info
# Create subnet
self.apic_manager.ensure_subnet_created_on_apic(
tenant_id, network_id, gateway_ip, transaction=trs)
def delete_subnet_postcommit(self, context):
info = self._get_subnet_info(context, context.current)
if info:
tenant_id, network_id, gateway_ip = info
self.apic_manager.ensure_subnet_deleted_on_apic(
tenant_id, network_id, gateway_ip)
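
One detail worth calling out: _perform_gw_port_operations() and delete_network_postcommit() look up external connectivity in apic_manager.ext_net_dict by the Neutron network's name, so the [apic_external_network:<name>] section has to be keyed by the exact name of the external network created in Neutron. For example, reusing the values from the sample ml2_conf_cisco.ini above, an external network named network_ext picks up:

    [apic_external_network:network_ext]
    switch = 203
    port = 1/34
    encap = vlan-100
    cidr_exposed = 10.10.40.2/16
    gateway_ip = 10.10.40.1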

View File

@@ -1,184 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from apicapi import apic_mapper
from oslo_utils import excutils
from neutron.db import db_base_plugin_v2
from neutron.db import extraroute_db
from neutron.db import l3_dvr_db
from neutron.plugins.common import constants
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic
class ApicL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2,
l3_dvr_db.L3_NAT_with_dvr_db_mixin,
extraroute_db.ExtraRoute_db_mixin):
supported_extension_aliases = ["router", "ext-gw-mode", "extraroute"]
def __init__(self):
super(ApicL3ServicePlugin, self).__init__()
self.manager = mechanism_apic.APICMechanismDriver.get_apic_manager()
self.name_mapper = self.manager.apic_mapper
self.synchronizer = None
self.manager.ensure_infra_created_on_apic()
self.manager.ensure_bgp_pod_policy_created_on_apic()
def _map_names(self, context,
tenant_id, router_id, net_id, subnet_id):
context._plugin = self
with apic_mapper.mapper_context(context) as ctx:
atenant_id = tenant_id and self.name_mapper.tenant(ctx, tenant_id)
arouter_id = router_id and self.name_mapper.router(ctx, router_id)
anet_id = net_id and self.name_mapper.network(ctx, net_id)
asubnet_id = subnet_id and self.name_mapper.subnet(ctx, subnet_id)
return atenant_id, arouter_id, anet_id, asubnet_id
@staticmethod
def get_plugin_type():
return constants.L3_ROUTER_NAT
@staticmethod
def get_plugin_description():
"""Returns string description of the plugin."""
return _("L3 Router Service Plugin for basic L3 using the APIC")
def sync_init(f):
def inner(inst, *args, **kwargs):
if not inst.synchronizer:
inst.synchronizer = (
mechanism_apic.APICMechanismDriver.
get_router_synchronizer(inst))
inst.synchronizer.sync_router()
# pylint: disable=not-callable
return f(inst, *args, **kwargs)
return inner
def add_router_interface_postcommit(self, context, router_id,
interface_info):
# Update router's state first
router = self.get_router(context, router_id)
self.update_router_postcommit(context, router)
# Add router interface
if 'subnet_id' in interface_info:
subnet = self.get_subnet(context, interface_info['subnet_id'])
network_id = subnet['network_id']
tenant_id = subnet['tenant_id']
else:
port = self.get_port(context, interface_info['port_id'])
network_id = port['network_id']
tenant_id = port['tenant_id']
# Map openstack IDs to APIC IDs
atenant_id, arouter_id, anetwork_id, _ = self._map_names(
context, tenant_id, router_id, network_id, None)
# Program APIC
self.manager.add_router_interface(atenant_id, arouter_id,
anetwork_id)
def remove_router_interface_precommit(self, context, router_id,
interface_info):
if 'subnet_id' in interface_info:
subnet = self.get_subnet(context, interface_info['subnet_id'])
network_id = subnet['network_id']
tenant_id = subnet['tenant_id']
else:
port = self.get_port(context, interface_info['port_id'])
network_id = port['network_id']
tenant_id = port['tenant_id']
# Map openstack IDs to APIC IDs
atenant_id, arouter_id, anetwork_id, _ = self._map_names(
context, tenant_id, router_id, network_id, None)
# Program APIC
self.manager.remove_router_interface(atenant_id, arouter_id,
anetwork_id)
def delete_router_precommit(self, context, router_id):
context._plugin = self
with apic_mapper.mapper_context(context) as ctx:
arouter_id = router_id and self.name_mapper.router(ctx, router_id)
self.manager.delete_router(arouter_id)
def update_router_postcommit(self, context, router):
context._plugin = self
with apic_mapper.mapper_context(context) as ctx:
arouter_id = router['id'] and self.name_mapper.router(ctx,
router['id'])
with self.manager.apic.transaction() as trs:
self.manager.create_router(arouter_id, transaction=trs)
if router['admin_state_up']:
self.manager.enable_router(arouter_id, transaction=trs)
else:
self.manager.disable_router(arouter_id, transaction=trs)
# Router API
@sync_init
def create_router(self, *args, **kwargs):
return super(ApicL3ServicePlugin, self).create_router(*args, **kwargs)
@sync_init
def update_router(self, context, id, router):
result = super(ApicL3ServicePlugin, self).update_router(context,
id, router)
self.update_router_postcommit(context, result)
return result
@sync_init
def get_router(self, *args, **kwargs):
return super(ApicL3ServicePlugin, self).get_router(*args, **kwargs)
@sync_init
def get_routers(self, *args, **kwargs):
return super(ApicL3ServicePlugin, self).get_routers(*args, **kwargs)
@sync_init
def get_routers_count(self, *args, **kwargs):
return super(ApicL3ServicePlugin, self).get_routers_count(*args,
**kwargs)
def delete_router(self, context, router_id):
self.delete_router_precommit(context, router_id)
result = super(ApicL3ServicePlugin, self).delete_router(context,
router_id)
return result
# Router Interface API
@sync_init
def add_router_interface(self, context, router_id, interface_info):
# Create interface in parent
result = super(ApicL3ServicePlugin, self).add_router_interface(
context, router_id, interface_info)
try:
self.add_router_interface_postcommit(context, router_id,
interface_info)
except Exception:
with excutils.save_and_reraise_exception():
# Rollback db operation
super(ApicL3ServicePlugin, self).remove_router_interface(
context, router_id, interface_info)
return result
def remove_router_interface(self, context, router_id, interface_info):
self.remove_router_interface_precommit(context, router_id,
interface_info)
return super(ApicL3ServicePlugin, self).remove_router_interface(
context, router_id, interface_info)
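
The add/remove interface hooks above accept interface_info in either of the two forms the core L3 API passes, keyed by subnet_id or by port_id. A minimal sketch with placeholder names (l3_plugin, ctx and the IDs are illustrative, not values from this commit):

    # Sketch only; both forms reach the subnet_id/port_id branches above.
    l3_plugin.add_router_interface(ctx, router_id, {'subnet_id': subnet_id})
    l3_plugin.remove_router_interface(ctx, router_id, {'port_id': port_id})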

View File

@@ -1,234 +0,0 @@
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import requests
import mock
from oslo_config import cfg
from neutron.tests import base
OK = requests.codes.ok
APIC_HOSTS = ['fake.controller.local']
APIC_PORT = 7580
APIC_USR = 'notadmin'
APIC_PWD = 'topsecret'
APIC_TENANT = 'citizen14'
APIC_NETWORK = 'network99'
APIC_NETNAME = 'net99name'
APIC_SUBNET = '10.3.2.1/24'
APIC_L3CTX = 'layer3context'
APIC_AP = 'appProfile001'
APIC_EPG = 'endPointGroup001'
APIC_CONTRACT = 'signedContract'
APIC_SUBJECT = 'testSubject'
APIC_FILTER = 'carbonFilter'
APIC_ENTRY = 'forcedEntry'
APIC_SYSTEM_ID = 'sysid'
APIC_DOMAIN = 'cumuloNimbus'
APIC_NODE_PROF = 'red'
APIC_LEAF = 'green'
APIC_LEAF_TYPE = 'range'
APIC_NODE_BLK = 'blue'
APIC_PORT_PROF = 'yellow'
APIC_PORT_SEL = 'front'
APIC_PORT_TYPE = 'range'
APIC_PORT_BLK1 = 'block01'
APIC_PORT_BLK2 = 'block02'
APIC_ACC_PORT_GRP = 'alpha'
APIC_FUNC_PROF = 'beta'
APIC_ATT_ENT_PROF = 'delta'
APIC_VLAN_NAME = 'gamma'
APIC_VLAN_MODE = 'dynamic'
APIC_VLANID_FROM = 2900
APIC_VLANID_TO = 2999
APIC_VLAN_FROM = 'vlan-%d' % APIC_VLANID_FROM
APIC_VLAN_TO = 'vlan-%d' % APIC_VLANID_TO
APIC_ROUTER = 'router_id'
APIC_EXT_SWITCH = '203'
APIC_EXT_MODULE = '1'
APIC_EXT_PORT = '34'
APIC_EXT_ENCAP = 'vlan-100'
APIC_EXT_CIDR_EXPOSED = '10.10.40.2/16'
APIC_EXT_GATEWAY_IP = '10.10.40.1'
APIC_KEY = 'key'
KEYSTONE_TOKEN = '123Token123'
APIC_UPLINK_PORTS = ['uplink_port']
SERVICE_HOST = 'host1'
SERVICE_HOST_IFACE = 'eth0'
SERVICE_HOST_MAC = 'aa:ee:ii:oo:uu:yy'
SERVICE_PEER_CHASSIS_NAME = 'leaf4'
SERVICE_PEER_CHASSIS = 'topology/pod-1/node-' + APIC_EXT_SWITCH
SERVICE_PEER_PORT_LOCAL = 'Eth%s/%s' % (APIC_EXT_MODULE, APIC_EXT_PORT)
SERVICE_PEER_PORT_DESC = ('topology/pod-1/paths-%s/pathep-[%s]' %
(APIC_EXT_SWITCH, SERVICE_PEER_PORT_LOCAL.lower()))
cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
class ControllerMixin(object):
"""Mock the controller for APIC driver and service unit tests."""
def __init__(self):
self.response = None
def set_up_mocks(self):
# The mocked responses from the server are lists used by
# mock.side_effect, which means each call to post or get will
# return the next item in the list. This allows the test cases
# to stage a sequence of responses to method(s) under test.
self.response = {'post': [], 'get': []}
self.reset_responses()
def reset_responses(self, req=None):
# Clear all staged responses.
reqs = [req] if req else ['post', 'get'] # Both if none specified.
for req in reqs:
del self.response[req][:]
self.restart_responses(req)
def restart_responses(self, req):
responses = mock.MagicMock(side_effect=self.response[req])
if req == 'post':
requests.Session.post = responses
elif req == 'get':
requests.Session.get = responses
def mock_response_for_post(self, mo, **attrs):
attrs['debug_mo'] = mo # useful for debugging
self._stage_mocked_response('post', OK, mo, **attrs)
def _stage_mocked_response(self, req, mock_status, mo, **attrs):
response = mock.MagicMock()
response.status_code = mock_status
mo_attrs = [{mo: {'attributes': attrs}}] if attrs else []
response.json.return_value = {'imdata': mo_attrs}
self.response[req].append(response)
def mock_apic_manager_login_responses(self, timeout=300):
# APIC Manager tests are based on authenticated session
self.mock_response_for_post('aaaLogin', userName=APIC_USR,
token='ok', refreshTimeoutSeconds=timeout)
@contextlib.contextmanager
def fake_transaction(self, *args, **kwargs):
yield 'transaction'
class ConfigMixin(object):
"""Mock the config for APIC driver and service unit tests."""
def __init__(self):
self.mocked_parser = None
def set_up_mocks(self):
# Mock the configuration file
base.BaseTestCase.config_parse()
# Configure global option apic_system_id
cfg.CONF.set_override('apic_system_id', APIC_SYSTEM_ID)
# Configure option keystone_authtoken
cfg.CONF.keystone_authtoken = KEYSTONE_TOKEN
# Configure the ML2 mechanism drivers and network types
ml2_opts = {
'mechanism_drivers': ['apic'],
'tenant_network_types': ['vlan'],
}
for opt, val in ml2_opts.items():
cfg.CONF.set_override(opt, val, 'ml2')
# Configure the ML2 type_vlan opts
ml2_type_vlan_opts = {
'vlan_ranges': ['physnet1:100:199'],
}
cfg.CONF.set_override('network_vlan_ranges',
ml2_type_vlan_opts['vlan_ranges'],
'ml2_type_vlan')
self.vlan_ranges = ml2_type_vlan_opts['vlan_ranges']
# Configure the Cisco APIC mechanism driver
apic_test_config = {
'apic_hosts': APIC_HOSTS,
'apic_username': APIC_USR,
'apic_password': APIC_PWD,
'apic_domain_name': APIC_SYSTEM_ID,
'apic_vlan_ns_name': APIC_VLAN_NAME,
'apic_vlan_range': '%d:%d' % (APIC_VLANID_FROM, APIC_VLANID_TO),
'apic_node_profile': APIC_NODE_PROF,
'apic_entity_profile': APIC_ATT_ENT_PROF,
'apic_function_profile': APIC_FUNC_PROF,
'apic_host_uplink_ports': APIC_UPLINK_PORTS
}
for opt, val in apic_test_config.items():
cfg.CONF.set_override(opt, val, 'ml2_cisco_apic')
self.apic_config = cfg.CONF.ml2_cisco_apic
# Configure switch topology
apic_switch_cfg = {
'apic_switch:101': {'ubuntu1,ubuntu2': ['3/11']},
'apic_switch:102': {'rhel01,rhel02': ['4/21'],
'rhel03': ['4/22']},
}
self.switch_dict = {
'101': {
'3/11': ['ubuntu1', 'ubuntu2'],
},
'102': {
'4/21': ['rhel01', 'rhel02'],
'4/22': ['rhel03'],
},
}
self.vpc_dict = {
'201': '202',
'202': '201',
}
self.external_network_dict = {
APIC_NETWORK + '-name': {
'switch': APIC_EXT_SWITCH,
'port': APIC_EXT_MODULE + '/' + APIC_EXT_PORT,
'encap': APIC_EXT_ENCAP,
'cidr_exposed': APIC_EXT_CIDR_EXPOSED,
'gateway_ip': APIC_EXT_GATEWAY_IP,
},
}
self.mocked_parser = mock.patch.object(
cfg, 'MultiConfigParser').start()
self.mocked_parser.return_value.read.return_value = [apic_switch_cfg]
self.mocked_parser.return_value.parsed = [apic_switch_cfg]
class FakeDbContract(object):
def __init__(self, contract_id):
self.contract_id = contract_id

View File

@@ -1,78 +0,0 @@
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
sys.modules["apicapi"] = mock.Mock()
from neutron.plugins.ml2.drivers.cisco.apic import apic_sync
from neutron.tests import base
LOOPING_CALL = 'oslo_service.loopingcall.FixedIntervalLoopingCall'
GET_PLUGIN = 'neutron.manager.NeutronManager.get_plugin'
GET_ADMIN_CONTEXT = 'neutron.context.get_admin_context'
L2_DB = 'neutron.plugins.ml2.db.get_locked_port_and_binding'
NETWORK_CONTEXT = 'neutron.plugins.ml2.driver_context.NetworkContext'
SUBNET_CONTEXT = 'neutron.plugins.ml2.driver_context.SubnetContext'
PORT_CONTEXT = 'neutron.plugins.ml2.driver_context.PortContext'
class TestCiscoApicSync(base.BaseTestCase):
def setUp(self):
super(TestCiscoApicSync, self).setUp()
self.driver = mock.Mock()
# Patch looping call
loopingcall_c = mock.patch(LOOPING_CALL).start()
self.loopingcall = mock.Mock()
loopingcall_c.return_value = self.loopingcall
# Patch get plugin
self.get_plugin = mock.patch(GET_PLUGIN).start()
self.get_plugin.return_value = mock.Mock()
# Patch get admin context
self.get_admin_context = mock.patch(GET_ADMIN_CONTEXT).start()
self.get_admin_context.return_value = mock.Mock()
# Patch get locked port and binding
self.get_locked_port_and_binding = mock.patch(L2_DB).start()
self.get_locked_port_and_binding.return_value = [mock.Mock()] * 2
# Patch driver context
mock.patch(NETWORK_CONTEXT).start()
mock.patch(SUBNET_CONTEXT).start()
mock.patch(PORT_CONTEXT).start()
def test_sync_base(self):
sync = apic_sync.ApicBaseSynchronizer(self.driver)
sync.core_plugin = mock.Mock()
sync.core_plugin.get_networks.return_value = [{'id': 'net'}]
sync.core_plugin.get_subnets.return_value = [{'id': 'sub'}]
sync.core_plugin.get_ports.return_value = [{'id': 'port',
'network_id': 'net'}]
sync.sync_base()
self.assertEqual(1, self.driver.create_network_postcommit.call_count)
self.assertEqual(1, self.driver.create_subnet_postcommit.call_count)
self.assertEqual(1, self.get_locked_port_and_binding.call_count)
self.assertEqual(1, self.driver.create_port_postcommit.call_count)
def test_sync_router(self):
sync = apic_sync.ApicRouterSynchronizer(self.driver)
sync.core_plugin = mock.Mock()
sync.core_plugin.get_ports.return_value = [{'id': 'port',
'network_id': 'net',
'device_id': 'dev'}]
sync.sync_router()
self.assertEqual(
1, self.driver.add_router_interface_postcommit.call_count)

View File

@@ -1,206 +0,0 @@
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
sys.modules["apicapi"] = mock.Mock()
from neutron.plugins.ml2.drivers.cisco.apic import apic_topology
from neutron.tests import base
from neutron.tests.unit.plugins.ml2.drivers.cisco.apic import (
base as mocked)
NOTIFIER = ('neutron.plugins.ml2.drivers.cisco.apic.'
'apic_topology.ApicTopologyServiceNotifierApi')
RPC_CONNECTION = 'neutron.common.rpc.Connection'
AGENTS_DB = 'neutron.db.agents_db'
PERIODIC_TASK = 'oslo_service.periodic_task'
DEV_EXISTS = 'neutron.agent.linux.ip_lib.device_exists'
IP_DEVICE = 'neutron.agent.linux.ip_lib.IPDevice'
EXECUTE = 'neutron.agent.linux.utils.execute'
LLDP_CMD = ['lldpctl', '-f', 'keyvalue']
ETH0 = mocked.SERVICE_HOST_IFACE
LLDPCTL_RES = (
'lldp.' + ETH0 + '.via=LLDP\n'
'lldp.' + ETH0 + '.rid=1\n'
'lldp.' + ETH0 + '.age=0 day, 20:55:54\n'
'lldp.' + ETH0 + '.chassis.mac=' + mocked.SERVICE_HOST_MAC + '\n'
'lldp.' + ETH0 + '.chassis.name=' + mocked.SERVICE_PEER_CHASSIS_NAME + '\n'
'lldp.' + ETH0 + '.chassis.descr=' + mocked.SERVICE_PEER_CHASSIS + '\n'
'lldp.' + ETH0 + '.chassis.Bridge.enabled=on\n'
'lldp.' + ETH0 + '.chassis.Router.enabled=on\n'
'lldp.' + ETH0 + '.port.local=' + mocked.SERVICE_PEER_PORT_LOCAL + '\n'
'lldp.' + ETH0 + '.port.descr=' + mocked.SERVICE_PEER_PORT_DESC)
class TestCiscoApicTopologyService(base.BaseTestCase,
mocked.ControllerMixin,
mocked.ConfigMixin):
def setUp(self):
super(TestCiscoApicTopologyService, self).setUp()
mocked.ControllerMixin.set_up_mocks(self)
mocked.ConfigMixin.set_up_mocks(self)
# Patch notifier
notifier_c = mock.patch(NOTIFIER).start()
self.notifier = mock.Mock()
notifier_c.return_value = self.notifier
# Patch Connection
connection_c = mock.patch(RPC_CONNECTION).start()
self.connection = mock.Mock()
connection_c.return_value = self.connection
# Patch agents db
self.agents_db = mock.patch(AGENTS_DB).start()
self.service = apic_topology.ApicTopologyService()
self.service.apic_manager = mock.Mock()
def test_init_host(self):
self.service.init_host()
self.connection.create_consumer.ensure_called_once()
self.connection.consume_in_threads.ensure_called_once()
def test_update_link_add_nopeers(self):
self.service.peers = {}
args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
self.service.update_link(None, *args)
self.service.apic_manager.add_hostlink.assert_called_once_with(*args)
self.assertEqual(args,
self.service.peers[(mocked.SERVICE_HOST,
mocked.SERVICE_HOST_IFACE)])
def test_update_link_add_with_peers_diff(self):
args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
args_prime = args[:2] + tuple(x + '1' for x in args[2:])
self.service.peers = {args_prime[:2]: args_prime}
self.service.update_link(None, *args)
self.service.apic_manager.remove_hostlink.assert_called_once_with(
*args_prime)
self.service.apic_manager.add_hostlink.assert_called_once_with(*args)
self.assertEqual(
args, self.service.peers[
(mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE)])
def test_update_link_add_with_peers_eq(self):
args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC,
mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
self.service.peers = {args[:2]: args}
self.service.update_link(None, *args)
def test_update_link_rem_with_peers(self):
args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, 0,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
self.service.peers = {args[:2]: args}
self.service.update_link(None, *args)
self.service.apic_manager.remove_hostlink.assert_called_once_with(
*args)
self.assertFalse(bool(self.service.peers))
def test_update_link_rem_no_peers(self):
args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, 0,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
self.service.update_link(None, *args)


class TestCiscoApicTopologyAgent(base.BaseTestCase,
mocked.ControllerMixin,
mocked.ConfigMixin):
def setUp(self):
super(TestCiscoApicTopologyAgent, self).setUp()
mocked.ControllerMixin.set_up_mocks(self)
mocked.ConfigMixin.set_up_mocks(self)
# Patch notifier
notifier_c = mock.patch(NOTIFIER).start()
self.notifier = mock.Mock()
notifier_c.return_value = self.notifier
# Patch device_exists
self.dev_exists = mock.patch(DEV_EXISTS).start()
# Patch IPDevice
ipdev_c = mock.patch(IP_DEVICE).start()
self.ipdev = mock.Mock()
ipdev_c.return_value = self.ipdev
self.ipdev.link.address = mocked.SERVICE_HOST_MAC
# Patch execute
self.execute = mock.patch(EXECUTE).start()
self.execute.return_value = LLDPCTL_RES
# Patch tasks
self.periodic_task = mock.patch(PERIODIC_TASK).start()
self.agent = apic_topology.ApicTopologyAgent()
self.agent.host = mocked.SERVICE_HOST
self.agent.service_agent = mock.Mock()
self.agent.lldpcmd = LLDP_CMD
def test_init_host_device_exists(self):
self.agent.lldpcmd = None
self.dev_exists.return_value = True
self.agent.init_host()
self.assertEqual(LLDP_CMD + mocked.APIC_UPLINK_PORTS,
self.agent.lldpcmd)
def test_init_host_device_not_exist(self):
self.agent.lldpcmd = None
self.dev_exists.return_value = False
self.agent.init_host()
self.assertEqual(LLDP_CMD, self.agent.lldpcmd)
def test_get_peers(self):
self.agent.peers = {}
peers = self.agent._get_peers()
expected = [(mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)]
self.assertEqual(expected,
peers[mocked.SERVICE_HOST_IFACE])
def test_check_for_new_peers_no_peers(self):
self.agent.peers = {}
expected = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
peers = {mocked.SERVICE_HOST_IFACE: [expected]}
context = mock.Mock()
with mock.patch.object(self.agent, '_get_peers',
return_value=peers):
self.agent._check_for_new_peers(context)
self.assertEqual(expected,
self.agent.peers[mocked.SERVICE_HOST_IFACE])
self.agent.service_agent.update_link.assert_called_once_with(
context, *expected)
def test_check_for_new_peers_with_peers(self):
expected = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
peers = {mocked.SERVICE_HOST_IFACE: [expected]}
self.agent.peers = {mocked.SERVICE_HOST_IFACE:
[tuple(x + '1' for x in expected)]}
context = mock.Mock()
with mock.patch.object(self.agent, '_get_peers',
return_value=peers):
self.agent._check_for_new_peers(context)
self.agent.service_agent.update_link.assert_called_with(
context, *expected)
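
The LLDPCTL_RES fixture above mimics the keyvalue output of "lldpctl -f
keyvalue", which the topology agent scrapes to learn which fabric switch and
port each host interface is cabled to. As a rough illustration (a hedged
sketch, not the removed driver's actual parser, and the helper name is made
up), that format reduces to a flat dict like so:

# Illustrative sketch only -- not code from the removed driver.
# Reduce "lldpctl -f keyvalue" output (one "dotted.key=value" per line)
# to a dict keyed by the dotted attribute name.
def parse_lldpctl_keyvalue(output):
    attrs = {}
    for line in output.splitlines():
        key, sep, value = line.partition('=')
        if sep:  # ignore lines that are not key=value pairs
            attrs[key.strip()] = value.strip()
    return attrs

# e.g. parse_lldpctl_keyvalue(LLDPCTL_RES)['lldp.' + ETH0 + '.chassis.name']
# evaluates to mocked.SERVICE_PEER_CHASSIS_NAME.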


@ -1,336 +0,0 @@
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
sys.modules["apicapi"] = mock.Mock()
from neutron.common import constants as n_constants
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as md
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.tests import base
from neutron.tests.unit.plugins.ml2.drivers.cisco.apic import (
base as mocked)
HOST_ID1 = 'ubuntu'
HOST_ID2 = 'rhel'
ENCAP = '101'
SUBNET_GATEWAY = '10.3.2.1'
SUBNET_CIDR = '10.3.1.0/24'
SUBNET_NETMASK = '24'
TEST_SEGMENT1 = 'test-segment1'
TEST_SEGMENT2 = 'test-segment2'


class TestCiscoApicMechDriver(base.BaseTestCase,
mocked.ControllerMixin,
mocked.ConfigMixin):
def setUp(self):
super(TestCiscoApicMechDriver, self).setUp()
mocked.ControllerMixin.set_up_mocks(self)
mocked.ConfigMixin.set_up_mocks(self)
self.mock_apic_manager_login_responses()
self.driver = md.APICMechanismDriver()
self.driver.synchronizer = None
md.APICMechanismDriver.get_base_synchronizer = mock.Mock()
self.driver.vif_type = 'test-vif_type'
self.driver.cap_port_filter = 'test-cap_port_filter'
self.driver.name_mapper = mock.Mock()
self.driver.name_mapper.tenant.return_value = mocked.APIC_TENANT
self.driver.name_mapper.network.return_value = mocked.APIC_NETWORK
self.driver.name_mapper.subnet.return_value = mocked.APIC_SUBNET
self.driver.name_mapper.port.return_value = mocked.APIC_PORT
self.driver.name_mapper.router.return_value = mocked.APIC_ROUTER
self.driver.name_mapper.app_profile.return_value = mocked.APIC_AP
self.driver.apic_manager = mock.Mock(
name_mapper=mock.Mock(), ext_net_dict=self.external_network_dict)
self.driver.apic_manager.apic.transaction = self.fake_transaction
def test_initialize(self):
self.driver.initialize()
mgr = self.driver.apic_manager
self.assertEqual(1, mgr.ensure_infra_created_on_apic.call_count)
self.assertEqual(1,
mgr.ensure_bgp_pod_policy_created_on_apic.call_count)
def test_update_port_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
'vm1', net_ctx, HOST_ID1,
device_owner='any')
mgr = self.driver.apic_manager
self.driver.update_port_postcommit(port_ctx)
mgr.ensure_path_created_for_port.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK, HOST_ID1,
ENCAP, transaction='transaction')
def test_create_port_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
'vm1', net_ctx, HOST_ID1,
device_owner='any')
mgr = self.driver.apic_manager
self.driver.create_port_postcommit(port_ctx)
mgr.ensure_path_created_for_port.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK, HOST_ID1,
ENCAP, transaction='transaction')
def test_update_port_nobound_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
'vm1', net_ctx, None,
device_owner='any')
self.driver.update_port_postcommit(port_ctx)
mgr = self.driver.apic_manager
self.assertFalse(mgr.ensure_path_created_for_port.called)
def test_create_port_nobound_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
'vm1', net_ctx, None,
device_owner='any')
self.driver.create_port_postcommit(port_ctx)
mgr = self.driver.apic_manager
self.assertFalse(mgr.ensure_path_created_for_port.called)
def test_update_gw_port_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1, external=True)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
'vm1', net_ctx, HOST_ID1, gw=True)
mgr = self.driver.apic_manager
mgr.get_router_contract.return_value = mocked.FakeDbContract(
mocked.APIC_CONTRACT)
self.driver.update_port_postcommit(port_ctx)
mgr.get_router_contract.assert_called_once_with(
port_ctx.current['device_id'])
self.assertEqual(1, mgr.ensure_context_enforced.call_count)
mgr.ensure_external_routed_network_created.assert_called_once_with(
mocked.APIC_NETWORK, transaction='transaction')
mgr.ensure_logical_node_profile_created.assert_called_once_with(
mocked.APIC_NETWORK, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, mocked.APIC_EXT_CIDR_EXPOSED,
transaction='transaction')
mgr.ensure_static_route_created.assert_called_once_with(
mocked.APIC_NETWORK, mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_GATEWAY_IP, transaction='transaction')
mgr.ensure_external_epg_created.assert_called_once_with(
mocked.APIC_NETWORK, transaction='transaction')
mgr.ensure_external_epg_consumed_contract.assert_called_once_with(
mocked.APIC_NETWORK, mgr.get_router_contract.return_value,
transaction='transaction')
mgr.ensure_external_epg_provided_contract.assert_called_once_with(
mocked.APIC_NETWORK, mgr.get_router_contract.return_value,
transaction='transaction')
def test_create_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
mgr = self.driver.apic_manager
self.driver.create_network_postcommit(ctx)
mgr.ensure_bd_created_on_apic.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK, transaction='transaction')
mgr.ensure_epg_created.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK, transaction='transaction')
def test_create_external_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1, external=True)
mgr = self.driver.apic_manager
self.driver.create_network_postcommit(ctx)
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.ensure_epg_created.called)
def test_delete_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
mgr = self.driver.apic_manager
self.driver.delete_network_postcommit(ctx)
mgr.delete_bd_on_apic.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK, transaction='transaction')
mgr.delete_epg_for_network.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK, transaction='transaction')
def test_delete_external_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1, external=True)
mgr = self.driver.apic_manager
self.driver.delete_network_postcommit(ctx)
mgr.delete_external_routed_network.assert_called_once_with(
mocked.APIC_NETWORK)
def test_create_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.create_subnet_postcommit(subnet_ctx)
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_NETWORK,
'%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK))
def test_create_subnet_nogw_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
subnet_ctx = self._get_subnet_context(None,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.create_subnet_postcommit(subnet_ctx)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
def _get_network_context(self, tenant_id, net_id, seg_id=None,
seg_type='vlan', external=False):
network = {'id': net_id,
'name': net_id + '-name',
'tenant_id': tenant_id,
'provider:segmentation_id': seg_id}
if external:
network['router:external'] = True
if seg_id:
network_segments = [{'id': seg_id,
'segmentation_id': ENCAP,
'network_type': seg_type,
'physical_network': 'physnet1'}]
else:
network_segments = []
return FakeNetworkContext(network, network_segments)
def _get_subnet_context(self, gateway_ip, cidr, network):
subnet = {'tenant_id': network.current['tenant_id'],
'network_id': network.current['id'],
'id': '[%s/%s]' % (gateway_ip, cidr),
'gateway_ip': gateway_ip,
'cidr': cidr}
return FakeSubnetContext(subnet, network)
def _get_port_context(self, tenant_id, net_id, vm_id, network, host,
gw=False, device_owner='compute'):
port = {'device_id': vm_id,
'device_owner': device_owner,
'binding:host_id': host,
'tenant_id': tenant_id,
'id': mocked.APIC_PORT,
'name': mocked.APIC_PORT,
'network_id': net_id}
if gw:
port['device_owner'] = n_constants.DEVICE_OWNER_ROUTER_GW
port['device_id'] = mocked.APIC_ROUTER
return FakePortContext(port, network)


class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
def __init__(self, network, segments):
self._network = network
self._segments = segments
@property
def current(self):
return self._network
@property
def network_segments(self):
return self._segments


class FakeSubnetContext(object):
"""To generate subnet context for testing purposes only."""
def __init__(self, subnet, network):
self._subnet = subnet
self._network = network
self._plugin = mock.Mock()
self._plugin_context = mock.Mock()
self._plugin.get_network.return_value = {}
@property
def current(self):
return self._subnet
@property
def network(self):
return self._network


class FakePortContext(object):
"""To generate port context for testing purposes only."""
    def __init__(self, port, network):
        self._port = port
        # Keep a copy so that the original_host property below does not
        # raise AttributeError when it is consulted.
        self._original_port = port
        self._network = network
self._plugin = mock.Mock()
self._plugin_context = mock.Mock()
self._plugin.get_ports.return_value = []
if network.network_segments:
self._bound_segment = network.network_segments[0]
else:
self._bound_segment = None
@property
def current(self):
return self._port
@property
def network(self):
return self._network
@property
def top_bound_segment(self):
return self._bound_segment
def set_binding(self, segment_id, vif_type, cap_port_filter):
pass
@property
def host(self):
return self._port.get(portbindings.HOST_ID)
@property
def original_host(self):
return self._original_port.get(portbindings.HOST_ID)
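
The Fake*Context classes above stand in for the ML2 driver contexts that
neutron hands to a mechanism driver's *_postcommit hooks. A hedged usage
sketch (the literal IDs are invented for illustration and are not fixtures
from this module):

# Illustrative only: wire a fake port context to a fake network context.
segment = {'id': 'seg-1', 'segmentation_id': ENCAP,
           'network_type': 'vlan', 'physical_network': 'physnet1'}
net_ctx = FakeNetworkContext({'id': 'net-1', 'tenant_id': 'tenant-1'},
                             [segment])
port_ctx = FakePortContext({'id': 'port-1', 'network_id': 'net-1',
                            'binding:host_id': HOST_ID1,
                            'device_owner': 'compute', 'device_id': 'vm-1',
                            'tenant_id': 'tenant-1'},
                           net_ctx)
# The driver reads the bound segment and the scheduling host off the context.
assert port_ctx.top_bound_segment is segment
assert port_ctx.host == HOST_ID1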


@ -1,134 +0,0 @@
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
sys.modules["apicapi"] = mock.Mock()
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as md
from neutron.services.l3_router import l3_apic
from neutron.tests.unit.plugins.ml2.drivers.cisco.apic import base as mocked
from neutron.tests.unit import testlib_api
TENANT = 'tenant1'
TENANT_CONTRACT = 'abcd'
ROUTER = 'router1'
SUBNET = 'subnet1'
NETWORK = 'network1'
PORT = 'port1'
NETWORK_NAME = 'one_network'
NETWORK_EPG = 'one_network-epg'
TEST_SEGMENT1 = 'test-segment1'
SUBNET_GATEWAY = '10.3.2.1'
SUBNET_CIDR = '10.3.1.0/24'
SUBNET_NETMASK = '24'


class FakeContext(object):
def __init__(self):
self.tenant_id = None


class FakeContract(object):
def __init__(self):
self.contract_id = '123'


class FakeEpg(object):
def __init__(self):
self.epg_id = 'abcd_epg'


class FakePort(object):
def __init__(self):
self.id = 'Fake_port_id'
self.network_id = NETWORK
self.subnet_id = SUBNET


class TestCiscoApicL3Plugin(testlib_api.SqlTestCase,
mocked.ControllerMixin,
mocked.ConfigMixin):
def setUp(self):
super(TestCiscoApicL3Plugin, self).setUp()
mock.patch('neutron.plugins.ml2.drivers.cisco.apic.apic_model.'
'ApicDbModel').start()
mocked.ControllerMixin.set_up_mocks(self)
mocked.ConfigMixin.set_up_mocks(self)
self.plugin = l3_apic.ApicL3ServicePlugin()
md.APICMechanismDriver.get_router_synchronizer = mock.Mock()
self.context = FakeContext()
self.context.tenant_id = TENANT
self.interface_info = {'subnet': {'subnet_id': SUBNET},
'port': {'port_id': PORT}}
self.subnet = {'network_id': NETWORK, 'tenant_id': TENANT}
self.port = {'tenant_id': TENANT,
'network_id': NETWORK,
'fixed_ips': [{'subnet_id': SUBNET}]}
self.plugin.name_mapper = mock.Mock()
l3_apic.apic_mapper.mapper_context = self.fake_transaction
self.plugin.name_mapper.tenant.return_value = mocked.APIC_TENANT
self.plugin.name_mapper.network.return_value = mocked.APIC_NETWORK
self.plugin.name_mapper.subnet.return_value = mocked.APIC_SUBNET
self.plugin.name_mapper.port.return_value = mocked.APIC_PORT
self.plugin.name_mapper.router.return_value = mocked.APIC_ROUTER
self.plugin.name_mapper.app_profile.return_value = mocked.APIC_AP
self.contract = FakeContract()
self.plugin.get_router = mock.Mock(
return_value={'id': ROUTER, 'admin_state_up': True})
self.plugin.manager = mock.Mock()
self.plugin.manager.apic.transaction = self.fake_transaction
self.plugin.get_subnet = mock.Mock(return_value=self.subnet)
self.plugin.get_network = mock.Mock(return_value=self.interface_info)
self.plugin.get_port = mock.Mock(return_value=self.port)
mock.patch('neutron.db.l3_dvr_db.L3_NAT_with_dvr_db_mixin.'
'_core_plugin').start()
mock.patch('neutron.db.l3_dvr_db.L3_NAT_with_dvr_db_mixin.'
'add_router_interface').start()
mock.patch('neutron.db.l3_dvr_db.L3_NAT_with_dvr_db_mixin.'
'remove_router_interface').start()
mock.patch('oslo_utils.excutils.save_and_reraise_exception').start()
def _test_add_router_interface(self, interface_info):
mgr = self.plugin.manager
self.plugin.add_router_interface(self.context, ROUTER, interface_info)
mgr.create_router.assert_called_once_with(mocked.APIC_ROUTER,
transaction='transaction')
mgr.add_router_interface.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_ROUTER, mocked.APIC_NETWORK)
def _test_remove_router_interface(self, interface_info):
mgr = self.plugin.manager
self.plugin.remove_router_interface(self.context, ROUTER,
interface_info)
mgr.remove_router_interface.assert_called_once_with(
mocked.APIC_TENANT, mocked.APIC_ROUTER, mocked.APIC_NETWORK)
def test_add_router_interface_subnet(self):
self._test_add_router_interface(self.interface_info['subnet'])
def test_add_router_interface_port(self):
self._test_add_router_interface(self.interface_info['port'])
def test_remove_router_interface_subnet(self):
self._test_remove_router_interface(self.interface_info['subnet'])
def test_remove_router_interface_port(self):
self._test_remove_router_interface(self.interface_info['port'])
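
Both variants exercise the standard Neutron shapes for attaching a router
interface: by subnet or by port, never both at once. Roughly (a sketch
reusing the constants defined above):

# The two request bodies driven by the tests above.
by_subnet = {'subnet_id': SUBNET}  # a new router port using the subnet gateway
by_port = {'port_id': PORT}        # reuse an existing port as the interface
# e.g. plugin.add_router_interface(context, ROUTER, by_subnet)
#      plugin.remove_router_interface(context, ROUTER, by_port)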


@ -111,8 +111,6 @@ console_scripts =
neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main
neutron-sriov-nic-agent = neutron.plugins.ml2.drivers.mech_sriov.agent.sriov_nic_agent:main
neutron-sanity-check = neutron.cmd.sanity_check:main
neutron-cisco-apic-service-agent = neutron.plugins.ml2.drivers.cisco.apic.apic_topology:service_main
neutron-cisco-apic-host-agent = neutron.plugins.ml2.drivers.cisco.apic.apic_topology:agent_main
neutron.core_plugins =
bigswitch = neutron.plugins.bigswitch.plugin:NeutronRestProxyV2
brocade = neutron.plugins.brocade.NeutronPlugin:BrocadePluginV2
@ -175,7 +173,6 @@ neutron.ml2.mechanism_drivers =
ncs = neutron.plugins.ml2.drivers.cisco.ncs.driver:NCSMechanismDriver
cisco_ncs = neutron.plugins.ml2.drivers.cisco.ncs.driver:NCSMechanismDriver
cisco_nexus = neutron.plugins.ml2.drivers.cisco.nexus.mech_cisco_nexus:CiscoNexusMechanismDriver
cisco_apic = neutron.plugins.ml2.drivers.cisco.apic.mechanism_apic:APICMechanismDriver
cisco_n1kv = neutron.plugins.ml2.drivers.cisco.n1kv.mech_cisco_n1kv:N1KVMechanismDriver
cisco_ucsm = neutron.plugins.ml2.drivers.cisco.ucsm.mech_cisco_ucsm:CiscoUcsmMechanismDriver
l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
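
With these in-tree entry points gone, a deployment that still wants the APIC
driver has to install the out-of-tree package so stevedore can resolve the
alias. A hedged sketch (it assumes, without verifying here, that
networking-cisco keeps registering the same cisco_apic entry point name):

# Hypothetical deployment sketch; check the networking-cisco repo for the
# authoritative entry point names.
#   pip install networking-cisco
#
# /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
mechanism_drivers = openvswitch,cisco_apic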


@ -105,7 +105,6 @@ commands = sphinx-build -W -b html doc/source doc/build/html
commands = python -m testtools.run \
neutron.tests.unit.services.metering.drivers.test_iptables \
neutron.tests.unit.services.metering.agents.test_metering_agent \
neutron.tests.unit.services.l3_router.test_l3_apic \
neutron.tests.unit.services.test_provider_configuration \
neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_sriov_nic_agent \
neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_eswitch_manager \
@ -142,9 +141,6 @@ commands = python -m testtools.run \
neutron.tests.unit.plugins.ml2.drivers.arista.test_mechanism_arista \
neutron.tests.unit.plugins.ml2.drivers.test_type_local \
neutron.tests.unit.plugins.ml2.drivers.mechanism_logger \
neutron.tests.unit.plugins.ml2.drivers.cisco.apic.test_apic_sync \
neutron.tests.unit.plugins.ml2.drivers.cisco.apic.base \
neutron.tests.unit.plugins.ml2.drivers.cisco.apic.test_apic_topology \
neutron.tests.unit.plugins.ml2.drivers.test_type_flat \
neutron.tests.unit.plugins.ml2.drivers.test_type_vlan \
neutron.tests.unit.plugins.ml2.drivers.mechanism_test \