[AIM] ML2 driver changes for external connectivity
This change adds support for external Neutron networks
and floating IPs. Address translation through external
networks is supported in several different ways (no NAT,
distributed NAT, edge NAT). The orchestration details
of each of these modes are abstracted in the NatStrategy
interface provided by AIM. The mechanism driver mainly
translates Neutron operations into appropriate
invocations of this utility class.
Several attribute extensions have been added for external
connectivity support -
* Network:
1. ExternalNetwork DN: This is the DN of the ACI
external-network (within an L3out) that corresponds
to a Neutron network
2. NAT type: To support various ways of address
translation
3. External CIDRs: Restrictions on traffic, based on
their address, that can use an external network
* Subnet:
1. SNAT host pool: Whether a subnet should be used
for allocating SNAT addresses for distributed NAT
* Router:
1. External contracts: Additional contracts that an
external network should provide/consume when a
router is uplinked to the external network
Most of the mechanism driver change deals with handling
these attributes; the code that is involved deals with
determining the proper VRF and applicable contracts
based on current router topology.
Changes are also made to L3-plugin to handle floating-IP
operations, and to report the floating-IPs in GBP RPCs.
Change-Id: I80d7be7fde27b4dcf6987fd3984c301ed5e5d437
Signed-off-by: Amit Bose <amitbose@gmail.com>
(cherry picked from commit 95051a5daa)
This commit is contained in:
@@ -0,0 +1,155 @@
|
||||
# Copyright (c) 2016 Cisco Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.db import model_base
|
||||
import sqlalchemy as sa
|
||||
|
||||
from gbpservice.neutron.extensions import cisco_apic
|
||||
from gbpservice.neutron.extensions import cisco_apic_l3
|
||||
|
||||
|
||||
class NetworkExtensionDb(model_base.BASEV2):
    """Extension attributes for external connectivity of a network.

    One row per Neutron network that carries APIC external-connectivity
    attributes; rows are removed automatically when the network is
    deleted (ondelete CASCADE).
    """

    __tablename__ = 'apic_aim_network_extensions'

    network_id = sa.Column(
        sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"),
        primary_key=True)
    # DN of the ACI external-network (within an L3out) that corresponds
    # to this Neutron network.
    external_network_dn = sa.Column(sa.String(1024))
    # Address-translation mode; '' selects no NAT.
    nat_type = sa.Column(sa.Enum('distributed', 'edge', ''))
|
||||
|
||||
|
||||
class NetworkExtensionCidrDb(model_base.BASEV2):
    """External CIDRs associated with a network, one row per CIDR.

    Restricts, by source/destination address, the traffic that may use
    the external network.
    """

    __tablename__ = 'apic_aim_network_external_cidrs'

    network_id = sa.Column(
        sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"),
        primary_key=True)
    cidr = sa.Column(sa.String(64), primary_key=True)
|
||||
|
||||
|
||||
class SubnetExtensionDb(model_base.BASEV2):
    """Extension attributes for a subnet.

    Records whether the subnet is used for allocating SNAT addresses
    with distributed NAT.
    """

    __tablename__ = 'apic_aim_subnet_extensions'

    subnet_id = sa.Column(
        sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"),
        primary_key=True)
    # True when the subnet serves as a SNAT host pool.
    snat_host_pool = sa.Column(sa.Boolean)
|
||||
|
||||
|
||||
class RouterExtensionContractDb(model_base.BASEV2):
    """External contracts for a router, one row per contract and direction.

    A contract is either provided or consumed (the ``provides`` flag) by
    the external network when the router is uplinked to it.
    """

    __tablename__ = 'apic_aim_router_external_contracts'

    router_id = sa.Column(
        sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"),
        primary_key=True)
    contract_name = sa.Column(sa.String(64), primary_key=True)
    # True for a provided contract, False for a consumed one.
    provides = sa.Column(sa.Boolean, primary_key=True)
|
||||
|
||||
|
||||
class ExtensionDbMixin(object):
    """CRUD helpers for the APIC AIM extension tables."""

    def _set_if_not_none(self, res_dict, res_attr, db_attr):
        # Copy a DB column value into the result dict, skipping NULLs.
        if db_attr is not None:
            res_dict[res_attr] = db_attr

    def get_network_extn_db(self, session, network_id):
        """Return the extension attributes stored for a network."""
        row = (session.query(NetworkExtensionDb)
               .filter_by(network_id=network_id).first())
        cidr_rows = (session.query(NetworkExtensionCidrDb)
                     .filter_by(network_id=network_id).all())
        result = {}
        if row:
            self._set_if_not_none(result, cisco_apic.EXTERNAL_NETWORK,
                                  row['external_network_dn'])
            self._set_if_not_none(result, cisco_apic.NAT_TYPE,
                                  row['nat_type'])
        # CIDRs are only meaningful for networks with an external DN.
        if result.get(cisco_apic.EXTERNAL_NETWORK):
            result[cisco_apic.EXTERNAL_CIDRS] = [r['cidr']
                                                 for r in cidr_rows]
        return result

    def set_network_extn_db(self, session, network_id, res_dict):
        """Persist network extension attributes from ``res_dict``."""
        with session.begin(subtransactions=True):
            row = (session.query(NetworkExtensionDb)
                   .filter_by(network_id=network_id).first())
            if not row:
                row = NetworkExtensionDb(network_id=network_id)
            if cisco_apic.EXTERNAL_NETWORK in res_dict:
                row['external_network_dn'] = (
                    res_dict[cisco_apic.EXTERNAL_NETWORK])
            if cisco_apic.NAT_TYPE in res_dict:
                row['nat_type'] = res_dict[cisco_apic.NAT_TYPE]
            session.add(row)
            if cisco_apic.EXTERNAL_CIDRS in res_dict:
                self._update_list_attr(
                    session, NetworkExtensionCidrDb, 'cidr',
                    res_dict[cisco_apic.EXTERNAL_CIDRS],
                    network_id=network_id)
            return True

    def get_subnet_extn_db(self, session, subnet_id):
        """Return the extension attributes stored for a subnet."""
        row = (session.query(SubnetExtensionDb)
               .filter_by(subnet_id=subnet_id).first())
        result = {}
        if row:
            self._set_if_not_none(result, cisco_apic.SNAT_HOST_POOL,
                                  row['snat_host_pool'])
        return result

    def set_subnet_extn_db(self, session, subnet_id, res_dict):
        """Persist subnet extension attributes from ``res_dict``."""
        row = (session.query(SubnetExtensionDb)
               .filter_by(subnet_id=subnet_id).first())
        if not row:
            row = SubnetExtensionDb(subnet_id=subnet_id)
        if cisco_apic.SNAT_HOST_POOL in res_dict:
            row['snat_host_pool'] = res_dict[cisco_apic.SNAT_HOST_POOL]
        session.add(row)

    def get_router_extn_db(self, session, router_id):
        """Return provided/consumed external contract names for a router."""
        contracts = (session.query(RouterExtensionContractDb)
                     .filter_by(router_id=router_id).all())
        provided = [c['contract_name'] for c in contracts if c['provides']]
        consumed = [c['contract_name'] for c in contracts
                    if not c['provides']]
        return {cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS: provided,
                cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS: consumed}

    def _update_list_attr(self, session, db_model, column,
                          new_values, **filters):
        """Reconcile db_model rows so ``column`` values match new_values.

        Existing rows not in new_values are deleted; missing values are
        inserted with the given filters as the remaining columns.
        """
        wanted = set(new_values)
        for row in session.query(db_model).filter_by(**filters).all():
            value = row[column]
            if value in wanted:
                wanted.discard(value)
            else:
                session.delete(row)
        for value in wanted:
            kwargs = dict(filters)
            kwargs[column] = value
            session.add(db_model(**kwargs))

    def set_router_extn_db(self, session, router_id, res_dict):
        """Persist provided/consumed external contracts for a router."""
        with session.begin(subtransactions=True):
            for attr, provides in (
                    (cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS, True),
                    (cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS, False)):
                if attr in res_dict:
                    self._update_list_attr(
                        session, RouterExtensionContractDb,
                        'contract_name', res_dict[attr],
                        router_id=router_id, provides=provides)
|
||||
@@ -17,16 +17,24 @@ from neutron._i18n import _LE
|
||||
from neutron._i18n import _LI
|
||||
from neutron.api import extensions
|
||||
from neutron import manager as n_manager
|
||||
from neutron_lib import exceptions as n_exc
|
||||
from oslo_log import log
|
||||
from oslo_utils import excutils
|
||||
|
||||
from aim.api import resource as aim_res
|
||||
from aim import exceptions as aim_exc
|
||||
|
||||
from gbpservice.neutron import extensions as extensions_pkg
|
||||
from gbpservice.neutron.extensions import cisco_apic
|
||||
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
|
||||
extension_db as extn_db)
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class ApicExtensionDriver(api_plus.ExtensionDriver):
|
||||
class ApicExtensionDriver(api_plus.ExtensionDriver,
|
||||
extn_db.ExtensionDbMixin):
|
||||
|
||||
def __init__(self):
|
||||
LOG.info(_LI("APIC AIM ED __init__"))
|
||||
@@ -54,17 +62,72 @@ class ApicExtensionDriver(api_plus.ExtensionDriver):
|
||||
def extend_network_dict(self, session, base_model, result):
    """Merge APIC extension attributes into the network dict.

    The external-network DN is relocated under DIST_NAMES; all other
    stored attributes are merged in directly.
    """
    try:
        self._md.extend_network_dict(session, base_model, result)
        res_dict = self.get_network_extn_db(session, result['id'])
        if cisco_apic.EXTERNAL_NETWORK in res_dict:
            dn = res_dict.pop(cisco_apic.EXTERNAL_NETWORK)
            dist_names = result.setdefault(cisco_apic.DIST_NAMES, {})
            dist_names[cisco_apic.EXTERNAL_NETWORK] = dn
        result.update(res_dict)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("APIC AIM extend_network_dict failed"))
|
||||
|
||||
def process_create_network(self, plugin_context, data, result):
    """Validate and persist external-connectivity attributes on create.

    A network is treated as APIC-external only when an ExternalNetwork
    DN was supplied under DIST_NAMES; otherwise this is a no-op.
    """
    dist_names = data.get(cisco_apic.DIST_NAMES)
    dn = (dist_names.get(cisco_apic.EXTERNAL_NETWORK)
          if dist_names else None)
    if not dn:
        return
    # Reject DNs that do not parse as an ACI ExternalNetwork.
    try:
        aim_res.ExternalNetwork.from_dn(dn)
    except aim_exc.InvalidDNForAciResource:
        raise n_exc.InvalidInput(
            error_message=('%s is not valid ExternalNetwork DN' % dn))
    res_dict = {
        cisco_apic.EXTERNAL_NETWORK: dn,
        cisco_apic.NAT_TYPE: data.get(cisco_apic.NAT_TYPE,
                                      'distributed'),
        cisco_apic.EXTERNAL_CIDRS: data.get(cisco_apic.EXTERNAL_CIDRS,
                                            ['0.0.0.0/0']),
    }
    self.set_network_extn_db(plugin_context.session, result['id'],
                             res_dict)
    # Surface the DN under DIST_NAMES in the response dict.
    result.setdefault(cisco_apic.DIST_NAMES, {})[
        cisco_apic.EXTERNAL_NETWORK] = res_dict.pop(
            cisco_apic.EXTERNAL_NETWORK)
    result.update(res_dict)
|
||||
|
||||
def process_update_network(self, plugin_context, data, result):
    """Persist updated external CIDRs for an external network.

    Only the external-CIDRs attribute is mutable after creation; the
    DN and NAT type are create-only, so requests without EXTERNAL_CIDRS
    are ignored, as are updates to non-external networks.
    """
    # Only CIDRs can be updated.  (Fixed `not X in` -> `X not in`.)
    if cisco_apic.EXTERNAL_CIDRS not in data:
        return
    if result.get(cisco_apic.DIST_NAMES, {}).get(
            cisco_apic.EXTERNAL_NETWORK):
        res_dict = {cisco_apic.EXTERNAL_CIDRS:
                    data[cisco_apic.EXTERNAL_CIDRS]}
        self.set_network_extn_db(plugin_context.session, result['id'],
                                 res_dict)
        result.update(res_dict)
|
||||
|
||||
def extend_subnet_dict(self, session, base_model, result):
    """Merge the SNAT-host-pool flag into the subnet dict.

    Defaults to False when no extension row exists for the subnet.
    """
    try:
        self._md.extend_subnet_dict(session, base_model, result)
        res_dict = self.get_subnet_extn_db(session, result['id'])
        result[cisco_apic.SNAT_HOST_POOL] = res_dict.get(
            cisco_apic.SNAT_HOST_POOL, False)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("APIC AIM extend_subnet_dict failed"))
|
||||
|
||||
def process_create_subnet(self, plugin_context, data, result):
    """Persist the SNAT-host-pool flag when a subnet is created."""
    snat = data.get(cisco_apic.SNAT_HOST_POOL, False)
    self.set_subnet_extn_db(plugin_context.session, result['id'],
                            {cisco_apic.SNAT_HOST_POOL: snat})
|
||||
|
||||
def process_update_subnet(self, plugin_context, data, result):
    """Persist an updated SNAT-host-pool flag for a subnet.

    No-op when the request does not touch the SNAT_HOST_POOL attribute.
    """
    # Fixed `not X in` -> `X not in` (PEP 8 E713).
    if cisco_apic.SNAT_HOST_POOL not in data:
        return
    res_dict = {cisco_apic.SNAT_HOST_POOL:
                data[cisco_apic.SNAT_HOST_POOL]}
    self.set_subnet_extn_db(plugin_context.session, result['id'],
                            res_dict)
|
||||
|
||||
def extend_address_scope_dict(self, session, base_model, result):
|
||||
try:
|
||||
self._md.extend_address_scope_dict(session, base_model, result)
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from aim.aim_lib import nat_strategy
|
||||
from aim import aim_manager
|
||||
from aim.api import resource as aim_resource
|
||||
from aim.common import utils
|
||||
@@ -23,6 +24,7 @@ from neutron._i18n import _LI
|
||||
from neutron._i18n import _LW
|
||||
from neutron.common import constants as n_constants
|
||||
from neutron.common import exceptions
|
||||
from neutron.common import topics as n_topics
|
||||
from neutron.db import address_scope_db
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db import l3_db
|
||||
@@ -32,13 +34,15 @@ from neutron import manager
|
||||
from neutron.plugins.common import constants as pconst
|
||||
from neutron.plugins.ml2 import driver_api as api
|
||||
from opflexagent import constants as ofcst
|
||||
from opflexagent import rpc as ofrpc
|
||||
from oslo_log import log
|
||||
|
||||
from gbpservice.neutron.extensions import cisco_apic
|
||||
from gbpservice.neutron.extensions import cisco_apic_l3
|
||||
from gbpservice.neutron.extensions import cisco_apic_l3 as a_l3
|
||||
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import cache
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import extension_db
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import model
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
@@ -65,6 +69,9 @@ class UnsupportedRoutingTopology(exceptions.BadRequest):
|
||||
"same router or the same subnet.")
|
||||
|
||||
|
||||
NO_ADDR_SCOPE = object()
|
||||
|
||||
|
||||
class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
# TODO(rkukura): Derivations of tenant_aname throughout need to
|
||||
# take sharing into account.
|
||||
@@ -90,6 +97,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
self._set_enable_dhcp_opt, 'enable_optimized_dhcp', 'apic')
|
||||
self.ap_name = self.aim_cfg_mgr.get_option_and_subscribe(
|
||||
self._set_ap_name, 'apic_app_profile_name', 'apic')
|
||||
self.notifier = ofrpc.AgentNotifierApi(n_topics.AGENT)
|
||||
|
||||
def ensure_tenant(self, plugin_context, tenant_id):
|
||||
LOG.debug("APIC AIM MD ensuring tenant_id: %s", tenant_id)
|
||||
@@ -136,48 +144,65 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
LOG.debug("APIC AIM MD creating network: %s", context.current)
|
||||
|
||||
session = context._plugin_context.session
|
||||
|
||||
tenant_id = context.current['tenant_id']
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': tenant_id, 'aname': tenant_aname})
|
||||
|
||||
id = context.current['id']
|
||||
name = context.current['name']
|
||||
aname = self.name_mapper.network(session, id, name)
|
||||
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
dname = aim_utils.sanitize_display_name(name)
|
||||
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
network = context.current
|
||||
|
||||
vrf = self._get_unrouted_vrf(aim_ctx)
|
||||
if self._is_external(network):
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy(network)
|
||||
if not ext_net:
|
||||
return # Unmanaged external network
|
||||
ns.create_l3outside(aim_ctx, l3out)
|
||||
ns.create_external_network(aim_ctx, ext_net)
|
||||
ns.update_external_cidrs(aim_ctx, ext_net,
|
||||
network[cisco_apic.EXTERNAL_CIDRS])
|
||||
else:
|
||||
tenant_id = context.current['tenant_id']
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': tenant_id, 'aname': tenant_aname})
|
||||
|
||||
# REVISIT(rkukura): When AIM changes default
|
||||
# ep_move_detect_mode value to 'garp', remove it here.
|
||||
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname,
|
||||
display_name=dname,
|
||||
vrf_name=vrf.name,
|
||||
enable_arp_flood=True,
|
||||
enable_routing=False,
|
||||
limit_ip_learn_to_subnets=True,
|
||||
ep_move_detect_mode='garp')
|
||||
self.aim.create(aim_ctx, bd)
|
||||
vmms, phys = self.get_aim_domains(aim_ctx)
|
||||
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname, display_name=dname,
|
||||
bd_name=aname,
|
||||
openstack_vmm_domain_names=vmms,
|
||||
physical_domain_names=phys)
|
||||
self.aim.create(aim_ctx, epg)
|
||||
id = context.current['id']
|
||||
name = context.current['name']
|
||||
aname = self.name_mapper.network(session, id, name)
|
||||
LOG.debug("Mapped network_id %(id)s with name %(name)s "
|
||||
"to %(aname)s",
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
dname = aim_utils.sanitize_display_name(name)
|
||||
|
||||
vrf = self._get_unrouted_vrf(aim_ctx)
|
||||
|
||||
# REVISIT(rkukura): When AIM changes default
|
||||
# ep_move_detect_mode value to 'garp', remove it here.
|
||||
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname,
|
||||
display_name=dname,
|
||||
vrf_name=vrf.name,
|
||||
enable_arp_flood=True,
|
||||
enable_routing=False,
|
||||
limit_ip_learn_to_subnets=True,
|
||||
ep_move_detect_mode='garp')
|
||||
self.aim.create(aim_ctx, bd)
|
||||
vmms, phys = self.get_aim_domains(aim_ctx)
|
||||
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname, display_name=dname,
|
||||
bd_name=aname,
|
||||
openstack_vmm_domain_names=vmms,
|
||||
physical_domain_names=phys)
|
||||
self.aim.create(aim_ctx, epg)
|
||||
|
||||
def update_network_precommit(self, context):
|
||||
LOG.debug("APIC AIM MD updating network: %s", context.current)
|
||||
|
||||
if context.current['name'] != context.original['name']:
|
||||
session = context._plugin_context.session
|
||||
# TODO(amitbose) - Handle inter-conversion between external and
|
||||
# private networks
|
||||
|
||||
session = context._plugin_context.session
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
|
||||
is_ext = self._is_external(context.current)
|
||||
if (not is_ext and
|
||||
context.current['name'] != context.original['name']):
|
||||
|
||||
tenant_id = context.current['tenant_id']
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
@@ -192,8 +217,6 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
dname = aim_utils.sanitize_display_name(context.current['name'])
|
||||
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
|
||||
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname)
|
||||
bd = self.aim.update(aim_ctx, bd, display_name=dname)
|
||||
@@ -202,137 +225,191 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname)
|
||||
epg = self.aim.update(aim_ctx, epg, display_name=dname)
|
||||
if is_ext:
|
||||
_, ext_net, ns = self._get_aim_nat_strategy(context.current)
|
||||
if ext_net:
|
||||
old = sorted(context.original[cisco_apic.EXTERNAL_CIDRS])
|
||||
new = sorted(context.current[cisco_apic.EXTERNAL_CIDRS])
|
||||
if old != new:
|
||||
ns.update_external_cidrs(aim_ctx, ext_net, new)
|
||||
# TODO(amitbose) Propagate name updates to AIM
|
||||
|
||||
def delete_network_precommit(self, context):
|
||||
LOG.debug("APIC AIM MD deleting network: %s", context.current)
|
||||
|
||||
session = context._plugin_context.session
|
||||
|
||||
tenant_id = context.current['tenant_id']
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': tenant_id, 'aname': tenant_aname})
|
||||
|
||||
id = context.current['id']
|
||||
name = context.current['name']
|
||||
aname = self.name_mapper.network(session, id, name)
|
||||
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
network = context.current
|
||||
|
||||
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname)
|
||||
self.aim.delete(aim_ctx, epg)
|
||||
if self._is_external(network):
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy(network)
|
||||
if not ext_net:
|
||||
return # Unmanaged external network
|
||||
ns.delete_external_network(aim_ctx, ext_net)
|
||||
# TODO(amitbose) delete L3out only if no other Neutron
|
||||
# network is using the L3out
|
||||
ns.delete_l3outside(aim_ctx, l3out)
|
||||
else:
|
||||
tenant_id = context.current['tenant_id']
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': tenant_id, 'aname': tenant_aname})
|
||||
|
||||
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname)
|
||||
self.aim.delete(aim_ctx, bd)
|
||||
id = context.current['id']
|
||||
name = context.current['name']
|
||||
aname = self.name_mapper.network(session, id, name)
|
||||
LOG.debug("Mapped network_id %(id)s with name %(name)s "
|
||||
"to %(aname)s",
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
|
||||
self.name_mapper.delete_apic_name(session, id)
|
||||
epg = aim_resource.EndpointGroup(
|
||||
tenant_name=tenant_aname,
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname)
|
||||
self.aim.delete(aim_ctx, epg)
|
||||
|
||||
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname)
|
||||
self.aim.delete(aim_ctx, bd)
|
||||
|
||||
self.name_mapper.delete_apic_name(session, id)
|
||||
|
||||
def extend_network_dict(self, session, network_db, result):
|
||||
LOG.debug("APIC AIM MD extending dict for network: %s", result)
|
||||
|
||||
# REVISIT(rkukura): Consider optimizing this method by
|
||||
# persisting the network->VRF relationship.
|
||||
|
||||
sync_state = cisco_apic.SYNC_SYNCED
|
||||
id = network_db.id
|
||||
sync_state = cisco_apic.SYNC_NOT_APPLICABLE
|
||||
dist_names = {}
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
|
||||
tenant_id = network_db.tenant_id
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': tenant_id, 'aname': tenant_aname})
|
||||
|
||||
id = network_db.id
|
||||
name = network_db.name
|
||||
aname = self.name_mapper.network(session, id, name)
|
||||
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
|
||||
aim_bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname)
|
||||
dist_names[cisco_apic.BD] = aim_bd.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_bd)
|
||||
|
||||
aim_epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname)
|
||||
dist_names[cisco_apic.EPG] = aim_epg.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_epg)
|
||||
|
||||
# See if this network is interfaced to any routers.
|
||||
rp = (session.query(l3_db.RouterPort).
|
||||
join(models_v2.Port).
|
||||
filter(models_v2.Port.network_id == network_db.id,
|
||||
l3_db.RouterPort.port_type ==
|
||||
n_constants.DEVICE_OWNER_ROUTER_INTF).first())
|
||||
if rp:
|
||||
# A network is constrained to only one subnetpool per
|
||||
# address family. To support both single and dual stack,
|
||||
# use the IPv4 address scope's VRF if it exists, and
|
||||
# otherwise use the IPv6 address scope's VRF. For dual
|
||||
# stack, the plan is for identity NAT to move IPv6 traffic
|
||||
# from the IPv4 address scope's VRF to the IPv6 address
|
||||
# scope's VRF.
|
||||
#
|
||||
# REVISIT(rkukura): Ignore subnets that are not attached
|
||||
# to any router, or maybe just do a query joining
|
||||
# RouterPorts, Ports, Subnets, SubnetPools and
|
||||
# AddressScopes.
|
||||
pool_dbs = {subnet.subnetpool for subnet in network_db.subnets
|
||||
if subnet.subnetpool}
|
||||
scope_id = None
|
||||
for pool_db in pool_dbs:
|
||||
if pool_db.ip_version == 4:
|
||||
scope_id = pool_db.address_scope_id
|
||||
break
|
||||
elif pool_db.ip_version == 6:
|
||||
scope_id = pool_db.address_scope_id
|
||||
if scope_id:
|
||||
scope_db = self._scope_by_id(session, scope_id)
|
||||
scope_tenant_id = scope_db.tenant_id
|
||||
vrf_tenant_aname = self.name_mapper.tenant(session,
|
||||
scope_tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': scope_tenant_id, 'aname': vrf_tenant_aname})
|
||||
|
||||
vrf_aname = self.name_mapper.address_scope(session, scope_id)
|
||||
LOG.debug("Mapped address_scope_id %(id)s to %(aname)s",
|
||||
{'id': scope_id, 'aname': vrf_aname})
|
||||
else:
|
||||
vrf_tenant_aname = tenant_aname # REVISIT(rkukura)
|
||||
vrf_aname = DEFAULT_VRF_NAME
|
||||
if network_db.external is not None:
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy_db(session,
|
||||
network_db)
|
||||
if ext_net:
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, ext_net)
|
||||
kls = {aim_resource.BridgeDomain: cisco_apic.BD,
|
||||
aim_resource.EndpointGroup: cisco_apic.EPG,
|
||||
aim_resource.VRF: cisco_apic.VRF}
|
||||
for o in (ns.get_l3outside_resources(aim_ctx, l3out) or []):
|
||||
if type(o) in kls:
|
||||
dist_names[kls[type(o)]] = o.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state,
|
||||
o)
|
||||
else:
|
||||
vrf_tenant_aname = COMMON_TENANT_NAME
|
||||
vrf_aname = UNROUTED_VRF_NAME
|
||||
aim_vrf = aim_resource.VRF(tenant_name=vrf_tenant_aname,
|
||||
name=vrf_aname)
|
||||
dist_names[cisco_apic.VRF] = aim_vrf.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_vrf)
|
||||
# REVISIT(rkukura): Consider optimizing this method by
|
||||
# persisting the network->VRF relationship.
|
||||
|
||||
tenant_id = network_db.tenant_id
|
||||
tenant_aname = self.name_mapper.tenant(session, tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': tenant_id, 'aname': tenant_aname})
|
||||
|
||||
name = network_db.name
|
||||
aname = self.name_mapper.network(session, id, name)
|
||||
LOG.debug("Mapped network_id %(id)s with name %(name)s "
|
||||
"to %(aname)s",
|
||||
{'id': id, 'name': name, 'aname': aname})
|
||||
|
||||
aim_bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
|
||||
name=aname)
|
||||
dist_names[cisco_apic.BD] = aim_bd.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_bd)
|
||||
|
||||
aim_epg = aim_resource.EndpointGroup(
|
||||
tenant_name=tenant_aname,
|
||||
app_profile_name=self.ap_name,
|
||||
name=aname)
|
||||
dist_names[cisco_apic.EPG] = aim_epg.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_epg)
|
||||
|
||||
# See if this network is interfaced to any routers.
|
||||
rp = (session.query(l3_db.RouterPort).
|
||||
join(models_v2.Port).
|
||||
filter(models_v2.Port.network_id == network_db.id,
|
||||
l3_db.RouterPort.port_type ==
|
||||
n_constants.DEVICE_OWNER_ROUTER_INTF).first())
|
||||
if rp:
|
||||
# A network is constrained to only one subnetpool per
|
||||
# address family. To support both single and dual stack,
|
||||
# use the IPv4 address scope's VRF if it exists, and
|
||||
# otherwise use the IPv6 address scope's VRF. For dual
|
||||
# stack, the plan is for identity NAT to move IPv6 traffic
|
||||
# from the IPv4 address scope's VRF to the IPv6 address
|
||||
# scope's VRF.
|
||||
#
|
||||
# REVISIT(rkukura): Ignore subnets that are not attached
|
||||
# to any router, or maybe just do a query joining
|
||||
# RouterPorts, Ports, Subnets, SubnetPools and
|
||||
# AddressScopes.
|
||||
pool_dbs = {subnet.subnetpool
|
||||
for subnet in network_db.subnets
|
||||
if subnet.subnetpool}
|
||||
scope_id = None
|
||||
for pool_db in pool_dbs:
|
||||
if pool_db.ip_version == 4:
|
||||
scope_id = pool_db.address_scope_id
|
||||
break
|
||||
elif pool_db.ip_version == 6:
|
||||
scope_id = pool_db.address_scope_id
|
||||
if scope_id:
|
||||
scope_db = self._scope_by_id(session, scope_id)
|
||||
scope_tenant_id = scope_db.tenant_id
|
||||
vrf_tenant_aname = self.name_mapper.tenant(
|
||||
session, scope_tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': scope_tenant_id,
|
||||
'aname': vrf_tenant_aname})
|
||||
|
||||
vrf_aname = self.name_mapper.address_scope(session,
|
||||
scope_id)
|
||||
LOG.debug("Mapped address_scope_id %(id)s "
|
||||
"to %(aname)s",
|
||||
{'id': scope_id, 'aname': vrf_aname})
|
||||
else:
|
||||
vrf_tenant_aname = tenant_aname # REVISIT(rkukura)
|
||||
vrf_aname = DEFAULT_VRF_NAME
|
||||
else:
|
||||
vrf_tenant_aname = COMMON_TENANT_NAME
|
||||
vrf_aname = UNROUTED_VRF_NAME
|
||||
aim_vrf = aim_resource.VRF(tenant_name=vrf_tenant_aname,
|
||||
name=vrf_aname)
|
||||
dist_names[cisco_apic.VRF] = aim_vrf.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_vrf)
|
||||
|
||||
result[cisco_apic.DIST_NAMES] = dist_names
|
||||
result[cisco_apic.SYNC_STATE] = sync_state
|
||||
|
||||
def create_subnet_precommit(self, context):
|
||||
LOG.debug("APIC AIM MD creating subnet: %s", context.current)
|
||||
# Neutron subnets are mapped to AIM Subnets as they are added
|
||||
# to routers as interfaces.
|
||||
subnet = context.current
|
||||
network_id = subnet['network_id']
|
||||
network_db = self.plugin._get_network(context._plugin_context,
|
||||
network_id)
|
||||
if network_db.external is not None and subnet['gateway_ip']:
|
||||
session = context._plugin_context.session
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy_db(session,
|
||||
network_db)
|
||||
if not ext_net:
|
||||
return # Unmanaged external network
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
ns.create_subnet(aim_ctx, l3out,
|
||||
self._subnet_to_gw_ip_mask(subnet))
|
||||
|
||||
# Neutron subnets in non-external networks are mapped to AIM
|
||||
# Subnets as they are added to routers as interfaces.
|
||||
|
||||
def update_subnet_precommit(self, context):
|
||||
LOG.debug("APIC AIM MD updating subnet: %s", context.current)
|
||||
network_id = context.current['network_id']
|
||||
network_db = self.plugin._get_network(context._plugin_context,
|
||||
network_id)
|
||||
is_ext = network_db.external is not None
|
||||
|
||||
if context.current['name'] != context.original['name']:
|
||||
if (not is_ext and
|
||||
context.current['name'] != context.original['name']):
|
||||
session = context._plugin_context.session
|
||||
|
||||
network_id = context.current['network_id']
|
||||
network_db = self.plugin._get_network(context._plugin_context,
|
||||
network_id)
|
||||
|
||||
network_tenant_id = network_db.tenant_id
|
||||
network_tenant_id = network_db['tenant_id']
|
||||
network_tenant_aname = self.name_mapper.tenant(session,
|
||||
network_tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
@@ -360,16 +437,43 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
bd_name=network_aname,
|
||||
gw_ip_mask=gw_ip_mask)
|
||||
self.aim.update(aim_ctx, aim_subnet, display_name=dname)
|
||||
elif (is_ext and context.current['gateway_ip'] !=
|
||||
context.original['gateway_ip']):
|
||||
|
||||
session = context._plugin_context.session
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy_db(session,
|
||||
network_db)
|
||||
if not ext_net:
|
||||
return # Unmanaged external network
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
ns.delete_subnet(aim_ctx, l3out,
|
||||
self._subnet_to_gw_ip_mask(context.original))
|
||||
ns.create_subnet(aim_ctx, l3out,
|
||||
self._subnet_to_gw_ip_mask(context.current))
|
||||
|
||||
def delete_subnet_precommit(self, context):
|
||||
LOG.debug("APIC AIM MD deleting subnet: %s", context.current)
|
||||
# Neutron subnets are unmapped from AIM Subnets as they are
|
||||
# removed from routers.
|
||||
subnet = context.current
|
||||
network_id = context.current['network_id']
|
||||
network_db = self.plugin._get_network(context._plugin_context,
|
||||
network_id)
|
||||
if network_db.external is not None and subnet['gateway_ip']:
|
||||
session = context._plugin_context.session
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy_db(session,
|
||||
network_db)
|
||||
if not ext_net:
|
||||
return # Unmanaged external network
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
ns.delete_subnet(aim_ctx, l3out,
|
||||
self._subnet_to_gw_ip_mask(subnet))
|
||||
|
||||
# Non-external neutron subnets are unmapped from AIM Subnets as
|
||||
# they are removed from routers.
|
||||
|
||||
def extend_subnet_dict(self, session, subnet_db, result):
|
||||
LOG.debug("APIC AIM MD extending dict for subnet: %s", result)
|
||||
|
||||
sync_state = cisco_apic.SYNC_SYNCED
|
||||
sync_state = cisco_apic.SYNC_NOT_APPLICABLE
|
||||
dist_names = {}
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
|
||||
@@ -379,25 +483,39 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
network_db = (session.query(models_v2.Network).
|
||||
filter_by(id=network_id).
|
||||
one())
|
||||
network_tenant_id = network_db.tenant_id
|
||||
if network_db.external is not None:
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy_db(session,
|
||||
network_db)
|
||||
if ext_net:
|
||||
sub = ns.get_subnet(aim_ctx, l3out,
|
||||
self._subnet_to_gw_ip_mask(subnet_db))
|
||||
if sub:
|
||||
dist_names[cisco_apic.SUBNET] = sub.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, sub)
|
||||
else:
|
||||
network_tenant_id = network_db.tenant_id
|
||||
|
||||
network_tenant_aname = self.name_mapper.tenant(session,
|
||||
network_tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': network_tenant_id, 'aname': network_tenant_aname})
|
||||
network_tenant_aname = self.name_mapper.tenant(session,
|
||||
network_tenant_id)
|
||||
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
|
||||
{'id': network_tenant_id,
|
||||
'aname': network_tenant_aname})
|
||||
|
||||
network_aname = self.name_mapper.network(session, network_id)
|
||||
LOG.debug("Mapped network_id %(id)s to %(aname)s",
|
||||
{'id': network_id, 'aname': network_aname})
|
||||
network_aname = self.name_mapper.network(session, network_id)
|
||||
LOG.debug("Mapped network_id %(id)s to %(aname)s",
|
||||
{'id': network_id, 'aname': network_aname})
|
||||
|
||||
subnet_id = subnet_db.id
|
||||
for gw_ip, router_id in self._subnet_router_ips(session, subnet_id):
|
||||
gw_ip_mask = gw_ip + '/' + prefix_len
|
||||
aim_subnet = aim_resource.Subnet(tenant_name=network_tenant_aname,
|
||||
bd_name=network_aname,
|
||||
gw_ip_mask=gw_ip_mask)
|
||||
dist_names[gw_ip] = aim_subnet.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_subnet)
|
||||
subnet_id = subnet_db.id
|
||||
for gw_ip, router_id in self._subnet_router_ips(session,
|
||||
subnet_id):
|
||||
gw_ip_mask = gw_ip + '/' + prefix_len
|
||||
aim_subnet = aim_resource.Subnet(
|
||||
tenant_name=network_tenant_aname,
|
||||
bd_name=network_aname,
|
||||
gw_ip_mask=gw_ip_mask)
|
||||
dist_names[gw_ip] = aim_subnet.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state,
|
||||
aim_subnet)
|
||||
|
||||
result[cisco_apic.DIST_NAMES] = dist_names
|
||||
result[cisco_apic.SYNC_STATE] = sync_state
|
||||
@@ -546,13 +664,16 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
bi_filters=[ANY_FILTER_NAME])
|
||||
self.aim.create(aim_ctx, subject)
|
||||
|
||||
# External-gateway information about the router will be handled
|
||||
# when the first router-interface port is created
|
||||
|
||||
# REVISIT(rkukura): Consider having L3 plugin extend router
|
||||
# dict again after calling this function.
|
||||
sync_state = cisco_apic.SYNC_SYNCED
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, contract)
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, subject)
|
||||
current[cisco_apic.DIST_NAMES] = {cisco_apic_l3.CONTRACT: contract.dn,
|
||||
cisco_apic_l3.CONTRACT_SUBJECT:
|
||||
current[cisco_apic.DIST_NAMES] = {a_l3.CONTRACT: contract.dn,
|
||||
a_l3.CONTRACT_SUBJECT:
|
||||
subject.dn}
|
||||
current[cisco_apic.SYNC_STATE] = sync_state
|
||||
|
||||
@@ -630,6 +751,29 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
gw_ip_mask=gw_ip_mask)
|
||||
self.aim.update(aim_ctx, aim_subnet, display_name=dname)
|
||||
|
||||
def is_diff(old, new, attr):
|
||||
return sorted(old[attr]) != sorted(new[attr])
|
||||
|
||||
old_net = (original.get('external_gateway_info') or
|
||||
{}).get('network_id')
|
||||
new_net = (current.get('external_gateway_info') or
|
||||
{}).get('network_id')
|
||||
if ((old_net != new_net or
|
||||
is_diff(original, current, a_l3.EXTERNAL_PROVIDED_CONTRACTS) or
|
||||
is_diff(original, current, a_l3.EXTERNAL_CONSUMED_CONTRACTS)) and
|
||||
self._get_router_intf_count(context.session, current)):
|
||||
|
||||
if old_net == new_net:
|
||||
old_net = None
|
||||
old_net = self.plugin.get_network(context,
|
||||
old_net) if old_net else None
|
||||
new_net = self.plugin.get_network(context,
|
||||
new_net) if new_net else None
|
||||
self._manage_external_connectivity(context,
|
||||
current, old_net, new_net)
|
||||
|
||||
# REVISIT(rkukura): Update extension attributes?
|
||||
|
||||
def delete_router(self, context, current):
|
||||
LOG.debug("APIC AIM MD deleting router: %s", current)
|
||||
|
||||
@@ -649,6 +793,12 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
|
||||
# Handling of external-gateway information is done when the router
|
||||
# interface ports are deleted, or the external-gateway is
|
||||
# cleared through update_router. At least one of those need
|
||||
# to happen before a router can be deleted, so we don't
|
||||
# need to do anything special when router is deleted
|
||||
|
||||
subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
|
||||
contract_name=aname,
|
||||
name=ROUTER_SUBJECT_NAME)
|
||||
@@ -684,13 +834,13 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
|
||||
aim_contract = aim_resource.Contract(tenant_name=tenant_aname,
|
||||
name=aname)
|
||||
dist_names[cisco_apic_l3.CONTRACT] = aim_contract.dn
|
||||
dist_names[a_l3.CONTRACT] = aim_contract.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_contract)
|
||||
|
||||
aim_subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
|
||||
contract_name=aname,
|
||||
name=ROUTER_SUBJECT_NAME)
|
||||
dist_names[cisco_apic_l3.CONTRACT_SUBJECT] = aim_subject.dn
|
||||
dist_names[a_l3.CONTRACT_SUBJECT] = aim_subject.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_subject)
|
||||
|
||||
# REVISIT(rkukura): Consider moving the SubnetPool query below
|
||||
@@ -770,7 +920,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
|
||||
aim_vrf = aim_resource.VRF(tenant_name=vrf_tenant_aname,
|
||||
name=vrf_aname)
|
||||
dist_names[cisco_apic_l3.VRF] = aim_vrf.dn
|
||||
dist_names[a_l3.VRF] = aim_vrf.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, aim_vrf)
|
||||
|
||||
result[cisco_apic.DIST_NAMES] = dist_names
|
||||
@@ -876,32 +1026,41 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
different_subnet = True
|
||||
if different_router and different_subnet:
|
||||
raise UnsupportedRoutingTopology()
|
||||
else:
|
||||
|
||||
# Number of existing router interface ports excluding the
|
||||
# one we are adding right now
|
||||
intf_count = self._get_router_intf_count(context.session, router)
|
||||
|
||||
if not intfs or not intf_count:
|
||||
address_scope_id = self._get_address_scope_id_for_subnets(
|
||||
context, subnets)
|
||||
|
||||
if not intfs:
|
||||
# No existing interfaces, so enable routing for BD and set
|
||||
# its VRF.
|
||||
|
||||
subnetpool_id = subnets[0]['subnetpool_id']
|
||||
if subnetpool_id:
|
||||
subnetpool_db = self.plugin._get_subnetpool(context,
|
||||
subnetpool_id)
|
||||
address_scope_id = subnetpool_db.address_scope_id
|
||||
if address_scope_id:
|
||||
vrf_aname = self.name_mapper.address_scope(
|
||||
session, address_scope_id)
|
||||
LOG.debug("Mapped address_scope_id %(id)s to %(aname)s",
|
||||
{'id': id, 'aname': vrf_aname})
|
||||
else:
|
||||
vrf_aname = self._get_default_vrf(
|
||||
aim_ctx, router_tenant_aname).name
|
||||
if address_scope_id != NO_ADDR_SCOPE:
|
||||
vrf_aname = self.name_mapper.address_scope(session,
|
||||
address_scope_id)
|
||||
LOG.debug("Mapped address_scope_id %(id)s to %(aname)s",
|
||||
{'id': id, 'aname': vrf_aname})
|
||||
else:
|
||||
vrf_aname = self._get_default_vrf(
|
||||
aim_ctx, router_tenant_aname).name
|
||||
vrf_aname = self._get_default_vrf(aim_ctx,
|
||||
router_tenant_aname).name
|
||||
|
||||
aim_bd = aim_resource.BridgeDomain(
|
||||
tenant_name=network_tenant_aname, name=network_aname)
|
||||
aim_bd = self.aim.update(aim_ctx, aim_bd, enable_routing=True,
|
||||
vrf_name=vrf_aname)
|
||||
|
||||
# If this is first interface-port, then that will determine
|
||||
# the VRF for this router. Setup exteral-connectivity for VRF
|
||||
# if external-gateway is set.
|
||||
if (router.gw_port_id and not intf_count):
|
||||
net = self.plugin.get_network(context,
|
||||
router.gw_port.network_id)
|
||||
self._manage_external_connectivity(
|
||||
context, router, None, net, address_scope_id=address_scope_id)
|
||||
|
||||
def remove_router_interface(self, context, router_id, port_db, subnets):
|
||||
LOG.debug("APIC AIM MD removing subnets %(subnets)s from router "
|
||||
"%(router)s as interface port %(port)s",
|
||||
@@ -986,6 +1145,19 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
aim_bd = self.aim.update(aim_ctx, aim_bd, enable_routing=False,
|
||||
vrf_name=vrf.name)
|
||||
|
||||
# If this was the last interface-port, then we no longer know
|
||||
# the VRF for this router. So update external-conectivity to
|
||||
# exclude this router.
|
||||
if (router_db.gw_port_id and
|
||||
not self._get_router_intf_count(context.session, router_db)):
|
||||
net = self.plugin.get_network(context,
|
||||
router_db.gw_port.network_id)
|
||||
address_scope_id = self._get_address_scope_id_for_subnets(
|
||||
context, subnets)
|
||||
self._manage_external_connectivity(
|
||||
context, router_db, net, None,
|
||||
address_scope_id=address_scope_id)
|
||||
|
||||
def bind_port(self, context):
|
||||
LOG.debug("Attempting to bind port %(port)s on network %(net)s",
|
||||
{'port': context.current['id'],
|
||||
@@ -1012,6 +1184,27 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
self._agent_bind_port(context, ofcst.AGENT_TYPE_OPFLEX_OVS,
|
||||
self._opflex_bind_port)
|
||||
|
||||
def create_floatingip(self, context, current):
|
||||
if current['port_id']:
|
||||
current['status'] = n_constants.FLOATINGIP_STATUS_ACTIVE
|
||||
self._notify_port_update(context, current['port_id'])
|
||||
else:
|
||||
current['status'] = n_constants.FLOATINGIP_STATUS_DOWN
|
||||
|
||||
def update_floatingip(self, context, original, current):
|
||||
if (original['port_id'] and
|
||||
original['port_id'] != current['port_id']):
|
||||
self._notify_port_update(context, original['port_id'])
|
||||
if current['port_id']:
|
||||
current['status'] = n_constants.FLOATINGIP_STATUS_ACTIVE
|
||||
self._notify_port_update(context, current['port_id'])
|
||||
else:
|
||||
current['status'] = n_constants.FLOATINGIP_STATUS_DOWN
|
||||
|
||||
def delete_floatingip(self, context, current):
|
||||
if current['port_id']:
|
||||
self._notify_port_update(context, current['port_id'])
|
||||
|
||||
def _agent_bind_port(self, context, agent_type, bind_strategy):
|
||||
for agent in context.host_agents(agent_type):
|
||||
LOG.debug("Checking agent: %s", agent)
|
||||
@@ -1081,7 +1274,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
sync_state = cisco_apic.SYNC_ERROR
|
||||
elif status.is_build() and sync_state is not cisco_apic.SYNC_ERROR:
|
||||
sync_state = cisco_apic.SYNC_BUILD
|
||||
return sync_state
|
||||
return (cisco_apic.SYNC_SYNCED
|
||||
if sync_state is cisco_apic.SYNC_NOT_APPLICABLE
|
||||
else sync_state)
|
||||
|
||||
def _ip_for_subnet(self, subnet, fixed_ips):
|
||||
subnet_id = subnet['id']
|
||||
@@ -1150,3 +1345,210 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
|
||||
phys = [x.name for x in
|
||||
self.aim.find(aim_ctx, aim_resource.PhysicalDomain)]
|
||||
return vmms, phys
|
||||
|
||||
def _is_external(self, network):
|
||||
return network.get('router:external')
|
||||
|
||||
def _nat_type_to_strategy(self, nat_type):
|
||||
ns_cls = nat_strategy.DistributedNatStrategy
|
||||
if nat_type == '':
|
||||
ns_cls = nat_strategy.NoNatStrategy
|
||||
elif nat_type == 'edge':
|
||||
ns_cls = nat_strategy.EdgeNatStrategy
|
||||
ns = ns_cls(self.aim)
|
||||
ns.app_profile_name = self.ap_name
|
||||
return ns
|
||||
|
||||
def _get_aim_nat_strategy(self, network):
|
||||
if not self._is_external(network):
|
||||
return None, None, None
|
||||
ext_net_dn = (network.get(cisco_apic.DIST_NAMES, {})
|
||||
.get(cisco_apic.EXTERNAL_NETWORK))
|
||||
if not ext_net_dn:
|
||||
return None, None, None
|
||||
nat_type = network.get(cisco_apic.NAT_TYPE)
|
||||
aim_ext_net = aim_resource.ExternalNetwork.from_dn(ext_net_dn)
|
||||
aim_l3out = aim_resource.L3Outside(
|
||||
tenant_name=aim_ext_net.tenant_name, name=aim_ext_net.l3out_name)
|
||||
return aim_l3out, aim_ext_net, self._nat_type_to_strategy(nat_type)
|
||||
|
||||
def _get_aim_nat_strategy_db(self, session, network_db):
|
||||
if network_db.external is not None:
|
||||
extn_db = extension_db.ExtensionDbMixin()
|
||||
extn_info = extn_db.get_network_extn_db(session, network_db.id)
|
||||
if extn_info and cisco_apic.EXTERNAL_NETWORK in extn_info:
|
||||
dn = extn_info[cisco_apic.EXTERNAL_NETWORK]
|
||||
a_ext_net = aim_resource.ExternalNetwork.from_dn(dn)
|
||||
a_l3out = aim_resource.L3Outside(
|
||||
tenant_name=a_ext_net.tenant_name,
|
||||
name=a_ext_net.l3out_name)
|
||||
ns = self._nat_type_to_strategy(
|
||||
extn_info[cisco_apic.NAT_TYPE])
|
||||
return a_l3out, a_ext_net, ns
|
||||
return None, None, None
|
||||
|
||||
def _subnet_to_gw_ip_mask(self, subnet):
|
||||
return aim_resource.Subnet.to_gw_ip_mask(
|
||||
subnet['gateway_ip'], int(subnet['cidr'].split('/')[1]))
|
||||
|
||||
def _get_router_intf_count(self, session, router):
|
||||
return (session.query(l3_db.RouterPort)
|
||||
.filter(l3_db.RouterPort.router_id == router['id'])
|
||||
.filter(l3_db.RouterPort.port_type ==
|
||||
n_constants.DEVICE_OWNER_ROUTER_INTF)
|
||||
.count())
|
||||
|
||||
def _get_address_scope_id_for_subnets(self, context, subnets):
|
||||
# Assuming that all the subnets provided are consistent w.r.t.
|
||||
# address-scope, use the first available subnet to determine
|
||||
# address-scope. If subnets is a mix of v4 and v6 subnets,
|
||||
# then v4 subnets are given preference.
|
||||
subnets = sorted(subnets, key=lambda x: x['ip_version'])
|
||||
|
||||
address_scope_id = NO_ADDR_SCOPE
|
||||
subnetpool_id = subnets[0]['subnetpool_id'] if subnets else None
|
||||
if subnetpool_id:
|
||||
subnetpool_db = self.plugin._get_subnetpool(context,
|
||||
subnetpool_id)
|
||||
address_scope_id = (subnetpool_db.address_scope_id or
|
||||
NO_ADDR_SCOPE)
|
||||
return address_scope_id
|
||||
|
||||
def _get_address_scope_id_for_router(self, session, router):
|
||||
scope_id = NO_ADDR_SCOPE
|
||||
for pool_db in (session.query(models_v2.SubnetPool)
|
||||
.join(models_v2.Subnet,
|
||||
models_v2.Subnet.subnetpool_id ==
|
||||
models_v2.SubnetPool.id)
|
||||
.join(models_v2.IPAllocation)
|
||||
.join(models_v2.Port)
|
||||
.join(l3_db.RouterPort)
|
||||
.filter(l3_db.RouterPort.router_id == router['id'],
|
||||
l3_db.RouterPort.port_type ==
|
||||
n_constants.DEVICE_OWNER_ROUTER_INTF)
|
||||
.filter(models_v2.SubnetPool.address_scope_id is not
|
||||
None)
|
||||
.distinct()):
|
||||
if pool_db.ip_version == 4:
|
||||
scope_id = pool_db.address_scope_id
|
||||
break
|
||||
elif pool_db.ip_version == 6:
|
||||
scope_id = pool_db.address_scope_id
|
||||
return scope_id
|
||||
|
||||
def _get_other_routers_in_same_vrf(self, session, router,
|
||||
address_scope_id=None):
|
||||
as_id = (address_scope_id or
|
||||
self._get_address_scope_id_for_router(session, router))
|
||||
if as_id != NO_ADDR_SCOPE:
|
||||
rtr_dbs = (session.query(l3_db.Router)
|
||||
.join(l3_db.RouterPort)
|
||||
.join(models_v2.Port)
|
||||
.join(models_v2.IPAllocation)
|
||||
.join(models_v2.Subnet)
|
||||
.join(models_v2.SubnetPool,
|
||||
models_v2.Subnet.subnetpool_id ==
|
||||
models_v2.SubnetPool.id)
|
||||
.filter(l3_db.RouterPort.port_type ==
|
||||
n_constants.DEVICE_OWNER_ROUTER_INTF)
|
||||
.filter(models_v2.SubnetPool.address_scope_id ==
|
||||
as_id)
|
||||
.distinct())
|
||||
else:
|
||||
qry = (session.query(l3_db.Router)
|
||||
.join(l3_db.RouterPort)
|
||||
.join(models_v2.Port)
|
||||
.join(models_v2.IPAllocation)
|
||||
.join(models_v2.Subnet)
|
||||
.filter(l3_db.Router.tenant_id == router['tenant_id'])
|
||||
.filter(l3_db.RouterPort.port_type ==
|
||||
n_constants.DEVICE_OWNER_ROUTER_INTF))
|
||||
rtr_dbs = (qry.filter(models_v2.Subnet.subnetpool_id.is_(None))
|
||||
.distinct())
|
||||
rtr_dbs = {r.id: r for r in rtr_dbs}
|
||||
rtr_dbs_1 = (qry.join(models_v2.SubnetPool,
|
||||
models_v2.Subnet.subnetpool_id ==
|
||||
models_v2.SubnetPool.id)
|
||||
.filter(models_v2.SubnetPool.address_scope_id.is_(
|
||||
None))
|
||||
.distinct())
|
||||
rtr_dbs.update({r.id: r for r in rtr_dbs_1})
|
||||
rtr_dbs = rtr_dbs.values()
|
||||
|
||||
return (as_id, [r for r in rtr_dbs if r.id != router['id']])
|
||||
|
||||
def _manage_external_connectivity(self, context, router, old_network,
|
||||
new_network, address_scope_id=None):
|
||||
session = context.session
|
||||
aim_ctx = aim_context.AimContext(db_session=session)
|
||||
as_id, other_rtr_db = self._get_other_routers_in_same_vrf(
|
||||
session, router, address_scope_id=address_scope_id)
|
||||
ext_db = extension_db.ExtensionDbMixin()
|
||||
|
||||
if as_id != NO_ADDR_SCOPE:
|
||||
as_db = (session.query(address_scope_db.AddressScope)
|
||||
.filter_by(id=as_id).one())
|
||||
vrf_tenant_id = as_db.tenant_id
|
||||
vrf_aname = self.name_mapper.address_scope(session, as_id)
|
||||
else:
|
||||
vrf_tenant_id = router['tenant_id']
|
||||
vrf_aname = DEFAULT_VRF_NAME
|
||||
|
||||
tenant_aname = self.name_mapper.tenant(session, vrf_tenant_id)
|
||||
vrf = aim_resource.VRF(tenant_name=tenant_aname,
|
||||
name=vrf_aname)
|
||||
|
||||
prov = set()
|
||||
cons = set()
|
||||
|
||||
def update_contracts(r_id, r_name):
|
||||
contract_aname = self.name_mapper.router(session, r_id, r_name)
|
||||
prov.add(contract_aname)
|
||||
cons.add(contract_aname)
|
||||
|
||||
r_info = ext_db.get_router_extn_db(session, r_id)
|
||||
prov.update(r_info[a_l3.EXTERNAL_PROVIDED_CONTRACTS])
|
||||
cons.update(r_info[a_l3.EXTERNAL_CONSUMED_CONTRACTS])
|
||||
|
||||
if old_network:
|
||||
_, ext_net, ns = self._get_aim_nat_strategy(old_network)
|
||||
if ext_net:
|
||||
rtr_old = [r for r in other_rtr_db
|
||||
if (r.gw_port_id and
|
||||
r.gw_port.network_id == old_network['id'])]
|
||||
prov = set()
|
||||
cons = set()
|
||||
for r in rtr_old:
|
||||
update_contracts(r.id, r.name)
|
||||
|
||||
if rtr_old:
|
||||
ext_net.provided_contract_names = sorted(prov)
|
||||
ext_net.consumed_contract_names = sorted(cons)
|
||||
ns.connect_vrf(aim_ctx, ext_net, vrf)
|
||||
else:
|
||||
ns.disconnect_vrf(aim_ctx, ext_net, vrf)
|
||||
if new_network:
|
||||
_, ext_net, ns = self._get_aim_nat_strategy(new_network)
|
||||
if ext_net:
|
||||
rtr_new = [r for r in other_rtr_db
|
||||
if (r.gw_port_id and
|
||||
r.gw_port.network_id == new_network['id'])]
|
||||
prov = set()
|
||||
cons = set()
|
||||
for r in rtr_new:
|
||||
update_contracts(r.id, r.name)
|
||||
update_contracts(router['id'], router['name'])
|
||||
ext_net.provided_contract_names = sorted(prov)
|
||||
ext_net.consumed_contract_names = sorted(cons)
|
||||
ns.connect_vrf(aim_ctx, ext_net, vrf)
|
||||
|
||||
def _is_port_bound(self, port):
|
||||
return port.get(portbindings.VIF_TYPE) not in [
|
||||
portbindings.VIF_TYPE_UNBOUND,
|
||||
portbindings.VIF_TYPE_BINDING_FAILED]
|
||||
|
||||
def _notify_port_update(self, plugin_context, port_id):
|
||||
port = self.plugin.get_port(plugin_context, port_id)
|
||||
if self._is_port_bound(port):
|
||||
LOG.debug("APIC notify port %s", port['id'])
|
||||
self.notifier.port_update(plugin_context, port)
|
||||
|
||||
Reference in New Issue
Block a user