Merge pull request #58 from akanda/stable/juno

Stable/juno
Ryan Petrello 2015-03-23 15:01:17 -04:00
commit ddc1dbf209
5 changed files with 428 additions and 478 deletions


@@ -23,13 +23,12 @@ import random
from neutron.api.v2 import attributes
from neutron.common.config import cfg
from neutron.common import exceptions as q_exc
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2 as qmodels
from neutron.db import l3_db
from neutron import manager
from sqlalchemy.orm import exc
from akanda.neutron.db import models_v2 as akmodels
from neutron.plugins.common import constants
IPV6_ASSIGNMENT_ATTEMPTS = 1000
LOG = logging.getLogger(__name__)
@@ -50,29 +49,7 @@ akanda_opts = [
cfg.CONF.register_opts(akanda_opts)
SUPPORTED_EXTENSIONS = [
'dhportforward', 'dhaddressgroup', 'dhaddressentry', 'dhfilterrule',
'dhportalias', 'dhrouterstatus'
]
# Provide a list of the default port aliases to be
# created for a tenant.
# FIXME(dhellmann): This list should come from
# a configuration file somewhere.
DEFAULT_PORT_ALIASES = [
('tcp', 0, 'Any TCP'),
('udp', 0, 'Any UDP'),
('tcp', 22, 'ssh'),
('udp', 53, 'DNS'),
('tcp', 80, 'HTTP'),
('tcp', 443, 'HTTPS'),
]
# Provide a list of the default address entries
# to be created for a tenant.
# FIXME(dhellmann): This list should come from
# a configuration file somewhere.
DEFAULT_ADDRESS_GROUPS = [
('Any', [('Any', '0.0.0.0/0')]),
'dhrouterstatus',
]
@@ -109,24 +86,6 @@ def sync_subnet_gateway_port(f):
return wrapper
def auto_add_other_resources(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
LOG.debug('auto_add_other_resources')
retval = f(self, context, *args, **kwargs)
if not context.is_admin:
_auto_add_port_aliases(context)
_auto_add_address_groups(context)
return retval
return wrapper
def monkey_patch_ipv6_generator():
cls = db_base_plugin_v2.NeutronDbPluginV2
cls._generate_mac = _wrap_generate_mac(cls._generate_mac)
cls._generate_ip = _wrap_generate_ip(cls, cls._generate_ip)
def check_subnet_cidr_meets_policy(context, subnet):
if context.is_admin:
return
@@ -178,7 +137,8 @@ def _add_subnet_to_router(context, subnet):
if not subnet.get('gateway_ip'):
return
plugin = manager.NeutronManager.get_plugin()
service_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
router_q = context.session.query(l3_db.Router)
router_q = router_q.filter_by(tenant_id=context.tenant_id)
@@ -191,9 +151,9 @@ def _add_subnet_to_router(context, subnet):
'name': 'ak-%s' % subnet['tenant_id'],
'admin_state_up': True
}
router = plugin.create_router(context, {'router': router_args})
router = service_plugin.create_router(context, {'router': router_args})
if not _update_internal_gateway_port_ip(context, router['id'], subnet):
plugin.add_router_interface(context.elevated(),
service_plugin.add_router_interface(context.elevated(),
router['id'],
{'subnet_id': subnet['id']})
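For context, the substantive change in this hunk and the one below is that router operations are now resolved through the L3 router service plugin rather than the core plugin. A minimal sketch of that lookup, reusing only names already imported in this module (the helper itself is hypothetical and not part of the commit):

from neutron import manager
from neutron.plugins.common import constants

def _get_l3_service_plugin():
    # Hypothetical helper mirroring the lookup both hunks perform: returns
    # the L3 router service plugin, or None if no L3 plugin is loaded.
    return manager.NeutronManager.get_service_plugins().get(
        constants.L3_ROUTER_NAT)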
@@ -233,6 +193,8 @@ def _update_internal_gateway_port_ip(context, router_id, subnet):
]
plugin = manager.NeutronManager.get_plugin()
service_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
for index, ip in enumerate(fixed_ips):
if ip['subnet_id'] == subnet['id']:
@@ -245,11 +207,12 @@ def _update_internal_gateway_port_ip(context, router_id, subnet):
break
else:
try:
plugin._check_for_dup_router_subnet(
service_plugin._check_for_dup_router_subnet(
context,
routerport.router,
subnet['network_id'],
subnet
subnet['id'],
subnet['cidr']
)
except:
LOG.info(
@@ -304,7 +267,9 @@ def _add_ipv6_subnet(context, network):
'name': '',
'cidr': str(candidate_cidr),
'ip_version': candidate_cidr.version,
'enable_dhcp': False,
'enable_dhcp': True,
'ipv6_address_mode': 'slaac',
'ipv6_ra_mode': 'slaac',
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED,
@@ -344,67 +309,8 @@ def _ipv6_subnet_generator(network_range, prefixlen):
yield candidate_cidr
def _wrap_generate_mac(f):
""" Adds mac_address to context object instead of patch Neutron.
Annotating the object requires a less invasive change until upstream
can be fixed in Havana. This version works in concert with
_generate_ip below to make IPv6 stateless addresses correctly.
"""
@staticmethod
@functools.wraps(f)
def wrapper(context, network_id):
mac_addr = f(context, network_id)
context.mac_address = mac_addr
return mac_addr
return wrapper
def _wrap_generate_ip(cls, f):
"""Generate an IP address.
The IP address will be generated from one of the subnets defined on
the network.
NOTE: This method is intended to patch a private method on the
Neutron base plugin. The method prefers to generate an IP from large IPv6
subnets. If a suitable subnet cannot be found, the method will fall back
to the original implementation.
"""
@staticmethod
@functools.wraps(f)
def wrapper(context, subnets):
if hasattr(context, 'mac_address'):
for subnet in subnets:
if subnet['ip_version'] != 6:
continue
elif netaddr.IPNetwork(subnet['cidr']).prefixlen <= 64:
network_id = subnet['network_id']
subnet_id = subnet['id']
candidate = _generate_ipv6_address(
subnet['cidr'],
context.mac_address
)
if cls._check_unique_ip(context, network_id, subnet_id,
candidate):
cls._allocate_specific_ip(
context,
subnet_id,
candidate
)
return {
'ip_address': candidate,
'subnet_id': subnet_id
}
# otherwise fall back to the built-in version
return f(context, subnets)
return wrapper
# Note(rods): we need to keep this method until the nsx driver is
# updated to use neutron's native support for slaac
def _generate_ipv6_address(cidr, mac_address):
network = netaddr.IPNetwork(cidr)
tokens = ['%02x' % int(t, 16) for t in mac_address.split(':')]
@@ -412,71 +318,3 @@ def _generate_ipv6_address(cidr, mac_address):
# the bit inversion is required by the RFC
return str(netaddr.IPAddress(network.value + (eui64 ^ 0x0200000000000000)))
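The EUI-64 construction in the middle of this helper is elided by the diff; as a hedged, standalone illustration of the RFC 2464-style SLAAC derivation it performs (the function name and exact steps below are assumptions for illustration, not the commit's code):

import netaddr

def slaac_address(cidr, mac_address):
    # Illustration only: derive an IPv6 address from a /64 prefix and a MAC
    # by building a modified EUI-64 interface identifier (RFC 2464/4862).
    network = netaddr.IPNetwork(cidr)
    octets = [int(t, 16) for t in mac_address.split(':')]
    # Insert ff:fe between the OUI (first three octets) and the NIC part.
    eui64 = 0
    for octet in octets[:3] + [0xff, 0xfe] + octets[3:]:
        eui64 = (eui64 << 8) | octet
    # Flip the universal/local bit; the bit inversion is required by the RFC.
    return str(netaddr.IPAddress(network.value + (eui64 ^ 0x0200000000000000)))

# Example (assumed values):
# slaac_address('fdca:3ba5:a17a:4ba3::/64', 'fa:16:3e:00:11:22')
# -> 'fdca:3ba5:a17a:4ba3:f816:3eff:fe00:1122'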
def _auto_add_address_groups(context):
"""Create default address groups if the tenant does not have them. """
for ag_name, entries in DEFAULT_ADDRESS_GROUPS:
ag_q = context.session.query(akmodels.AddressGroup)
ag_q = ag_q.filter_by(
tenant_id=context.tenant_id,
name=ag_name,
)
try:
address_group = ag_q.one()
except exc.NoResultFound:
with context.session.begin(subtransactions=True):
address_group = akmodels.AddressGroup(
name=ag_name,
tenant_id=context.tenant_id,
)
context.session.add(address_group)
LOG.debug('Created default address group %s',
address_group.name)
for entry_name, cidr in entries:
entry_q = context.session.query(akmodels.AddressEntry)
entry_q = entry_q.filter_by(
group=address_group,
name=entry_name,
cidr=cidr,
)
try:
entry_q.one()
except exc.NoResultFound:
with context.session.begin(subtransactions=True):
entry = akmodels.AddressEntry(
name=entry_name,
group=address_group,
cidr=cidr,
tenant_id=context.tenant_id,
)
context.session.add(entry)
LOG.debug(
'Created default entry for %s in address group %s',
cidr, address_group.name)
def _auto_add_port_aliases(context):
"""Create the default port aliases for the current tenant, if
they don't already exist.
"""
for protocol, port, name in DEFAULT_PORT_ALIASES:
pa_q = context.session.query(akmodels.PortAlias)
pa_q = pa_q.filter_by(
tenant_id=context.tenant_id,
port=port,
protocol=protocol,
)
try:
pa_q.one()
except exc.NoResultFound:
with context.session.begin(subtransactions=True):
alias = akmodels.PortAlias(
name=name,
protocol=protocol,
port=port,
tenant_id=context.tenant_id,
)
context.session.add(alias)
LOG.debug('Created default port alias %s', alias.name)


@@ -14,40 +14,46 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.openvswitch import ovs_neutron_plugin
from neutron.db import l3_db
from neutron.plugins.ml2 import plugin
from neutron.services.l3_router import l3_router_plugin
from akanda.neutron.plugins import decorators as akanda
from akanda.neutron.plugins import floatingip
akanda.monkey_patch_ipv6_generator()
class Ml2Plugin(floatingip.ExplicitFloatingIPAllocationMixin,
plugin.Ml2Plugin):
class OVSNeutronPluginV2(floatingip.ExplicitFloatingIPAllocationMixin,
ovs_neutron_plugin.OVSNeutronPluginV2):
_supported_extension_aliases = (
ovs_neutron_plugin.OVSNeutronPluginV2._supported_extension_aliases +
["dhportforward", "dhaddressgroup", "dhaddressentry",
"dhfilterrule", "dhportalias", "dhrouterstatus"])
plugin.Ml2Plugin._supported_extension_aliases +
["dhrouterstatus"]
)
try:
_supported_extension_aliases.remove('agent_scheduler')
except ValueError:
pass
@akanda.auto_add_other_resources
@akanda.auto_add_ipv6_subnet
def create_network(self, context, network):
return super(OVSNeutronPluginV2, self).create_network(context, network)
return super(Ml2Plugin, self).create_network(context, network)
@akanda.auto_add_subnet_to_router
def create_subnet(self, context, subnet):
return super(OVSNeutronPluginV2, self).create_subnet(context, subnet)
return super(Ml2Plugin, self).create_subnet(context, subnet)
@akanda.sync_subnet_gateway_port
def update_subnet(self, context, id, subnet):
return super(OVSNeutronPluginV2, self).update_subnet(
return super(Ml2Plugin, self).update_subnet(
context, id, subnet)
class L3RouterPlugin(l3_router_plugin.L3RouterPlugin):
# An issue in neutron is making this class inherit some
# methods from l3_dvr_db.L3_NAT_with_dvr_db_mixin. As a workaround
# we force it to use the original methods in the
# l3_db.L3_NAT_db_mixin class.
get_sync_data = l3_db.L3_NAT_db_mixin.get_sync_data
add_router_interface = l3_db.L3_NAT_db_mixin.add_router_interface
remove_router_interface = l3_db.L3_NAT_db_mixin.remove_router_interface
def list_routers_on_l3_agent(self, context, agent_id):
return {
'routers': self.get_routers(context),


@@ -0,0 +1,386 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from sqlalchemy import exc as sql_exc
from neutron.api.rpc.handlers import dhcp_rpc, l3_rpc
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import l3_db
from neutron.openstack.common import log as logging
from oslo.db import exception as db_exc
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import nsx_utils
from neutron.plugins.vmware.common import sync as nsx_sync
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.plugins.vmware.plugins import base
from neutron.plugins.vmware.plugins.base import cfg as n_cfg
from akanda.neutron.plugins import decorators as akanda
from akanda.neutron.plugins import floatingip
LOG = logging.getLogger("NeutronPlugin")
def akanda_nvp_ipv6_port_security_wrapper(f):
@functools.wraps(f)
def wrapper(lport_obj, mac_address, fixed_ips, port_security_enabled,
security_profiles, queue_id, mac_learning_enabled,
allowed_address_pairs):
f(lport_obj, mac_address, fixed_ips, port_security_enabled,
security_profiles, queue_id, mac_learning_enabled,
allowed_address_pairs)
# evaluate the state so that we only override the value when enabled;
# otherwise we preserve the underlying behavior of the NVP plugin
if port_security_enabled:
# hotfix to enable egress multicast
lport_obj['allow_egress_multicast'] = True
# TODO(mark): investigate moving away from this and wrapping
# (create|update)_port
# add link-local and subnet cidr for IPv6 temp addresses
special_ipv6_addrs = akanda.get_special_ipv6_addrs(
(p['ip_address'] for p in lport_obj['allowed_address_pairs']),
mac_address
)
lport_obj['allowed_address_pairs'].extend(
{'mac_address': mac_address, 'ip_address': addr}
for addr in special_ipv6_addrs
)
return wrapper
base.switchlib._configure_extensions = akanda_nvp_ipv6_port_security_wrapper(
base.switchlib._configure_extensions
)
class AkandaNsxSynchronizer(nsx_sync.NsxSynchronizer):
"""
The NsxSynchronizer class in Neutron runs a synchronization thread to
sync nvp objects with neutron objects. Since we don't use nvp's routers,
the sync was failing, making neutron show all the routers as if they
were in the ERROR state. To fix this behaviour we override the two methods
responsible for router synchronization in the NsxSynchronizer class
to be no-ops.
"""
def _synchronize_state(self, *args, **kwargs):
"""
Given the complexity of the NSX synchronization process, there are
about a million ways for it to go wrong. (MySQL connection issues,
transactional race conditions, etc...) In the event that an exception
is thrown, the behavior of the upstream implementation is to immediately
report the exception and kill the synchronizer thread.
This makes it very difficult to detect failure (because the thread just
ends) and the problem can only be fixed by completely restarting
neutron.
This implementation changes the behavior to repeatedly fail (and retry)
and log verbosely during failure so that the failure is more obvious
(and so that auto-recovery is a possibility if e.g., the database
comes back to life or a network-related issue becomes resolved).
"""
try:
return nsx_sync.NsxSynchronizer._synchronize_state(
self, *args, **kwargs
)
except:
LOG.exception("An error occurred while communicating with "
"NSX backend. Will retry synchronization "
"in %d seconds" % self._sync_backoff)
self._sync_backoff = min(self._sync_backoff * 2, 64)
return self._sync_backoff
else:
self._sync_backoff = 1
def _synchronize_lrouters(self, *args, **kwargs):
pass
def synchronize_router(self, *args, **kwargs):
pass
class NsxPluginV2(floatingip.ExplicitFloatingIPAllocationMixin,
base.NsxPluginV2):
"""
NsxPluginV2 is a Neutron plugin that provides L2 Virtual Network
functionality using NSX.
"""
supported_extension_aliases = (
base.NsxPluginV2.supported_extension_aliases +
akanda.SUPPORTED_EXTENSIONS
)
def __init__(self):
# In order to force this driver to not sync neutron routers with
# NSX routers, we need to use our subclass of the
# NsxSynchronizer object. Sadly, the call to the __init__ method
# of the superclass instantiates a non-customizable NsxSynchronizer
# object which spawns a sync thread that sets the state of all the
# neutron routers to ERROR when neutron starts. To avoid spawning
# that thread, we need to temporarily override the cfg object and
# disable NSX synchronization in the superclass constructor.
actual = {
'state_sync_interval': n_cfg.CONF.NSX_SYNC.state_sync_interval,
'max_random_sync_delay': n_cfg.CONF.NSX_SYNC.max_random_sync_delay,
'min_sync_req_delay': n_cfg.CONF.NSX_SYNC.min_sync_req_delay
}
for key in actual:
n_cfg.CONF.set_override(key, 0, 'NSX_SYNC')
super(NsxPluginV2, self).__init__()
for key, value in actual.items():
n_cfg.CONF.set_override(key, value, 'NSX_SYNC')
# ---------------------------------------------------------------------
# Original code:
# self._port_drivers = {
# 'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
# self._nsx_create_ext_gw_port,
# l3_db.DEVICE_OWNER_FLOATINGIP:
# self._nsx_create_fip_port,
# l3_db.DEVICE_OWNER_ROUTER_INTF:
# self._nsx_create_router_port,
# networkgw_db.DEVICE_OWNER_NET_GW_INTF:
# self._nsx_create_l2_gw_port,
# 'default': self._nsx_create_port},
# 'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
# self._nsx_delete_ext_gw_port,
# l3_db.DEVICE_OWNER_ROUTER_INTF:
# self._nsx_delete_router_port,
# l3_db.DEVICE_OWNER_FLOATINGIP:
# self._nsx_delete_fip_port,
# networkgw_db.DEVICE_OWNER_NET_GW_INTF:
# self._nsx_delete_port,
# 'default': self._nsx_delete_port}
# }
self._port_drivers = {
'create': {
l3_db.DEVICE_OWNER_FLOATINGIP: self._nsx_create_fip_port,
'default': self._nsx_create_port
},
'delete': {
l3_db.DEVICE_OWNER_FLOATINGIP: self._nsx_delete_fip_port,
'default': self._nsx_delete_port
}
}
# ---------------------------------------------------------------------
# Create a synchronizer instance for backend sync
# ---------------------------------------------------------------------
# Note(rods):
# We added this code with the only purpose to make the nsx driver use
# our subclass of the NsxSynchronizer object.
#
# DHC-2385
#
# Original code:
# self._synchronizer = sync.NsxSynchronizer(
# self, self.cluster,
# self.nsx_sync_opts.state_sync_interval,
# self.nsx_sync_opts.min_sync_req_delay,
# self.nsx_sync_opts.min_chunk_size,
# self.nsx_sync_opts.max_random_sync_delay)
self._synchronizer = AkandaNsxSynchronizer(
self, self.cluster,
self.nsx_sync_opts.state_sync_interval,
self.nsx_sync_opts.min_sync_req_delay,
self.nsx_sync_opts.min_chunk_size,
self.nsx_sync_opts.max_random_sync_delay)
# ---------------------------------------------------------------------
def setup_dhcpmeta_access(self):
# Ok, so we're going to add L3 here too with the DHCP
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(
topics.PLUGIN,
[dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback()],
fanout=False
)
self.conn.create_consumer(
topics.L3PLUGIN,
[l3_rpc.L3RpcCallback()],
fanout=False
)
# Consume from all consumers in a thread
self.conn.consume_in_threads()
self.handle_network_dhcp_access_delegate = noop
self.handle_port_dhcp_access_delegate = noop
self.handle_port_metadata_access_delegate = noop
self.handle_metadata_access_delegate = noop
@akanda.auto_add_ipv6_subnet
def create_network(self, context, network):
return super(NsxPluginV2, self).create_network(context, network)
@akanda.auto_add_subnet_to_router
def create_subnet(self, context, subnet):
return super(NsxPluginV2, self).create_subnet(context, subnet)
# we need to use the original l3_db.L3_NAT_db_mixin versions and not the
# NSX versions that manage NSX's logical routers
create_router = l3_db.L3_NAT_db_mixin.create_router
update_router = l3_db.L3_NAT_db_mixin.update_router
delete_router = l3_db.L3_NAT_db_mixin.delete_router
get_router = l3_db.L3_NAT_db_mixin.get_router
get_routers = l3_db.L3_NAT_db_mixin.get_routers
add_router_interface = l3_db.L3_NAT_db_mixin.add_router_interface
remove_router_interface = l3_db.L3_NAT_db_mixin.remove_router_interface
update_floatingip = l3_db.L3_NAT_db_mixin.update_floatingip
delete_floatingip = l3_db.L3_NAT_db_mixin.delete_floatingip
get_floatingip = l3_db.L3_NAT_db_mixin.get_floatingip
get_floatings = l3_db.L3_NAT_db_mixin.get_floatingips
_update_fip_assoc = l3_db.L3_NAT_db_mixin._update_fip_assoc
_update_router_gw_info = l3_db.L3_NAT_db_mixin._update_router_gw_info
disassociate_floatingips = l3_db.L3_NAT_db_mixin.disassociate_floatingips
def _ensure_metadata_host_route(self, *args, **kwargs):
""" Akanda metadata services are provided by router so make no-op/"""
pass
def _nsx_create_port(self, context, port_data):
"""Driver for creating a logical switch port on NSX platform."""
# FIXME(salvatore-orlando): On the NSX platform we do not really have
# external networks. So if a user tries to create a "regular" VIF
# port on an external network we are unable to actually create it.
# However, in order to not break unit tests, we still need to create
# the DB object and return success.
# NOTE(rods): Reporting mark's comment on the havana version of this patch.
# Akanda does want ports for external networks, so this method is
# basically the same with the external check removed and router
# ports plugged automatically.
# ---------------------------------------------------------------------
# Note(rods): Remove the check on the external network
#
# Original code:
# if self._network_is_external(context, port_data['network_id']):
# LOG.info(_("NSX plugin does not support regular VIF ports on "
# "external networks. Port %s will be down."),
# port_data['network_id'])
# # No need to actually update the DB state - the default is down
# return port_data
# ---------------------------------------------------------------------
lport = None
selected_lswitch = None
try:
selected_lswitch = self._nsx_find_lswitch_for_port(context,
port_data)
lport = self._nsx_create_port_helper(context.session,
selected_lswitch['uuid'],
port_data,
True)
nsx_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
# -----------------------------------------------------------------
# Note(rods): Auto plug router ports
#
# Original code:
# if port_data['device_owner'] not in self.port_special_owners:
# switchlib.plug_vif_interface(
# self.cluster, selected_lswitch['uuid'],
# lport['uuid'], "VifAttachment", port_data['id'])
switchlib.plug_vif_interface(
self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment", port_data['id'])
# -----------------------------------------------------------------
LOG.debug(_("_nsx_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id is "
"%(id)s."), port_data)
except (api_exc.NsxApiException, n_exc.NeutronException):
self._handle_create_port_exception(
context, port_data['id'],
selected_lswitch and selected_lswitch['uuid'],
lport and lport['uuid'])
except db_exc.DBError as e:
if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
isinstance(e.inner_exception, sql_exc.IntegrityError)):
msg = (_("Concurrent network deletion detected; Back-end Port "
"%(nsx_id)s creation to be rolled back for Neutron "
"port: %(neutron_id)s")
% {'nsx_id': lport['uuid'],
'neutron_id': port_data['id']})
LOG.warning(msg)
if selected_lswitch and lport:
try:
switchlib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
except n_exc.NotFound:
LOG.debug(_("NSX Port %s already gone"), lport['uuid'])
def _nsx_delete_port(self, context, port_data):
# FIXME(salvatore-orlando): On the NSX platform we do not really have
# external networks. So deleting regular ports from external networks
# does not make sense. However we cannot raise as this would break
# unit tests.
# NOTE(rods): reporting mark's comment on the havana version of this patch.
# Akanda does want ports for external networks, so this method is
# basically the same with the external check removed.
# ---------------------------------------------------------------------
# Original code:
# if self._network_is_external(context, port_data['network_id']):
# LOG.info(_("NSX plugin does not support regular VIF ports on "
# "external networks. Port %s will be down."),
# port_data['network_id'])
# return
# ---------------------------------------------------------------------
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nsx_port_id:
LOG.debug(_("Port '%s' was already deleted on NSX platform"), id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id)
LOG.debug(_("_nsx_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except n_exc.NotFound:
LOG.warning(_("Port %s not found in NSX"), port_data['id'])
def noop(*args, **kwargs):
pass


@@ -1,279 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
from neutron.common import topics
from neutron.db import l3_db
from neutron.db import l3_rpc_base as l3_rpc
from neutron.db import api as db
from neutron.extensions import portbindings as pbin
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.nicira.common import sync as nvp_sync
from neutron.plugins.nicira.dhcp_meta import rpc as nvp_rpc
from neutron.plugins.nicira.NeutronPlugin import nicira_db
from neutron.plugins.nicira import NeutronPlugin as nvp
from oslo.config import cfg
from akanda.neutron.plugins import decorators as akanda
from akanda.neutron.plugins import floatingip
LOG = logging.getLogger("NeutronPlugin")
akanda.monkey_patch_ipv6_generator()
NVP_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions')
def akanda_nvp_ipv6_port_security_wrapper(f):
@functools.wraps(f)
def wrapper(lport_obj, mac_address, fixed_ips, port_security_enabled,
security_profiles, queue_id, mac_learning_enabled,
allowed_address_pairs):
f(lport_obj, mac_address, fixed_ips, port_security_enabled,
security_profiles, queue_id, mac_learning_enabled,
allowed_address_pairs)
# evaluate the state so that we only override the value when enabled;
# otherwise we preserve the underlying behavior of the NVP plugin
if port_security_enabled:
# hotfix to enable egress multicast
lport_obj['allow_egress_multicast'] = True
# TODO(mark): investigate moving away from this and wrapping
# (create|update)_port
# add link-local and subnet cidr for IPv6 temp addresses
special_ipv6_addrs = akanda.get_special_ipv6_addrs(
(p['ip_address'] for p in lport_obj['allowed_address_pairs']),
mac_address
)
lport_obj['allowed_address_pairs'].extend(
{'mac_address': mac_address, 'ip_address': addr}
for addr in special_ipv6_addrs
)
return wrapper
nvp.nvplib._configure_extensions = akanda_nvp_ipv6_port_security_wrapper(
nvp.nvplib._configure_extensions
)
class AkandaNvpRpcCallbacks(l3_rpc.L3RpcCallbackMixin,
nvp_rpc.NVPRpcCallbacks):
pass
class NvpSynchronizer(nvp_sync.NvpSynchronizer):
"""
The NvpSynchronizer class in Neutron runs a synchronization thread to
sync nvp objects with neutron objects. Since we don't use nvp's routers,
the sync was failing, making neutron show all the routers as if they
were in the ERROR state. To fix this behaviour we override the two methods
responsible for router synchronization in the NvpSynchronizer class
to be no-ops.
"""
def _synchronize_lrouters(self, *args, **kwargs):
pass
def synchronize_router(self, *args, **kwargs):
pass
class NvpPluginV2(floatingip.ExplicitFloatingIPAllocationMixin,
nvp.NvpPluginV2):
"""
NvpPluginV2 is a Neutron plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = (
nvp.NvpPluginV2.supported_extension_aliases +
akanda.SUPPORTED_EXTENSIONS
)
def __init__(self):
# replace port drivers with Akanda compatible versions
self._port_drivers = {
'create': {
l3_db.DEVICE_OWNER_FLOATINGIP: self._nvp_create_fip_port,
'default': self._nvp_create_port
},
'delete': {
l3_db.DEVICE_OWNER_FLOATINGIP: self._nvp_delete_fip_port,
'default': self._nvp_delete_port
}
}
# ---------------------------------------------------------------------
# Note(rods):
# This code has been copied from our custom quantum repo
# https://github.com/dreamhost/quantum/blob/akanda_h2/neutron/plugins/
# nicira/NeutronPlugin.py#L188-L215
# We added this code with the only purpose to make the nsx driver use
# our subclass of the NvpSynchronizer object.
#
# DHC-2385
#
# If no api_extensions_path is provided set the following
if not cfg.CONF.api_extensions_path:
cfg.CONF.set_override('api_extensions_path', NVP_EXT_PATH)
self.nvp_opts = cfg.CONF.NVP
self.nvp_sync_opts = cfg.CONF.NVP_SYNC
self.cluster = nvp.create_nvp_cluster(
cfg.CONF,
self.nvp_opts.concurrent_connections,
self.nvp_opts.nvp_gen_timeout
)
self.base_binding_dict = {
pbin.VIF_TYPE: pbin.VIF_TYPE_OVS,
pbin.CAPABILITIES: {
pbin.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
db.configure_db()
self._extend_fault_map()
self.setup_dhcpmeta_access()
# Set this flag to false as the default gateway has not
# been yet updated from the config file
self._is_default_net_gw_in_sync = False
# Note(rods):
# The following line includes the only change we made to the original
# code
# - self._synchronizer = sync.NvpSynchronizer(
# + self._synchronizer = NvpSynchronizer(
# Create a synchronizer instance for backend sync
self._synchronizer = NvpSynchronizer(
self, self.cluster,
self.nvp_sync_opts.state_sync_interval,
self.nvp_sync_opts.min_sync_req_delay,
self.nvp_sync_opts.min_chunk_size,
self.nvp_sync_opts.max_random_sync_delay)
# ---------------------------------------------------------------------
def setup_dhcpmeta_access(self):
# Ok, so we're going to add L3 here too with the DHCP
self.conn = rpc.create_connection(new=True)
self.conn.create_consumer(
topics.PLUGIN,
AkandaNvpRpcCallbacks().create_rpc_dispatcher(),
fanout=False
)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.handle_network_dhcp_access_delegate = noop
self.handle_port_dhcp_access_delegate = noop
self.handle_port_metadata_access_delegate = noop
self.handle_metadata_access_delegate = noop
@akanda.auto_add_other_resources
@akanda.auto_add_ipv6_subnet
def create_network(self, context, network):
return super(NvpPluginV2, self).create_network(context, network)
@akanda.auto_add_subnet_to_router
def create_subnet(self, context, subnet):
return super(NvpPluginV2, self).create_subnet(context, subnet)
# we need to use the original l3_db.L3_NAT_db_mixin versions and not the
# NVP versions that manage NVP's logical routers
create_router = l3_db.L3_NAT_db_mixin.create_router
update_router = l3_db.L3_NAT_db_mixin.update_router
delete_router = l3_db.L3_NAT_db_mixin.delete_router
get_router = l3_db.L3_NAT_db_mixin.get_router
get_routers = l3_db.L3_NAT_db_mixin.get_routers
add_router_interface = l3_db.L3_NAT_db_mixin.add_router_interface
remove_router_interface = l3_db.L3_NAT_db_mixin.remove_router_interface
update_floatingip = l3_db.L3_NAT_db_mixin.update_floatingip
delete_floatingip = l3_db.L3_NAT_db_mixin.delete_floatingip
get_floatingip = l3_db.L3_NAT_db_mixin.get_floatingip
get_floatings = l3_db.L3_NAT_db_mixin.get_floatingips
_update_fip_assoc = l3_db.L3_NAT_db_mixin._update_fip_assoc
_update_router_gw_info = l3_db.L3_NAT_db_mixin._update_router_gw_info
disassociate_floatingips = l3_db.L3_NAT_db_mixin.disassociate_floatingips
def _ensure_metadata_host_route(self, *args, **kwargs):
""" Akanda metadata services are provided by router so make no-op/"""
pass
def _nvp_create_port(self, context, port_data):
""" Driver for creating a logical switch port on NVP platform """
# NOTE(mark): Akanda does want ports for external networks so
# this method is basically same with external check removed and
# the auto plugging of router ports
lport = None
selected_lswitch = None
try:
selected_lswitch = self._nvp_find_lswitch_for_port(context,
port_data)
lport = self._nvp_create_port_helper(self.cluster,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_neutron_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
nvp.nvplib.plug_interface(self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
port_data['id'])
LOG.debug(_("_nvp_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id is "
"%(id)s."), port_data)
except (nvp.NvpApiClient.NvpApiException, nvp.q_exc.NeutronException):
self._handle_create_port_exception(
context, port_data['id'],
selected_lswitch and selected_lswitch['uuid'],
lport and lport['uuid'])
def _nvp_delete_port(self, context, port_data):
# NOTE(mark): Akanda does want ports for external networks, so
# this method is basically the same with the external check removed
nvp_port_id = self._nvp_get_port_id(context, self.cluster,
port_data)
if not nvp_port_id:
LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
nvp.nvplib.delete_port(self.cluster,
port_data['network_id'],
nvp_port_id)
LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except nvp.q_exc.NotFound:
LOG.warning(_("Port %s not found in NVP"), port_data['id'])
def noop(*args, **kwargs):
pass


@@ -26,8 +26,7 @@ setup(
author_email='dev-community@dreamhost.com',
url='http://github.com/dreamhost/akanda',
license='BSD',
install_requires=[
],
install_requires=[],
namespace_packages=['akanda'],
packages=find_packages(exclude=['test', 'smoke']),
include_package_data=True,