Merge remote-tracking branch 'origin/master' into walnut

Change-Id: Ic6314ef9c1db6524fbb0ed8b1bacdc2b081c4775
armando-migliaccio 2015-09-16 17:07:19 -07:00
commit fdc3431ccd
265 changed files with 7074 additions and 12017 deletions

View File

@ -1,4 +1,4 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION | cat
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@ -0,0 +1,23 @@
SRIOV_AGENT_CONF="${Q_PLUGIN_CONF_PATH}/sriov_agent.ini"
SRIOV_AGENT_BINARY="${NEUTRON_BIN_DIR}/neutron-sriov-nic-agent"
function configure_l2_agent_sriovnicswitch {
if [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then
PHYSICAL_DEVICE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE
fi
if [[ -n "$PHYSICAL_DEVICE_MAPPINGS" ]]; then
iniset /$SRIOV_AGENT_CONF sriov_nic physical_device_mappings $PHYSICAL_DEVICE_MAPPINGS
fi
iniset /$SRIOV_AGENT_CONF securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
iniset /$SRIOV_AGENT_CONF agent extensions "$L2_AGENT_EXTENSIONS"
}
function start_l2_agent_sriov {
run_process q-sriov-agt "$SRIOV_AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$SRIOV_AGENT_CONF"
}
function stop_l2_agent_sriov {
stop_process q-sriov-agt
}

View File

@ -1,3 +1,6 @@
source $LIBDIR/ml2_drivers/sriovnicswitch
function enable_ml2_extension_driver {
local extension_driver=$1
if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then
@ -11,3 +14,16 @@ function enable_ml2_extension_driver {
function configure_qos_ml2 {
enable_ml2_extension_driver "qos"
}
function configure_ml2 {
OIFS=$IFS;
IFS=",";
mechanism_drivers_array=($Q_ML2_PLUGIN_MECHANISM_DRIVERS);
IFS=$OIFS;
for mechanism_driver in "${mechanism_drivers_array[@]}"; do
if [ "$(type -t configure_ml2_$mechanism_driver)" = function ]; then
configure_ml2_$mechanism_driver
fi
done
}
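The loop above dispatches to a per-driver hook only when a function of the expected name is defined (the bash ``type -t`` check). A hedged Python rendering of the same name-based dispatch pattern, with hypothetical driver names:

def configure_ml2_sriovnicswitch():
    print('configuring sriovnicswitch')

def configure_ml2(mechanism_drivers):
    # Call configure_ml2_<driver> for each driver that defines a hook.
    for driver in mechanism_drivers.split(','):
        handler = globals().get('configure_ml2_' + driver)
        if callable(handler):
            handler()

configure_ml2('openvswitch,sriovnicswitch')  # only the sriovnicswitch hook exists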

View File

@ -0,0 +1,3 @@
function configure_ml2_sriovnicswitch {
iniset /$Q_PLUGIN_CONF_FILE ml2_sriov agent_required True
}

View File

@ -1,6 +1,7 @@
LIBDIR=$DEST/neutron/devstack/lib
source $LIBDIR/l2_agent
source $LIBDIR/l2_agent_sriovnicswitch
source $LIBDIR/ml2
source $LIBDIR/qos
@ -15,4 +16,26 @@ if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled q-agt; then
configure_l2_agent
fi
#Note: the SR-IOV agent should run alongside the OVS or Linux bridge agent,
#because those are the mechanisms that bind the DHCP and router ports.
#Currently devstack lacks the option to run two agents on the same node,
#so we create a new service, q-sriov-agt, while q-agt remains the OVS
#or Linux bridge agent.
if is_service_enabled q-sriov-agt; then
configure_$Q_PLUGIN
configure_l2_agent
configure_l2_agent_sriovnicswitch
fi
fi
if [[ "$1" == "stack" && "$2" == "extra" ]]; then
if is_service_enabled q-sriov-agt; then
start_l2_agent_sriov
fi
fi
if [[ "$1" == "unstack" ]]; then
if is_service_enabled q-sriov-agt; then
stop_l2_agent_sriov
fi
fi

View File

@ -25,8 +25,8 @@ Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2<br>
</a>
</td>
<td align="center">
Failure Percentage - Last 10 Days - Large Opts<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Opts&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
Failure Percentage - Last 10 Days - Large Ops<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Ops&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29" width="400">
</a>
</td>

View File

@ -94,18 +94,18 @@ In practical terms this scenario would be translated in the code below:
def callback1(resource, event, trigger, **kwargs):
print 'Callback1 called by trigger: ', trigger
print 'kwargs: ', kwargs
print('Callback1 called by trigger: ', trigger)
print('kwargs: ', kwargs)
def callback2(resource, event, trigger, **kwargs):
print 'Callback2 called by trigger: ', trigger
print 'kwargs: ', kwargs
print('Callback2 called by trigger: ', trigger)
print('kwargs: ', kwargs)
# B and C express interest with I
registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
print 'Subscribed'
print('Subscribed')
# A notifies
@ -114,7 +114,7 @@ In practical terms this scenario would be translated in the code below:
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
print 'Notifying...'
print('Notifying...')
do_notify()
@ -171,13 +171,13 @@ to abort events are ignored. The snippet below shows this in action:
raise Exception('I am failing!')
def callback2(resource, event, trigger, **kwargs):
print 'Callback2 called by %s on event %s' % (trigger, event)
print('Callback2 called by %s on event %s' % (trigger, event))
registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.ABORT_CREATE)
print 'Subscribed'
print('Subscribed')
def do_notify():
@ -185,11 +185,11 @@ to abort events are ignored. The snippet below shows this in action:
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
print 'Notifying...'
print('Notifying...')
try:
do_notify()
except exceptions.CallbackFailure as e:
print 'Error: ', e
print('Error: ', e)
The output is:
@ -237,11 +237,11 @@ The snippet below shows these concepts in action:
def callback1(resource, event, trigger, **kwargs):
print 'Callback1 called by %s on event %s for resource %s' % (trigger, event, resource)
print('Callback1 called by %s on event %s for resource %s' % (trigger, event, resource))
def callback2(resource, event, trigger, **kwargs):
print 'Callback2 called by %s on event %s for resource %s' % (trigger, event, resource)
print('Callback2 called by %s on event %s for resource %s' % (trigger, event, resource))
registry.subscribe(callback1, resources.ROUTER, events.BEFORE_READ)
@ -249,11 +249,11 @@ The snippet below shows these concepts in action:
registry.subscribe(callback1, resources.ROUTER, events.AFTER_DELETE)
registry.subscribe(callback1, resources.PORT, events.BEFORE_UPDATE)
registry.subscribe(callback2, resources.ROUTER_GATEWAY, events.BEFORE_UPDATE)
print 'Subscribed'
print('Subscribed')
def do_notify():
print 'Notifying...'
print('Notifying...')
kwargs = {'foo': 'bar'}
registry.notify(resources.ROUTER, events.BEFORE_READ, do_notify, **kwargs)
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
@ -356,17 +356,17 @@ What kind of function can be a callback?
def callback1(resource, event, trigger, **kwargs):
print 'module callback'
print('module callback')
class MyCallback(object):
def callback2(self, resource, event, trigger, **kwargs):
print 'object callback'
print('object callback')
@classmethod
def callback3(cls, resource, event, trigger, **kwargs):
print 'class callback'
print('class callback')
c = MyCallback()
@ -376,7 +376,7 @@ What kind of function can be a callback?
def do_notify():
def nested_subscribe(resource, event, trigger, **kwargs):
print 'nested callback'
print('nested callback')
registry.subscribe(nested_subscribe, resources.ROUTER, events.BEFORE_CREATE)
@ -384,7 +384,7 @@ What kind of function can be a callback?
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
print 'Notifying...'
print('Notifying...')
do_notify()
And the output is going to be:

View File

@ -506,6 +506,28 @@ Extensions can be loaded in two ways:
variable is commented.
Service Providers
~~~~~~~~~~~~~~~~~
If your project uses service provider(s) the same way VPNAAS and LBAAS do, you
specify your service provider in your ``project_name.conf`` file like so::
[service_providers]
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default][,...]
In order for Neutron to load this correctly, make sure you do the following in
your code::
from neutron.db import servicetype_db
from neutron.services import provider_configuration as pconf

service_type_manager = servicetype_db.ServiceTypeManager.get_instance()
service_type_manager.add_provider_configuration(
    YOUR_SERVICE_TYPE,
    pconf.ProviderConfiguration(YOUR_SERVICE_MODULE))
This is typically required when you instantiate your service plugin class.
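For illustration, a minimal sketch of that registration inside a service plugin constructor; ``MyServicePlugin`` is hypothetical, while ``YOUR_SERVICE_TYPE`` and ``YOUR_SERVICE_MODULE`` are the placeholders from the snippet above::

    from neutron.db import servicetype_db
    from neutron.services import provider_configuration as pconf

    class MyServicePlugin(object):

        def __init__(self):
            super(MyServicePlugin, self).__init__()
            manager = servicetype_db.ServiceTypeManager.get_instance()
            manager.add_provider_configuration(
                YOUR_SERVICE_TYPE,
                pconf.ProviderConfiguration(YOUR_SERVICE_MODULE))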
Interface Drivers
~~~~~~~~~~~~~~~~~

View File

@ -83,6 +83,12 @@ When?
stack testing can help here as the full stack infrastructure can restart an
agent during the test.
Prerequisites
-------------
The fullstack test suite assumes that the 240.0.0.0/3 range in the root
namespace of the test machine is available for its use.
Short Term Goals
----------------
@ -103,9 +109,6 @@ the fact as there will probably be something to copy/paste from.
Long Term Goals
---------------
* Currently we configure the OVS agent with VLANs segmentation (Only because
it's easier). This allows us to validate most functionality, but we might
need to support tunneling somehow.
* How will advanced services use the full stack testing infrastructure? Full
stack tests infrastructure classes are expected to change quite a bit over
the next coming months. This means that other repositories may import these
@ -116,3 +119,4 @@ Long Term Goals
mechanism driver. We may modularize the topology configuration further to
allow to rerun full stack tests against different Neutron plugins or ML2
mechanism drivers.
* Add OVS ARP responder coverage when the gate supports OVS 2.1+

View File

@ -84,8 +84,14 @@ for a port or a network:
Each QoS policy contains zero or more QoS rules. A policy is then applied to a
network or a port, making all rules of the policy applied to the corresponding
Neutron resource (for a network, applying a policy means that the policy will
be applied to all ports that belong to it).
Neutron resource.
When applied through a network association, policy rules may or may not apply
to neutron internal ports (router, dhcp, load balancer, etc.). The QosRule
base object provides a default should_apply_to_port method which can be
overridden. In the future we may want a flag in QoSNetworkPolicyBinding
or QosRule to enforce this kind of application (for example, when
automatically limiting all ingress of router devices on an external network).
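As a hedged, standalone illustration of that hook (sketch classes, not the actual neutron objects), a rule can veto its application to individual ports when the policy arrives via a network association::

    class QosRuleSketch(object):
        def should_apply_to_port(self, port):
            # Default behaviour: apply to every port on the network.
            return True

    class NonInternalPortsRuleSketch(QosRuleSketch):
        def should_apply_to_port(self, port):
            # Skip neutron internal ports such as routers and DHCP.
            return not port.get('device_owner', '').startswith('network:')

    print(NonInternalPortsRuleSketch().should_apply_to_port(
        {'device_owner': 'network:dhcp'}))  # False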
From the database point of view, the following objects are defined in the schema:

View File

@ -164,12 +164,6 @@ difference between CountableResource and TrackedResource.
Quota Enforcement
-----------------
**NOTE: The reservation engine is currently not wired into the API controller
as issues have been discovered with multiple workers. For more information
see _bug1468134**
.. _bug1468134: https://bugs.launchpad.net/neutron/+bug/1486134
Before dispatching a request to the plugin, the Neutron 'base' controller [#]_
attempts to make a reservation for requested resource(s).
Reservations are made by calling the make_reservation method in

View File

@ -130,19 +130,33 @@ needed.
Sub-Project Release Process
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Only members of the `neutron-release
<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can do
releases. Make sure you talk to a member of neutron-release to perform your
release.
To release a sub-project, follow these steps:
* Only members of the `neutron-release
<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can
do releases. Make sure you talk to a member of neutron-release to perform
your release.
* For projects which have not moved to post-versioning, we need to push an
alpha tag to avoid pbr complaining. The neutron-release group will handle
this.
* Modify setup.cfg to remove the version (if you have one), which moves your
project to post-versioning, similar to all the other Neutron projects. You
can skip this step if you don't have a version in setup.cfg.
* Have neutron-release push the tag to gerrit.
* Have neutron-release `tag the release
alpha tag to avoid pbr complaining. A member of the neutron-release group
will handle this.
* A sub-project owner should modify setup.cfg to remove the version (if you
have one), which moves your project to post-versioning, similar to all the
other Neutron projects. You can skip this step if you don't have a version in
setup.cfg.
* A member of neutron-release will then `tag the release
<http://docs.openstack.org/infra/manual/drivers.html#tagging-a-release>`_,
which will release the code to PyPI.
* The releases will now be on PyPI. A sub-project owner should verify this by
going to a URL similar to
`this <https://pypi.python.org/pypi/networking-odl>`_.
* A sub-project owner should next go to Launchpad and release this version
using the "Release Now" button for the release itself.
* A sub-project owner should update any bugs that were fixed with this
release to "Fix Released" in Launchpad.
* A sub-project owner should add the tarball to the Launchpad page for the
release using the "Add download file" link.
* A sub-project owner should add the next milestone to the Launchpad series, or
if a new series is required, create the new series and a new milestone.
* Finally, a sub-project owner should send an email to the openstack-announce
mailing list announcing the new release.

View File

@ -64,6 +64,7 @@
# Name of the bridge used for external network traffic. This should be set to
# an empty value for the Linux bridge. When this parameter is set, each L3 agent
# can be associated with no more than one external network.
# This option is deprecated and will be removed in the M release.
# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server

View File

@ -190,9 +190,9 @@
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
@ -306,19 +306,13 @@
# ========== end of items for VLAN trunking networks ==========
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. A value of 0 runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them. If not
# specified, the default value is equal to the number of CPUs available to
# achieve best performance.
# Number of separate API worker processes to spawn. If not specified or < 1,
# the default value is equal to the number of CPUs available.
# api_workers = <number of CPUs>
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Number of separate RPC worker processes to spawn. If not specified or < 1,
# a single RPC worker process is spawned by the parent process.
# rpc_workers = 1
# Timeout for client connections socket operations. If an
# incoming connection is idle for this number of seconds it

View File

@ -1,15 +0,0 @@
[cfg_agent]
# (IntOpt) Interval in seconds for processing of service updates.
# That is when the config agent's process_services() loop executes
# and it lets each service helper process its service resources.
# rpc_loop_interval = 10
# (StrOpt) Period-separated module path to the routing service helper class.
# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
# (IntOpt) Timeout value in seconds for connecting to a hosting device.
# device_connection_timeout = 30
# (IntOpt) The time in seconds until a backlogged hosting device is
# presumed dead or booted to an error state.
# hosting_device_dead_timeout = 300

View File

@ -1,107 +0,0 @@
[cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-
# (BoolOpt) A flag indicating whether Openstack networking should manage the
# creation and removal of VLAN interfaces for provider networks on the Nexus
# switches. If the flag is set to False then Openstack will not create or
# remove VLAN interfaces for provider networks, and the administrator needs
# to manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True
# (BoolOpt) A flag indicating whether Openstack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then Openstack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True
# (StrOpt) Period-separated module path to the model class to use for
# the Cisco neutron plugin.
#
# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
# Note: This feature is not supported on all models/versions of Cisco
# Nexus switches. To use this feature, all of the Nexus switches in the
# deployment must support it.
# nexus_l3_enable = False
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# N1KV Format.
# [N1KV:<IP address of VSM>]
# username=<credential username>
# password=<credential password>
#
# Example:
# [N1KV:2.2.2.2]
# username=admin
# password=mySecretPassword
[cisco_n1k]
# (StrOpt) Specify the name of the integration bridge to which the VIFs are
# attached.
# Default value: br-int
# integration_bridge = br-int
# (StrOpt) Name of the policy profile to be associated with a port when no
# policy profile is specified during port creates.
# Default value: service_profile
# default_policy_profile = service_profile
# (StrOpt) Name of the policy profile to be associated with a port owned by
# network node (dhcp, router).
# Default value: dhcp_pp
# network_node_policy_profile = dhcp_pp
# (StrOpt) Name of the network profile to be associated with a network when no
# network profile is specified during network creates. Admin should pre-create
# a network profile with this name.
# Default value: default_network_profile
# default_network_profile = network_pool
# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
# policy profiles.
# Default value: 60
# poll_duration = 60
# (BoolOpt) Specify whether tenants are restricted from accessing all the
# policy profiles.
# Default value: False, indicating all tenants can access all policy profiles.
#
# restrict_policy_profiles = False
# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
# Default value: 4
# http_pool_size = 4
# (IntOpt) Timeout duration in seconds for the http request
# Default value: 15
# http_timeout = 15
# (BoolOpt) Specify whether tenants are restricted from accessing network
# profiles belonging to other tenants.
# Default value: True, indicating other tenants cannot access network
# profiles belonging to a tenant.
#
# restrict_network_profiles = True

View File

@ -1,76 +0,0 @@
[general]
#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
# backlog_processing_interval = 10
#(StrOpt) Name of the L3 admin tenant
# l3_admin_tenant = L3AdminTenant
#(StrOpt) Name of management network for hosting device configuration
# management_network = osn_mgmt_nw
#(StrOpt) Default security group applied on management port
# default_security_group = mgmt_sec_grp
#(IntOpt) Seconds of no status update until a cfg agent is considered down
# cfg_agent_down_time = 60
#(StrOpt) Path to templates for hosting devices
# templates_path = /opt/stack/data/neutron/cisco/templates
#(StrOpt) Path to config drive files for service VM instances
# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive
#(BoolOpt) Ensure that Nova is running before attempting to create any VM
# ensure_nova_running = True
[hosting_devices]
# Settings coupled to CSR1kv VM devices
# -------------------------------------
#(StrOpt) Name of Glance image for CSR1kv
# csr1kv_image = csr1kv_openstack_img
#(StrOpt) UUID of Nova flavor for CSR1kv
# csr1kv_flavor = 621
#(StrOpt) Plugging driver for CSR1kv
# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver
#(StrOpt) Hosting device driver for CSR1kv
# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
#(StrOpt) Config agent router service driver for CSR1kv
# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
#(StrOpt) Configdrive template file for CSR1kv
# csr1kv_configdrive_template = csr1kv_cfg_template
#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
# csr1kv_booting_time = 420
#(StrOpt) Username to use for CSR1kv configurations
# csr1kv_username = stack
#(StrOpt) Password to use for CSR1kv configurations
# csr1kv_password = cisco
[n1kv]
# Settings coupled to inter-working with N1kv plugin
# --------------------------------------------------
#(StrOpt) Name of N1kv port profile for management ports
# management_port_profile = osn_mgmt_pp
#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
# from VXLAN segmented networks).
# t1_port_profile = osn_t1_pp
#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
# from VLAN segmented networks).
# t2_port_profile = osn_t2_pp
#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
# for VXLAN segmented traffic).
# t1_network_profile = osn_t1_np
#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
# for VLAN segmented traffic).
# t2_network_profile = osn_t2_np

View File

@ -54,8 +54,28 @@
# ovsdb_connection = tcp:127.0.0.1:6640
# (StrOpt) OpenFlow interface to use.
# 'ovs-ofctl' is currently the only available choice.
# 'ovs-ofctl' or 'native'.
# of_interface = ovs-ofctl
#
# (IPOpt)
# Address to listen on for OpenFlow connections.
# Used only for 'native' driver.
# of_listen_address = 127.0.0.1
#
# (IntOpt)
# Port to listen on for OpenFlow connections.
# Used only for 'native' driver.
# of_listen_port = 6633
#
# (IntOpt)
# Timeout in seconds to wait for the local switch connecting to the controller.
# Used only for 'native' driver.
# of_connect_timeout=30
#
# (IntOpt)
# Timeout in seconds to wait for a single OpenFlow request.
# Used only for 'native' driver.
# of_request_timeout=10
# (StrOpt) ovs datapath to use.
# 'system' is the default value and corresponds to the kernel datapath.
@ -143,6 +163,12 @@
#
# extensions =
# (BoolOpt) Set or un-set the checksum on outgoing IP packets
# carrying GRE/VXLAN tunnels. The default value is False.
#
# tunnel_csum = False
[securitygroup]
# Firewall driver for realizing neutron security group function.
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver

View File

@ -8,6 +8,4 @@
[Filters]
# neutron/agent/linux/ebtables_driver.py
ebtables: CommandFilter, ebtables, root
ebtablesEnv: EnvFilter, ebtables, root, EBTABLES_ATOMIC_FILE=

View File

@ -12,6 +12,7 @@
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
# NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root

View File

@ -56,7 +56,9 @@
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
@ -71,6 +73,7 @@
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",

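A hedged boolean rendering of the ``create_port:device_owner``/``update_port:device_owner`` rules above (hypothetical helper, not neutron's policy engine): a non-network ``device_owner`` is allowed for anyone, while a ``network:``-prefixed one requires the network owner, an admin, or an advsvc context:

def may_set_device_owner(device_owner, admin_or_network_owner, advsvc):
    # 'network_device' is the field check device_owner=~^network: above.
    is_network_device = device_owner.startswith('network:')
    return (not is_network_device) or admin_or_network_owner or advsvc

print(may_set_device_owner('compute:nova', False, False))  # True
print(may_set_device_owner('network:dhcp', False, False))  # False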
View File

@ -152,7 +152,7 @@ class OVSBridge(BaseOVS):
super(OVSBridge, self).__init__()
self.br_name = br_name
self.datapath_type = datapath_type
self.agent_uuid_stamp = '0x0'
self.agent_uuid_stamp = 0
def set_agent_uuid_stamp(self, val):
self.agent_uuid_stamp = val
@ -195,15 +195,6 @@ class OVSBridge(BaseOVS):
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self, secure_mode=False):
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.del_br(self.br_name))
txn.add(self.ovsdb.add_br(self.br_name,
datapath_type=self.datapath_type))
if secure_mode:
txn.add(self.ovsdb.set_fail_mode(self.br_name,
FAILMODE_SECURE))
def add_port(self, port_name, *interface_attr_tuples):
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.add_port(self.br_name, port_name))
@ -299,7 +290,8 @@ class OVSBridge(BaseOVS):
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=p_const.VXLAN_UDP_PORT,
dont_fragment=True):
dont_fragment=True,
tunnel_csum=False):
attrs = [('type', tunnel_type)]
# TODO(twilson) This is an OrderedDict solely to make a test happy
options = collections.OrderedDict()
@ -314,6 +306,8 @@ class OVSBridge(BaseOVS):
options['local_ip'] = local_ip
options['in_key'] = 'flow'
options['out_key'] = 'flow'
if tunnel_csum:
options['csum'] = str(tunnel_csum).lower()
attrs.append(('options', options))
return self.add_port(port_name, *attrs)
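A hedged, standalone reproduction (not the neutron class itself) of how the new ``tunnel_csum`` flag lands in the OVS interface options:

import collections

def build_tunnel_options(remote_ip, local_ip, tunnel_csum=False):
    options = collections.OrderedDict()
    options['remote_ip'] = remote_ip
    options['local_ip'] = local_ip
    options['in_key'] = 'flow'
    options['out_key'] = 'flow'
    if tunnel_csum:
        # OVS expects the lowercase string 'true', not a Python boolean.
        options['csum'] = str(tunnel_csum).lower()
    return options

print(build_tunnel_options('192.0.2.1', '192.0.2.2', tunnel_csum=True))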

View File

@ -51,15 +51,16 @@ class DhcpAgent(manager.Manager):
"""
target = oslo_messaging.Target(version='1.0')
def __init__(self, host=None):
def __init__(self, host=None, conf=None):
super(DhcpAgent, self).__init__(host=host)
self.needs_resync_reasons = collections.defaultdict(list)
self.conf = cfg.CONF
self.conf = conf or cfg.CONF
self.cache = NetworkCache()
self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
ctx, self.conf.use_namespaces)
ctx, self.conf.use_namespaces,
self.conf.host)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
utils.ensure_dir(dhcp_dir)
@ -136,11 +137,11 @@ class DhcpAgent(manager.Manager):
LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
{'net_id': network.id, 'action': action})
def schedule_resync(self, reason, network=None):
def schedule_resync(self, reason, network_id=None):
"""Schedule a resync for a given network and reason. If no network is
specified, resync all networks.
"""
self.needs_resync_reasons[network].append(reason)
self.needs_resync_reasons[network_id].append(reason)
@utils.synchronized('dhcp-agent')
def sync_state(self, networks=None):
@ -149,7 +150,7 @@ class DhcpAgent(manager.Manager):
"""
only_nets = set([] if (not networks or None in networks) else networks)
LOG.info(_LI('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
pool = eventlet.GreenPool(self.conf.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())
try:
@ -172,7 +173,11 @@ class DhcpAgent(manager.Manager):
LOG.info(_LI('Synchronizing state complete'))
except Exception as e:
self.schedule_resync(e)
if only_nets:
for network_id in only_nets:
self.schedule_resync(e, network_id)
else:
self.schedule_resync(e)
LOG.exception(_LE('Unable to sync network state.'))
@utils.exception_logger()
@ -399,9 +404,9 @@ class DhcpPluginApi(object):
"""
def __init__(self, topic, context, use_namespaces):
def __init__(self, topic, context, use_namespaces, host):
self.context = context
self.host = cfg.CONF.host
self.host = host
self.use_namespaces = use_namespaces
target = oslo_messaging.Target(
topic=topic,
@ -537,21 +542,21 @@ class NetworkCache(object):
class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None):
super(DhcpAgentWithStateReport, self).__init__(host=host)
def __init__(self, host=None, conf=None):
super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
'dhcp_driver': self.conf.dhcp_driver,
'use_namespaces': self.conf.use_namespaces,
'dhcp_lease_duration': self.conf.dhcp_lease_duration,
'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval
report_interval = self.conf.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(

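The recurring change in this file is dependency injection of the config object: the agent now accepts an optional ``conf`` and falls back to the global ``cfg.CONF``, so tests can pass a fixture instead of mutating global state. A minimal sketch of the pattern (``GLOBAL_CONF`` stands in for ``cfg.CONF``):

GLOBAL_CONF = {'num_sync_threads': 4}  # stand-in for oslo.config's cfg.CONF

class AgentSketch(object):
    def __init__(self, conf=None):
        # Fall back to the process-global config when none is injected.
        self.conf = conf or GLOBAL_CONF

print(AgentSketch().conf['num_sync_threads'])          # 4, from the global
print(AgentSketch(conf={'num_sync_threads': 1}).conf)  # injected fixture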
View File

@ -28,20 +28,20 @@ from neutron.common import topics
from neutron import service as neutron_service
def register_options():
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
cfg.CONF.register_opts(dhcp_config.DHCP_OPTS)
cfg.CONF.register_opts(dhcp_config.DNSMASQ_OPTS)
cfg.CONF.register_opts(metadata_config.DRIVER_OPTS)
cfg.CONF.register_opts(metadata_config.SHARED_OPTS)
cfg.CONF.register_opts(interface.OPTS)
def register_options(conf):
config.register_interface_driver_opts_helper(conf)
config.register_use_namespaces_opts_helper(conf)
config.register_agent_state_opts_helper(conf)
conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
conf.register_opts(dhcp_config.DHCP_OPTS)
conf.register_opts(dhcp_config.DNSMASQ_OPTS)
conf.register_opts(metadata_config.DRIVER_OPTS)
conf.register_opts(metadata_config.SHARED_OPTS)
conf.register_opts(interface.OPTS)
def main():
register_options()
register_options(cfg.CONF)
common_config.init(sys.argv[1:])
config.setup_logging()
server = neutron_service.Service.create(

View File

@ -17,6 +17,7 @@ import abc
import collections
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from neutron.agent.l2 import agent_extension
@ -24,8 +25,12 @@ from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import exceptions
from neutron.i18n import _LW, _LI
from neutron import manager
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
@ -35,36 +40,130 @@ class QosAgentDriver(object):
for applying QoS Rules on a port.
"""
# Each QoS driver should define the set of rule types that it supports, and
# corresponding handlers with the following names:
#
# create_<type>
# update_<type>
# delete_<type>
#
# where <type> is one of VALID_RULE_TYPES
SUPPORTED_RULES = set()
@abc.abstractmethod
def initialize(self):
"""Perform QoS agent driver initialization.
"""
@abc.abstractmethod
def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
#TODO(QoS) we may want to provide default implementations of calling
#delete and then update
self._handle_update_create_rules('create', port, qos_policy)
@abc.abstractmethod
def update(self, port, qos_policy):
"""Apply QoS rules on port.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('update', port, qos_policy)
@abc.abstractmethod
def delete(self, port, qos_policy):
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
if qos_policy is None:
rule_types = self.SUPPORTED_RULES
else:
rule_types = set(
[rule.rule_type
for rule in self._iterate_rules(qos_policy.rules)])
for rule_type in rule_types:
self._handle_rule_delete(port, rule_type)
def _iterate_rules(self, rules):
for rule in rules:
rule_type = rule.rule_type
if rule_type in self.SUPPORTED_RULES:
yield rule
else:
LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
'%(rule_type)s; skipping'),
{'rule_id': rule.id, 'rule_type': rule_type})
def _handle_rule_delete(self, port, rule_type):
handler_name = "".join(("delete_", rule_type))
handler = getattr(self, handler_name)
handler(port)
def _handle_update_create_rules(self, action, port, qos_policy):
for rule in self._iterate_rules(qos_policy.rules):
if rule.should_apply_to_port(port):
handler_name = "".join((action, "_", rule.rule_type))
handler = getattr(self, handler_name)
handler(port, rule)
else:
LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
{'port': port, 'rule': rule.id})
class PortPolicyMap(object):
def __init__(self):
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_policies = {}
self.port_policies = {}
def get_ports(self, policy):
return self.qos_policy_ports[policy.id].values()
def get_policy(self, policy_id):
return self.known_policies.get(policy_id)
def update_policy(self, policy):
self.known_policies[policy.id] = policy
def has_policy_changed(self, port, policy_id):
return self.port_policies.get(port['port_id']) != policy_id
def get_port_policy(self, port):
policy_id = self.port_policies.get(port['port_id'])
if policy_id:
return self.get_policy(policy_id)
def set_port_policy(self, port, policy):
"""Attach a port to policy and return any previous policy on port."""
port_id = port['port_id']
old_policy = self.get_port_policy(port)
self.known_policies[policy.id] = policy
self.port_policies[port_id] = policy.id
self.qos_policy_ports[policy.id][port_id] = port
if old_policy and old_policy.id != policy.id:
del self.qos_policy_ports[old_policy.id][port_id]
return old_policy
def clean_by_port(self, port):
"""Detach port from policy and cleanup data we don't need anymore."""
port_id = port['port_id']
if port_id in self.port_policies:
del self.port_policies[port_id]
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
if not port_dict:
self._clean_policy_info(qos_policy_id)
return
raise exceptions.PortNotFound(port_id=port['port_id'])
def _clean_policy_info(self, qos_policy_id):
del self.qos_policy_ports[qos_policy_id]
del self.known_policies[qos_policy_id]
class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
@ -79,9 +178,7 @@ class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
'neutron.qos.agent_drivers', driver_type)()
self.qos_driver.initialize()
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_ports = set()
self.policy_map = PortPolicyMap()
registry.subscribe(self._handle_notification, resources.QOS_POLICY)
self._register_rpc_consumers(connection)
@ -111,39 +208,50 @@ class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
Update events are handled in _handle_notification.
"""
port_id = port['port_id']
qos_policy_id = port.get('qos_policy_id')
port_qos_policy_id = port.get('qos_policy_id')
network_qos_policy_id = port.get('network_qos_policy_id')
qos_policy_id = port_qos_policy_id or network_qos_policy_id
if qos_policy_id is None:
self._process_reset_port(port)
return
#Note(moshele) check if we have seen this port
#and it has the same policy we do nothing.
if (port_id in self.known_ports and
port_id in self.qos_policy_ports[qos_policy_id]):
if not self.policy_map.has_policy_changed(port, qos_policy_id):
return
self.qos_policy_ports[qos_policy_id][port_id] = port
self.known_ports.add(port_id)
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
self.qos_driver.create(port, qos_policy)
if qos_policy is None:
LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
"%(port_id)s is not available on server, "
"it has been deleted. Skipping."),
{'qos_policy_id': qos_policy_id, 'port_id': port_id})
self._process_reset_port(port)
else:
old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
if old_qos_policy:
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
else:
self.qos_driver.create(port, qos_policy)
def delete_port(self, context, port):
self._process_reset_port(port)
def _process_update_policy(self, qos_policy):
for port_id, port in self.qos_policy_ports[qos_policy.id].items():
# TODO(QoS): for now, just reflush the rules on the port. Later, we
# may want to apply the difference between the rules lists only.
self.qos_driver.delete(port, None)
old_qos_policy = self.policy_map.get_policy(qos_policy.id)
for port in self.policy_map.get_ports(qos_policy):
#NOTE(QoS): for now, just reflush the rules on the port. Later, we
# may want to apply the difference between the old and
# new rule lists.
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
self.policy_map.update_policy(qos_policy)
def _process_reset_port(self, port):
port_id = port['port_id']
if port_id in self.known_ports:
self.known_ports.remove(port_id)
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
self.qos_driver.delete(port, None)
return
try:
self.policy_map.clean_by_port(port)
self.qos_driver.delete(port)
except exceptions.PortNotFound:
LOG.info(_LI("QoS extension did have no information about the "
"port %s that we were trying to reset"),
port['port_id'])
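A hedged, standalone sketch (hypothetical driver) of the handler-naming convention described in the ``SUPPORTED_RULES`` comment above: for each supported rule type ``<type>`` the driver exposes ``create_<type>``/``update_<type>``/``delete_<type>``, and the base class dispatches with ``getattr``:

class FakeQosDriverSketch(object):
    SUPPORTED_RULES = {'bandwidth_limit'}

    def create_bandwidth_limit(self, port, rule):
        print('create bandwidth_limit on %s' % port['port_id'])

    def delete_bandwidth_limit(self, port):
        print('delete bandwidth_limit on %s' % port['port_id'])

    def _handle_rule_delete(self, port, rule_type):
        # Same name-based dispatch as QosAgentDriver above.
        getattr(self, 'delete_' + rule_type)(port)

FakeQosDriverSketch()._handle_rule_delete({'port_id': 'p1'}, 'bandwidth_limit')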

View File

@ -80,7 +80,8 @@ class L3PluginApi(object):
to update_ha_routers_states
1.5 - Added update_ha_routers_states
1.6 - Added process_prefix_update
1.7 - DVR support: new L3 plugin methods added.
- delete_agent_gateway_port
"""
def __init__(self, topic, host):
@ -139,6 +140,12 @@ class L3PluginApi(object):
return cctxt.call(context, 'process_prefix_update',
subnets=prefix_update)
def delete_agent_gateway_port(self, context, fip_net):
"""Delete Floatingip_agent_gateway_port."""
cctxt = self.client.prepare(version='1.7')
return cctxt.call(context, 'delete_agent_gateway_port',
host=self.host, network_id=fip_net)
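The new method pins the call to RPC version 1.7, the version that introduced it on the server side. A hedged, standalone sketch of that version-pinning pattern (fake client, not the oslo.messaging API):

class FakeRpcClientSketch(object):
    def prepare(self, version):
        print('pinning call to RPC API version %s' % version)
        return self

    def call(self, context, method, **kwargs):
        print('calling %s with %s' % (method, kwargs))

cctxt = FakeRpcClientSketch().prepare(version='1.7')
cctxt.call({}, 'delete_agent_gateway_port', host='host-1', network_id='net-1')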
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
@ -517,10 +524,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
@periodic_task.periodic_task(spacing=1)
def periodic_sync_routers_task(self, context):
self.process_services_sync(context)
LOG.debug("Starting periodic_sync_routers_task - fullsync:%s",
self.fullsync)
if not self.fullsync:
return
LOG.debug("Starting fullsync periodic_sync_routers_task")
# self.fullsync is True at this point. If an exception -- caught or
# uncaught -- prevents setting it to False below then the next call

View File

@ -37,6 +37,7 @@ OPTS = [
"running on a centralized node (or in single-host "
"deployments, e.g. devstack)")),
cfg.StrOpt('external_network_bridge', default='br-ex',
deprecated_for_removal=True,
help=_("Name of bridge used for external network "
"traffic.")),
cfg.IntOpt('metadata_port',

View File

@ -101,13 +101,11 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if not self.ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port)
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return
is_this_snat_host = ('binding:host_id' in self.ex_gw_port) and (
self.ex_gw_port['binding:host_id'] == self.host)
if not is_this_snat_host:
if not self._is_this_snat_host():
return
snat_interface = self._get_snat_int_device_name(sn_port['id'])

View File

@ -137,6 +137,17 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
# NOTE (Swami): Since we are deleting the namespace here we
# should be able to delete the floatingip agent gateway port
# for the provided external net, as we don't need it anymore.
if self.fip_ns.agent_gateway_port:
LOG.debug('Removed last floatingip, so requesting the '
'server to delete Floatingip Agent Gateway port:'
'%s', self.fip_ns.agent_gateway_port)
self.agent.plugin_rpc.delete_agent_gateway_port(
self.agent.context,
self.fip_ns.agent_gateway_port['network_id'])
self.fip_ns.delete()
self.fip_ns = None
@ -303,7 +314,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
if not self.ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port)
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return

View File

@ -26,12 +26,18 @@ class DvrRouterBase(router.RouterInfo):
self.agent = agent
self.host = host
def process(self, agent):
super(DvrRouterBase, self).process(agent)
# NOTE: Keep a copy of the interfaces around for when they are removed
self.snat_ports = self.get_snat_interfaces()
def get_snat_interfaces(self):
return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def get_snat_port_for_internal_port(self, int_port):
def get_snat_port_for_internal_port(self, int_port, snat_ports=None):
"""Return the SNAT port for the given internal interface port."""
snat_ports = self.get_snat_interfaces()
if snat_ports is None:
snat_ports = self.get_snat_interfaces()
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports

View File

@ -122,10 +122,23 @@ class AgentMixin(object):
'possibly deleted concurrently.'), router_id)
return
self._configure_ipv6_ra_on_ext_gw_port_if_necessary(ri, state)
self._update_metadata_proxy(ri, router_id, state)
self._update_radvd_daemon(ri, state)
self.state_change_notifier.queue_event((router_id, state))
def _configure_ipv6_ra_on_ext_gw_port_if_necessary(self, ri, state):
# If ipv6 is enabled on the platform, ipv6_gateway config flag is
# not set and external_network associated to the router does not
# include any IPv6 subnet, enable the gateway interface to accept
# Router Advts from upstream router for default route.
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id']
if state == 'master' and ex_gw_port_id and ri.use_ipv6:
gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port)
if not ri.is_v6_gateway_set(gateway_ips):
interface_name = ri.get_external_device_name(ex_gw_port_id)
ri.driver.configure_ipv6_ra(ri.ns_name, interface_name)
def _update_metadata_proxy(self, ri, router_id, state):
if state == 'master':
LOG.debug('Spawning metadata proxy for router %s', router_id)

View File

@ -187,7 +187,7 @@ class HaRouter(router.RouterInfo):
def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
default_gw_rts = []
gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
gateway_ips = self._get_external_gw_ips(ex_gw_port)
for gw_ip in gateway_ips:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
@ -197,9 +197,6 @@ class HaRouter(router.RouterInfo):
default_gw, gw_ip, interface_name))
instance.virtual_routes.gateway_routes = default_gw_rts
if enable_ra_on_gw:
self.driver.configure_ipv6_ra(self.ns_name, interface_name)
def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
extra_subnets = ex_gw_port.get('extra_subnets', [])
instance = self._get_keepalived_instance()
@ -362,10 +359,10 @@ class HaRouter(router.RouterInfo):
interface_name)
def delete(self, agent):
super(HaRouter, self).delete(agent)
self.destroy_state_change_monitor(self.process_monitor)
self.ha_network_removed()
self.disable_keepalived()
super(HaRouter, self).delete(agent)
def process(self, agent):
super(HaRouter, self).process(agent)

View File

@ -202,6 +202,9 @@ class RouterInfo(object):
def remove_floating_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)
def remove_external_gateway_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
@ -475,7 +478,6 @@ class RouterInfo(object):
def _get_external_gw_ips(self, ex_gw_port):
gateway_ips = []
enable_ra_on_gw = False
if 'subnets' in ex_gw_port:
gateway_ips = [subnet['gateway_ip']
for subnet in ex_gw_port['subnets']
@ -485,11 +487,7 @@ class RouterInfo(object):
if self.agent_conf.ipv6_gateway:
# ipv6_gateway configured, use address for default route.
gateway_ips.append(self.agent_conf.ipv6_gateway)
else:
# ipv6_gateway is also not configured.
# Use RA for default route.
enable_ra_on_gw = True
return gateway_ips, enable_ra_on_gw
return gateway_ips
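With the RA decision moved out of ``_get_external_gw_ips``, callers check for an IPv6 gateway themselves via ``is_v6_gateway_set``. A hedged, standalone sketch of what that check can look like (assumption: any IPv6 address in the gateway list counts):

import netaddr

def is_v6_gateway_set(gateway_ips):
    # RA on the gateway port is only needed when no IPv6 gateway exists.
    return any(netaddr.IPAddress(ip).version == 6 for ip in gateway_ips)

print(is_v6_gateway_set(['192.0.2.1']))    # False -> fall back to RA
print(is_v6_gateway_set(['2001:db8::1']))  # True  -> use the gateway route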
def _external_gateway_added(self, ex_gw_port, interface_name,
ns_name, preserve_ips):
@ -501,7 +499,12 @@ class RouterInfo(object):
# will be added to the interface.
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
gateway_ips = self._get_external_gw_ips(ex_gw_port)
enable_ra_on_gw = False
if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
# There is no IPv6 gw_ip, use RouterAdvt for default route.
enable_ra_on_gw = True
self.driver.init_router_port(
interface_name,
ip_cidrs,
@ -538,6 +541,12 @@ class RouterInfo(object):
def external_gateway_removed(self, ex_gw_port, interface_name):
LOG.debug("External gateway removed: port(%s), interface(%s)",
ex_gw_port, interface_name)
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
for ip_addr in ex_gw_port['fixed_ips']:
self.remove_external_gateway_ip(device,
common_utils.ip_to_cidr(
ip_addr['ip_address'],
ip_addr['prefixlen']))
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,

View File

@ -50,7 +50,7 @@ class AsyncProcess(object):
>>> time.sleep(5)
>>> proc.stop()
>>> for line in proc.iter_stdout():
... print line
... print(line)
"""
def __init__(self, cmd, run_as_root=False, respawn_interval=None,

View File

@ -1030,10 +1030,18 @@ class DeviceManager(object):
# the following loop...
port = None
# Look for an existing DHCP for this network.
# Look for an existing DHCP port for this network.
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
# If using gateway IPs on this port, we can skip the
# following code, whose purpose is just to review and
# update the Neutron-allocated IP addresses for the
# port.
if self.driver.use_gateway_ips:
return port
# Otherwise break out, as we now have the DHCP port
# whose subnets and addresses we need to review.
break
else:
return None
@ -1090,13 +1098,21 @@ class DeviceManager(object):
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Creating new one.',
{'device_id': device_id, 'network_id': network.id})
# Make a list of the subnets that need a unique IP address for
# this DHCP port.
if self.driver.use_gateway_ips:
unique_ip_subnets = []
else:
unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in dhcp_subnets])
fixed_ips=unique_ip_subnets)
return self.plugin.create_dhcp_port({'port': port_dict})
def setup_dhcp_port(self, network):
@ -1168,6 +1184,17 @@ class DeviceManager(object):
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if self.driver.use_gateway_ips:
# For each DHCP-enabled subnet, add that subnet's gateway
# IP address to the Linux device for the DHCP port.
for subnet in network.subnets:
if not subnet.enable_dhcp:
continue
gateway = subnet.gateway_ip
if gateway:
net = netaddr.IPNetwork(subnet.cidr)
ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces):
ip_cidrs.append(METADATA_DEFAULT_CIDR)
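A hedged, standalone sketch of the gateway-IP handling just added: for each DHCP-enabled subnet, the gateway address is attached to the DHCP device using the subnet's prefix length:

import netaddr

def gateway_cidrs(subnets):
    cidrs = []
    for subnet in subnets:
        if not subnet['enable_dhcp'] or not subnet['gateway_ip']:
            continue
        prefixlen = netaddr.IPNetwork(subnet['cidr']).prefixlen
        cidrs.append('%s/%s' % (subnet['gateway_ip'], prefixlen))
    return cidrs

print(gateway_cidrs([{'enable_dhcp': True, 'gateway_ip': '10.0.0.1',
                      'cidr': '10.0.0.0/24'}]))  # ['10.0.0.1/24']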

View File

@ -1,290 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Implement ebtables rules using linux utilities."""
import re
from retrying import retry
from oslo_config import cfg
from oslo_log import log as logging
from neutron.common import utils
ebtables_opts = [
cfg.StrOpt('ebtables_path',
default='$state_path/ebtables-',
help=_('Location of temporary ebtables table files.')),
]
CONF = cfg.CONF
CONF.register_opts(ebtables_opts)
LOG = logging.getLogger(__name__)
# Collection of regexes to parse ebtables output
_RE_FIND_BRIDGE_TABLE_NAME = re.compile(r'^Bridge table:[\s]*([a-z]+)$')
# get chain name, number of entries and policy name.
_RE_FIND_BRIDGE_CHAIN_INFO = re.compile(
r'^Bridge chain:[\s]*(.*),[\s]*entries:[\s]*[0-9]+,[\s]*'
r'policy:[\s]*([A-Z]+)$')
_RE_FIND_BRIDGE_RULE_COUNTERS = re.compile(
r',[\s]*pcnt[\s]*=[\s]*([0-9]+)[\s]*--[\s]*bcnt[\s]*=[\s]*([0-9]+)$')
_RE_FIND_COMMIT_STATEMENT = re.compile(r'^COMMIT$')
_RE_FIND_COMMENTS_AND_BLANKS = re.compile(r'^#|^$')
_RE_FIND_APPEND_RULE = re.compile(r'-A (\S+) ')
# Regexes to parse ebtables rule file input
_RE_RULES_FIND_TABLE_NAME = re.compile(r'^\*([a-z]+)$')
_RE_RULES_FIND_CHAIN_NAME = re.compile(r'^:(.*)[\s]+([A-Z]+)$')
_RE_RULES_FIND_RULE_LINE = re.compile(r'^\[([0-9]+):([0-9]+)\]')
def _process_ebtables_output(lines):
"""Process raw output of ebtables rule listing file.
Empty lines and comments removed, ebtables listing output converted
into ebtables rules.
For example, if the raw ebtables list lines (input to this function) are:
Bridge table: filter
Bridge chain: INPUT, entries: 0, policy: ACCEPT
Bridge chain: FORWARD, entries: 0, policy: ACCEPT
Bridge chain: OUTPUT, entries: 0, policy: ACCEPT
The output then will be:
*filter
:INPUT ACCEPT
:FORWARD ACCEPT
:OUTPUT ACCEPT
COMMIT
Key point: ebtables rules listing output is not the same as the rules
format for setting new rules.
"""
table = None
chain = ''
chains = []
rules = []
for line in lines:
if _RE_FIND_COMMENTS_AND_BLANKS.search(line):
continue
match = _RE_FIND_BRIDGE_RULE_COUNTERS.search(line)
if table and match:
rules.append('[%s:%s] -A %s %s' % (match.group(1),
match.group(2),
chain,
line[:match.start()].strip()))
match = _RE_FIND_BRIDGE_CHAIN_INFO.search(line)
if match:
chains.append(':%s %s' % (match.group(1), match.group(2)))
chain = match.group(1)
continue
match = _RE_FIND_BRIDGE_TABLE_NAME.search(line)
if match:
table = '*%s' % match.group(1)
continue
return [table] + chains + rules + ['COMMIT']
def _match_rule_line(table, line):
match = _RE_RULES_FIND_RULE_LINE.search(line)
if table and match:
args = line[match.end():].split()
res = [(table, args)]
if int(match.group(1)) > 0 and int(match.group(2)) > 0:
p = _RE_FIND_APPEND_RULE
rule = p.sub(r'-C \1 %s %s ', line[match.end() + 1:])
args = (rule % (match.group(1), match.group(2))).split()
res.append((table, args))
return table, res
else:
return table, None
def _match_chain_name(table, tables, line):
match = _RE_RULES_FIND_CHAIN_NAME.search(line)
if table and match:
if match.group(1) not in tables[table]:
args = ['-N', match.group(1), '-P', match.group(2)]
else:
args = ['-P', match.group(1), match.group(2)]
return table, (table, args)
else:
return table, None
def _match_table_name(table, line):
match = _RE_RULES_FIND_TABLE_NAME.search(line)
if match:
# Initialize with current kernel table if we just start out
table = match.group(1)
return table, (table, ['--atomic-init'])
else:
return table, None
def _match_commit_statement(table, line):
match = _RE_FIND_COMMIT_STATEMENT.search(line)
if table and match:
# Conclude by issuing the commit command
return (table, ['--atomic-commit'])
else:
return None
def _process_ebtables_input(lines):
"""Import text ebtables rules. Similar to iptables-restore.
Was based on:
http://sourceforge.net/p/ebtables/code/ci/
3730ceb7c0a81781679321bfbf9eaa39cfcfb04e/tree/userspace/ebtables2/
ebtables-save?format=raw
The function prepares and returns a list of tuples, each tuple consisting
of a table name and ebtables arguments. The caller can then repeatedly call
ebtables on that table with those arguments to get the rules applied.
For example, this input:
*filter
:INPUT ACCEPT
:FORWARD ACCEPT
:OUTPUT ACCEPT
:neutron-nwfilter-spoofing-fallb ACCEPT
:neutron-nwfilter-OUTPUT ACCEPT
:neutron-nwfilter-INPUT ACCEPT
:neutron-nwfilter-FORWARD ACCEPT
[0:0] -A INPUT -j neutron-nwfilter-INPUT
[0:0] -A OUTPUT -j neutron-nwfilter-OUTPUT
[0:0] -A FORWARD -j neutron-nwfilter-FORWARD
[0:0] -A neutron-nwfilter-spoofing-fallb -j DROP
COMMIT
... produces this output:
('filter', ['--atomic-init'])
('filter', ['-P', 'INPUT', 'ACCEPT'])
('filter', ['-P', 'FORWARD', 'ACCEPT'])
('filter', ['-P', 'OUTPUT', 'ACCEPT'])
('filter', ['-N', 'neutron-nwfilter-spoofing-fallb', '-P', 'ACCEPT'])
('filter', ['-N', 'neutron-nwfilter-OUTPUT', '-P', 'ACCEPT'])
('filter', ['-N', 'neutron-nwfilter-INPUT', '-P', 'ACCEPT'])
('filter', ['-N', 'neutron-nwfilter-FORWARD', '-P', 'ACCEPT'])
('filter', ['-A', 'INPUT', '-j', 'neutron-nwfilter-INPUT'])
('filter', ['-A', 'OUTPUT', '-j', 'neutron-nwfilter-OUTPUT'])
('filter', ['-A', 'FORWARD', '-j', 'neutron-nwfilter-FORWARD'])
('filter', ['-A', 'neutron-nwfilter-spoofing-fallb', '-j', 'DROP'])
('filter', ['--atomic-commit'])
"""
tables = {'filter': ['INPUT', 'FORWARD', 'OUTPUT'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING'],
'broute': ['BROUTING']}
table = None
ebtables_args = list()
for line in lines.splitlines():
if _RE_FIND_COMMENTS_AND_BLANKS.search(line):
continue
table, res = _match_rule_line(table, line)
if res:
ebtables_args.extend(res)
continue
table, res = _match_chain_name(table, tables, line)
if res:
ebtables_args.append(res)
continue
table, res = _match_table_name(table, line)
if res:
ebtables_args.append(res)
continue
res = _match_commit_statement(table, line)
if res:
ebtables_args.append(res)
continue
return ebtables_args
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000,
stop_max_delay=10000)
def _cmd_retry(func, *args, **kwargs):
return func(*args, **kwargs)
def run_ebtables(namespace, execute, table, args):
"""Run ebtables utility, with retry if necessary.
Provide table name and list of additional arguments to ebtables.
"""
cmd = ['ebtables', '-t', table]
if CONF.ebtables_path:
f = '%s%s' % (CONF.ebtables_path, table)
cmd += ['--atomic-file', f]
cmd += args
if namespace:
cmd = ['ip', 'netns', 'exec', namespace] + cmd
# TODO(jbrendel): The root helper is used for every ebtables command,
# but as we use an atomic file we only need root for
# init and commit commands.
# But the file generated by the ebtables init command is
# only readable and writable by root.
#
# We retry the execution of ebtables in case of failure. Known issue:
# See bug: https://bugs.launchpad.net/nova/+bug/1316621
# See patch: https://review.openstack.org/#/c/140514/3
return _cmd_retry(execute, cmd, **{"run_as_root": True})
def run_ebtables_multiple(namespace, execute, arg_list):
"""Run ebtables utility multiple times.
Similar to run_ebtables(), but runs it once for every element in arg_list.
Each arg_list element is a tuple containing the table name and a list
of ebtables arguments.
"""
for table, args in arg_list:
run_ebtables(namespace, execute, table, args)
@utils.synchronized('ebtables', external=True)
def ebtables_save(execute, tables_names, namespace=None):
"""Generate text output of the ebtables rules.
Based on:
http://sourceforge.net/p/ebtables/code/ci/master/tree/userspace/ebtables2/
ebtables-save?format=raw
"""
raw_outputs = (run_ebtables(namespace, execute,
t, ['-L', '--Lc']).splitlines() for t in tables_names)
parsed_outputs = (_process_ebtables_output(lines) for lines in raw_outputs)
return '\n'.join(l for lines in parsed_outputs for l in lines)
@utils.synchronized('ebtables', external=True)
def ebtables_restore(lines, execute, namespace=None):
"""Import text ebtables rules and apply."""
ebtables_args = _process_ebtables_input(lines)
run_ebtables_multiple(namespace, execute, ebtables_args)
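For illustration, a minimal round trip through these two helpers might look like the sketch below; the execute wrapper is hypothetical, and real callers pass in the agent's own callable.
# Round-trip sketch (hypothetical wiring):
from neutron.agent.linux import utils as linux_utils
def execute(cmd, run_as_root=False):
    # Thin wrapper so the ebtables helpers can shell out.
    return linux_utils.execute(cmd, run_as_root=run_as_root)
# Dump the current 'filter' table rules, then re-apply them verbatim.
text = ebtables_save(execute, ['filter'])
ebtables_restore(text, execute)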

View File

@ -1,253 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Implement a manager for ebtables rules.
NOTE: The ebtables manager contains a lot of duplicated or very similar code
from the iptables manager. An option would have been to refactor the
iptables manager so that ebtables and iptables manager can share common
code. However, the iptables manager was considered too brittle and
in need of a larger re-work or full replacement in the future.
Therefore, it was decided not to do any refactoring for now and to accept
the code duplication.
"""
import inspect
import os
from oslo_log import log as logging
from neutron.i18n import _, _LW
LOG = logging.getLogger(__name__)
MAX_CHAIN_LEN_EBTABLES = 31
# NOTE(jbrendel): ebtables supports chain names of up to 31 characters, and
# we add up to 12 characters to prefix_chain which is used
# as a prefix, so we limit it to 19 characters.
POSTROUTING_STR = '-POSTROUTING'
MAX_LEN_PREFIX_CHAIN = MAX_CHAIN_LEN_EBTABLES - len(POSTROUTING_STR)
# When stripping or calculating string lengths, sometimes a '-' which separates
# name components needs to be considered.
DASH_STR_LEN = 1
def binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])
def _get_prefix_chain(prefix_chain=None):
"""Determine the prefix chain."""
if prefix_chain:
return prefix_chain[:MAX_LEN_PREFIX_CHAIN]
else:
return binary_name()[:MAX_LEN_PREFIX_CHAIN]
def get_chain_name(chain_name, wrap=True, prefix_chain=None):
"""Determine the chain name."""
if wrap:
# Compute the allowed chain name length as a function of the prefix
# name length.
chain_len = (MAX_CHAIN_LEN_EBTABLES -
(len(_get_prefix_chain(prefix_chain)) + DASH_STR_LEN))
return chain_name[:chain_len]
else:
return chain_name[:MAX_CHAIN_LEN_EBTABLES]
class EbtablesRule(object):
"""An ebtables rule.
You shouldn't need to use this class directly; it's only used by
EbtablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
prefix_chain=None):
self.prefix_chain = _get_prefix_chain(prefix_chain)
self.chain = get_chain_name(chain, wrap, prefix_chain)
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.prefix_chain, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class EbtablesTable(object):
"""An ebtables table."""
def __init__(self, prefix_chain=None):
self.rules = []
self.rules_to_remove = []
self.chains = set()
self.unwrapped_chains = set()
self.chains_to_remove = set()
self.prefix_chain = _get_prefix_chain(prefix_chain)
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Neutron can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <prefix chain>-<chain name>,
so if neutron-server creates a chain named 'OUTPUT', it'll actually
end up named 'neutron-server-OUTPUT'.
"""
name = get_chain_name(name, wrap, self.prefix_chain)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
self.remove_chain(name, wrap, log_not_found=False)
def remove_chain(self, name, wrap=True, log_not_found=True):
"""Remove named chain.
This removal "cascades". All rules in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found then this is merely logged.
"""
name = get_chain_name(name, wrap, self.prefix_chain)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
if log_not_found:
LOG.warn(_LW('Attempted to remove chain %s '
'which does not exist'), name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.chains_to_remove.add(name)
# first, add rules to remove that have a matching chain name
self.rules_to_remove += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.rules_to_remove += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.prefix_chain, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to ebtables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap, self.prefix_chain)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
self.rules.append(EbtablesRule(chain, rule, wrap, top,
self.prefix_chain))
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
However, if the rule jumps to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap, self.prefix_chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
try:
self.rules.remove(EbtablesRule(chain, rule, wrap, top,
self.prefix_chain))
if not wrap:
self.rules_to_remove.append(
EbtablesRule(chain, rule, wrap, top,
self.prefix_chain))
except ValueError:
LOG.warn(_LW('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def _wrap_target_chain(self, s):
if s.startswith('$'):
return ('%s-%s' % (self.prefix_chain, s[1:]))
return s
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap, self.prefix_chain)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)

View File

@ -64,6 +64,46 @@ class LinuxInterfaceDriver(object):
'current_mtu': self.conf.network_device_mtu})
raise SystemExit(1)
@property
def use_gateway_ips(self):
"""Whether to use gateway IPs instead of unique IP allocations.
In each place where the DHCP agent runs, and for each subnet for
which DHCP is handing out IP addresses, the DHCP port needs -
at the Linux level - to have an IP address within that subnet.
Generally this needs to be a unique Neutron-allocated IP
address, because the subnet's underlying L2 domain is bridged
across multiple compute hosts and network nodes, and for HA
there may be multiple DHCP agents running on that same bridged
L2 domain.
However, if the DHCP ports - on multiple compute/network nodes
but for the same network - are _not_ bridged to each other,
they do not each need to have a unique IP address. Instead
they can all share the same address from the relevant subnet.
This works, without creating any ambiguity, because those
ports are not all present on the same L2 domain, and because
no data within the network is ever sent to that address.
(DHCP requests are broadcast, and it is the network's job to
ensure that such a broadcast will reach at least one of the
available DHCP servers. DHCP responses will be sent _from_
the DHCP port address.)
Specifically, for networking backends where it makes sense,
the DHCP agent allows all DHCP ports to use the subnet's
gateway IP address, and thereby to completely avoid any unique
IP address allocation. This behaviour is selected by running
the DHCP agent with a configured interface driver whose
'use_gateway_ips' property is True.
When an operator deploys Neutron with an interface driver that
makes use_gateway_ips True, they should also ensure that a
gateway IP address is defined for each DHCP-enabled subnet,
and that the gateway IP address doesn't change during the
subnet's lifetime.
"""
return False
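A backend whose DHCP ports are never bridged to one another can opt in by overriding this property; a minimal hypothetical driver sketch:
class SharedGatewayInterfaceDriver(LinuxInterfaceDriver):
    # Hypothetical driver: its DHCP ports are not bridged together, so
    # they can all safely reuse the subnet's gateway IP.
    @property
    def use_gateway_ips(self):
        return True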
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=[], gateway_ips=None,
clean_connections=False):
@ -143,14 +183,16 @@ class LinuxInterfaceDriver(object):
device = ip_lib.IPDevice(device_name, namespace=namespace)
# Manage on-link routes (routes without an associated address)
new_onlink_routes = set(s['cidr'] for s in extra_subnets or [])
existing_onlink_routes = set(
device.route.list_onlink_routes(n_const.IP_VERSION_4) +
device.route.list_onlink_routes(n_const.IP_VERSION_6))
for route in new_onlink_routes - existing_onlink_routes:
new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
v4_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_4)
v6_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_6)
existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
for route in new_onlink_cidrs - existing_onlink_cidrs:
LOG.debug("adding onlink route(%s)", route)
device.route.add_onlink_route(route)
for route in existing_onlink_routes - new_onlink_routes:
for route in existing_onlink_cidrs - new_onlink_cidrs:
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)

View File

@ -20,6 +20,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import re
import six
from neutron.agent.common import utils
from neutron.common import constants
@ -231,9 +232,10 @@ class IPDevice(SubProcessBase):
This terminates any active connections through an IP.
cidr: the IP address for which state should be removed. This can be
passed as a string with or without /NN. A netaddr.IPAddress or
netaddr.Network representing the IP address can also be passed.
:param cidr: the IP address for which state should be removed.
This can be passed as a string with or without /NN.
A netaddr.IPAddress or netaddr.Network representing the IP address
can also be passed.
"""
self.addr.delete(cidr)
@ -287,6 +289,67 @@ class IPRule(SubProcessBase):
class IpRuleCommand(IpCommandBase):
COMMAND = 'rule'
@staticmethod
def _make_canonical(ip_version, settings):
"""Converts settings to a canonical represention to compare easily"""
def canonicalize_fwmark_string(fwmark_mask):
"""Reformats fwmark/mask in to a canonical form
Examples, these are all equivalent:
"0x1"
0x1
"0x1/0xfffffffff"
(0x1, 0xfffffffff)
:param fwmark_mask: The firewall and mask (default 0xffffffff)
:type fwmark_mask: A string with / as delimiter, an iterable, or a
single value.
"""
# Turn the value we were passed in to an iterable: fwmark[, mask]
if isinstance(fwmark_mask, six.string_types):
# A / separates the optional mask in a string
iterable = fwmark_mask.split('/')
else:
try:
iterable = iter(fwmark_mask)
except TypeError:
# At this point, it must be a single integer
iterable = [fwmark_mask]
def to_i(s):
if isinstance(s, six.string_types):
# Passing 0 as "base" arg to "int" causes it to determine
# the base automatically.
return int(s, 0)
# s isn't a string, can't specify base argument
return int(s)
integers = [to_i(x) for x in iterable]
# The default mask is all ones; the mask is 32 bits.
if len(integers) == 1:
integers.append(0xffffffff)
# We now have two integers in a list. Convert to canonical string.
return '/'.join(map(hex, integers))
def canonicalize(item):
k, v = item
# ip rule shows these as 'any'
if k == 'from' and v == 'all':
return k, constants.IP_ANY[ip_version]
# lookup and table are interchangeable. Use table every time.
if k == 'lookup':
return 'table', v
if k == 'fwmark':
return k, canonicalize_fwmark_string(v)
return k, v
if 'type' not in settings:
settings['type'] = 'unicast'
return {k: str(v) for k, v in map(canonicalize, settings.items())}
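For example, these two forms collapse to the same canonical dict (an illustrative sketch, not a test from the tree):
# 'all' becomes the any-address CIDR, 'lookup' becomes 'table', the
# fwmark gains the default mask, and a default 'type' is added:
expected = {'from': '0.0.0.0/0', 'table': '10',
            'fwmark': '0x1/0xffffffff', 'type': 'unicast'}
assert IpRuleCommand._make_canonical(
    4, {'from': 'all', 'lookup': 10, 'fwmark': 0x1}) == expected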
def _parse_line(self, ip_version, line):
# Typical rules from 'ip rule show':
# 4030201: from 1.2.3.4/24 lookup 10203040
@ -296,23 +359,21 @@ class IpRuleCommand(IpCommandBase):
if not parts:
return {}
# Format of line is: "priority: <key> <value> ..."
# Format of line is: "priority: <key> <value> ... [<type>]"
settings = {k: v for k, v in zip(parts[1::2], parts[2::2])}
settings['priority'] = parts[0][:-1]
if len(parts) % 2 == 0:
# When line has an even number of columns, last one is the type.
settings['type'] = parts[-1]
# Canonicalize some arguments
if settings.get('from') == "all":
settings['from'] = constants.IP_ANY[ip_version]
if 'lookup' in settings:
settings['table'] = settings.pop('lookup')
return self._make_canonical(ip_version, settings)
return settings
def list_rules(self, ip_version):
lines = self._as_root([ip_version], ['show']).splitlines()
return [self._parse_line(ip_version, line) for line in lines]
def _exists(self, ip_version, **kwargs):
kwargs_strings = {k: str(v) for k, v in kwargs.items()}
lines = self._as_root([ip_version], ['show']).splitlines()
return kwargs_strings in (self._parse_line(ip_version, line)
for line in lines)
return kwargs in self.list_rules(ip_version)
def _make__flat_args_tuple(self, *args, **kwargs):
for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]):
@ -323,9 +384,10 @@ class IpRuleCommand(IpCommandBase):
ip_version = get_ip_version(ip)
kwargs.update({'from': ip})
canonical_kwargs = self._make_canonical(ip_version, kwargs)
if not self._exists(ip_version, **kwargs):
args_tuple = self._make__flat_args_tuple('add', **kwargs)
if not self._exists(ip_version, **canonical_kwargs):
args_tuple = self._make__flat_args_tuple('add', **canonical_kwargs)
self._as_root([ip_version], args_tuple)
def delete(self, ip, **kwargs):
@ -333,7 +395,9 @@ class IpRuleCommand(IpCommandBase):
# TODO(Carl) ip ignored in delete, okay in general?
args_tuple = self._make__flat_args_tuple('del', **kwargs)
canonical_kwargs = self._make_canonical(ip_version, kwargs)
args_tuple = self._make__flat_args_tuple('del', **canonical_kwargs)
self._as_root([ip_version], args_tuple)
@ -534,35 +598,47 @@ class IpRouteCommand(IpDeviceCommandBase):
raise exceptions.DeviceNotFoundError(
device_name=self.name)
def list_onlink_routes(self, ip_version):
def iterate_routes():
args = ['list']
args += self._dev_args()
args += ['scope', 'link']
args += self._table_args()
output = self._run([ip_version], tuple(args))
for line in output.split('\n'):
line = line.strip()
if line and not line.count('src'):
yield line
def _parse_routes(self, ip_version, output, **kwargs):
for line in output.splitlines():
parts = line.split()
return [x for x in iterate_routes()]
# Format of line is: "<cidr>|default [<key> <value>] ..."
route = {k: v for k, v in zip(parts[1::2], parts[2::2])}
route['cidr'] = parts[0]
# Avoids having to explicitly pass around the IP version
if route['cidr'] == 'default':
route['cidr'] = constants.IP_ANY[ip_version]
# ip route drops things like scope and dev from the output if it
# was specified as a filter. This allows us to add them back.
if self.name:
route['dev'] = self.name
if self._table:
route['table'] = self._table
# Callers add any filters they use as kwargs
route.update(kwargs)
yield route
def list_routes(self, ip_version, **kwargs):
args = ['list']
args += self._dev_args()
args += self._table_args()
for k, v in kwargs.items():
args += [k, v]
output = self._run([ip_version], tuple(args))
return [r for r in self._parse_routes(ip_version, output, **kwargs)]
def list_onlink_routes(self, ip_version):
routes = self.list_routes(ip_version, scope='link')
return [r for r in routes if 'src' not in r]
def add_onlink_route(self, cidr):
ip_version = get_ip_version(cidr)
args = ['replace', cidr]
args += self._dev_args()
args += ['scope', 'link']
args += self._table_args()
self._as_root([ip_version], tuple(args))
self.add_route(cidr, scope='link')
def delete_onlink_route(self, cidr):
ip_version = get_ip_version(cidr)
args = ['del', cidr]
args += self._dev_args()
args += ['scope', 'link']
args += self._table_args()
self._as_root([ip_version], tuple(args))
self.delete_route(cidr, scope='link')
def get_gateway(self, scope=None, filters=None, ip_version=None):
options = [ip_version] if ip_version else []
@ -644,18 +720,26 @@ class IpRouteCommand(IpDeviceCommandBase):
'proto', 'kernel',
'dev', device))
def add_route(self, cidr, ip, table=None):
def add_route(self, cidr, via=None, table=None, **kwargs):
ip_version = get_ip_version(cidr)
args = ['replace', cidr, 'via', ip]
args = ['replace', cidr]
if via:
args += ['via', via]
args += self._dev_args()
args += self._table_args(table)
for k, v in kwargs.items():
args += [k, v]
self._as_root([ip_version], tuple(args))
def delete_route(self, cidr, ip, table=None):
def delete_route(self, cidr, via=None, table=None, **kwargs):
ip_version = get_ip_version(cidr)
args = ['del', cidr, 'via', ip]
args = ['del', cidr]
if via:
args += ['via', via]
args += self._dev_args()
args += self._table_args(table)
for k, v in kwargs.items():
args += [k, v]
self._as_root([ip_version], tuple(args))
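With the generalized signatures, an on-link route is just an ordinary route without a nexthop; a hypothetical usage sketch (device name and addresses made up):
device = IPDevice('qr-1234', namespace='qrouter-abcd')
device.route.add_route('10.0.1.0/24', scope='link')  # on-link, no nexthop
device.route.add_route('0.0.0.0/0', via='10.0.1.1')  # default via gateway
device.route.delete_route('10.0.1.0/24', scope='link')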

View File

@ -64,7 +64,7 @@ class IPMonitor(async_process.AsyncProcess):
m.start()
for iterable in m:
event = IPMonitorEvent.from_text(iterable)
print event, event.added, event.interface, event.cidr
print(event, event.added, event.interface, event.cidr)
"""
def __init__(self,

View File

@ -559,13 +559,13 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
def _convert_sgr_to_iptables_rules(self, security_group_rules):
iptables_rules = []
self._drop_invalid_packets(iptables_rules)
self._allow_established(iptables_rules)
for rule in security_group_rules:
args = self._convert_sg_rule_to_iptables_args(rule)
if args:
iptables_rules += [' '.join(args)]
self._drop_invalid_packets(iptables_rules)
iptables_rules += [comment_rule('-j $sg-fallback',
comment=ic.UNMATCHED)]
return iptables_rules

View File

@ -79,6 +79,17 @@ class InvalidAuthenticationTypeException(exceptions.NeutronException):
super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
class VIPDuplicateAddressException(exceptions.NeutronException):
message = _('Attempted to add duplicate VIP address, '
'existing vips are: %(existing_vips)s, '
'duplicate vip is: %(duplicate_vip)s')
def __init__(self, **kwargs):
kwargs['existing_vips'] = ', '.join(str(vip) for vip in
kwargs['existing_vips'])
super(VIPDuplicateAddressException, self).__init__(**kwargs)
class KeepalivedVipAddress(object):
"""A virtual address entry of a keepalived configuration."""
@ -87,6 +98,15 @@ class KeepalivedVipAddress(object):
self.interface_name = interface_name
self.scope = scope
def __eq__(self, other):
return (isinstance(other, KeepalivedVipAddress) and
self.ip_address == other.ip_address)
def __str__(self):
return '[%s, %s, %s]' % (self.ip_address,
self.interface_name,
self.scope)
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
if self.scope:
@ -183,7 +203,11 @@ class KeepalivedInstance(object):
self.authentication = (auth_type, password)
def add_vip(self, ip_cidr, interface_name, scope):
self.vips.append(KeepalivedVipAddress(ip_cidr, interface_name, scope))
vip = KeepalivedVipAddress(ip_cidr, interface_name, scope)
if vip in self.vips:
raise VIPDuplicateAddressException(existing_vips=self.vips,
duplicate_vip=vip)
self.vips.append(vip)
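Note that the equality check compares only the IP address, so a second add with the same address fails even on a different interface; for instance:
# Equality is on ip_address alone; interface and scope do not matter:
assert (KeepalivedVipAddress('192.168.1.5/24', 'eth1', None) ==
        KeepalivedVipAddress('192.168.1.5/24', 'eth2', 'link'))
# Consequently a second add_vip() with '192.168.1.5/24' raises
# VIPDuplicateAddressException regardless of interface_name.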
def remove_vips_vroutes_by_interface(self, interface_name):
self.vips = [vip for vip in self.vips

View File

@ -83,6 +83,7 @@ def create_process(cmd, run_as_root=False, addl_env=None):
cmd = list(map(str, addl_env_args(addl_env) + cmd))
if run_as_root:
cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd
LOG.debug("Running command: %s", cmd)
obj = utils.subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
@ -98,6 +99,7 @@ def execute_rootwrap_daemon(cmd, process_input, addl_env):
# In practice, no neutron code should be trying to execute something that
# would throw those errors, and if it does it should be fixed as opposed to
# just logging the execution error.
LOG.debug("Running command (rootwrap daemon): %s", cmd)
client = RootwrapDaemonHelper.get_client()
return client.execute(cmd, process_input)
@ -132,20 +134,24 @@ def execute(cmd, process_input=None, addl_env=None,
except UnicodeError:
pass
m = _("\nCommand: {cmd}\nExit code: {code}\n").format(
cmd=cmd,
code=returncode)
command_str = {
'cmd': cmd,
'code': returncode
}
m = _("\nCommand: %(cmd)s"
"\nExit code: %(code)d\n") % command_str
extra_ok_codes = extra_ok_codes or []
if returncode and returncode in extra_ok_codes:
returncode = None
if returncode and log_fail_as_error:
m += ("Stdin: {stdin}\n"
"Stdout: {stdout}\nStderr: {stderr}").format(
stdin=process_input or '',
stdout=_stdout,
stderr=_stderr)
command_str['stdin'] = process_input or ''
command_str['stdout'] = _stdout
command_str['stderr'] = _stderr
m += _("Stdin: %(stdin)s\n"
"Stdout: %(stdout)s\n"
"Stderr: %(stderr)s") % command_str
LOG.error(m)
else:
LOG.debug(m)

View File

@ -239,7 +239,7 @@ class API(object):
:type records: list of record ids (names/uuids)
:param columns: Limit results to only columns, None means all columns
:type columns: list of column names or None
:param if_exists: Do not fail if the bridge does not exist
:param if_exists: Do not fail if the record does not exist
:type if_exists: bool
:returns: :class:`Command` with [{'column', value}, ...] result
"""
@ -313,7 +313,7 @@ class API(object):
:type bridge: string
:param port: The name of the port
:type port: string
:param may_exist: Do not fail if bridge already exists
:param may_exist: Do not fail if the port already exists
:type may_exist: bool
:returns: :class:`Command` with no result
"""
@ -326,7 +326,7 @@ class API(object):
:type port: string
:param bridge: Only delete port if it is attached to this bridge
:type bridge: string
:param if_exists: Do not fail if the bridge does not exist
:param if_exists: Do not fail if the port does not exist
:type if_exists: bool
:returns: :class:`Command` with no result
"""

View File

@ -45,7 +45,8 @@ class L3RpcCallback(object):
# since it was unused. The RPC version was not changed
# 1.5 Added update_ha_routers_states
# 1.6 Added process_prefix_update to support IPv6 Prefix Delegation
target = oslo_messaging.Target(version='1.6')
# 1.7 Added method delete_agent_gateway_port for DVR Routers
target = oslo_messaging.Target(version='1.7')
@property
def plugin(self):
@ -281,3 +282,11 @@ class L3RpcCallback(object):
subnet_id,
{'subnet': {'cidr': prefix}}))
return updated_subnets
def delete_agent_gateway_port(self, context, **kwargs):
"""Delete Floatingip agent gateway port."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
admin_ctx = neutron_context.get_admin_context()
self.l3plugin.delete_floatingip_agent_gateway_port(
admin_ctx, host, network_id)
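On the agent side, a call against the new 1.7 API would look roughly like this (a hypothetical sketch; self.client is assumed to be the agent's oslo.messaging RPC client):
cctxt = self.client.prepare(version='1.7')
cctxt.call(context, 'delete_agent_gateway_port',
           network_id=network_id, host=self.conf.host)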

View File

@ -19,6 +19,7 @@ from oslo_log import log as logging
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.i18n import _LW
from neutron import manager
@ -80,7 +81,7 @@ class SecurityGroupServerRpcCallback(object):
return dict(
(port['id'], port)
for port in self.plugin.get_ports_from_devices(context, devices)
if port and not port['device_owner'].startswith('network:')
if port and not utils.is_port_trusted(port)
)
def security_group_rules_for_devices(self, context, **kwargs):

View File

@ -731,7 +731,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_OWNER_MAX_LEN},
'default': '',
'default': '', 'enforce_policy': True,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import netaddr
@ -416,13 +417,15 @@ class Controller(object):
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
deltas = {}
bulk = True
else:
items = [body]
bulk = False
# Ensure policy engine is initialized
policy.init()
# Store requested resource amounts grouping them by tenant
# This won't work with multiple resources. However, because of the
# current structure of this controller, there will hardly ever be more
# than one resource for which reservations are being made.
request_deltas = collections.defaultdict(int)
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
@ -433,30 +436,31 @@ class Controller(object):
if 'tenant_id' not in item[self._resource]:
# no tenant_id - no quota check
continue
try:
tenant_id = item[self._resource]['tenant_id']
count = quota.QUOTAS.count(request.context, self._resource,
self._plugin, tenant_id)
if bulk:
delta = deltas.get(tenant_id, 0) + 1
deltas[tenant_id] = delta
else:
delta = 1
kwargs = {self._resource: count + delta}
except exceptions.QuotaResourceUnknown as e:
tenant_id = item[self._resource]['tenant_id']
request_deltas[tenant_id] += 1
# Quota enforcement
reservations = []
try:
for (tenant, delta) in request_deltas.items():
reservation = quota.QUOTAS.make_reservation(
request.context,
tenant,
{self._resource: delta},
self._plugin)
reservations.append(reservation)
except exceptions.QuotaResourceUnknown as e:
# We don't want to quota this resource
LOG.debug(e)
else:
quota.QUOTAS.limit_check(request.context,
item[self._resource]['tenant_id'],
**kwargs)
def notify(create_result):
# Ensure usage trackers for all resources affected by this API
# operation are marked as dirty
# TODO(salv-orlando): This operation will happen in a single
# transaction with reservation commit once that is implemented
resource_registry.set_resources_dirty(request.context)
with request.context.session.begin():
# Commit the reservation(s)
for reservation in reservations:
quota.QUOTAS.commit_reservation(
request.context, reservation.reservation_id)
resource_registry.set_resources_dirty(request.context)
notifier_method = self._resource + '.create.end'
self._notifier.info(request.context,
@ -467,11 +471,35 @@ class Controller(object):
notifier_method)
return create_result
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
def do_create(body, bulk=False, emulated=False):
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
if bulk and not emulated:
obj_creator = getattr(self._plugin, "%s_bulk" % action)
else:
obj_creator = getattr(self._plugin, action)
try:
if emulated:
return self._emulate_bulk_create(obj_creator, request,
body, parent_id)
else:
if self._collection in body:
# This is weird but fixing it requires changes to the
# plugin interface
kwargs.update({self._collection: body})
else:
kwargs.update({self._resource: body})
return obj_creator(request.context, **kwargs)
except Exception:
# In case of failure the plugin will always raise an
# exception. Cancel the reservation
with excutils.save_and_reraise_exception():
for reservation in reservations:
quota.QUOTAS.cancel_reservation(
request.context, reservation.reservation_id)
if self._collection in body and self._native_bulk:
# plugin does atomic bulk create operations
obj_creator = getattr(self._plugin, "%s_bulk" % action)
objs = obj_creator(request.context, body, **kwargs)
objs = do_create(body, bulk=True)
# Use first element of list to discriminate attributes which
# should be removed because of authZ policies
fields_to_strip = self._exclude_attributes_by_policy(
@ -480,15 +508,12 @@ class Controller(object):
request.context, obj, fields_to_strip=fields_to_strip)
for obj in objs]})
else:
obj_creator = getattr(self._plugin, action)
if self._collection in body:
# Emulate atomic bulk behavior
objs = self._emulate_bulk_create(obj_creator, request,
body, parent_id)
objs = do_create(body, bulk=True, emulated=True)
return notify({self._collection: objs})
else:
kwargs.update({self._resource: body})
obj = obj_creator(request.context, **kwargs)
obj = do_create(body)
self._send_nova_notification(action, {},
{self._resource: obj})
return notify({self._resource: self._view(request.context,
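The reservation logic above follows a make/commit-or-cancel pattern; conceptually (a simplified sketch, with create_port_bulk standing in for any plugin creator):
reservation = quota.QUOTAS.make_reservation(
    context, tenant_id, {'port': delta}, plugin)
try:
    result = plugin.create_port_bulk(context, body)
except Exception:
    # Creation failed: release the reserved amounts.
    quota.QUOTAS.cancel_reservation(context, reservation.reservation_id)
    raise
quota.QUOTAS.commit_reservation(context, reservation.reservation_id)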

View File

@ -12,9 +12,11 @@
# String literals representing core resources.
PORT = 'port'
PROCESS = 'process'
ROUTER = 'router'
ROUTER_GATEWAY = 'router_gateway'
ROUTER_INTERFACE = 'router_interface'
SECURITY_GROUP = 'security_group'
SECURITY_GROUP_RULE = 'security_group_rule'
SUBNET = 'subnet'
SUBNET_GATEWAY = 'subnet_gateway'

View File

@ -134,6 +134,17 @@ def arp_header_match_supported():
actions="NORMAL")
def icmpv6_header_match_supported():
return ofctl_arg_supported(cmd='add-flow',
table=ovs_const.ARP_SPOOF_TABLE,
priority=1,
dl_type=n_consts.ETHERTYPE_IPV6,
nw_proto=n_consts.PROTO_NUM_ICMP_V6,
icmp_type=n_consts.ICMPV6_TYPE_NA,
nd_target='fdf8:f53b:82e4::10',
actions="NORMAL")
def vf_management_supported():
is_supported = True
required_caps = (

View File

@ -39,7 +39,7 @@ def setup_conf():
cfg.CONF.import_group('ml2_sriov',
'neutron.plugins.ml2.drivers.mech_sriov.mech_driver.'
'mech_driver')
dhcp_agent.register_options()
dhcp_agent.register_options(cfg.CONF)
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
@ -165,6 +165,16 @@ def check_arp_header_match():
return result
def check_icmpv6_header_match():
result = checks.icmpv6_header_match_supported()
if not result:
LOG.error(_LE('Check for Open vSwitch support of ICMPv6 header '
'matching failed. ICMPv6 Neighbor Advt spoofing (part '
'of arp spoofing) suppression will not work. A newer '
'version of OVS is required.'))
return result
def check_vf_management():
result = checks.vf_management_supported()
if not result:
@ -206,6 +216,8 @@ OPTS = [
help=_('Check for ARP responder support')),
BoolOptCallback('arp_header_match', check_arp_header_match,
help=_('Check for ARP header match support')),
BoolOptCallback('icmpv6_header_match', check_icmpv6_header_match,
help=_('Check for ICMPv6 header match support')),
BoolOptCallback('vf_management', check_vf_management,
help=_('Check for VF management support')),
BoolOptCallback('read_netns', check_read_netns,
@ -247,6 +259,7 @@ def enable_tests_from_config():
cfg.CONF.set_override('arp_responder', True)
if cfg.CONF.AGENT.prevent_arp_spoofing:
cfg.CONF.set_override('arp_header_match', True)
cfg.CONF.set_override('icmpv6_header_match', True)
if cfg.CONF.ml2_sriov.agent_required:
cfg.CONF.set_override('vf_management', True)
if not cfg.CONF.AGENT.use_helper_for_ns_read:

View File

@ -41,6 +41,8 @@ DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"
DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER"
DEVICE_OWNER_LOADBALANCERV2 = "neutron:LOADBALANCERV2"
DEVICE_OWNER_PREFIXES = ["network:", "neutron:"]
# Collection used to identify devices owned by router interfaces.
# DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included.
ROUTER_INTERFACE_OWNERS = (DEVICE_OWNER_ROUTER_INTF,
@ -90,7 +92,6 @@ FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0')
AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_NEC = 'NEC plugin agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent'
@ -112,6 +113,8 @@ L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
L3_HA_MODE_EXT_ALIAS = 'l3-ha'
SUBNET_ALLOCATION_EXT_ALIAS = 'subnet_allocation'
ETHERTYPE_IPV6 = 0x86DD
# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_ICMP = 'icmp'
@ -130,6 +133,7 @@ PROTO_NUM_UDP = 17
# Neighbor Advertisement (136)
ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136]
ICMPV6_TYPE_RA = 134
ICMPV6_TYPE_NA = 136
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'

View File

@ -45,6 +45,9 @@ class NeutronException(Exception):
def __unicode__(self):
return unicode(self.msg)
def __str__(self):
return self.msg
def use_fatal_exceptions(self):
return False

View File

@ -368,6 +368,7 @@ def is_dvr_serviced(device_owner):
indirectly associated with DVR.
"""
dvr_serviced_device_owners = (n_const.DEVICE_OWNER_LOADBALANCER,
n_const.DEVICE_OWNER_LOADBALANCERV2,
n_const.DEVICE_OWNER_DHCP)
return (device_owner.startswith('compute:') or
device_owner in dvr_serviced_device_owners)
@ -432,6 +433,15 @@ def ip_version_from_int(ip_version_int):
raise ValueError(_('Illegal IP version number'))
def is_port_trusted(port):
"""Used to determine if port can be trusted not to attack network.
Trust is currently based on the device_owner field starting with 'network:'
since we restrict who can use that in the default policy.json file.
"""
return port['device_owner'].startswith('network:')
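For instance:
assert is_port_trusted({'device_owner': 'network:dhcp'})
assert not is_port_trusted({'device_owner': 'compute:nova'})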
class DelayedStringRenderer(object):
"""Takes a callable and its args and calls when __str__ is called

View File

@ -40,7 +40,7 @@ LOG = logging.getLogger(__name__)
AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('network_scheduler_driver',
default='neutron.scheduler.'
'dhcp_agent_scheduler.ChanceScheduler',
'dhcp_agent_scheduler.WeightScheduler',
help=_('Driver to use for scheduling network to DHCP agent')),
cfg.BoolOpt('network_auto_schedule', default=True,
help=_('Allow auto scheduling networks to DHCP agent.')),

View File

@ -17,6 +17,7 @@ import contextlib
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session
from oslo_utils import uuidutils
from sqlalchemy import exc
@ -28,8 +29,11 @@ from neutron.db import common_db_mixin
_FACADE = None
MAX_RETRIES = 10
retry_db_errors = oslo_db_api.wrap_db_retry(max_retries=MAX_RETRIES,
retry_on_deadlock=True)
is_deadlock = lambda e: isinstance(e, db_exc.DBDeadlock)
retry_db_errors = oslo_db_api.wrap_db_retry(
max_retries=MAX_RETRIES,
exception_checker=is_deadlock
)
def _create_facade_lazily():

View File

@ -129,7 +129,8 @@ class CommonDbMixin(object):
query_filter = None
if self.model_query_scope(context, model):
if hasattr(model, 'rbac_entries'):
rbac_model, join_params = self._get_rbac_query_params(model)
rbac_model, join_params = self._get_rbac_query_params(
model)[:2]
query = query.outerjoin(*join_params)
query_filter = (
(model.tenant_id == context.tenant_id) |
@ -185,16 +186,24 @@ class CommonDbMixin(object):
@staticmethod
def _get_rbac_query_params(model):
"""Return the class and join params for the rbac relationship."""
"""Return the parameters required to query an model's RBAC entries.
Returns a tuple of 3 containing:
1. the relevant RBAC model for a given model
2. the join parameters required to query the RBAC entries for the model
3. the ID column of the passed in model that matches the object_id
in the rbac entries.
"""
try:
cls = model.rbac_entries.property.mapper.class_
return (cls, (cls, ))
return (cls, (cls, ), model.id)
except AttributeError:
# an association proxy is being used (e.g. subnets
# depends on network's rbac entries)
rbac_model = (model.rbac_entries.target_class.
rbac_entries.property.mapper.class_)
return (rbac_model, model.rbac_entries.attr)
return (rbac_model, model.rbac_entries.attr,
model.rbac_entries.remote_attr.class_.id)
def _apply_filters_to_query(self, query, model, filters, context=None):
if filters:
@ -213,17 +222,29 @@ class CommonDbMixin(object):
elif key == 'shared' and hasattr(model, 'rbac_entries'):
# translate a filter on shared into a query against the
# object's rbac entries
rbac, join_params = self._get_rbac_query_params(model)
rbac, join_params, oid_col = self._get_rbac_query_params(
model)
query = query.outerjoin(*join_params, aliased=True)
matches = [rbac.target_tenant == '*']
if context:
matches.append(rbac.target_tenant == context.tenant_id)
is_shared = and_(
~rbac.object_id.is_(None),
rbac.action == 'access_as_shared',
or_(*matches)
)
query = query.filter(is_shared if value[0] else ~is_shared)
# any 'access_as_shared' records that match the
# wildcard or requesting tenant
is_shared = and_(rbac.action == 'access_as_shared',
or_(*matches))
if not value[0]:
# NOTE(kevinbenton): we need to find objects that don't
# have an entry that matches the criteria above so
# we use a subquery to exclude them.
# We can't just filter the inverse of the query above
# because that will still give us a network shared to
# our tenant (or wildcard) if it's shared to another
# tenant.
is_shared = ~oid_col.in_(
query.session.query(rbac.object_id).
filter(is_shared)
)
query = query.filter(is_shared)
for _nam, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
result_filter = hooks.get('result_filters', None)

View File

@ -715,13 +715,22 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
s['allocation_pools'] = range_pools
# If either gateway_ip or allocation_pools were specified
gateway_ip = s.get('gateway_ip')
if gateway_ip is not None or s.get('allocation_pools') is not None:
if gateway_ip is None:
gateway_ip = db_subnet.gateway_ip
new_gateway_ip = s.get('gateway_ip')
gateway_ip_changed = (new_gateway_ip and
new_gateway_ip != db_subnet.gateway_ip)
if gateway_ip_changed or s.get('allocation_pools') is not None:
gateway_ip = new_gateway_ip or db_subnet.gateway_ip
pools = range_pools if range_pools is not None else db_pools
self.ipam.validate_gw_out_of_pools(gateway_ip, pools)
if gateway_ip_changed:
# Provide pre-update notification not to break plugins that don't
# support gateway ip change
kwargs = {'context': context, 'subnet_id': id,
'network_id': db_subnet.network_id}
registry.notify(resources.SUBNET_GATEWAY, events.BEFORE_UPDATE,
self, **kwargs)
with context.session.begin(subtransactions=True):
subnet, changes = self.ipam.update_db_subnet(context, id, s,
db_pools)
@ -753,6 +762,12 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
l3_rpc_notifier.routers_updated(context, routers)
if gateway_ip_changed:
kwargs = {'context': context, 'subnet_id': id,
'network_id': db_subnet.network_id}
registry.notify(resources.SUBNET_GATEWAY, events.AFTER_UPDATE,
self, **kwargs)
return result
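Interested code can react to the new gateway-change notifications through the standard callback registry; a hypothetical subscriber (LOG is assumed to be the module's logger):
from neutron.callbacks import events, registry, resources
def log_gw_change(resource, event, trigger, **kwargs):
    # kwargs carry 'context', 'subnet_id' and 'network_id', matching
    # the registry.notify() calls above.
    LOG.debug("gateway updated on subnet %s", kwargs['subnet_id'])
registry.subscribe(log_gw_change, resources.SUBNET_GATEWAY,
                   events.AFTER_UPDATE)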
def _subnet_check_ip_allocations(self, context, subnet_id):

View File

@ -151,6 +151,7 @@ class FlavorManager(common_db_mixin.CommonDbMixin):
res = {'id': flavor_db['id'],
'name': flavor_db['name'],
'description': flavor_db['description'],
'service_type': flavor_db['service_type'],
'enabled': flavor_db['enabled'],
'service_profiles': []}
if flavor_db.service_profiles:
@ -190,6 +191,7 @@ class FlavorManager(common_db_mixin.CommonDbMixin):
fl_db = Flavor(id=uuidutils.generate_uuid(),
name=fl['name'],
description=fl['description'],
service_type=fl['service_type'],
enabled=fl['enabled'])
context.session.add(fl_db)
return self._make_flavor_dict(fl_db)

View File

@ -42,7 +42,8 @@ LOG = logging.getLogger(__name__)
L3_AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('router_scheduler_driver',
default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
default='neutron.scheduler.l3_agent_scheduler.'
'LeastRoutersScheduler',
help=_('Driver to use for scheduling '
'router to a default L3 agent')),
cfg.BoolOpt('router_auto_schedule', default=True,
@ -501,6 +502,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
func.count(
RouterL3AgentBinding.router_id
).label('count')).outerjoin(RouterL3AgentBinding).group_by(
agents_db.Agent.id,
RouterL3AgentBinding.l3_agent_id).order_by('count')
res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
return res[0]

View File

@ -181,8 +181,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
gw_info, router=router_db)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("An exception occurred while creating "
"the router: %s"), router)
LOG.debug("Could not update gateway info, deleting router.")
self.delete_router(context, router_db.id)
return self._make_router_dict(router_db)
@ -851,7 +850,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
if 'id' in fip:
data = {'floatingip_id': fip['id'],
'internal_ip': internal_ip_address}
msg = (_('Floating IP %(floatingip_id) is associated '
msg = (_('Floating IP %(floatingip_id)s is associated '
'with non-IPv4 address %(internal_ip)s and '
'therefore cannot be bound.') % data)
else:
@ -1179,7 +1178,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
return []
qry = context.session.query(RouterPort)
qry = qry.filter(
Router.id.in_(router_ids),
RouterPort.router_id.in_(router_ids),
RouterPort.port_type.in_(device_owners)
)
@ -1417,11 +1416,32 @@ def _notify_routers_callback(resource, event, trigger, **kwargs):
l3plugin.notify_routers_updated(context, router_ids)
def _notify_subnet_gateway_ip_update(resource, event, trigger, **kwargs):
l3plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if not l3plugin:
return
context = kwargs['context']
network_id = kwargs['network_id']
subnet_id = kwargs['subnet_id']
query = context.session.query(models_v2.Port).filter_by(
network_id=network_id,
device_owner=l3_constants.DEVICE_OWNER_ROUTER_GW)
query = query.join(models_v2.Port.fixed_ips).filter(
models_v2.IPAllocation.subnet_id == subnet_id)
router_ids = set(port['device_id'] for port in query)
for router_id in router_ids:
l3plugin.notify_router_updated(context, router_id)
def subscribe():
registry.subscribe(
_prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE)
registry.subscribe(
_notify_routers_callback, resources.PORT, events.AFTER_DELETE)
registry.subscribe(
_notify_subnet_gateway_ip_update, resources.SUBNET_GATEWAY,
events.AFTER_UPDATE)
# NOTE(armax): multiple l3 service plugins (potentially out of tree) inherit
# from l3_db and may need the callbacks to be processed. Having an implicit

View File

@ -144,12 +144,34 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
return router_db
def _delete_current_gw_port(self, context, router_id, router, new_network):
"""
Overridden here to handle deletion of DVR internal ports.
If there is a valid router update with gateway port to be deleted,
then go ahead and delete the csnat ports and the floatingip
agent gateway port associated with the dvr router.
"""
gw_ext_net_id = (
router.gw_port['network_id'] if router.gw_port else None)
super(L3_NAT_with_dvr_db_mixin,
self)._delete_current_gw_port(context, router_id,
router, new_network)
if router.extra_attributes.distributed:
if (is_distributed_router(router) and
gw_ext_net_id != new_network):
self.delete_csnat_router_interface_ports(
context.elevated(), router)
# NOTE(Swami): Delete the Floatingip agent gateway port
# on all hosts when it is the last gateway port in the
# given external network.
filters = {'network_id': [gw_ext_net_id],
'device_owner': [l3_const.DEVICE_OWNER_ROUTER_GW]}
ext_net_gw_ports = self._core_plugin.get_ports(
context.elevated(), filters)
if not ext_net_gw_ports:
self.delete_floatingip_agent_gateway_port(
context.elevated(), None, gw_ext_net_id)
def _create_gw_port(self, context, router_id, router, new_network,
ext_ips):
@ -184,25 +206,12 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Override to create and delete floating agent gw port for DVR.
"""Override to create floating agent gw port for DVR.
Floating IP Agent gateway port will be created when a
floatingIP association happens.
Floating IP Agent gateway port will be deleted when a
floatingIP disassociation happens.
"""
fip_port = fip.get('port_id')
unused_fip_agent_gw_port = (
fip_port is None and floatingip_db['fixed_port_id'])
if unused_fip_agent_gw_port and floatingip_db.get('router_id'):
admin_ctx = context.elevated()
router_dict = self.get_router(
admin_ctx, floatingip_db['router_id'])
# Check if distributed router and then delete the
# FloatingIP agent gateway port
if router_dict.get('distributed'):
self._clear_unused_fip_agent_gw_port(
admin_ctx, floatingip_db)
super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
associate_fip = fip_port and floatingip_db['id']
@ -227,54 +236,12 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
vm_hostid))
LOG.debug("FIP Agent gateway port: %s", fip_agent_port)
def _clear_unused_fip_agent_gw_port(
self, context, floatingip_db):
"""Helper function to check for fip agent gw port and delete.
This function checks on compute nodes to make sure if there
are any VMs using the FIP agent gateway port. If no VMs are
using the FIP agent gateway port, it will go ahead and delete
the FIP agent gateway port. If even a single VM is using the
port it will not delete.
"""
fip_hostid = self._get_vm_port_hostid(
context, floatingip_db['fixed_port_id'])
if fip_hostid and self._check_fips_availability_on_host_ext_net(
context, fip_hostid, floatingip_db['floating_network_id']):
LOG.debug('Deleting the Agent GW Port for ext-net: '
'%s', floatingip_db['floating_network_id'])
self._delete_floatingip_agent_gateway_port(
context, fip_hostid, floatingip_db['floating_network_id'])
def delete_floatingip(self, context, id):
floatingip = self._get_floatingip(context, id)
if floatingip['fixed_port_id']:
admin_ctx = context.elevated()
self._clear_unused_fip_agent_gw_port(
admin_ctx, floatingip)
super(L3_NAT_with_dvr_db_mixin,
self).delete_floatingip(context, id)
def _get_floatingip_on_port(self, context, port_id=None):
"""Helper function to retrieve the fip associated with port."""
fip_qry = context.session.query(l3_db.FloatingIP)
floating_ip = fip_qry.filter_by(fixed_port_id=port_id)
return floating_ip.first()
def disassociate_floatingips(self, context, port_id, do_notify=True):
"""Override disassociate floatingips to delete fip agent gw port."""
with context.session.begin(subtransactions=True):
fip = self._get_floatingip_on_port(
context, port_id=port_id)
if fip:
admin_ctx = context.elevated()
self._clear_unused_fip_agent_gw_port(
admin_ctx, fip)
return super(L3_NAT_with_dvr_db_mixin,
self).disassociate_floatingips(context,
port_id,
do_notify=do_notify)
def add_router_interface(self, context, router_id, interface_info):
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
router = self._get_router(context, router_id)
@ -291,6 +258,20 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
context, router, interface_info['subnet_id'], device_owner)
if new_port:
if router.extra_attributes.distributed and router.gw_port:
try:
admin_context = context.elevated()
self._add_csnat_router_interface_port(
admin_context, router, port['network_id'],
port['fixed_ips'][-1]['subnet_id'])
except Exception:
with excutils.save_and_reraise_exception():
# we need to preserve the original state prior
# the request by rolling back the port creation
# that led to new_port=True
self._core_plugin.delete_port(
admin_context, port['id'])
with context.session.begin(subtransactions=True):
router_port = l3_db.RouterPort(
port_id=port['id'],
@ -299,11 +280,6 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
)
context.session.add(router_port)
if router.extra_attributes.distributed and router.gw_port:
self._add_csnat_router_interface_port(
context.elevated(), router, port['network_id'],
port['fixed_ips'][-1]['subnet_id'])
router_interface_info = self._make_router_interface_info(
router_id, port['tenant_id'], port['id'], subnets[-1]['id'],
[subnet['id'] for subnet in subnets])
@ -511,27 +487,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
query = self._model_query(context, l3_db.Router.id)
return [row[0] for row in query]
def _check_fips_availability_on_host_ext_net(
self, context, host_id, fip_ext_net_id):
"""Query all floating_ips and filter on host and external net."""
fip_count_on_host = 0
with context.session.begin(subtransactions=True):
router_ids = self._get_router_ids(context)
floating_ips = self._get_sync_floating_ips(context, router_ids)
# Check for the active floatingip in the host
for fip in floating_ips:
f_host = self._get_vm_port_hostid(context, fip['port_id'])
if (f_host == host_id and
(fip['floating_network_id'] == fip_ext_net_id)):
fip_count_on_host += 1
# If fip_count greater than 1 or equal to zero no action taken
# if the fip_count is equal to 1, then this would be last active
# fip in the host, so the agent gateway port can be deleted.
if fip_count_on_host == 1:
return True
return False
def _delete_floatingip_agent_gateway_port(
def delete_floatingip_agent_gateway_port(
self, context, host_id, ext_net_id):
"""Function to delete FIP gateway port with given ext_net_id."""
# delete any fip agent gw port
@ -540,9 +496,10 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
ports = self._core_plugin.get_ports(context,
filters=device_filter)
for p in ports:
if self._get_vm_port_hostid(context, p['id'], p) == host_id:
if not host_id or p[portbindings.HOST_ID] == host_id:
self._core_plugin.ipam.delete_port(context, p['id'])
return
if host_id:
return
def create_fip_agent_gw_port_if_not_exists(
self, context, network_id, host):

View File

@ -106,7 +106,6 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
filter_sub = {'fixed_ips': {'subnet_id': [subnet]},
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
router_id = None
ports = self._core_plugin.get_ports(context, filters=filter_sub)
for port in ports:
router_id = port['device_id']
@ -115,8 +114,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
payload = {'subnet_id': subnet}
self.l3_rpc_notifier.routers_updated(
context, [router_id], None, payload)
break
LOG.debug('DVR: dvr_update_router_addvm %s ', router_id)
LOG.debug('DVR: dvr_update_router_addvm %s ', router_id)
def get_dvr_routers_by_portid(self, context, port_id):
"""Gets the dvr routers on vmport subnets."""
@ -161,12 +159,17 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
return True
return False
def dvr_deletens_if_no_port(self, context, port_id):
def dvr_deletens_if_no_port(self, context, port_id, port_host=None):
"""Delete the DVR namespace if no dvr serviced port exists."""
admin_context = context.elevated()
router_ids = self.get_dvr_routers_by_portid(admin_context, port_id)
port_host = ml2_db.get_port_binding_host(admin_context.session,
port_id)
if not port_host:
port_host = ml2_db.get_port_binding_host(admin_context.session,
port_id)
if not port_host:
LOG.debug('Host name not found for port %s', port_id)
return []
if not router_ids:
LOG.debug('No namespaces available for this DVR port %(port)s '
'on host %(host)s', {'port': port_id,
@ -458,16 +461,20 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
context, agent, router)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
binding = None
router = self.get_router(context, router_id)
if router['external_gateway_info'] and router.get('distributed'):
binding = self.unbind_snat(context, router_id, agent_id=agent_id)
# binding only exists when agent mode is dvr_snat
if binding:
notification_not_sent = self.unbind_router_servicenode(context,
router_id, binding)
if notification_not_sent:
self.l3_rpc_notifier.routers_updated(
context, [router_id], schedule_routers=False)
else:
# Below Needs to be done when agent mode is legacy or dvr.
if not binding:
super(L3_DVRsch_db_mixin,
self).remove_router_from_l3_agent(
context, agent_id, router_id)
@ -504,9 +511,37 @@ def _notify_port_delete(event, resource, trigger, **kwargs):
context, router['agent_id'], router['router_id'])
def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
new_port = kwargs.get('port')
original_port = kwargs.get('original_port')
if new_port and original_port:
original_device_owner = original_port.get('device_owner', '')
if (original_device_owner.startswith('compute') and
not new_port.get('device_owner')):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
context = kwargs['context']
removed_routers = l3plugin.dvr_deletens_if_no_port(
context,
original_port['id'],
port_host=original_port['binding:host_id'])
if removed_routers:
removed_router_args = {
'context': context,
'port': original_port,
'removed_routers': removed_routers,
}
_notify_port_delete(
event, resource, trigger, **removed_router_args)
return
_notify_l3_agent_new_port(resource, event, trigger, **kwargs)
def subscribe():
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_UPDATE)
_notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
registry.subscribe(

View File

@ -15,12 +15,24 @@
import contextlib
import functools
import alembic
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
CREATION_OPERATIONS = (sa.sql.ddl.AddConstraint,
sa.sql.ddl.CreateIndex,
sa.sql.ddl.CreateTable,
sa.sql.ddl.CreateColumn,
)
DROP_OPERATIONS = (sa.sql.ddl.DropConstraint,
sa.sql.ddl.DropIndex,
sa.sql.ddl.DropTable,
alembic.ddl.base.DropColumn)
def skip_if_offline(func):
"""Decorator for skipping migrations in offline mode."""
@functools.wraps(func)

View File

@ -18,8 +18,6 @@
from alembic import op
import sqlalchemy as sa
from neutron.plugins.cisco.common import cisco_constants
segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
name='segment_type')
profile_type = sa.Enum('network', 'policy', name='profile_type')
@ -93,7 +91,7 @@ def upgrade():
'cisco_n1kv_profile_bindings',
sa.Column('profile_type', profile_type, nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=False,
server_default=cisco_constants.TENANT_ID_NOT_SET),
server_default='TENANT_ID_NOT_SET'),
sa.Column('profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))

View File

@ -21,6 +21,7 @@ import sqlalchemy as sa
from sqlalchemy import event
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import autogen
from neutron.db.migration.models import head # noqa
from neutron.db import model_base
@@ -58,9 +59,13 @@ def set_mysql_engine():
model_base.BASEV2.__table_args__['mysql_engine'])
def include_object(object, name, type_, reflected, compare_to):
def include_object(object_, name, type_, reflected, compare_to):
if type_ == 'table' and name in external.TABLES:
return False
elif type_ == 'index' and reflected and name.startswith("idx_autoinc_"):
# skip indexes created by SQLAlchemy autoincrement=True
# on composite PK integer columns
return False
else:
return True
@@ -103,21 +108,25 @@ def run_migrations_online():
"""
set_mysql_engine()
engine = session.create_engine(neutron_config.database.connection)
connection = engine.connect()
connection = config.attributes.get('connection')
new_engine = connection is None
if new_engine:
engine = session.create_engine(neutron_config.database.connection)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object
include_object=include_object,
process_revision_directives=autogen.process_revision_directives
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
engine.dispose()
if new_engine:
connection.close()
engine.dispose()
if context.is_offline_mode():
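The config.attributes lookup lets a caller hand an existing connection to the migration run instead of having env.py build and dispose its own engine. A hedged sketch of the calling side, following the standard Alembic pattern; the URL and ini path are placeholders:

import sqlalchemy as sa
from alembic import command
from alembic.config import Config

engine = sa.create_engine('sqlite://')   # placeholder URL
alembic_cfg = Config('alembic.ini')      # placeholder ini path
with engine.connect() as connection:
    # run_migrations_online() above finds this connection in
    # config.attributes and skips creating a new engine.
    alembic_cfg.attributes['connection'] = connection
    command.upgrade(alembic_cfg, 'heads')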

View File

@@ -48,6 +48,9 @@ REPO_CISCO_TABLES = [
'ml2_nexus_vxlan_allocations',
'ml2_nexus_vxlan_mcast_groups',
'ml2_ucsm_port_profiles',
'cisco_hosting_devices',
'cisco_port_mappings',
'cisco_router_mappings',
]
# VMware-NSX models moved to openstack/vmware-nsx

View File

@@ -1,2 +1,2 @@
11926bcfe72d
34af2b5c5a59
4af11ca47297

View File

@@ -0,0 +1,44 @@
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Drop cisco monolithic tables
Revision ID: 4af11ca47297
Revises: 11926bcfe72d
Create Date: 2015-08-13 08:01:19.709839
"""
# revision identifiers, used by Alembic.
revision = '4af11ca47297'
down_revision = '11926bcfe72d'
from alembic import op
def upgrade():
op.drop_table('cisco_n1kv_port_bindings')
op.drop_table('cisco_n1kv_network_bindings')
op.drop_table('cisco_n1kv_multi_segments')
op.drop_table('cisco_provider_networks')
op.drop_table('cisco_n1kv_trunk_segments')
op.drop_table('cisco_n1kv_vmnetworks')
op.drop_table('cisco_n1kv_profile_bindings')
op.drop_table('cisco_qos_policies')
op.drop_table('cisco_credentials')
op.drop_table('cisco_n1kv_vlan_allocations')
op.drop_table('cisco_n1kv_vxlan_allocations')
op.drop_table('cisco_network_profiles')
op.drop_table('cisco_policy_profiles')

View File

@@ -0,0 +1,123 @@
# Copyright (c) 2015 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic.operations import ops
from alembic.util import Dispatcher
from alembic.util import rev_id as new_rev_id
from neutron.db.migration import cli
_ec_dispatcher = Dispatcher()
def process_revision_directives(context, revision, directives):
if cli._use_separate_migration_branches(context.config):
directives[:] = [
directive for directive in _assign_directives(context, directives)
]
def _assign_directives(context, directives, phase=None):
for directive in directives:
decider = _ec_dispatcher.dispatch(directive)
if phase is None:
phases = cli.MIGRATION_BRANCHES
else:
phases = (phase,)
for phase in phases:
decided = decider(context, directive, phase)
if decided:
yield decided
@_ec_dispatcher.dispatch_for(ops.MigrationScript)
def _migration_script_ops(context, directive, phase):
"""Generate a new ops.MigrationScript() for a given phase.
E.g. given an ops.MigrationScript() directive from a vanilla autogenerate
and an expand/contract phase name, produce a new ops.MigrationScript()
which contains only those sub-directives appropriate to "expand" or
"contract". Also ensure that the branch directory exists and that
the correct branch labels/depends_on/head revision are set up.
"""
version_path = cli._get_version_branch_path(context.config, phase)
autogen_kwargs = {}
cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs)
op = ops.MigrationScript(
new_rev_id(),
ops.UpgradeOps(ops=[
d for d in _assign_directives(
context, directive.upgrade_ops.ops, phase)
]),
ops.DowngradeOps(ops=[]),
message=directive.message,
**autogen_kwargs
)
if not op.upgrade_ops.is_empty():
return op
@_ec_dispatcher.dispatch_for(ops.AddConstraintOp)
@_ec_dispatcher.dispatch_for(ops.CreateIndexOp)
@_ec_dispatcher.dispatch_for(ops.CreateTableOp)
@_ec_dispatcher.dispatch_for(ops.AddColumnOp)
def _expands(context, directive, phase):
if phase == 'expand':
return directive
else:
return None
@_ec_dispatcher.dispatch_for(ops.DropConstraintOp)
@_ec_dispatcher.dispatch_for(ops.DropIndexOp)
@_ec_dispatcher.dispatch_for(ops.DropTableOp)
@_ec_dispatcher.dispatch_for(ops.DropColumnOp)
def _contracts(context, directive, phase):
if phase == 'contract':
return directive
else:
return None
@_ec_dispatcher.dispatch_for(ops.AlterColumnOp)
def _alter_column(context, directive, phase):
is_expand = phase == 'expand'
if is_expand and (
directive.modify_nullable is True
):
return directive
elif not is_expand and (
directive.modify_nullable is False
):
return directive
else:
raise NotImplementedError(
"Don't know if operation is an expand or "
"contract at the moment: %s" % directive)
@_ec_dispatcher.dispatch_for(ops.ModifyTableOps)
def _modify_table_ops(context, directive, phase):
op = ops.ModifyTableOps(
directive.table_name,
ops=[
d for d in _assign_directives(context, directive.ops, phase)
],
schema=directive.schema)
if not op.is_empty():
return op
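The net effect of the dispatch table: creations land in the expand branch, drops in contract, and a nullable change on an existing column is expand when it loosens (True) and contract when it tightens (False), else an error. A plain-Python model of that decision, using operation names only rather than real Alembic objects:

# Plain model of the phase assignment above; the names mirror the
# alembic.operations.ops classes handled by the dispatcher.
EXPAND = {'AddConstraintOp', 'CreateIndexOp', 'CreateTableOp', 'AddColumnOp'}
CONTRACT = {'DropConstraintOp', 'DropIndexOp', 'DropTableOp', 'DropColumnOp'}

def phase_of(op_name, modify_nullable=None):
    if op_name in EXPAND:
        return 'expand'
    if op_name in CONTRACT:
        return 'contract'
    if op_name == 'AlterColumnOp':
        if modify_nullable is True:
            return 'expand'
        if modify_nullable is False:
            return 'contract'
    raise NotImplementedError(op_name)

assert phase_of('AddColumnOp') == 'expand'
assert phase_of('AlterColumnOp', modify_nullable=False) == 'contract'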

View File

@@ -47,16 +47,7 @@ neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini')
VALID_SERVICES = ['fwaas', 'lbaas', 'vpnaas']
INSTALLED_SERVICES = [service_ for service_ in VALID_SERVICES
if 'neutron-%s' % service_ in migration_entrypoints]
INSTALLED_SERVICE_PROJECTS = ['neutron-%s' % service_
for service_ in INSTALLED_SERVICES]
INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints
if project_ not in INSTALLED_SERVICE_PROJECTS]
service_help = (
_("Can be one of '%s'.") % "', '".join(INSTALLED_SERVICES)
if INSTALLED_SERVICES else
_("(No services are currently installed).")
)
INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints]
_core_opts = [
cfg.StrOpt('core_plugin',
@@ -64,12 +55,14 @@ _core_opts = [
help=_('Neutron plugin provider module')),
cfg.StrOpt('service',
choices=INSTALLED_SERVICES,
help=(_("The advanced service to execute the command against. ")
+ service_help)),
help=(_("(Deprecated. Use '--subproject neutron-SERVICE' "
"instead.) The advanced service to execute the "
"command against."))),
cfg.StrOpt('subproject',
choices=INSTALLED_SUBPROJECTS,
help=(_("The subproject to execute the command against. "
"Can be one of %s.") % INSTALLED_SUBPROJECTS)),
"Can be one of: '%s'.")
% "', '".join(INSTALLED_SUBPROJECTS))),
cfg.BoolOpt('split_branches',
default=False,
help=_("Enforce using split branches file structure."))
@@ -193,29 +186,21 @@ def _get_branch_head(branch):
return '%s@head' % branch
def _check_bootstrap_new_branch(branch, version_path, addn_kwargs):
addn_kwargs['version_path'] = version_path
addn_kwargs['head'] = _get_branch_head(branch)
if not os.path.exists(version_path):
# Bootstrap initial directory structure
utils.ensure_dir(version_path)
addn_kwargs['branch_label'] = branch
def do_revision(config, cmd):
'''Generate new revision files, one per branch.'''
addn_kwargs = {
'message': CONF.command.message,
'autogenerate': CONF.command.autogenerate,
'sql': CONF.command.sql,
}
if _use_separate_migration_branches(config):
for branch in MIGRATION_BRANCHES:
version_path = _get_version_branch_path(config, branch)
addn_kwargs['version_path'] = version_path
addn_kwargs['head'] = _get_branch_head(branch)
if not os.path.exists(version_path):
# Bootstrap initial directory structure
utils.ensure_dir(version_path)
# Mark the very first revision in the new branch with its label
addn_kwargs['branch_label'] = branch
do_alembic_command(config, cmd, **addn_kwargs)
else:
do_alembic_command(config, cmd, **addn_kwargs)
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
update_heads_file(config)
@@ -478,8 +463,8 @@ def get_alembic_configs():
# Get the script locations for the specified or installed projects.
# Which projects to get script locations for is determined by the CLI
# options as follows:
# --service X # only subproject neutron-X
# --subproject Y # only subproject Y
# --service X # only subproject neutron-X (deprecated)
# --subproject Y # only subproject Y (where Y can be neutron)
# (none specified) # neutron and all installed subprojects
script_locations = {}
if CONF.service:

View File

@@ -50,9 +50,6 @@ from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa
from neutron.plugins.bigswitch.db import consistency_db # noqa
from neutron.plugins.bigswitch import routerrule_db # noqa
from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.cisco.db.l3 import l3_models # noqa
from neutron.plugins.cisco.db import n1kv_models_v2 # noqa
from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers import type_flat # noqa

View File

@@ -13,6 +13,7 @@
# under the License.
from neutron.api.v2 import attributes as attrs
from neutron.common import utils
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db_common
from neutron.extensions import portsecurity as psec
@@ -40,8 +41,7 @@ class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon):
"""
has_ip = self._ip_on_port(port)
# we don't apply security groups for dhcp, router
if (port.get('device_owner') and
port['device_owner'].startswith('network:')):
if port.get('device_owner') and utils.is_port_trusted(port):
return (False, has_ip)
if attrs.is_attr_set(port.get(psec.PORTSECURITY)):
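utils.is_port_trusted centralizes the inline 'network:' prefix test being deleted here (and again in securitygroups_db below). Functionally it amounts to the following sketch; the constant name is an assumption, but the prefix matches the removed check verbatim:

DEVICE_OWNER_NETWORK_PREFIX = 'network:'  # assumed constant name; the
                                          # prefix matches the old inline check

def is_port_trusted(port):
    # dhcp and router ports are created by Neutron itself, so port
    # security / security groups are not applied to them.
    return port['device_owner'].startswith(DEVICE_OWNER_NETWORK_PREFIX)

assert is_port_trusted({'device_owner': 'network:dhcp'})
assert not is_port_trusted({'device_owner': 'compute:nova'})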

View File

@@ -19,6 +19,7 @@ import sqlalchemy as sa
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import sql
from neutron.db import api as db_api
from neutron.db import common_db_mixin as common_db_api
from neutron.db.quota import models as quota_models
@@ -29,12 +30,8 @@ def utcnow():
class QuotaUsageInfo(collections.namedtuple(
'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'reserved', 'dirty'])):
@property
def total(self):
"""Total resource usage (reserved and used)."""
return self.reserved + self.used
'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'dirty'])):
"""Information about resource quota usage."""
class ReservationInfo(collections.namedtuple(
@@ -66,7 +63,6 @@ def get_quota_usage_by_resource_and_tenant(context, resource, tenant_id,
return QuotaUsageInfo(result.resource,
result.tenant_id,
result.in_use,
result.reserved,
result.dirty)
@@ -76,7 +72,6 @@ def get_quota_usage_by_resource(context, resource):
return [QuotaUsageInfo(item.resource,
item.tenant_id,
item.in_use,
item.reserved,
item.dirty) for item in query]
@@ -86,12 +81,11 @@ def get_quota_usage_by_tenant_id(context, tenant_id):
return [QuotaUsageInfo(item.resource,
item.tenant_id,
item.in_use,
item.reserved,
item.dirty) for item in query]
def set_quota_usage(context, resource, tenant_id,
in_use=None, reserved=None, delta=False):
in_use=None, delta=False):
"""Set resource quota usage.
:param context: instance of neutron context with db session
@@ -100,15 +94,14 @@ def set_quota_usage(context, resource, tenant_id,
being set
:param in_use: integer specifying the new quantity of used resources,
or a delta to apply to current used resource
:param reserved: integer specifying the new quantity of reserved resources,
or a delta to apply to current reserved resources
:param delta: Specififies whether in_use or reserved are absolute numbers
or deltas (default to False)
:param delta: Specifies whether in_use is an absolute number
or a delta (default to False)
"""
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource).filter_by(tenant_id=tenant_id)
usage_data = query.first()
with context.session.begin(subtransactions=True):
with db_api.autonested_transaction(context.session):
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource).filter_by(
tenant_id=tenant_id)
usage_data = query.first()
if not usage_data:
# Must create entry
usage_data = quota_models.QuotaUsage(
@@ -120,16 +113,11 @@ def set_quota_usage(context, resource, tenant_id,
if delta:
in_use = usage_data.in_use + in_use
usage_data.in_use = in_use
if reserved is not None:
if delta:
reserved = usage_data.reserved + reserved
usage_data.reserved = reserved
# After an explicit update the dirty bit should always be reset
usage_data.dirty = False
return QuotaUsageInfo(usage_data.resource,
usage_data.tenant_id,
usage_data.in_use,
usage_data.reserved,
usage_data.dirty)
@@ -188,10 +176,6 @@ def create_reservation(context, tenant_id, deltas, expiration=None):
quota_models.ResourceDelta(resource=resource,
amount=delta,
reservation=resv))
# quota_usage for all resources involved in this reservation must
# be marked as dirty
set_resources_quota_usage_dirty(
context, deltas.keys(), tenant_id)
return ReservationInfo(resv['id'],
resv['tenant_id'],
resv['expiration'],
@@ -263,7 +247,7 @@ def get_reservations_for_resources(context, tenant_id, resources,
quota_models.ResourceDelta.resource,
quota_models.Reservation.expiration)
return dict((resource, total_reserved)
for (resource, exp, total_reserved) in resv_query)
for (resource, exp, total_reserved) in resv_query)
def remove_expired_reservations(context, tenant_id=None):
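With the reserved column gone, set_quota_usage tracks only in_use, either as an absolute value or as an increment. The delta flag's semantics in isolation, as a pure-Python model with no session involved:

def next_in_use(current, in_use, delta=False):
    # delta=True applies an increment; delta=False sets an absolute value.
    return current + in_use if delta else in_use

assert next_in_use(5, 2, delta=True) == 7
assert next_in_use(5, 2) == 2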

View File

@@ -126,29 +126,18 @@ class DbQuotaDriver(object):
return dict((k, v) for k, v in quotas.items())
def _handle_expired_reservations(self, context, tenant_id,
resource, expired_amount):
LOG.debug(("Adjusting usage for resource %(resource)s: "
"removing %(expired)d reserved items"),
{'resource': resource,
'expired': expired_amount})
# TODO(salv-orlando): It should be possible to do this
# operation for all resources with a single query.
# Update reservation usage
quota_api.set_quota_usage(
context,
resource,
tenant_id,
reserved=-expired_amount,
delta=True)
def _handle_expired_reservations(self, context, tenant_id):
LOG.debug("Deleting expired reservations for tenant:%s" % tenant_id)
# Delete expired reservations (we don't want them to accrue
# in the database)
quota_api.remove_expired_reservations(
context, tenant_id=tenant_id)
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_interval=0.1,
inc_retry_interval=True,
retry_on_request=True,
retry_on_deadlock=True)
exception_checker=db_api.is_deadlock)
def make_reservation(self, context, tenant_id, resources, deltas, plugin):
# Lock current reservation table
# NOTE(salv-orlando): This routine uses DB write locks.
@@ -163,7 +152,21 @@ def make_reservation(self, context, tenant_id, resources, deltas, plugin):
# locks should be ok to use when support for sending "hotspot" writes
# to a single node will be available.
requested_resources = deltas.keys()
with context.session.begin():
with db_api.autonested_transaction(context.session):
# get_tenant_quotas needs as input a dictionary mapping resource
# names to BaseResource instances so that the default quota can be
# retrieved
current_limits = self.get_tenant_quotas(
context, resources, tenant_id)
unlimited_resources = set([resource for (resource, limit) in
current_limits.items() if limit < 0])
# Do not even bother counting resources and calculating headroom
# for resources with unlimited quota
LOG.debug(("Resources %s have unlimited quota limit. It is not "
"required to calculated headroom "),
",".join(unlimited_resources))
requested_resources = (set(requested_resources) -
unlimited_resources)
# Gather current usage information
# TODO(salv-orlando): calling count() for every resource triggers
# multiple queries on quota usage. This should be improved, however
@@ -173,13 +176,8 @@ def make_reservation(self, context, tenant_id, resources, deltas, plugin):
# instances
current_usages = dict(
(resource, resources[resource].count(
context, plugin, tenant_id)) for
context, plugin, tenant_id, resync_usage=False)) for
resource in requested_resources)
# get_tenant_quotes needs in inout a dictionary mapping resource
# name to BaseResosurce instances so that the default quota can be
# retrieved
current_limits = self.get_tenant_quotas(
context, resources, tenant_id)
# Adjust for expired reservations. Apparently it is cheaper than
# querying everytime for active reservations and counting overall
# quantity of resources reserved
@@ -190,13 +188,6 @@ def make_reservation(self, context, tenant_id, resources, deltas, plugin):
for resource in requested_resources:
expired_reservations = expired_deltas.get(resource, 0)
total_usage = current_usages[resource] - expired_reservations
# A negative quota limit means infinite
if current_limits[resource] < 0:
LOG.debug(("Resource %(resource)s has unlimited quota "
"limit. It is possible to allocate %(delta)s "
"items."), {'resource': resource,
'delta': deltas[resource]})
continue
res_headroom = current_limits[resource] - total_usage
LOG.debug(("Attempting to reserve %(delta)d items for "
"resource %(resource)s. Total usage: %(total)d; "
@@ -209,8 +200,7 @@ def make_reservation(self, context, tenant_id, resources, deltas, plugin):
if res_headroom < deltas[resource]:
resources_over_limit.append(resource)
if expired_reservations:
self._handle_expired_reservations(
context, tenant_id, resource, expired_reservations)
self._handle_expired_reservations(context, tenant_id)
if resources_over_limit:
raise exceptions.OverQuota(overs=sorted(resources_over_limit))
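Pulling the limit lookup ahead of the counting means unlimited (negative-limit) resources are excluded before any usage query runs; the remaining per-resource check reduces to a headroom comparison. A self-contained model of that decision, assuming usage already includes the expired-reservation adjustment:

def resources_over_limit(deltas, limits, usages):
    # Resources with a negative limit are unlimited and never over quota.
    unlimited = {r for r, limit in limits.items() if limit < 0}
    return sorted(r for r in deltas
                  if r not in unlimited
                  and limits[r] - usages[r] < deltas[r])

assert resources_over_limit({'port': 3}, {'port': 10}, {'port': 8}) == ['port']
assert resources_over_limit({'port': 3}, {'port': -1}, {'port': 8}) == []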

View File

@@ -438,7 +438,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
elif ip_proto == constants.PROTO_NUM_ICMP:
for attr, field in [('port_range_min', 'type'),
('port_range_max', 'code')]:
if rule[attr] is not None and rule[attr] > 255:
if rule[attr] is not None and not (0 <= rule[attr] <= 255):
raise ext_sg.SecurityGroupInvalidIcmpValue(
field=field, attr=attr, value=rule[attr])
if (rule['port_range_min'] is None and
@@ -686,15 +686,15 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
:returns: all security groups IDs on port belonging to tenant.
"""
p = port['port']
if not attributes.is_attr_set(p.get(ext_sg.SECURITYGROUPS)):
port = port['port']
if not attributes.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
return
if p.get('device_owner') and p['device_owner'].startswith('network:'):
if port.get('device_owner') and utils.is_port_trusted(port):
return
port_sg = p.get(ext_sg.SECURITYGROUPS, [])
port_sg = port.get(ext_sg.SECURITYGROUPS, [])
filters = {'id': port_sg}
tenant_id = p.get('tenant_id')
tenant_id = port.get('tenant_id')
if tenant_id:
filters['tenant_id'] = [tenant_id]
valid_groups = set(g['id'] for g in
@@ -710,14 +710,13 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
def _ensure_default_security_group_on_port(self, context, port):
# we don't apply security groups for dhcp, router
if (port['port'].get('device_owner') and
port['port']['device_owner'].startswith('network:')):
port = port['port']
if port.get('device_owner') and utils.is_port_trusted(port):
return
tenant_id = self._get_tenant_id_for_create(context,
port['port'])
tenant_id = self._get_tenant_id_for_create(context, port)
default_sg = self._ensure_default_security_group(context, tenant_id)
if not attributes.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
if not attributes.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
port[ext_sg.SECURITYGROUPS] = [default_sg]
def _check_update_deletes_security_groups(self, port):
"""Return True if port has as a security group and it's value

View File

@@ -45,16 +45,6 @@ class ServiceTypeManager(object):
def __init__(self):
self.config = {}
# TODO(armax): remove these as soon as *-aaS start using
# the newly introduced add_provider_configuration API
self.config['LOADBALANCER'] = (
pconf.ProviderConfiguration('neutron_lbaas'))
self.config['LOADBALANCERV2'] = (
pconf.ProviderConfiguration('neutron_lbaas'))
self.config['FIREWALL'] = (
pconf.ProviderConfiguration('neutron_fwaas'))
self.config['VPN'] = (
pconf.ProviderConfiguration('neutron_vpnaas'))
def add_provider_configuration(self, service_type, configuration):
"""Add or update the provider configuration for the service type."""

View File

@@ -38,6 +38,7 @@ class NeutronDebugAgent(object):
OPTS = [
# Needed for drivers
cfg.StrOpt('external_network_bridge', default='br-ex',
deprecated_for_removal=True,
help=_("Name of bridge used for external network "
"traffic.")),
]

View File

@@ -49,9 +49,13 @@ class AllowedAddressPairExhausted(nexception.BadRequest):
def _validate_allowed_address_pairs(address_pairs, valid_values=None):
unique_check = {}
if len(address_pairs) > cfg.CONF.max_allowed_address_pair:
raise AllowedAddressPairExhausted(
quota=cfg.CONF.max_allowed_address_pair)
try:
if len(address_pairs) > cfg.CONF.max_allowed_address_pair:
raise AllowedAddressPairExhausted(
quota=cfg.CONF.max_allowed_address_pair)
except TypeError:
raise webob.exc.HTTPBadRequest(
_("Allowed address pairs must be a list."))
for address_pair in address_pairs:
# mac_address is optional, if not set we use the mac on the port
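The try/except covers requests where the attribute is not a list at all: len() then raises TypeError (for example on a boolean or null), which now surfaces as a 400 instead of an unhandled 500. Reduced to the essentials:

def check_pairs(address_pairs, maximum=10):
    try:
        if len(address_pairs) > maximum:
            return 'over quota'
    except TypeError:
        return 'bad request: allowed address pairs must be a list'
    return 'ok'

assert check_pairs([{'ip_address': '10.0.0.5'}]) == 'ok'
assert check_pairs(True).startswith('bad request')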

View File

@@ -69,7 +69,7 @@ def _validate_dns_format(data, max_len=FQDN_MAX_LEN):
raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
except TypeError as e:
msg = _("'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s") % {
'data': data, 'reason': e.message}
'data': data, 'reason': str(e)}
return msg

View File

@@ -84,7 +84,12 @@ SUB_RESOURCE_ATTRIBUTE_MAP = {
'member_name': 'flavor'},
'parameters': {'id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True}}
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string':
attr.TENANT_ID_MAX_LEN},
'is_visible': True}}
}
}

View File

@@ -246,3 +246,8 @@ class NeutronManager(object):
service_plugins = cls.get_instance().service_plugins
return dict((x, weakref.proxy(y))
for x, y in six.iteritems(service_plugins))
@classmethod
def get_unique_service_plugins(cls):
service_plugins = cls.get_instance().service_plugins
return tuple(weakref.proxy(x) for x in set(service_plugins.values()))
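Why "unique": one plugin instance may be registered under several service type keys, so the tuple is built from set(values) to collapse duplicates before proxying. The dedup in miniature:

import weakref

class RouterPlugin(object):   # stand-in plugin class
    pass

plugin = RouterPlugin()
service_plugins = {'L3_ROUTER_NAT': plugin, 'FLAVORS': plugin}
unique = tuple(weakref.proxy(p) for p in set(service_plugins.values()))
assert len(unique) == 1   # one proxy despite two registrations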

View File

@@ -389,3 +389,12 @@ class NeutronPluginBaseV2(object):
"""
return (self.__class__.start_rpc_listeners !=
NeutronPluginBaseV2.start_rpc_listeners)
def get_workers(self):
"""Returns a collection NeutronWorker instances
If a plugin needs to define worker processes outside of API/RPC workers
then it will override this and return a collection of NeutronWorker
instances
"""
return ()
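A hedged sketch of the hook from the plugin side, using standalone stand-ins rather than the actual NeutronWorker class: the service framework is expected to collect these and start each one alongside the API/RPC workers.

class SyncWorker(object):                 # stand-in for a NeutronWorker
    def start(self):
        print('starting background resync loop')

class ExamplePlugin(object):              # stand-in for a plugin class
    def get_workers(self):
        return (SyncWorker(),)

for worker in ExamplePlugin().get_workers():
    worker.start()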

View File

@@ -20,7 +20,6 @@ from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import uuidutils
from sqlalchemy.orm import attributes as sql_attr
@@ -94,18 +93,14 @@ class Notifier(object):
'nova',
auth=auth)
# NOTE(andreykurilin): novaclient.v1_1 was renamed to v2 and there is
# no way to import the contrib module directly without referencing v2,
# which would only work for novaclient >= 2.21.0.
novaclient_cls = nova_client.get_client_class(NOVA_API_VERSION)
server_external_events = importutils.import_module(
novaclient_cls.__module__.replace(
".client", ".contrib.server_external_events"))
self.nclient = novaclient_cls(
extensions = [
ext for ext in nova_client.discover_extensions(NOVA_API_VERSION)
if ext.name == "server_external_events"]
self.nclient = nova_client.Client(
NOVA_API_VERSION,
session=session,
region_name=cfg.CONF.nova.region_name,
extensions=[server_external_events])
extensions=extensions)
self.batch_notifier = batch_notifier.BatchNotifier(
cfg.CONF.send_events_interval, self.send_events)

View File

@@ -20,6 +20,7 @@ from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
import six
from neutron.common import constants
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db.qos import models as qos_db_model
@@ -57,6 +58,22 @@ class QosRule(base.NeutronDbObject):
dict_['type'] = self.rule_type
return dict_
def should_apply_to_port(self, port):
"""Check whether a rule can be applied to a specific port.
This function has the logic to decide whether a rule should
be applied to a port or not, depending on the source of the
policy (network, or port). Eventually rules could override
this method, or we could make it abstract to allow different
rule behaviour.
"""
is_network_rule = self.qos_policy_id != port[qos_consts.QOS_POLICY_ID]
is_network_device_port = any(port['device_owner'].startswith(prefix)
for prefix
in constants.DEVICE_OWNER_PREFIXES)
return not (is_network_rule and is_network_device_port)
@obj_base.VersionedObjectRegistry.register
class QosBandwidthLimitRule(QosRule):
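The method's behaviour in terms of concrete ports, with the constants reduced to the one prefix that matters here (a simplification of constants.DEVICE_OWNER_PREFIXES): the only rejected combination is a network-sourced rule on a network device port.

NETWORK_PREFIXES = ('network:',)   # simplified stand-in for the constant

def should_apply(rule_policy_id, port):
    is_network_rule = rule_policy_id != port['qos_policy_id']
    is_network_device_port = any(port['device_owner'].startswith(p)
                                 for p in NETWORK_PREFIXES)
    return not (is_network_rule and is_network_device_port)

vm_port = {'qos_policy_id': None, 'device_owner': 'compute:nova'}
dhcp_port = {'qos_policy_id': None, 'device_owner': 'network:dhcp'}
bound_port = {'qos_policy_id': 'port-pol', 'device_owner': 'network:dhcp'}
assert should_apply('net-pol', vm_port)          # network rule, VM port
assert not should_apply('net-pol', dhcp_port)    # network rule, dhcp port
assert should_apply('port-pol', bound_port)      # the port's own policy applies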

View File

@@ -1,7 +0,0 @@
Cisco Neutron Virtual Network Plugin
This plugin implements Neutron v2 APIs and helps configure
topologies consisting of virtual and physical switches.
For more details on use please refer to:
http://wiki.openstack.org/cisco-neutron

View File

@@ -1,118 +0,0 @@
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Attachment attributes
INSTANCE_ID = 'instance_id'
TENANT_ID = 'tenant_id'
TENANT_NAME = 'tenant_name'
HOST_NAME = 'host_name'
# Network attributes
NET_ID = 'id'
NET_NAME = 'name'
NET_VLAN_ID = 'vlan_id'
NET_VLAN_NAME = 'vlan_name'
NET_PORTS = 'ports'
CREDENTIAL_ID = 'credential_id'
CREDENTIAL_NAME = 'credential_name'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
CREDENTIAL_TYPE = 'type'
MASKED_PASSWORD = '********'
USERNAME = 'username'
PASSWORD = 'password'
LOGGER_COMPONENT_NAME = "cisco_plugin"
VSWITCH_PLUGIN = 'vswitch_plugin'
DEVICE_IP = 'device_ip'
NETWORK_ADMIN = 'network_admin'
NETWORK = 'network'
PORT = 'port'
BASE_PLUGIN_REF = 'base_plugin_ref'
CONTEXT = 'context'
SUBNET = 'subnet'
#### N1Kv CONSTANTS
# Special vlan_id value in n1kv_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Maximum VXLAN range configurable for one network profile.
MAX_VXLAN_RANGE = 1000000
# Values for network_type
NETWORK_TYPE_FLAT = 'flat'
NETWORK_TYPE_VLAN = 'vlan'
NETWORK_TYPE_VXLAN = 'vxlan'
NETWORK_TYPE_LOCAL = 'local'
NETWORK_TYPE_NONE = 'none'
NETWORK_TYPE_TRUNK = 'trunk'
NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment'
# Values for network sub_type
NETWORK_TYPE_OVERLAY = 'overlay'
NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan'
NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN
NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY
# Prefix for VM Network name
VM_NETWORK_NAME_PREFIX = 'vmn_'
SET = 'set'
INSTANCE = 'instance'
PROPERTIES = 'properties'
NAME = 'name'
ID = 'id'
POLICY = 'policy'
TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET'
ENCAPSULATIONS = 'encapsulations'
STATE = 'state'
ONLINE = 'online'
MAPPINGS = 'mappings'
MAPPING = 'mapping'
SEGMENTS = 'segments'
SEGMENT = 'segment'
BRIDGE_DOMAIN_SUFFIX = '_bd'
LOGICAL_NETWORK_SUFFIX = '_log_net'
ENCAPSULATION_PROFILE_SUFFIX = '_profile'
UUID_LENGTH = 36
# N1KV vlan and vxlan segment range
N1KV_VLAN_RESERVED_MIN = 3968
N1KV_VLAN_RESERVED_MAX = 4047
N1KV_VXLAN_MIN = 4096
N1KV_VXLAN_MAX = 16000000
# Type and topic for Cisco cfg agent
# ==================================
AGENT_TYPE_CFG = 'Cisco cfg agent'
# Topic for Cisco configuration agent
CFG_AGENT = 'cisco_cfg_agent'
# Topic for routing service helper in Cisco configuration agent
CFG_AGENT_L3_ROUTING = 'cisco_cfg_agent_l3_routing'
# Values for network profile fields
ADD_TENANTS = 'add_tenants'
REMOVE_TENANTS = 'remove_tenants'

View File

@@ -1,53 +0,0 @@
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as cexc
from neutron.plugins.cisco.common import config
from neutron.plugins.cisco.db import network_db_v2 as cdb
class Store(object):
"""Credential Store."""
@staticmethod
def initialize():
dev_dict = config.get_device_dictionary()
for key in dev_dict:
dev_id, dev_ip, dev_key = key
if dev_key == const.USERNAME:
try:
cdb.add_credential(
dev_ip,
dev_dict[dev_id, dev_ip, const.USERNAME],
dev_dict[dev_id, dev_ip, const.PASSWORD],
dev_id)
except cexc.CredentialAlreadyExists:
# We are quietly ignoring this, since it only happens
# if this class module is loaded more than once, in
# which case, the credentials are already populated
pass
@staticmethod
def get_username(cred_name):
"""Get the username."""
credential = cdb.get_credential_name(cred_name)
return credential[const.CREDENTIAL_USERNAME]
@staticmethod
def get_password(cred_name):
"""Get the password."""
credential = cdb.get_credential_name(cred_name)
return credential[const.CREDENTIAL_PASSWORD]

View File

@@ -1,236 +0,0 @@
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions used by the Cisco plugin."""
from neutron.common import exceptions
class NetworkSegmentIDNotFound(exceptions.NeutronException):
"""Segmentation ID for network is not found."""
message = _("Segmentation ID for network %(net_id)s is not found.")
class NoMoreNics(exceptions.NeutronException):
"""No more dynamic NICs are available in the system."""
message = _("Unable to complete operation. No more dynamic NICs are "
"available in the system.")
class NetworkVlanBindingAlreadyExists(exceptions.NeutronException):
"""Binding cannot be created, since it already exists."""
message = _("NetworkVlanBinding for %(vlan_id)s and network "
"%(network_id)s already exists.")
class VlanIDNotFound(exceptions.NeutronException):
"""VLAN ID cannot be found."""
message = _("Vlan ID %(vlan_id)s not found.")
class VlanIDOutsidePool(exceptions.NeutronException):
"""VLAN ID cannot be allocated, since it is outside the configured pool."""
message = _("Unable to complete operation. VLAN ID exists outside of the "
"configured network segment range.")
class VlanIDNotAvailable(exceptions.NeutronException):
"""No VLAN ID available."""
message = _("No Vlan ID available.")
class QosNotFound(exceptions.NeutronException):
"""QoS level with this ID cannot be found."""
message = _("QoS level %(qos_id)s could not be found "
"for tenant %(tenant_id)s.")
class QosNameAlreadyExists(exceptions.NeutronException):
"""QoS Name already exists."""
message = _("QoS level with name %(qos_name)s already exists "
"for tenant %(tenant_id)s.")
class CredentialNotFound(exceptions.NeutronException):
"""Credential with this ID cannot be found."""
message = _("Credential %(credential_id)s could not be found.")
class CredentialNameNotFound(exceptions.NeutronException):
"""Credential Name could not be found."""
message = _("Credential %(credential_name)s could not be found.")
class CredentialAlreadyExists(exceptions.NeutronException):
"""Credential already exists."""
message = _("Credential %(credential_name)s already exists.")
class ProviderNetworkExists(exceptions.NeutronException):
"""Provider network already exists."""
message = _("Provider network %s already exists")
class NexusComputeHostNotConfigured(exceptions.NeutronException):
"""Connection to compute host is not configured."""
message = _("Connection to %(host)s is not configured.")
class NexusConnectFailed(exceptions.NeutronException):
"""Failed to connect to Nexus switch."""
message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.")
class NexusConfigFailed(exceptions.NeutronException):
"""Failed to configure Nexus switch."""
message = _("Failed to configure Nexus: %(config)s. Reason: %(exc)s.")
class NexusPortBindingNotFound(exceptions.NeutronException):
"""NexusPort Binding is not present."""
message = _("Nexus Port Binding (%(filters)s) is not present.")
def __init__(self, **kwargs):
filters = ','.join('%s=%s' % i for i in kwargs.items())
super(NexusPortBindingNotFound, self).__init__(filters=filters)
class NoNexusSviSwitch(exceptions.NeutronException):
"""No usable nexus switch found."""
message = _("No usable Nexus switch found to create SVI interface.")
class PortVnicBindingAlreadyExists(exceptions.NeutronException):
"""PortVnic Binding already exists."""
message = _("PortVnic Binding %(port_id)s already exists.")
class PortVnicNotFound(exceptions.NeutronException):
"""PortVnic Binding is not present."""
message = _("PortVnic Binding %(port_id)s is not present.")
class SubnetNotSpecified(exceptions.NeutronException):
"""Subnet id not specified."""
message = _("No subnet_id specified for router gateway.")
class SubnetInterfacePresent(exceptions.NeutronException):
"""Subnet SVI interface already exists."""
message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.")
class PortIdForNexusSvi(exceptions.NeutronException):
"""Port Id specified for Nexus SVI."""
message = _('Nexus hardware router gateway only uses Subnet Ids.')
class InvalidDetach(exceptions.NeutronException):
message = _("Unable to unplug the attachment %(att_id)s from port "
"%(port_id)s for network %(net_id)s. The attachment "
"%(att_id)s does not exist.")
class PolicyProfileAlreadyExists(exceptions.NeutronException):
"""Policy Profile cannot be created since it already exists."""
message = _("Policy Profile %(profile_id)s "
"already exists.")
class PolicyProfileIdNotFound(exceptions.NotFound):
"""Policy Profile with the given UUID cannot be found."""
message = _("Policy Profile %(profile_id)s could not be found.")
class PolicyProfileNameNotFound(exceptions.NotFound):
"""Policy Profile with the given name cannot be found."""
message = _("Policy Profile %(profile_name)s could not be found.")
class NetworkProfileAlreadyExists(exceptions.NeutronException):
"""Network Profile cannot be created since it already exists."""
message = _("Network Profile %(profile_id)s "
"already exists.")
class NetworkProfileNotFound(exceptions.NotFound):
"""Network Profile with the given UUID/name cannot be found."""
message = _("Network Profile %(profile)s could not be found.")
class NetworkProfileInUse(exceptions.InUse):
"""Network Profile with the given UUID is in use."""
message = _("One or more network segments belonging to network "
"profile %(profile)s is in use.")
class NoMoreNetworkSegments(exceptions.NoNetworkAvailable):
"""Network segments exhausted for the given network profile."""
message = _("No more segments available in network segment pool "
"%(network_profile_name)s.")
class VMNetworkNotFound(exceptions.NotFound):
"""VM Network with the given name cannot be found."""
message = _("VM Network %(name)s could not be found.")
class VxlanIDInUse(exceptions.InUse):
"""VXLAN ID is in use."""
message = _("Unable to create the network. "
"The VXLAN ID %(vxlan_id)s is in use.")
class VxlanIDNotFound(exceptions.NotFound):
"""VXLAN ID cannot be found."""
message = _("Vxlan ID %(vxlan_id)s not found.")
class VxlanIDOutsidePool(exceptions.NeutronException):
"""VXLAN ID cannot be allocated, as it is outside the configured pool."""
message = _("Unable to complete operation. VXLAN ID exists outside of the "
"configured network segment range.")
class VSMConnectionFailed(exceptions.ServiceUnavailable):
"""Connection to VSM failed."""
message = _("Connection to VSM failed: %(reason)s.")
class VSMError(exceptions.NeutronException):
"""Error has occurred on the VSM."""
message = _("Internal VSM Error: %(reason)s.")
class NetworkBindingNotFound(exceptions.NotFound):
"""Network Binding for network cannot be found."""
message = _("Network Binding for network %(network_id)s could "
"not be found.")
class PortBindingNotFound(exceptions.NotFound):
"""Port Binding for port cannot be found."""
message = _("Port Binding for port %(port_id)s could "
"not be found.")
class ProfileTenantBindingNotFound(exceptions.NotFound):
"""Profile to Tenant binding for given profile ID cannot be found."""
message = _("Profile-Tenant binding for profile %(profile_id)s could "
"not be found.")
class NoClusterFound(exceptions.NotFound):
"""No service cluster found to perform multi-segment bridging."""
message = _("No service cluster found to perform multi-segment bridging.")

View File

@@ -1,134 +0,0 @@
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.dec
from neutron import wsgi
class Fault(webob.exc.HTTPException):
"""Error codes for API faults."""
_fault_names = {
400: "malformedRequest",
401: "unauthorized",
451: "CredentialNotFound",
452: "QoSNotFound",
453: "NovatenantNotFound",
454: "MultiportNotFound",
470: "serviceUnavailable",
471: "pluginFault"
}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Generate a WSGI response.
Response is generated based on the exception passed to constructor.
"""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "neutronServiceFault")
fault_data = {
fault_name: {
'code': code,
'message': self.wrapped_exc.explanation}}
# 'code' is an attribute on the fault tag itself
content_type = req.best_match_content_type()
self.wrapped_exc.body = wsgi.Serializer().serialize(
fault_data, content_type)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
class PortNotFound(webob.exc.HTTPClientError):
"""PortNotFound exception.
subclass of :class:`~HTTPClientError`
This indicates that the server did not find the port specified
in the HTTP request for a given network
code: 430, title: Port not Found
"""
code = 430
title = _('Port not Found')
explanation = _('Unable to find a port with the specified identifier.')
class CredentialNotFound(webob.exc.HTTPClientError):
"""CredentialNotFound exception.
subclass of :class:`~HTTPClientError`
This indicates that the server did not find the Credential specified
in the HTTP request
code: 451, title: Credential not Found
"""
code = 451
title = _('Credential Not Found')
explanation = _('Unable to find a Credential with'
' the specified identifier.')
class QosNotFound(webob.exc.HTTPClientError):
"""QosNotFound exception.
subclass of :class:`~HTTPClientError`
This indicates that the server did not find the QoS specified
in the HTTP request
code: 452, title: QoS not Found
"""
code = 452
title = _('QoS Not Found')
explanation = _('Unable to find a QoS with'
' the specified identifier.')
class NovatenantNotFound(webob.exc.HTTPClientError):
"""NovatenantNotFound exception.
subclass of :class:`~HTTPClientError`
This indicates that the server did not find the Novatenant specified
in the HTTP request
code: 453, title: Nova tenant not Found
"""
code = 453
title = _('Nova tenant Not Found')
explanation = _('Unable to find a Novatenant with'
' the specified identifier.')
class RequestedStateInvalid(webob.exc.HTTPClientError):
"""RequestedStateInvalid exception.
subclass of :class:`~HTTPClientError`
This indicates that the server could not update the port state
to the request value
code: 431, title: Requested State Invalid
"""
code = 431
title = _('Requested State Invalid')
explanation = _('Unable to update port state with specified value.')

View File

@@ -1,138 +0,0 @@
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cisco_opts = [
cfg.StrOpt('vlan_name_prefix', default='q-',
help=_("VLAN Name prefix")),
cfg.StrOpt('provider_vlan_name_prefix', default='p-',
help=_("VLAN Name prefix for provider vlans")),
cfg.BoolOpt('provider_vlan_auto_create', default=True,
help=_('Provider VLANs are automatically created as needed '
'on the Nexus switch')),
cfg.BoolOpt('provider_vlan_auto_trunk', default=True,
help=_('Provider VLANs are automatically trunked as needed '
'on the ports of the Nexus switch')),
cfg.BoolOpt('nexus_l3_enable', default=False,
help=_("Enable L3 support on the Nexus switches")),
cfg.BoolOpt('svi_round_robin', default=False,
help=_("Distribute SVI interfaces over all switches")),
cfg.StrOpt('model_class',
default='neutron.plugins.cisco.models.virt_phy_sw_v2.'
'VirtualPhysicalSwitchModelV2',
help=_("Model Class")),
]
cisco_n1k_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("N1K Integration Bridge")),
cfg.BoolOpt('enable_tunneling', default=True,
help=_("N1K Enable Tunneling")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("N1K Tunnel Bridge")),
cfg.StrOpt('local_ip', default='10.0.0.3',
help=_("N1K Local IP")),
cfg.StrOpt('tenant_network_type', default='local',
help=_("N1K Tenant Network Type")),
cfg.StrOpt('bridge_mappings', default='',
help=_("N1K Bridge Mappings")),
cfg.StrOpt('vxlan_id_ranges', default='5000:10000',
help=_("N1K VXLAN ID Ranges")),
cfg.StrOpt('network_vlan_ranges', default='vlan:1:4095',
help=_("N1K Network VLAN Ranges")),
cfg.StrOpt('default_network_profile', default='default_network_profile',
help=_("N1K default network profile")),
cfg.StrOpt('default_policy_profile', default='service_profile',
help=_("N1K default policy profile")),
cfg.StrOpt('network_node_policy_profile', default='dhcp_pp',
help=_("N1K policy profile for network node")),
cfg.IntOpt('poll_duration', default=60,
help=_("N1K Policy profile polling duration in seconds")),
cfg.BoolOpt('restrict_policy_profiles', default=False,
help=_("Restrict the visibility of policy profiles to the "
"tenants")),
cfg.IntOpt('http_pool_size', default=4,
help=_("Number of threads to use to make HTTP requests")),
cfg.IntOpt('http_timeout', default=15,
help=_("N1K http timeout duration in seconds")),
cfg.BoolOpt('restrict_network_profiles', default=True,
help=_("Restrict tenants from accessing network profiles "
"belonging to some other tenant")),
]
cfg.CONF.register_opts(cisco_opts, "CISCO")
cfg.CONF.register_opts(cisco_n1k_opts, "CISCO_N1K")
# shortcuts
CONF = cfg.CONF
CISCO = cfg.CONF.CISCO
CISCO_N1K = cfg.CONF.CISCO_N1K
#
# device_dictionary - Contains all external device configuration.
#
# When populated the device dictionary format is:
# {('<device ID>', '<device ipaddr>', '<keyword>'): '<value>', ...}
#
# Example:
# {('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin',
# ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword',
# ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', ...}
#
device_dictionary = {}
#
# first_device_ip - IP address of first switch discovered in config
#
# Used for SVI placement when round-robin placement is disabled
#
first_device_ip = None
class CiscoConfigOptions(object):
"""Cisco Configuration Options Class."""
def __init__(self):
self._create_device_dictionary()
def _create_device_dictionary(self):
"""
Create the device dictionary from the cisco_plugins.ini
device supported sections. Ex. NEXUS_SWITCH, N1KV.
"""
global first_device_ip
multi_parser = cfg.MultiConfigParser()
read_ok = multi_parser.read(CONF.config_file)
if len(read_ok) != len(CONF.config_file):
raise cfg.Error(_("Some config files were not parsed properly"))
first_device_ip = None
for parsed_file in multi_parser.parsed:
for parsed_item in parsed_file.keys():
dev_id, sep, dev_ip = parsed_item.partition(':')
if dev_id.lower() == 'n1kv':
for dev_key, value in parsed_file[parsed_item].items():
if dev_ip and not first_device_ip:
first_device_ip = dev_ip
device_dictionary[dev_id, dev_ip, dev_key] = value[0]
def get_device_dictionary():
return device_dictionary

View File

@@ -1,97 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import agents_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
class HostingDevice(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents an appliance hosting Neutron router(s).
When the hosting device is a Nova VM 'id' is uuid of that VM.
"""
__tablename__ = 'cisco_hosting_devices'
# complementary id to enable identification of associated Neutron resources
complementary_id = sa.Column(sa.String(36))
# manufacturer id of the device, e.g., its serial number
device_id = sa.Column(sa.String(255))
admin_state_up = sa.Column(sa.Boolean, nullable=False, default=True)
# 'management_port_id' is the Neutron Port used for management interface
management_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="SET NULL"))
management_port = orm.relationship(models_v2.Port)
# 'protocol_port' is udp/tcp port of hosting device. May be empty.
protocol_port = sa.Column(sa.Integer)
cfg_agent_id = sa.Column(sa.String(36),
sa.ForeignKey('agents.id'),
nullable=True)
cfg_agent = orm.relationship(agents_db.Agent)
# Service VMs take time to boot so we store creation time
# so we can give preference to older ones when scheduling
created_at = sa.Column(sa.DateTime, nullable=False)
status = sa.Column(sa.String(16))
class HostedHostingPortBinding(model_base.BASEV2):
"""Represents binding of logical resource's port to its hosting port."""
__tablename__ = 'cisco_port_mappings'
logical_resource_id = sa.Column(sa.String(36), primary_key=True)
logical_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="CASCADE"),
primary_key=True)
logical_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==HostedHostingPortBinding.logical_port_id',
backref=orm.backref('hosting_info', cascade='all', uselist=False))
# type of hosted port, e.g., router_interface, ..._gateway, ..._floatingip
port_type = sa.Column(sa.String(32))
# type of network the router port belongs to
network_type = sa.Column(sa.String(32))
hosting_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete='CASCADE'))
hosting_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==HostedHostingPortBinding.hosting_port_id')
# VLAN tag for trunk ports
segmentation_id = sa.Column(sa.Integer, autoincrement=False)
class RouterHostingDeviceBinding(model_base.BASEV2):
"""Represents binding between Neutron routers and their hosting devices."""
__tablename__ = 'cisco_router_mappings'
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete='CASCADE'),
primary_key=True)
router = orm.relationship(
l3_db.Router,
backref=orm.backref('hosting_info', cascade='all', uselist=False))
# If 'auto_schedule' is True then router is automatically scheduled
# if it lacks a hosting device or its hosting device fails.
auto_schedule = sa.Column(sa.Boolean, default=True, nullable=False)
# id of hosting device hosting this router, None/NULL if unscheduled.
hosting_device_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_hosting_devices.id',
ondelete='SET NULL'))
hosting_device = orm.relationship(HostingDevice)

File diff suppressed because it is too large

View File

@@ -1,185 +0,0 @@
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
from neutron.plugins.cisco.common import cisco_constants
LOG = logging.getLogger(__name__)
class N1kvVlanAllocation(model_base.BASEV2):
"""Represents allocation state of vlan_id on physical network."""
__tablename__ = 'cisco_n1kv_vlan_allocations'
physical_network = sa.Column(sa.String(64),
nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sql.false())
network_profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id',
ondelete="CASCADE"),
nullable=False)
class N1kvVxlanAllocation(model_base.BASEV2):
"""Represents allocation state of vxlan_id."""
__tablename__ = 'cisco_n1kv_vxlan_allocations'
vxlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sql.false())
network_profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id',
ondelete="CASCADE"),
nullable=False)
class N1kvPortBinding(model_base.BASEV2):
"""Represents binding of ports to policy profile."""
__tablename__ = 'cisco_n1kv_port_bindings'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_policy_profiles.id'))
class N1kvNetworkBinding(model_base.BASEV2):
"""Represents binding of virtual network to physical realization."""
__tablename__ = 'cisco_n1kv_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
multicast_ip = sa.Column(sa.String(32))
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id'))
class N1kVmNetwork(model_base.BASEV2):
"""Represents VM Network information."""
__tablename__ = 'cisco_n1kv_vmnetworks'
name = sa.Column(sa.String(80), primary_key=True)
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_policy_profiles.id'))
network_id = sa.Column(sa.String(36))
port_count = sa.Column(sa.Integer)
class NetworkProfile(model_base.BASEV2, models_v2.HasId):
"""
Nexus1000V Network Profiles
segment_type - VLAN, OVERLAY, TRUNK, MULTI_SEGMENT
sub_type - TRUNK_VLAN, TRUNK_VXLAN, native_vxlan, enhanced_vxlan
segment_range - '<integer>-<integer>'
multicast_ip_index - <integer>
multicast_ip_range - '<ip>-<ip>'
physical_network - Name for the physical network
"""
__tablename__ = 'cisco_network_profiles'
name = sa.Column(sa.String(255))
segment_type = sa.Column(sa.Enum(cisco_constants.NETWORK_TYPE_VLAN,
cisco_constants.NETWORK_TYPE_OVERLAY,
cisco_constants.NETWORK_TYPE_TRUNK,
cisco_constants.
NETWORK_TYPE_MULTI_SEGMENT,
name='segment_type'),
nullable=False)
sub_type = sa.Column(sa.String(255))
segment_range = sa.Column(sa.String(255))
multicast_ip_index = sa.Column(sa.Integer, default=0,
server_default='0')
multicast_ip_range = sa.Column(sa.String(255))
physical_network = sa.Column(sa.String(255))
class PolicyProfile(model_base.BASEV2):
"""
Nexus1000V Network Profiles
Both 'id' and 'name' are coming from Nexus1000V switch
"""
__tablename__ = 'cisco_policy_profiles'
id = sa.Column(sa.String(36), primary_key=True)
name = sa.Column(sa.String(255))
class ProfileBinding(model_base.BASEV2):
"""
Represents a binding of Network Profile
or Policy Profile to tenant_id
"""
__tablename__ = 'cisco_n1kv_profile_bindings'
profile_type = sa.Column(sa.Enum(cisco_constants.NETWORK,
cisco_constants.POLICY,
name='profile_type'))
tenant_id = sa.Column(sa.String(36),
primary_key=True,
default=cisco_constants.TENANT_ID_NOT_SET,
server_default=cisco_constants.TENANT_ID_NOT_SET)
profile_id = sa.Column(sa.String(36), primary_key=True)
class N1kvTrunkSegmentBinding(model_base.BASEV2):
"""Represents binding of segments in trunk networks."""
__tablename__ = 'cisco_n1kv_trunk_segments'
trunk_segment_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
segment_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
dot1qtag = sa.Column(sa.String(36), nullable=False, primary_key=True)
class N1kvMultiSegmentNetworkBinding(model_base.BASEV2):
"""Represents binding of segments in multi-segment networks."""
__tablename__ = 'cisco_n1kv_multi_segments'
multi_segment_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
segment1_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
segment2_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
encap_profile_name = sa.Column(sa.String(36))

View File

@@ -1,280 +0,0 @@
# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from neutron.db import api as db
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import network_models_v2
LOG = logging.getLogger(__name__)
def get_all_qoss(tenant_id):
"""Lists all the qos to tenant associations."""
LOG.debug("get_all_qoss() called")
session = db.get_session()
return (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).all())
def get_qos(tenant_id, qos_id):
"""Lists the qos given a tenant_id and qos_id."""
LOG.debug("get_qos() called")
session = db.get_session()
try:
return (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).
filter_by(qos_id=qos_id).one())
except exc.NoResultFound:
raise c_exc.QosNotFound(qos_id=qos_id,
tenant_id=tenant_id)
def add_qos(tenant_id, qos_name, qos_desc):
"""Adds a qos to tenant association."""
LOG.debug("add_qos() called")
session = db.get_session()
try:
qos = (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).
filter_by(qos_name=qos_name).one())
raise c_exc.QosNameAlreadyExists(qos_name=qos_name,
tenant_id=tenant_id)
except exc.NoResultFound:
qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
qos_name=qos_name,
qos_desc=qos_desc)
session.add(qos)
session.flush()
return qos
def remove_qos(tenant_id, qos_id):
"""Removes a qos to tenant association."""
session = db.get_session()
try:
qos = (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).
filter_by(qos_id=qos_id).one())
session.delete(qos)
session.flush()
return qos
except exc.NoResultFound:
pass
def update_qos(tenant_id, qos_id, new_qos_name=None):
"""Updates a qos to tenant association."""
session = db.get_session()
try:
qos = (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).
filter_by(qos_id=qos_id).one())
if new_qos_name:
qos["qos_name"] = new_qos_name
session.merge(qos)
session.flush()
return qos
except exc.NoResultFound:
raise c_exc.QosNotFound(qos_id=qos_id,
tenant_id=tenant_id)
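# Illustrative sketch (not part of the original module): the helpers above
# form a small CRUD API keyed by tenant; 'demo-tenant' is a placeholder id.
def _example_qos_crud():
    qos = add_qos('demo-tenant', 'gold', 'high-priority traffic')
    qos = update_qos('demo-tenant', qos['qos_id'], new_qos_name='platinum')
    assert get_qos('demo-tenant', qos['qos_id'])['qos_name'] == 'platinum'
    remove_qos('demo-tenant', qos['qos_id'])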
def get_all_credentials():
"""Lists all the creds for a tenant."""
session = db.get_session()
return (session.query(network_models_v2.Credential).all())
def get_credential(credential_id):
"""Lists the creds for given a cred_id."""
session = db.get_session()
try:
return (session.query(network_models_v2.Credential).
filter_by(credential_id=credential_id).one())
except exc.NoResultFound:
raise c_exc.CredentialNotFound(credential_id=credential_id)
def get_credential_name(credential_name):
"""Lists the creds for given a cred_name."""
session = db.get_session()
try:
return (session.query(network_models_v2.Credential).
filter_by(credential_name=credential_name).one())
except exc.NoResultFound:
raise c_exc.CredentialNameNotFound(credential_name=credential_name)
def add_credential(credential_name, user_name, password, type):
"""Create a credential."""
session = db.get_session()
try:
cred = (session.query(network_models_v2.Credential).
filter_by(credential_name=credential_name).one())
raise c_exc.CredentialAlreadyExists(credential_name=credential_name)
except exc.NoResultFound:
cred = network_models_v2.Credential(
credential_id=uuidutils.generate_uuid(),
credential_name=credential_name,
user_name=user_name,
password=password,
type=type)
session.add(cred)
session.flush()
return cred
def remove_credential(credential_id):
"""Removes a credential."""
session = db.get_session()
try:
cred = (session.query(network_models_v2.Credential).
filter_by(credential_id=credential_id).one())
session.delete(cred)
session.flush()
return cred
except exc.NoResultFound:
pass
def update_credential(credential_id,
new_user_name=None, new_password=None):
"""Updates a credential for a tenant."""
session = db.get_session()
try:
cred = (session.query(network_models_v2.Credential).
filter_by(credential_id=credential_id).one())
if new_user_name:
cred["user_name"] = new_user_name
if new_password:
cred["password"] = new_password
session.merge(cred)
session.flush()
return cred
except exc.NoResultFound:
raise c_exc.CredentialNotFound(credential_id=credential_id)
def get_all_n1kv_credentials():
session = db.get_session()
return (session.query(network_models_v2.Credential).
filter_by(type='n1kv'))
def delete_all_n1kv_credentials():
session = db.get_session()
session.query(network_models_v2.Credential).filter_by(type='n1kv').delete()
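# Illustrative sketch (not part of the original module): typical lifecycle
# of a credential record; all values are made-up placeholders.
def _example_credential_lifecycle():
    cred = add_credential('n1kv-admin', 'admin', 's3cret', 'n1kv')
    cred = update_credential(cred['credential_id'], new_password='n3w-s3cret')
    for c in get_all_n1kv_credentials():
        LOG.debug("found n1kv credential %s", c['credential_name'])
    remove_credential(cred['credential_id'])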
def add_provider_network(network_id, network_type, segmentation_id):
"""Add a network to the provider network table."""
session = db.get_session()
if session.query(network_models_v2.ProviderNetwork).filter_by(
network_id=network_id).first():
raise c_exc.ProviderNetworkExists(network_id)
pnet = network_models_v2.ProviderNetwork(network_id=network_id,
network_type=network_type,
segmentation_id=segmentation_id)
session.add(pnet)
session.flush()
def remove_provider_network(network_id):
"""Remove network_id from the provider network table.
:param network_id: Any network id. If it is not in the table, do nothing.
:return: network_id if it was in the table and successfully removed.
"""
session = db.get_session()
pnet = (session.query(network_models_v2.ProviderNetwork).
filter_by(network_id=network_id).first())
if pnet:
session.delete(pnet)
session.flush()
return network_id
def is_provider_network(network_id):
"""Return True if network_id is in the provider network table."""
session = db.get_session()
if session.query(network_models_v2.ProviderNetwork).filter_by(
network_id=network_id).first():
return True
def is_provider_vlan(vlan_id):
"""Check for a for a vlan provider network with the specified vland_id.
Returns True if the provider network table contains a vlan network
with the specified vlan_id.
"""
session = db.get_session()
if (session.query(network_models_v2.ProviderNetwork).
filter_by(network_type=const.NETWORK_TYPE_VLAN,
segmentation_id=vlan_id).first()):
return True
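# Illustrative sketch (not part of the original module): a round trip
# through the provider-network helpers above. The network id is a
# placeholder; a real row must reference an existing networks.id (FK).
def _example_provider_network_round_trip(network_id):
    add_provider_network(network_id, const.NETWORK_TYPE_VLAN, 100)
    assert is_provider_network(network_id)
    assert is_provider_vlan(100)
    remove_provider_network(network_id)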
class Credential_db_mixin(object):
"""Mixin class for Cisco Credentials as a resource."""
def _make_credential_dict(self, credential, fields=None):
res = {'credential_id': credential['credential_id'],
'credential_name': credential['credential_name'],
'user_name': credential['user_name'],
'password': credential['password'],
'type': credential['type']}
return self._fields(res, fields)
def create_credential(self, context, credential):
"""Create a credential."""
c = credential['credential']
cred = add_credential(c['credential_name'],
c['user_name'],
c['password'],
c['type'])
return self._make_credential_dict(cred)
def get_credentials(self, context, filters=None, fields=None):
"""Retrieve a list of credentials."""
return self._get_collection(context,
network_models_v2.Credential,
self._make_credential_dict,
filters=filters,
fields=fields)
def get_credential(self, context, id, fields=None):
"""Retireve the requested credential based on its id."""
credential = get_credential(id)
return self._make_credential_dict(credential, fields)
def update_credential(self, context, id, credential):
"""Update a credential based on its id."""
c = credential['credential']
cred = update_credential(id,
c['user_name'],
c['password'])
return self._make_credential_dict(cred)
def delete_credential(self, context, id):
"""Delete a credential based on its id."""
return remove_credential(id)
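# Illustrative sketch (not part of the original module): a plugin would mix
# this class into its hierarchy so the credential CRUD methods become part
# of the plugin API; NeutronDbPluginV2 supplies the _get_collection() and
# _fields() helpers the mixin relies on. The extension alias is an example.
def _example_plugin_class():
    from neutron.db import db_base_plugin_v2

    class ExamplePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                        Credential_db_mixin):
        supported_extension_aliases = ['credential']

    return ExamplePlugin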

View File

@ -1,52 +0,0 @@
# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from neutron.db import model_base
class QoS(model_base.BASEV2):
"""Represents QoS policies for a tenant."""
__tablename__ = 'cisco_qos_policies'
qos_id = sa.Column(sa.String(255))
tenant_id = sa.Column(sa.String(255), primary_key=True)
qos_name = sa.Column(sa.String(255), primary_key=True)
qos_desc = sa.Column(sa.String(255))
class Credential(model_base.BASEV2):
"""Represents credentials for a tenant to control Cisco switches."""
__tablename__ = 'cisco_credentials'
credential_id = sa.Column(sa.String(255))
credential_name = sa.Column(sa.String(255), primary_key=True)
user_name = sa.Column(sa.String(255))
password = sa.Column(sa.String(255))
type = sa.Column(sa.String(255))
class ProviderNetwork(model_base.BASEV2):
"""Represents networks that were created as provider networks."""
__tablename__ = 'cisco_provider_networks'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(255), nullable=False)
segmentation_id = sa.Column(sa.Integer, nullable=False)

View File

@ -1,47 +0,0 @@
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def get_view_builder(req):
base_url = req.application_url
return ViewBuilder(base_url)
class ViewBuilder(object):
"""ViewBuilder for Credential, derived from neutron.views.networks."""
def __init__(self, base_url):
"""Initialize builder.
        :param base_url: URL of the root WSGI application
"""
self.base_url = base_url
def build(self, credential_data, is_detail=False):
"""Generic method used to generate a credential entity."""
if is_detail:
credential = self._build_detail(credential_data)
else:
credential = self._build_simple(credential_data)
return credential
def _build_simple(self, credential_data):
"""Return a simple description of credential."""
return dict(credential=dict(id=credential_data['credential_id']))
def _build_detail(self, credential_data):
"""Return a detailed description of credential."""
return dict(credential=dict(id=credential_data['credential_id'],
name=credential_data['user_name'],
password=credential_data['password']))
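# Illustrative sketch (not part of the original module): how a WSGI
# controller might use the builder; 'req' stands in for a webob request.
def _example_credential_views(req, credential_data):
    builder = get_view_builder(req)
    summary = builder.build(credential_data)
    detail = builder.build(credential_data, is_detail=True)
    return summary, detail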

Some files were not shown because too many files have changed in this diff.