Merge remote-tracking branch 'origin/master' into walnut

Change-Id: Ic6314ef9c1db6524fbb0ed8b1bacdc2b081c4775

commit fdc3431ccd
@@ -1,4 +1,4 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION | cat
test_id_option=--load-list $IDFILE
test_list_option=--list
devstack/lib/l2_agent_sriovnicswitch (executable file, 23 lines)
@@ -0,0 +1,23 @@
SRIOV_AGENT_CONF="${Q_PLUGIN_CONF_PATH}/sriov_agent.ini"
SRIOV_AGENT_BINARY="${NEUTRON_BIN_DIR}/neutron-sriov-nic-agent"

function configure_l2_agent_sriovnicswitch {
    if [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then
        PHYSICAL_DEVICE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE
    fi
    if [[ -n "$PHYSICAL_DEVICE_MAPPINGS" ]]; then
        iniset /$SRIOV_AGENT_CONF sriov_nic physical_device_mappings $PHYSICAL_DEVICE_MAPPINGS
    fi

    iniset /$SRIOV_AGENT_CONF securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver

    iniset /$SRIOV_AGENT_CONF agent extensions "$L2_AGENT_EXTENSIONS"
}

function start_l2_agent_sriov {
    run_process q-sriov-agt "$SRIOV_AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$SRIOV_AGENT_CONF"
}

function stop_l2_agent_sriov {
    stop_process q-sriov-agt
}
@@ -1,3 +1,6 @@
source $LIBDIR/ml2_drivers/sriovnicswitch

function enable_ml2_extension_driver {
    local extension_driver=$1
    if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then
@@ -11,3 +14,16 @@ function enable_ml2_extension_driver {
function configure_qos_ml2 {
    enable_ml2_extension_driver "qos"
}

function configure_ml2 {
    OIFS=$IFS;
    IFS=",";
    mechanism_drivers_array=($Q_ML2_PLUGIN_MECHANISM_DRIVERS);
    IFS=$OIFS;
    for mechanism_driver in "${mechanism_drivers_array[@]}"; do
        if [ "$(type -t configure_ml2_$mechanism_driver)" = function ]; then
            configure_ml2_$mechanism_driver
        fi
    done
}
devstack/lib/ml2_drivers/sriovnicswitch (executable file, 3 lines)
@@ -0,0 +1,3 @@
function configure_ml2_sriovnicswitch {
    iniset /$Q_PLUGIN_CONF_FILE ml2_sriov agent_required True
}
@@ -1,6 +1,7 @@
LIBDIR=$DEST/neutron/devstack/lib

source $LIBDIR/l2_agent
source $LIBDIR/l2_agent_sriovnicswitch
source $LIBDIR/ml2
source $LIBDIR/qos

@@ -15,4 +16,26 @@ if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
    if is_service_enabled q-agt; then
        configure_l2_agent
    fi
    #Note: sriov agent should run with OVS or linux bridge agent
    #because they are the mechanisms that bind the DHCP and router ports.
    #Currently devstack lacks the option to run two agents on the same node.
    #Therefore we create new service, q-sriov-agt, and the q-agt should be OVS
    #or linux bridge.
    if is_service_enabled q-sriov-agt; then
        configure_$Q_PLUGIN
        configure_l2_agent
        configure_l2_agent_sriovnicswitch
    fi
fi

if [[ "$1" == "stack" && "$2" == "extra" ]]; then
    if is_service_enabled q-sriov-agt; then
        start_l2_agent_sriov
    fi
fi

if [[ "$1" == "unstack" ]]; then
    if is_service_enabled q-sriov-agt; then
        stop_l2_agent_sriov
    fi
fi
@@ -25,8 +25,8 @@ Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2<br>
</a>
</td>
<td align="center">
Failure Percentage - Last 10 Days - Large Opts<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Opts&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
Failure Percentage - Last 10 Days - Large Ops<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Ops&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29" width="400">
</a>
</td>
@@ -94,18 +94,18 @@ In practical terms this scenario would be translated in the code below:


def callback1(resource, event, trigger, **kwargs):
    print 'Callback1 called by trigger: ', trigger
    print 'kwargs: ', kwargs
    print('Callback1 called by trigger: ', trigger)
    print('kwargs: ', kwargs)

def callback2(resource, event, trigger, **kwargs):
    print 'Callback2 called by trigger: ', trigger
    print 'kwargs: ', kwargs
    print('Callback2 called by trigger: ', trigger)
    print('kwargs: ', kwargs)


# B and C express interest with I
registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
print 'Subscribed'
print('Subscribed')


# A notifies
@@ -114,7 +114,7 @@ In practical terms this scenario would be translated in the code below:
    registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


print 'Notifying...'
print('Notifying...')
do_notify()

@@ -171,13 +171,13 @@ to abort events are ignored. The snippet below shows this in action:
    raise Exception('I am failing!')

def callback2(resource, event, trigger, **kwargs):
    print 'Callback2 called by %s on event %s' % (trigger, event)
    print('Callback2 called by %s on event %s' % (trigger, event))


registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.ABORT_CREATE)
print 'Subscribed'
print('Subscribed')


def do_notify():
@@ -185,11 +185,11 @@ to abort events are ignored. The snippet below shows this in action:
    registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


print 'Notifying...'
print('Notifying...')
try:
    do_notify()
except exceptions.CallbackFailure as e:
    print 'Error: ', e
    print('Error: ', e)

The output is:

@@ -237,11 +237,11 @@ The snippet below shows these concepts in action:


def callback1(resource, event, trigger, **kwargs):
    print 'Callback1 called by %s on event %s for resource %s' % (trigger, event, resource)
    print('Callback1 called by %s on event %s for resource %s' % (trigger, event, resource))


def callback2(resource, event, trigger, **kwargs):
    print 'Callback2 called by %s on event %s for resource %s' % (trigger, event, resource)
    print('Callback2 called by %s on event %s for resource %s' % (trigger, event, resource))


registry.subscribe(callback1, resources.ROUTER, events.BEFORE_READ)
@@ -249,11 +249,11 @@ The snippet below shows these concepts in action:
registry.subscribe(callback1, resources.ROUTER, events.AFTER_DELETE)
registry.subscribe(callback1, resources.PORT, events.BEFORE_UPDATE)
registry.subscribe(callback2, resources.ROUTER_GATEWAY, events.BEFORE_UPDATE)
print 'Subscribed'
print('Subscribed')


def do_notify():
    print 'Notifying...'
    print('Notifying...')
    kwargs = {'foo': 'bar'}
    registry.notify(resources.ROUTER, events.BEFORE_READ, do_notify, **kwargs)
    registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
@@ -356,17 +356,17 @@ What kind of function can be a callback?


def callback1(resource, event, trigger, **kwargs):
    print 'module callback'
    print('module callback')


class MyCallback(object):

    def callback2(self, resource, event, trigger, **kwargs):
        print 'object callback'
        print('object callback')

    @classmethod
    def callback3(cls, resource, event, trigger, **kwargs):
        print 'class callback'
        print('class callback')


c = MyCallback()
@@ -376,7 +376,7 @@ What kind of function can be a callback?

def do_notify():
    def nested_subscribe(resource, event, trigger, **kwargs):
        print 'nested callback'
        print('nested callback')

    registry.subscribe(nested_subscribe, resources.ROUTER, events.BEFORE_CREATE)

@@ -384,7 +384,7 @@ What kind of function can be a callback?
    registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


print 'Notifying...'
print('Notifying...')
do_notify()

And the output is going to be:

@@ -506,6 +506,28 @@ Extensions can be loaded in two ways:
  variable is commented.


Service Providers
~~~~~~~~~~~~~~~~~

If your project uses service provider(s) the same way VPNAAS and LBAAS do, you
specify your service provider in your ``project_name.conf`` file like so::

    [service_providers]
    # Must be in form:
    # service_provider=<service_type>:<name>:<driver>[:default][,...]

In order for Neutron to load this correctly, make sure you do the following in
your code::

    from neutron.db import servicetype_db
    service_type_manager = servicetype_db.ServiceTypeManager.get_instance()
    service_type_manager.add_provider_configuration(
        YOUR_SERVICE_TYPE,
        pconf.ProviderConfiguration(YOUR_SERVICE_MODULE))

This is typically required when you instantiate your service plugin class.
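
For orientation, a minimal sketch of where that wiring usually lives is shown below; the plugin class, service type constant, and the ``pconf`` import path are illustrative assumptions, not part of this change::

    # Hypothetical service plugin constructor; all names here are placeholders.
    from neutron.db import servicetype_db as st_db
    from neutron.services import provider_configuration as pconf  # assumed import path

    class MyServicePlugin(object):
        def __init__(self):
            # Load the [service_providers] entries for this service type so the
            # configured driver(s) can be resolved later.
            st_db.ServiceTypeManager.get_instance().add_provider_configuration(
                'MYSERVICE',  # placeholder service type
                pconf.ProviderConfiguration('neutron_myservice'))  # placeholder module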

Interface Drivers
~~~~~~~~~~~~~~~~~

@@ -83,6 +83,12 @@ When?
stack testing can help here as the full stack infrastructure can restart an
agent during the test.

Prerequisites
-------------

The fullstack test suite assumes that the 240.0.0.0/3 range in the root
namespace of the test machine is available for its use.

Short Term Goals
----------------

@@ -103,9 +109,6 @@ the fact as there will probably be something to copy/paste from.
Long Term Goals
---------------

* Currently we configure the OVS agent with VLANs segmentation (Only because
  it's easier). This allows us to validate most functionality, but we might
  need to support tunneling somehow.
* How will advanced services use the full stack testing infrastructure? Full
  stack tests infrastructure classes are expected to change quite a bit over
  the next coming months. This means that other repositories may import these
@@ -116,3 +119,4 @@ Long Term Goals
  mechanism driver. We may modularize the topology configuration further to
  allow to rerun full stack tests against different Neutron plugins or ML2
  mechanism drivers.
* Add OVS ARP responder coverage when the gate supports OVS 2.1+
@@ -84,8 +84,14 @@ for a port or a network:

Each QoS policy contains zero or more QoS rules. A policy is then applied to a
network or a port, making all rules of the policy applied to the corresponding
Neutron resource (for a network, applying a policy means that the policy will
be applied to all ports that belong to it).
Neutron resource.

When applied through a network association, policy rules could apply or not
to neutron internal ports (like router, dhcp, load balancer, etc..). The QosRule
base object provides a default should_apply_to_port method which could be
overridden. In the future we may want to have a flag in QoSNetworkPolicyBinding
or QosRule to enforce such type of application (for example when limiting all
the ingress of routers devices on an external network automatically).
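
As a rough illustration of that hook, a rule subclass might opt out of internal ports as sketched below; the class names and the device_owner heuristic are assumptions for the sketch, not code from this patch::

    # Hypothetical subclass of the QosRule base object mentioned above.
    class ExternalTrafficOnlyRule(QosRule):

        def should_apply_to_port(self, port):
            # Skip Neutron-internal ports (router, dhcp, load balancer, ...);
            # the device_owner prefix check is an assumed heuristic.
            return not port.get('device_owner', '').startswith('network:')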

From the database point of view, the following objects are defined in the schema:

@@ -164,12 +164,6 @@ difference between CountableResource and TrackedResource.
Quota Enforcement
-----------------

**NOTE: The reservation engine is currently not wired into the API controller
as issues have been discovered with multiple workers. For more information
see _bug1468134**

.. _bug1468134: https://bugs.launchpad.net/neutron/+bug/1486134

Before dispatching a request to the plugin, the Neutron 'base' controller [#]_
attempts to make a reservation for requested resource(s).
Reservations are made by calling the make_reservation method in
@@ -130,19 +130,33 @@ needed.
Sub-Project Release Process
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Only members of the `neutron-release
<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can do
releases. Make sure you talk to a member of neutron-release to perform your
release.

To release a sub-project, follow these steps:

* Only members of the `neutron-release
  <https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can
  do releases. Make sure you talk to a member of neutron-release to perform
  your release.
* For projects which have not moved to post-versioning, we need to push an
  alpha tag to avoid pbr complaining. The neutron-release group will handle
  this.
* Modify setup.cfg to remove the version (if you have one), which moves your
  project to post-versioning, similar to all the other Neutron projects. You
  can skip this step if you don't have a version in setup.cfg.
* Have neutron-release push the tag to gerrit.
* Have neutron-release `tag the release
  alpha tag to avoid pbr complaining. A member of the neutron-release group
  will handle this.
* A sub-project owner should modify setup.cfg to remove the version (if you
  have one), which moves your project to post-versioning, similar to all the
  other Neutron projects. You can skip this step if you don't have a version in
  setup.cfg.
* A member of neutron-release will then `tag the release
  <http://docs.openstack.org/infra/manual/drivers.html#tagging-a-release>`_,
  which will release the code to PyPi.
* The releases will now be on PyPi. A sub-project owner should verify this by
  going to a URL similar to
  `this <https://pypi.python.org/pypi/networking-odl>`_.
* A sub-project owner should next go to Launchpad and release this version
  using the "Release Now" button for the release itself.
* A sub-project owner should update any bugs that were fixed with this
  release to "Fix Released" in Launchpad.
* A sub-project owner should add the tarball to the Launchpad page for the
  release using the "Add download file" link.
* A sub-project owner should add the next milestone to the Launchpad series, or
  if a new series is required, create the new series and a new milestone.
* Finally a sub-project owner should send an email to the openstack-announce
  mailing list announcing the new release.
@@ -64,6 +64,7 @@
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
# This option is deprecated and will be removed in the M release.
# external_network_bridge = br-ex

# TCP Port used by Neutron metadata server
@@ -190,9 +190,9 @@

# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
@@ -306,19 +306,13 @@
# ========== end of items for VLAN trunking networks ==========

# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. A value of 0 runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them. If not
# specified, the default value is equal to the number of CPUs available to
# achieve best performance.
# Number of separate API worker processes to spawn. If not specified or < 1,
# the default value is equal to the number of CPUs available.
# api_workers = <number of CPUs>

# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Number of separate RPC worker processes to spawn. If not specified or < 1,
# a single RPC worker process is spawned by the parent process.
# rpc_workers = 1

# Timeout for client connections socket operations. If an
# incoming connection is idle for this number of seconds it
@@ -1,15 +0,0 @@
[cfg_agent]
# (IntOpt) Interval in seconds for processing of service updates.
# That is when the config agent's process_services() loop executes
# and it lets each service helper to process its service resources.
# rpc_loop_interval = 10

# (StrOpt) Period-separated module path to the routing service helper class.
# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper

# (IntOpt) Timeout value in seconds for connecting to a hosting device.
# device_connection_timeout = 30

# (IntOpt) The time in seconds until a backlogged hosting device is
# presumed dead or booted to an error state.
# hosting_device_dead_timeout = 300
@@ -1,107 +0,0 @@
[cisco]

# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-

# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-

# (BoolOpt) A flag indicating whether Openstack networking should manage the
# creation and removal of VLAN interfaces for provider networks on the Nexus
# switches. If the flag is set to False then Openstack will not create or
# remove VLAN interfaces for provider networks, and the administrator needs
# to manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True

# (BoolOpt) A flag indicating whether Openstack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then Openstack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True

# (StrOpt) Period-separated module path to the model class to use for
# the Cisco neutron plugin.
#
# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2

# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
# Note: This feature is not supported on all models/versions of Cisco
# Nexus switches. To use this feature, all of the Nexus switches in the
# deployment must support it.
# nexus_l3_enable = False

# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False

# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# N1KV Format.
# [N1KV:<IP address of VSM>]
# username=<credential username>
# password=<credential password>
#
# Example:
# [N1KV:2.2.2.2]
# username=admin
# password=mySecretPassword

[cisco_n1k]

# (StrOpt) Specify the name of the integration bridge to which the VIFs are
# attached.
# Default value: br-int
# integration_bridge = br-int

# (StrOpt) Name of the policy profile to be associated with a port when no
# policy profile is specified during port creates.
# Default value: service_profile
# default_policy_profile = service_profile

# (StrOpt) Name of the policy profile to be associated with a port owned by
# network node (dhcp, router).
# Default value: dhcp_pp
# network_node_policy_profile = dhcp_pp

# (StrOpt) Name of the network profile to be associated with a network when no
# network profile is specified during network creates. Admin should pre-create
# a network profile with this name.
# Default value: default_network_profile
# default_network_profile = network_pool

# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
# policy profiles.
# Default value: 60
# poll_duration = 60

# (BoolOpt) Specify whether tenants are restricted from accessing all the
# policy profiles.
# Default value: False, indicating all tenants can access all policy profiles.
#
# restrict_policy_profiles = False

# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
# Default value: 4
# http_pool_size = 4

# (IntOpt) Timeout duration in seconds for the http request
# Default value: 15
# http_timeout = 15

# (BoolOpt) Specify whether tenants are restricted from accessing network
# profiles belonging to other tenants.
# Default value: True, indicating other tenants cannot access network
# profiles belonging to a tenant.
#
# restrict_network_profiles = True
@@ -1,76 +0,0 @@
[general]
#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
# backlog_processing_interval = 10

#(StrOpt) Name of the L3 admin tenant
# l3_admin_tenant = L3AdminTenant

#(StrOpt) Name of management network for hosting device configuration
# management_network = osn_mgmt_nw

#(StrOpt) Default security group applied on management port
# default_security_group = mgmt_sec_grp

#(IntOpt) Seconds of no status update until a cfg agent is considered down
# cfg_agent_down_time = 60

#(StrOpt) Path to templates for hosting devices
# templates_path = /opt/stack/data/neutron/cisco/templates

#(StrOpt) Path to config drive files for service VM instances
# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive

#(BoolOpt) Ensure that Nova is running before attempting to create any VM
# ensure_nova_running = True

[hosting_devices]
# Settings coupled to CSR1kv VM devices
# -------------------------------------
#(StrOpt) Name of Glance image for CSR1kv
# csr1kv_image = csr1kv_openstack_img

#(StrOpt) UUID of Nova flavor for CSR1kv
# csr1kv_flavor = 621

#(StrOpt) Plugging driver for CSR1kv
# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver

#(StrOpt) Hosting device driver for CSR1kv
# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver

#(StrOpt) Config agent router service driver for CSR1kv
# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver

#(StrOpt) Configdrive template file for CSR1kv
# csr1kv_configdrive_template = csr1kv_cfg_template

#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
# csr1kv_booting_time = 420

#(StrOpt) Username to use for CSR1kv configurations
# csr1kv_username = stack

#(StrOpt) Password to use for CSR1kv configurations
# csr1kv_password = cisco

[n1kv]
# Settings coupled to inter-working with N1kv plugin
# --------------------------------------------------
#(StrOpt) Name of N1kv port profile for management ports
# management_port_profile = osn_mgmt_pp

#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
# from VXLAN segmented networks).
# t1_port_profile = osn_t1_pp

#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
# from VLAN segmented networks).
# t2_port_profile = osn_t2_pp

#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
# for VXLAN segmented traffic).
# t1_network_profile = osn_t1_np

#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
# for VLAN segmented traffic).
# t2_network_profile = osn_t2_np
|
||||
# ovsdb_connection = tcp:127.0.0.1:6640
|
||||
|
||||
# (StrOpt) OpenFlow interface to use.
|
||||
# 'ovs-ofctl' is currently the only available choice.
|
||||
# 'ovs-ofctl' or 'native'.
|
||||
# of_interface = ovs-ofctl
|
||||
#
|
||||
# (IPOpt)
|
||||
# Address to listen on for OpenFlow connections.
|
||||
# Used only for 'native' driver.
|
||||
# of_listen_address = 127.0.0.1
|
||||
#
|
||||
# (IntOpt)
|
||||
# Port to listen on for OpenFlow connections.
|
||||
# Used only for 'native' driver.
|
||||
# of_listen_port = 6633
|
||||
#
|
||||
# (IntOpt)
|
||||
# Timeout in seconds to wait for the local switch connecting the controller.
|
||||
# Used only for 'native' driver.
|
||||
# of_connect_timeout=30
|
||||
#
|
||||
# (IntOpt)
|
||||
# Timeout in seconds to wait for a single OpenFlow request.
|
||||
# Used only for 'native' driver.
|
||||
# of_request_timeout=10
|
||||
|
||||
# (StrOpt) ovs datapath to use.
|
||||
# 'system' is the default value and corresponds to the kernel datapath.
|
||||
@ -143,6 +163,12 @@
|
||||
#
|
||||
# extensions =
|
||||
|
||||
# (BoolOpt) Set or un-set the checksum on outgoing IP packet
|
||||
# carrying GRE/VXLAN tunnel. The default value is False.
|
||||
#
|
||||
# tunnel_csum = False
|
||||
|
||||
|
||||
[securitygroup]
|
||||
# Firewall driver for realizing neutron security group function.
|
||||
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
|
||||
|
@@ -8,6 +8,4 @@

[Filters]

# neutron/agent/linux/ebtables_driver.py
ebtables: CommandFilter, ebtables, root
ebtablesEnv: EnvFilter, ebtables, root, EBTABLES_ATOMIC_FILE=
@@ -12,6 +12,7 @@
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
# NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root
@@ -56,7 +56,9 @@
    "update_network:router:external": "rule:admin_only",
    "delete_network": "rule:admin_or_owner",

    "network_device": "field:port:device_owner=~^network:",
    "create_port": "",
    "create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
    "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
@@ -71,6 +73,7 @@
    "get_port:binding:host_id": "rule:admin_only",
    "get_port:binding:profile": "rule:admin_only",
    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
    "update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
    "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
    "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
@@ -152,7 +152,7 @@ class OVSBridge(BaseOVS):
        super(OVSBridge, self).__init__()
        self.br_name = br_name
        self.datapath_type = datapath_type
        self.agent_uuid_stamp = '0x0'
        self.agent_uuid_stamp = 0

    def set_agent_uuid_stamp(self, val):
        self.agent_uuid_stamp = val
@@ -195,15 +195,6 @@ class OVSBridge(BaseOVS):
    def destroy(self):
        self.delete_bridge(self.br_name)

    def reset_bridge(self, secure_mode=False):
        with self.ovsdb.transaction() as txn:
            txn.add(self.ovsdb.del_br(self.br_name))
            txn.add(self.ovsdb.add_br(self.br_name,
                                      datapath_type=self.datapath_type))
            if secure_mode:
                txn.add(self.ovsdb.set_fail_mode(self.br_name,
                                                 FAILMODE_SECURE))

    def add_port(self, port_name, *interface_attr_tuples):
        with self.ovsdb.transaction() as txn:
            txn.add(self.ovsdb.add_port(self.br_name, port_name))
@@ -299,7 +290,8 @@ class OVSBridge(BaseOVS):
    def add_tunnel_port(self, port_name, remote_ip, local_ip,
                        tunnel_type=p_const.TYPE_GRE,
                        vxlan_udp_port=p_const.VXLAN_UDP_PORT,
                        dont_fragment=True):
                        dont_fragment=True,
                        tunnel_csum=False):
        attrs = [('type', tunnel_type)]
        # TODO(twilson) This is an OrderedDict solely to make a test happy
        options = collections.OrderedDict()
@@ -314,6 +306,8 @@ class OVSBridge(BaseOVS):
        options['local_ip'] = local_ip
        options['in_key'] = 'flow'
        options['out_key'] = 'flow'
        if tunnel_csum:
            options['csum'] = str(tunnel_csum).lower()
        attrs.append(('options', options))

        return self.add_port(port_name, *attrs)
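
For context, the new tunnel_csum flag simply adds a csum=true option to the tunnel interface; a minimal usage sketch, with the bridge name, port names and IP addresses as placeholders, could look like:

    # Hypothetical call sites for the extended add_tunnel_port signature.
    br = OVSBridge('br-tun')
    # Default behaviour is unchanged: no 'csum' option is set on the port.
    br.add_tunnel_port('vxlan-0a000002', remote_ip='10.0.0.2', local_ip='10.0.0.1',
                       tunnel_type='vxlan')
    # With checksum requested, the interface options now include csum=true.
    br.add_tunnel_port('vxlan-0a000003', remote_ip='10.0.0.3', local_ip='10.0.0.1',
                       tunnel_type='vxlan', tunnel_csum=True)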
@@ -51,15 +51,16 @@ class DhcpAgent(manager.Manager):
    """
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, host=None):
    def __init__(self, host=None, conf=None):
        super(DhcpAgent, self).__init__(host=host)
        self.needs_resync_reasons = collections.defaultdict(list)
        self.conf = cfg.CONF
        self.conf = conf or cfg.CONF
        self.cache = NetworkCache()
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        ctx = context.get_admin_context_without_session()
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
                                        ctx, self.conf.use_namespaces)
                                        ctx, self.conf.use_namespaces,
                                        self.conf.host)
        # create dhcp dir to store dhcp info
        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
        utils.ensure_dir(dhcp_dir)
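
The new conf argument lets callers hand the agent an isolated configuration object instead of mutating the global cfg.CONF; a rough sketch of the intent, with placeholder values and with RPC setup normally stubbed out in real tests, could be:

    # Hypothetical usage enabled by the conf parameter.
    from oslo_config import cfg

    test_conf = cfg.ConfigOpts()   # isolated config object, not the global CONF
    register_options(test_conf)    # same registration helper updated later in this commit
    test_conf.set_override('dhcp_driver', 'neutron.agent.linux.dhcp.Dnsmasq')
    agent = DhcpAgent(host='test-host', conf=test_conf)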
@@ -136,11 +137,11 @@ class DhcpAgent(manager.Manager):
            LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
                          {'net_id': network.id, 'action': action})

    def schedule_resync(self, reason, network=None):
    def schedule_resync(self, reason, network_id=None):
        """Schedule a resync for a given network and reason. If no network is
        specified, resync all networks.
        """
        self.needs_resync_reasons[network].append(reason)
        self.needs_resync_reasons[network_id].append(reason)

    @utils.synchronized('dhcp-agent')
    def sync_state(self, networks=None):
@@ -149,7 +150,7 @@ class DhcpAgent(manager.Manager):
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        pool = eventlet.GreenPool(self.conf.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
@@ -172,7 +173,11 @@ class DhcpAgent(manager.Manager):
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            self.schedule_resync(e)
            if only_nets:
                for network_id in only_nets:
                    self.schedule_resync(e, network_id)
            else:
                self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))

    @utils.exception_logger()
@@ -399,9 +404,9 @@ class DhcpPluginApi(object):

    """

    def __init__(self, topic, context, use_namespaces):
    def __init__(self, topic, context, use_namespaces, host):
        self.context = context
        self.host = cfg.CONF.host
        self.host = host
        self.use_namespaces = use_namespaces
        target = oslo_messaging.Target(
            topic=topic,
@@ -537,21 +542,21 @@ class NetworkCache(object):


class DhcpAgentWithStateReport(DhcpAgent):
    def __init__(self, host=None):
        super(DhcpAgentWithStateReport, self).__init__(host=host)
    def __init__(self, host=None, conf=None):
        super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-dhcp-agent',
            'host': host,
            'topic': topics.DHCP_AGENT,
            'configurations': {
                'dhcp_driver': cfg.CONF.dhcp_driver,
                'use_namespaces': cfg.CONF.use_namespaces,
                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
                'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
                'dhcp_driver': self.conf.dhcp_driver,
                'use_namespaces': self.conf.use_namespaces,
                'dhcp_lease_duration': self.conf.dhcp_lease_duration,
                'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_DHCP}
        report_interval = cfg.CONF.AGENT.report_interval
        report_interval = self.conf.AGENT.report_interval
        self.use_call = True
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
@@ -28,20 +28,20 @@ from neutron.common import topics
from neutron import service as neutron_service


def register_options():
    config.register_interface_driver_opts_helper(cfg.CONF)
    config.register_use_namespaces_opts_helper(cfg.CONF)
    config.register_agent_state_opts_helper(cfg.CONF)
    cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
    cfg.CONF.register_opts(dhcp_config.DHCP_OPTS)
    cfg.CONF.register_opts(dhcp_config.DNSMASQ_OPTS)
    cfg.CONF.register_opts(metadata_config.DRIVER_OPTS)
    cfg.CONF.register_opts(metadata_config.SHARED_OPTS)
    cfg.CONF.register_opts(interface.OPTS)
def register_options(conf):
    config.register_interface_driver_opts_helper(conf)
    config.register_use_namespaces_opts_helper(conf)
    config.register_agent_state_opts_helper(conf)
    conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
    conf.register_opts(dhcp_config.DHCP_OPTS)
    conf.register_opts(dhcp_config.DNSMASQ_OPTS)
    conf.register_opts(metadata_config.DRIVER_OPTS)
    conf.register_opts(metadata_config.SHARED_OPTS)
    conf.register_opts(interface.OPTS)


def main():
    register_options()
    register_options(cfg.CONF)
    common_config.init(sys.argv[1:])
    config.setup_logging()
    server = neutron_service.Service.create(
@@ -17,6 +17,7 @@ import abc
import collections

from oslo_concurrency import lockutils
from oslo_log import log as logging
import six

from neutron.agent.l2 import agent_extension
@@ -24,8 +25,12 @@ from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import exceptions
from neutron.i18n import _LW, _LI
from neutron import manager

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
@@ -35,36 +40,130 @@ class QosAgentDriver(object):
    for applying QoS Rules on a port.
    """

    # Each QoS driver should define the set of rule types that it supports, and
    # corresponding handlers that have the following names:
    #
    # create_<type>
    # update_<type>
    # delete_<type>
    #
    # where <type> is one of VALID_RULE_TYPES
    SUPPORTED_RULES = set()

    @abc.abstractmethod
    def initialize(self):
        """Perform QoS agent driver initialization.
        """

    @abc.abstractmethod
    def create(self, port, qos_policy):
        """Apply QoS rules on port for the first time.

        :param port: port object.
        :param qos_policy: the QoS policy to be applied on port.
        """
        #TODO(QoS) we may want to provide default implementations of calling
        #delete and then update
        self._handle_update_create_rules('create', port, qos_policy)

    @abc.abstractmethod
    def update(self, port, qos_policy):
        """Apply QoS rules on port.

        :param port: port object.
        :param qos_policy: the QoS policy to be applied on port.
        """
        self._handle_update_create_rules('update', port, qos_policy)

    @abc.abstractmethod
    def delete(self, port, qos_policy):
    def delete(self, port, qos_policy=None):
        """Remove QoS rules from port.

        :param port: port object.
        :param qos_policy: the QoS policy to be removed from port.
        """
        if qos_policy is None:
            rule_types = self.SUPPORTED_RULES
        else:
            rule_types = set(
                [rule.rule_type
                 for rule in self._iterate_rules(qos_policy.rules)])

        for rule_type in rule_types:
            self._handle_rule_delete(port, rule_type)

    def _iterate_rules(self, rules):
        for rule in rules:
            rule_type = rule.rule_type
            if rule_type in self.SUPPORTED_RULES:
                yield rule
            else:
                LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
                                '%(rule_type)s; skipping'),
                            {'rule_id': rule.id, 'rule_type': rule_type})

    def _handle_rule_delete(self, port, rule_type):
        handler_name = "".join(("delete_", rule_type))
        handler = getattr(self, handler_name)
        handler(port)

    def _handle_update_create_rules(self, action, port, qos_policy):
        for rule in self._iterate_rules(qos_policy.rules):
            if rule.should_apply_to_port(port):
                handler_name = "".join((action, "_", rule.rule_type))
                handler = getattr(self, handler_name)
                handler(port, rule)
            else:
                LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
                          {'port': port, 'rule': rule.id})
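
To make the handler naming convention above concrete, a skeletal driver could look like the sketch below; the rule type name and the empty method bodies are placeholders, and whether create/update/delete still need explicit overrides depends on the abc declarations kept above:

    # Hypothetical driver: SUPPORTED_RULES determines which create_/update_/
    # delete_ handlers the dispatch helpers above look up via getattr().
    class SketchQosDriver(QosAgentDriver):

        SUPPORTED_RULES = {'bandwidth_limit'}  # assumed rule type name

        def initialize(self):
            pass

        def create_bandwidth_limit(self, port, rule):
            pass  # resolved as getattr(self, 'create_bandwidth_limit')

        def update_bandwidth_limit(self, port, rule):
            pass

        def delete_bandwidth_limit(self, port):
            pass  # delete handlers receive only the port (see _handle_rule_delete)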


class PortPolicyMap(object):
    def __init__(self):
        # we cannot use a dict of sets here because port dicts are not hashable
        self.qos_policy_ports = collections.defaultdict(dict)
        self.known_policies = {}
        self.port_policies = {}

    def get_ports(self, policy):
        return self.qos_policy_ports[policy.id].values()

    def get_policy(self, policy_id):
        return self.known_policies.get(policy_id)

    def update_policy(self, policy):
        self.known_policies[policy.id] = policy

    def has_policy_changed(self, port, policy_id):
        return self.port_policies.get(port['port_id']) != policy_id

    def get_port_policy(self, port):
        policy_id = self.port_policies.get(port['port_id'])
        if policy_id:
            return self.get_policy(policy_id)

    def set_port_policy(self, port, policy):
        """Attach a port to policy and return any previous policy on port."""
        port_id = port['port_id']
        old_policy = self.get_port_policy(port)
        self.known_policies[policy.id] = policy
        self.port_policies[port_id] = policy.id
        self.qos_policy_ports[policy.id][port_id] = port
        if old_policy and old_policy.id != policy.id:
            del self.qos_policy_ports[old_policy.id][port_id]
        return old_policy

    def clean_by_port(self, port):
        """Detach port from policy and cleanup data we don't need anymore."""
        port_id = port['port_id']
        if port_id in self.port_policies:
            del self.port_policies[port_id]
            for qos_policy_id, port_dict in self.qos_policy_ports.items():
                if port_id in port_dict:
                    del port_dict[port_id]
                    if not port_dict:
                        self._clean_policy_info(qos_policy_id)
                    return
        raise exceptions.PortNotFound(port_id=port['port_id'])

    def _clean_policy_info(self, qos_policy_id):
        del self.qos_policy_ports[qos_policy_id]
        del self.known_policies[qos_policy_id]


class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
@@ -79,9 +178,7 @@ class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
            'neutron.qos.agent_drivers', driver_type)()
        self.qos_driver.initialize()

        # we cannot use a dict of sets here because port dicts are not hashable
        self.qos_policy_ports = collections.defaultdict(dict)
        self.known_ports = set()
        self.policy_map = PortPolicyMap()

        registry.subscribe(self._handle_notification, resources.QOS_POLICY)
        self._register_rpc_consumers(connection)
@@ -111,39 +208,50 @@ class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
        Update events are handled in _handle_notification.
        """
        port_id = port['port_id']
        qos_policy_id = port.get('qos_policy_id')
        port_qos_policy_id = port.get('qos_policy_id')
        network_qos_policy_id = port.get('network_qos_policy_id')
        qos_policy_id = port_qos_policy_id or network_qos_policy_id
        if qos_policy_id is None:
            self._process_reset_port(port)
            return

        #Note(moshele) check if we have seen this port
        #and it has the same policy we do nothing.
        if (port_id in self.known_ports and
                port_id in self.qos_policy_ports[qos_policy_id]):
        if not self.policy_map.has_policy_changed(port, qos_policy_id):
            return

        self.qos_policy_ports[qos_policy_id][port_id] = port
        self.known_ports.add(port_id)
        qos_policy = self.resource_rpc.pull(
            context, resources.QOS_POLICY, qos_policy_id)
        self.qos_driver.create(port, qos_policy)
        if qos_policy is None:
            LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
                         "%(port_id)s is not available on server, "
                         "it has been deleted. Skipping."),
                     {'qos_policy_id': qos_policy_id, 'port_id': port_id})
            self._process_reset_port(port)
        else:
            old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
            if old_qos_policy:
                self.qos_driver.delete(port, old_qos_policy)
                self.qos_driver.update(port, qos_policy)
            else:
                self.qos_driver.create(port, qos_policy)

    def delete_port(self, context, port):
        self._process_reset_port(port)

    def _process_update_policy(self, qos_policy):
        for port_id, port in self.qos_policy_ports[qos_policy.id].items():
            # TODO(QoS): for now, just reflush the rules on the port. Later, we
            # may want to apply the difference between the rules lists only.
            self.qos_driver.delete(port, None)
        old_qos_policy = self.policy_map.get_policy(qos_policy.id)
        for port in self.policy_map.get_ports(qos_policy):
            #NOTE(QoS): for now, just reflush the rules on the port. Later, we
            # may want to apply the difference between the old and
            # new rule lists.
            self.qos_driver.delete(port, old_qos_policy)
            self.qos_driver.update(port, qos_policy)
        self.policy_map.update_policy(qos_policy)

    def _process_reset_port(self, port):
        port_id = port['port_id']
        if port_id in self.known_ports:
            self.known_ports.remove(port_id)
            for qos_policy_id, port_dict in self.qos_policy_ports.items():
                if port_id in port_dict:
                    del port_dict[port_id]
                    self.qos_driver.delete(port, None)
                    return
        try:
            self.policy_map.clean_by_port(port)
            self.qos_driver.delete(port)
        except exceptions.PortNotFound:
            LOG.info(_LI("QoS extension did have no information about the "
                         "port %s that we were trying to reset"),
                     port['port_id'])
@@ -80,7 +80,8 @@ class L3PluginApi(object):
        to update_ha_routers_states
        1.5 - Added update_ha_routers_states
        1.6 - Added process_prefix_update

        1.7 - DVR support: new L3 plugin methods added.
              - delete_agent_gateway_port
    """

    def __init__(self, topic, host):
@@ -139,6 +140,12 @@ class L3PluginApi(object):
        return cctxt.call(context, 'process_prefix_update',
                          subnets=prefix_update)

    def delete_agent_gateway_port(self, context, fip_net):
        """Delete Floatingip_agent_gateway_port."""
        cctxt = self.client.prepare(version='1.7')
        return cctxt.call(context, 'delete_agent_gateway_port',
                          host=self.host, network_id=fip_net)


class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
                 ha.AgentMixin,
@@ -517,10 +524,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
    @periodic_task.periodic_task(spacing=1)
    def periodic_sync_routers_task(self, context):
        self.process_services_sync(context)
        LOG.debug("Starting periodic_sync_routers_task - fullsync:%s",
                  self.fullsync)
        if not self.fullsync:
            return
        LOG.debug("Starting fullsync periodic_sync_routers_task")

        # self.fullsync is True at this point. If an exception -- caught or
        # uncaught -- prevents setting it to False below then the next call
@@ -37,6 +37,7 @@ OPTS = [
                      "running on a centralized node (or in single-host "
                      "deployments, e.g. devstack)")),
    cfg.StrOpt('external_network_bridge', default='br-ex',
               deprecated_for_removal=True,
               help=_("Name of bridge used for external network "
                      "traffic.")),
    cfg.IntOpt('metadata_port',
@@ -101,13 +101,11 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
        if not self.ex_gw_port:
            return

        sn_port = self.get_snat_port_for_internal_port(port)
        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
        if not sn_port:
            return

        is_this_snat_host = ('binding:host_id' in self.ex_gw_port) and (
            self.ex_gw_port['binding:host_id'] == self.host)
        if not is_this_snat_host:
        if not self._is_this_snat_host():
            return

        snat_interface = self._get_snat_int_device_name(sn_port['id'])
@@ -137,6 +137,17 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
            # destroying it. The two could end up conflicting on
            # creating/destroying interfaces and such. I think I'd like a
            # semaphore to sync creation/deletion of this namespace.

            # NOTE (Swami): Since we are deleting the namespace here we
            # should be able to delete the floatingip agent gateway port
            # for the provided external net since we don't need it anymore.
            if self.fip_ns.agent_gateway_port:
                LOG.debug('Removed last floatingip, so requesting the '
                          'server to delete Floatingip Agent Gateway port:'
                          '%s', self.fip_ns.agent_gateway_port)
                self.agent.plugin_rpc.delete_agent_gateway_port(
                    self.agent.context,
                    self.fip_ns.agent_gateway_port['network_id'])
            self.fip_ns.delete()
            self.fip_ns = None

@@ -303,7 +314,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
        if not self.ex_gw_port:
            return

        sn_port = self.get_snat_port_for_internal_port(port)
        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
        if not sn_port:
            return

@@ -26,12 +26,18 @@ class DvrRouterBase(router.RouterInfo):
        self.agent = agent
        self.host = host

    def process(self, agent):
        super(DvrRouterBase, self).process(agent)
        # NOTE: Keep a copy of the interfaces around for when they are removed
        self.snat_ports = self.get_snat_interfaces()

    def get_snat_interfaces(self):
        return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])

    def get_snat_port_for_internal_port(self, int_port):
    def get_snat_port_for_internal_port(self, int_port, snat_ports=None):
        """Return the SNAT port for the given internal interface port."""
        snat_ports = self.get_snat_interfaces()
        if snat_ports is None:
            snat_ports = self.get_snat_interfaces()
        fixed_ip = int_port['fixed_ips'][0]
        subnet_id = fixed_ip['subnet_id']
        match_port = [p for p in snat_ports
@@ -122,10 +122,23 @@ class AgentMixin(object):
                         'possibly deleted concurrently.'), router_id)
            return

        self._configure_ipv6_ra_on_ext_gw_port_if_necessary(ri, state)
        self._update_metadata_proxy(ri, router_id, state)
        self._update_radvd_daemon(ri, state)
        self.state_change_notifier.queue_event((router_id, state))

    def _configure_ipv6_ra_on_ext_gw_port_if_necessary(self, ri, state):
        # If ipv6 is enabled on the platform, ipv6_gateway config flag is
        # not set and external_network associated to the router does not
        # include any IPv6 subnet, enable the gateway interface to accept
        # Router Advts from upstream router for default route.
        ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id']
        if state == 'master' and ex_gw_port_id and ri.use_ipv6:
            gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port)
            if not ri.is_v6_gateway_set(gateway_ips):
                interface_name = ri.get_external_device_name(ex_gw_port_id)
                ri.driver.configure_ipv6_ra(ri.ns_name, interface_name)

    def _update_metadata_proxy(self, ri, router_id, state):
        if state == 'master':
            LOG.debug('Spawning metadata proxy for router %s', router_id)
@@ -187,7 +187,7 @@ class HaRouter(router.RouterInfo):

    def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
        default_gw_rts = []
        gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
        gateway_ips = self._get_external_gw_ips(ex_gw_port)
        for gw_ip in gateway_ips:
            # TODO(Carl) This is repeated everywhere. A method would
            # be nice.
@@ -197,9 +197,6 @@ class HaRouter(router.RouterInfo):
                default_gw, gw_ip, interface_name))
        instance.virtual_routes.gateway_routes = default_gw_rts

        if enable_ra_on_gw:
            self.driver.configure_ipv6_ra(self.ns_name, interface_name)

    def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
        extra_subnets = ex_gw_port.get('extra_subnets', [])
        instance = self._get_keepalived_instance()
@@ -362,10 +359,10 @@ class HaRouter(router.RouterInfo):
                                          interface_name)

    def delete(self, agent):
        super(HaRouter, self).delete(agent)
        self.destroy_state_change_monitor(self.process_monitor)
        self.ha_network_removed()
        self.disable_keepalived()