Browse Source

Merge remote-tracking branch 'origin/master' into walnut

Change-Id: Ic6314ef9c1db6524fbb0ed8b1bacdc2b081c4775
tags/7.0.0.0rc1
armando-migliaccio 4 years ago
parent
commit
fdc3431ccd
100 changed files with 1280 additions and 4231 deletions
  1. +1
    -1
      .testr.conf
  2. +23
    -0
      devstack/lib/l2_agent_sriovnicswitch
  3. +16
    -0
      devstack/lib/ml2
  4. +3
    -0
      devstack/lib/ml2_drivers/sriovnicswitch
  5. +23
    -0
      devstack/plugin.sh
  6. +2
    -2
      doc/dashboards/graphite.dashboard.html
  7. +19
    -19
      doc/source/devref/callbacks.rst
  8. +22
    -0
      doc/source/devref/contribute.rst
  9. +7
    -3
      doc/source/devref/fullstack_testing.rst
  10. +8
    -2
      doc/source/devref/quality_of_service.rst
  11. +0
    -6
      doc/source/devref/quota.rst
  12. +25
    -11
      doc/source/devref/sub_project_guidelines.rst
  13. +1
    -0
      etc/l3_agent.ini
  14. +7
    -13
      etc/neutron.conf
  15. +0
    -15
      etc/neutron/plugins/cisco/cisco_cfg_agent.ini
  16. +0
    -107
      etc/neutron/plugins/cisco/cisco_plugins.ini
  17. +0
    -76
      etc/neutron/plugins/cisco/cisco_router_plugin.ini
  18. +27
    -1
      etc/neutron/plugins/ml2/openvswitch_agent.ini
  19. +0
    -2
      etc/neutron/rootwrap.d/ebtables.filters
  20. +1
    -0
      etc/neutron/rootwrap.d/openvswitch-plugin.filters
  21. +3
    -0
      etc/policy.json
  22. +5
    -11
      neutron/agent/common/ovs_lib.py
  23. +21
    -16
      neutron/agent/dhcp/agent.py
  24. +11
    -11
      neutron/agent/dhcp_agent.py
  25. +137
    -29
      neutron/agent/l2/extensions/qos.py
  26. +9
    -3
      neutron/agent/l3/agent.py
  27. +1
    -0
      neutron/agent/l3/config.py
  28. +2
    -4
      neutron/agent/l3/dvr_edge_router.py
  29. +12
    -1
      neutron/agent/l3/dvr_local_router.py
  30. +8
    -2
      neutron/agent/l3/dvr_router_base.py
  31. +13
    -0
      neutron/agent/l3/ha.py
  32. +2
    -5
      neutron/agent/l3/ha_router.py
  33. +16
    -7
      neutron/agent/l3/router_info.py
  34. +1
    -1
      neutron/agent/linux/async_process.py
  35. +29
    -2
      neutron/agent/linux/dhcp.py
  36. +0
    -290
      neutron/agent/linux/ebtables_driver.py
  37. +0
    -253
      neutron/agent/linux/ebtables_manager.py
  38. +48
    -6
      neutron/agent/linux/interface.py
  39. +129
    -45
      neutron/agent/linux/ip_lib.py
  40. +1
    -1
      neutron/agent/linux/ip_monitor.py
  41. +1
    -1
      neutron/agent/linux/iptables_firewall.py
  42. +25
    -1
      neutron/agent/linux/keepalived.py
  43. +14
    -8
      neutron/agent/linux/utils.py
  44. +3
    -3
      neutron/agent/ovsdb/api.py
  45. +10
    -1
      neutron/api/rpc/handlers/l3_rpc.py
  46. +2
    -1
      neutron/api/rpc/handlers/securitygroups_rpc.py
  47. +1
    -1
      neutron/api/v2/attributes.py
  48. +54
    -29
      neutron/api/v2/base.py
  49. +2
    -0
      neutron/callbacks/resources.py
  50. +11
    -0
      neutron/cmd/sanity/checks.py
  51. +14
    -1
      neutron/cmd/sanity_check.py
  52. +5
    -1
      neutron/common/constants.py
  53. +3
    -0
      neutron/common/exceptions.py
  54. +10
    -0
      neutron/common/utils.py
  55. +1
    -1
      neutron/db/agentschedulers_db.py
  56. +6
    -2
      neutron/db/api.py
  57. +32
    -11
      neutron/db/common_db_mixin.py
  58. +19
    -4
      neutron/db/db_base_plugin_v2.py
  59. +2
    -0
      neutron/db/flavors_db.py
  60. +3
    -1
      neutron/db/l3_agentschedulers_db.py
  61. +24
    -4
      neutron/db/l3_db.py
  62. +42
    -85
      neutron/db/l3_dvr_db.py
  63. +43
    -8
      neutron/db/l3_dvrscheduler_db.py
  64. +12
    -0
      neutron/db/migration/__init__.py
  65. +1
    -3
      neutron/db/migration/alembic_migrations/cisco_init_ops.py
  66. +16
    -7
      neutron/db/migration/alembic_migrations/env.py
  67. +3
    -0
      neutron/db/migration/alembic_migrations/external.py
  68. +1
    -1
      neutron/db/migration/alembic_migrations/versions/HEADS
  69. +44
    -0
      neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py
  70. +123
    -0
      neutron/db/migration/autogen.py
  71. +21
    -36
      neutron/db/migration/cli.py
  72. +0
    -3
      neutron/db/migration/models/head.py
  73. +2
    -2
      neutron/db/portsecurity_db.py
  74. +12
    -28
      neutron/db/quota/api.py
  75. +22
    -32
      neutron/db/quota/driver.py
  76. +11
    -12
      neutron/db/securitygroups_db.py
  77. +0
    -10
      neutron/db/servicetype_db.py
  78. +1
    -0
      neutron/debug/debug_agent.py
  79. +7
    -3
      neutron/extensions/allowedaddresspairs.py
  80. +1
    -1
      neutron/extensions/dns.py
  81. +6
    -1
      neutron/extensions/flavors.py
  82. +5
    -0
      neutron/manager.py
  83. +9
    -0
      neutron/neutron_plugin_base_v2.py
  84. +6
    -11
      neutron/notifiers/nova.py
  85. +17
    -0
      neutron/objects/qos/rule.py
  86. +0
    -7
      neutron/plugins/cisco/README
  87. +0
    -118
      neutron/plugins/cisco/common/cisco_constants.py
  88. +0
    -53
      neutron/plugins/cisco/common/cisco_credentials_v2.py
  89. +0
    -236
      neutron/plugins/cisco/common/cisco_exceptions.py
  90. +0
    -134
      neutron/plugins/cisco/common/cisco_faults.py
  91. +0
    -138
      neutron/plugins/cisco/common/config.py
  92. +0
    -0
      neutron/plugins/cisco/db/__init__.py
  93. +0
    -0
      neutron/plugins/cisco/db/l3/__init__.py
  94. +0
    -97
      neutron/plugins/cisco/db/l3/l3_models.py
  95. +0
    -1673
      neutron/plugins/cisco/db/n1kv_db_v2.py
  96. +0
    -185
      neutron/plugins/cisco/db/n1kv_models_v2.py
  97. +0
    -280
      neutron/plugins/cisco/db/network_db_v2.py
  98. +0
    -52
      neutron/plugins/cisco/db/network_models_v2.py
  99. +0
    -0
      neutron/plugins/cisco/extensions/__init__.py
  100. +0
    -0
      neutron/plugins/cisco/extensions/_credential_view.py

+ 1
- 1
.testr.conf View File

@@ -1,4 +1,4 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION | cat
test_id_option=--load-list $IDFILE
test_list_option=--list

+ 23
- 0
devstack/lib/l2_agent_sriovnicswitch View File

@@ -0,0 +1,23 @@
SRIOV_AGENT_CONF="${Q_PLUGIN_CONF_PATH}/sriov_agent.ini"
SRIOV_AGENT_BINARY="${NEUTRON_BIN_DIR}/neutron-sriov-nic-agent"

function configure_l2_agent_sriovnicswitch {
if [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then
PHYSICAL_DEVICE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE
fi
if [[ -n "$PHYSICAL_DEVICE_MAPPINGS" ]]; then
iniset /$SRIOV_AGENT_CONF sriov_nic physical_device_mappings $PHYSICAL_DEVICE_MAPPINGS
fi

iniset /$SRIOV_AGENT_CONF securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver

iniset /$SRIOV_AGENT_CONF agent extensions "$L2_AGENT_EXTENSIONS"
}

function start_l2_agent_sriov {
run_process q-sriov-agt "$SRIOV_AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$SRIOV_AGENT_CONF"
}

function stop_l2_agent_sriov {
stop_process q-sriov-agt
}

+ 16
- 0
devstack/lib/ml2 View File

@@ -1,3 +1,6 @@
source $LIBDIR/ml2_drivers/sriovnicswitch


function enable_ml2_extension_driver {
local extension_driver=$1
if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then
@@ -11,3 +14,16 @@ function enable_ml2_extension_driver {
function configure_qos_ml2 {
enable_ml2_extension_driver "qos"
}


function configure_ml2 {
OIFS=$IFS;
IFS=",";
mechanism_drivers_array=($Q_ML2_PLUGIN_MECHANISM_DRIVERS);
IFS=$OIFS;
for mechanism_driver in "${mechanism_drivers_array[@]}"; do
if [ "$(type -t configure_ml2_$mechanism_driver)" = function ]; then
configure_ml2_$mechanism_driver
fi
done
}

+ 3
- 0
devstack/lib/ml2_drivers/sriovnicswitch View File

@@ -0,0 +1,3 @@
function configure_ml2_sriovnicswitch {
iniset /$Q_PLUGIN_CONF_FILE ml2_sriov agent_required True
}

+ 23
- 0
devstack/plugin.sh View File

@@ -1,6 +1,7 @@
LIBDIR=$DEST/neutron/devstack/lib

source $LIBDIR/l2_agent
source $LIBDIR/l2_agent_sriovnicswitch
source $LIBDIR/ml2
source $LIBDIR/qos

@@ -15,4 +16,26 @@ if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled q-agt; then
configure_l2_agent
fi
#Note: sriov agent should run with OVS or linux bridge agent
#because they are the mechanisms that bind the DHCP and router ports.
#Currently devstack lacks the option to run two agents on the same node.
#Therefore we create new service, q-sriov-agt, and the q-agt should be OVS
#or linux bridge.
if is_service_enabled q-sriov-agt; then
configure_$Q_PLUGIN
configure_l2_agent
configure_l2_agent_sriovnicswitch
fi
fi

if [[ "$1" == "stack" && "$2" == "extra" ]]; then
if is_service_enabled q-sriov-agt; then
start_l2_agent_sriov
fi
fi

if [[ "$1" == "unstack" ]]; then
if is_service_enabled q-sriov-agt; then
stop_l2_agent_sriov
fi
fi

+ 2
- 2
doc/dashboards/graphite.dashboard.html View File

@@ -25,8 +25,8 @@ Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2<br>
</a>
</td>
<td align="center">
Failure Percentage - Last 10 Days - Large Opts<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Opts&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
Failure Percentage - Last 10 Days - Large Ops<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Ops&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29" width="400">
</a>
</td>

+ 19
- 19
doc/source/devref/callbacks.rst View File

@@ -94,18 +94,18 @@ In practical terms this scenario would be translated in the code below:


def callback1(resource, event, trigger, **kwargs):
print 'Callback1 called by trigger: ', trigger
print 'kwargs: ', kwargs
print('Callback1 called by trigger: ', trigger)
print('kwargs: ', kwargs)

def callback2(resource, event, trigger, **kwargs):
print 'Callback2 called by trigger: ', trigger
print 'kwargs: ', kwargs
print('Callback2 called by trigger: ', trigger)
print('kwargs: ', kwargs)


# B and C express interest with I
registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
print 'Subscribed'
print('Subscribed')


# A notifies
@@ -114,7 +114,7 @@ In practical terms this scenario would be translated in the code below:
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


print 'Notifying...'
print('Notifying...')
do_notify()


@@ -171,13 +171,13 @@ to abort events are ignored. The snippet below shows this in action:
raise Exception('I am failing!')

def callback2(resource, event, trigger, **kwargs):
print 'Callback2 called by %s on event %s' % (trigger, event)
print('Callback2 called by %s on event %s' % (trigger, event))


registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(callback2, resources.ROUTER, events.ABORT_CREATE)
print 'Subscribed'
print('Subscribed')


def do_notify():
@@ -185,11 +185,11 @@ to abort events are ignored. The snippet below shows this in action:
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


print 'Notifying...'
print('Notifying...')
try:
do_notify()
except exceptions.CallbackFailure as e:
print 'Error: ', e
print('Error: ', e)

The output is:

@@ -237,11 +237,11 @@ The snippet below shows these concepts in action:


def callback1(resource, event, trigger, **kwargs):
print 'Callback1 called by %s on event %s for resource %s' % (trigger, event, resource)
print('Callback1 called by %s on event %s for resource %s' % (trigger, event, resource))


def callback2(resource, event, trigger, **kwargs):
print 'Callback2 called by %s on event %s for resource %s' % (trigger, event, resource)
print('Callback2 called by %s on event %s for resource %s' % (trigger, event, resource))


registry.subscribe(callback1, resources.ROUTER, events.BEFORE_READ)
@@ -249,11 +249,11 @@ The snippet below shows these concepts in action:
registry.subscribe(callback1, resources.ROUTER, events.AFTER_DELETE)
registry.subscribe(callback1, resources.PORT, events.BEFORE_UPDATE)
registry.subscribe(callback2, resources.ROUTER_GATEWAY, events.BEFORE_UPDATE)
print 'Subscribed'
print('Subscribed')


def do_notify():
print 'Notifying...'
print('Notifying...')
kwargs = {'foo': 'bar'}
registry.notify(resources.ROUTER, events.BEFORE_READ, do_notify, **kwargs)
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
@@ -356,17 +356,17 @@ What kind of function can be a callback?


def callback1(resource, event, trigger, **kwargs):
print 'module callback'
print('module callback')


class MyCallback(object):

def callback2(self, resource, event, trigger, **kwargs):
print 'object callback'
print('object callback')

@classmethod
def callback3(cls, resource, event, trigger, **kwargs):
print 'class callback'
print('class callback')


c = MyCallback()
@@ -376,7 +376,7 @@ What kind of function can be a callback?

def do_notify():
def nested_subscribe(resource, event, trigger, **kwargs):
print 'nested callback'
print('nested callback')

registry.subscribe(nested_subscribe, resources.ROUTER, events.BEFORE_CREATE)

@@ -384,7 +384,7 @@ What kind of function can be a callback?
registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


print 'Notifying...'
print('Notifying...')
do_notify()

And the output is going to be:

+ 22
- 0
doc/source/devref/contribute.rst View File

@@ -506,6 +506,28 @@ Extensions can be loaded in two ways:
variable is commented.


Service Providers
~~~~~~~~~~~~~~~~~

If your project uses service provider(s) the same way VPNAAS and LBAAS do, you
specify your service provider in your ``project_name.conf`` file like so::

[service_providers]
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default][,...]

In order for Neutron to load this correctly, make sure you do the following in
your code::

from neutron.db import servicetype_db
service_type_manager = servicetype_db.ServiceTypeManager.get_instance()
service_type_manager.add_provider_configuration(
YOUR_SERVICE_TYPE,
pconf.ProviderConfiguration(YOUR_SERVICE_MODULE))

This is typically required when you instantiate your service plugin class.


Interface Drivers
~~~~~~~~~~~~~~~~~


+ 7
- 3
doc/source/devref/fullstack_testing.rst View File

@@ -83,6 +83,12 @@ When?
stack testing can help here as the full stack infrastructure can restart an
agent during the test.

Prerequisites
-------------

Fullstack test suite assumes 240.0.0.0/3 range in root namespace of the test
machine is available for its usage.

Short Term Goals
----------------

@@ -103,9 +109,6 @@ the fact as there will probably be something to copy/paste from.
Long Term Goals
---------------

* Currently we configure the OVS agent with VLANs segmentation (Only because
it's easier). This allows us to validate most functionality, but we might
need to support tunneling somehow.
* How will advanced services use the full stack testing infrastructure? Full
stack tests infrastructure classes are expected to change quite a bit over
the next coming months. This means that other repositories may import these
@@ -116,3 +119,4 @@ Long Term Goals
mechanism driver. We may modularize the topology configuration further to
allow to rerun full stack tests against different Neutron plugins or ML2
mechanism drivers.
* Add OVS ARP responder coverage when the gate supports OVS 2.1+

+ 8
- 2
doc/source/devref/quality_of_service.rst View File

@@ -84,8 +84,14 @@ for a port or a network:

Each QoS policy contains zero or more QoS rules. A policy is then applied to a
network or a port, making all rules of the policy applied to the corresponding
Neutron resource (for a network, applying a policy means that the policy will
be applied to all ports that belong to it).
Neutron resource.

When applied through a network association, policy rules could apply or not
to neutron internal ports (like router, dhcp, load balancer, etc..). The QosRule
base object provides a default should_apply_to_port method which could be
overridden. In the future we may want to have a flag in QoSNetworkPolicyBinding
or QosRule to enforce such type of application (for example when limiting all
the ingress of routers devices on an external network automatically).

From database point of view, following objects are defined in schema:


+ 0
- 6
doc/source/devref/quota.rst View File

@@ -164,12 +164,6 @@ difference between CountableResource and TrackedResource.
Quota Enforcement
-----------------

**NOTE: The reservation engine is currently not wired into the API controller
as issues have been discovered with multiple workers. For more information
see _bug1468134**

.. _bug1468134: https://bugs.launchpad.net/neutron/+bug/1486134

Before dispatching a request to the plugin, the Neutron 'base' controller [#]_
attempts to make a reservation for requested resource(s).
Reservations are made by calling the make_reservation method in

+ 25
- 11
doc/source/devref/sub_project_guidelines.rst View File

@@ -130,19 +130,33 @@ needed.
Sub-Project Release Process
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Only members of the `neutron-release
<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can do
releases. Make sure you talk to a member of neutron-release to perform your
release.

To release a sub-project, follow the following steps:

* Only members of the `neutron-release
<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can
do releases. Make sure you talk to a member of neutron-release to perform
your release.
* For projects which have not moved to post-versioning, we need to push an
alpha tag to avoid pbr complaining. The neutron-release group will handle
this.
* Modify setup.cfg to remove the version (if you have one), which moves your
project to post-versioning, similar to all the other Neutron projects. You
can skip this step if you don't have a version in setup.cfg.
* Have neutron-release push the tag to gerrit.
* Have neutron-release `tag the release
alpha tag to avoid pbr complaining. A member of the neutron-release group
will handle this.
* A sub-project owner should modify setup.cfg to remove the version (if you
have one), which moves your project to post-versioning, similar to all the
other Neutron projects. You can skip this step if you don't have a version in
setup.cfg.
* A member of neutron-release will then `tag the release
<http://docs.openstack.org/infra/manual/drivers.html#tagging-a-release>`_,
which will release the code to PyPi.
* The releases will now be on PyPi. A sub-project owner should verify this by
going to an URL similar to
`this <https://pypi.python.org/pypi/networking-odl>`_.
* A sub-project owner should next go to Launchpad and release this version
using the "Release Now" button for the release itself.
* A sub-project owner should update any bugs that were fixed with this
release to "Fix Released" in Launchpad.
* A sub-project owner should add the tarball to the Launchpad page for the
release using the "Add download file" link.
* A sub-project owner should add the next milestone to the Launchpad series, or
if a new series is required, create the new series and a new milestone.
* Finally a sub-project owner should send an email to the openstack-announce
mailing list announcing the new release.

+ 1
- 0
etc/l3_agent.ini View File

@@ -64,6 +64,7 @@
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
# This option is deprecated and will be removed in the M release.
# external_network_bridge = br-ex

# TCP Port used by Neutron metadata server

+ 7
- 13
etc/neutron.conf View File

@@ -190,9 +190,9 @@

# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler

@@ -306,19 +306,13 @@
# ========== end of items for VLAN trunking networks ==========

# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. A value of 0 runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them. If not
# specified, the default value is equal to the number of CPUs available to
# achieve best performance.
# Number of separate API worker processes to spawn. If not specified or < 1,
# the default value is equal to the number of CPUs available.
# api_workers = <number of CPUs>

# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Number of separate RPC worker processes to spawn. If not specified or < 1,
# a single RPC worker process is spawned by the parent process.
# rpc_workers = 1

# Timeout for client connections socket operations. If an
# incoming connection is idle for this number of seconds it

+ 0
- 15
etc/neutron/plugins/cisco/cisco_cfg_agent.ini View File

@@ -1,15 +0,0 @@
[cfg_agent]
# (IntOpt) Interval in seconds for processing of service updates.
# That is when the config agent's process_services() loop executes
# and it lets each service helper to process its service resources.
# rpc_loop_interval = 10

# (StrOpt) Period-separated module path to the routing service helper class.
# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper

# (IntOpt) Timeout value in seconds for connecting to a hosting device.
# device_connection_timeout = 30

# (IntOpt) The time in seconds until a backlogged hosting device is
# presumed dead or booted to an error state.
# hosting_device_dead_timeout = 300

+ 0
- 107
etc/neutron/plugins/cisco/cisco_plugins.ini View File

@@ -1,107 +0,0 @@
[cisco]

# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-

# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-

# (BoolOpt) A flag indicating whether Openstack networking should manage the
# creation and removal of VLAN interfaces for provider networks on the Nexus
# switches. If the flag is set to False then Openstack will not create or
# remove VLAN interfaces for provider networks, and the administrator needs
# to manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True

# (BoolOpt) A flag indicating whether Openstack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then Openstack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True

# (StrOpt) Period-separated module path to the model class to use for
# the Cisco neutron plugin.
#
# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2

# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
# Note: This feature is not supported on all models/versions of Cisco
# Nexus switches. To use this feature, all of the Nexus switches in the
# deployment must support it.
# nexus_l3_enable = False

# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False

# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# N1KV Format.
# [N1KV:<IP address of VSM>]
# username=<credential username>
# password=<credential password>
#
# Example:
# [N1KV:2.2.2.2]
# username=admin
# password=mySecretPassword

[cisco_n1k]

# (StrOpt) Specify the name of the integration bridge to which the VIFs are
# attached.
# Default value: br-int
# integration_bridge = br-int

# (StrOpt) Name of the policy profile to be associated with a port when no
# policy profile is specified during port creates.
# Default value: service_profile
# default_policy_profile = service_profile

# (StrOpt) Name of the policy profile to be associated with a port owned by
# network node (dhcp, router).
# Default value: dhcp_pp
# network_node_policy_profile = dhcp_pp

# (StrOpt) Name of the network profile to be associated with a network when no
# network profile is specified during network creates. Admin should pre-create
# a network profile with this name.
# Default value: default_network_profile
# default_network_profile = network_pool

# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
# policy profiles.
# Default value: 60
# poll_duration = 60

# (BoolOpt) Specify whether tenants are restricted from accessing all the
# policy profiles.
# Default value: False, indicating all tenants can access all policy profiles.
#
# restrict_policy_profiles = False

# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
# Default value: 4
# http_pool_size = 4

# (IntOpt) Timeout duration in seconds for the http request
# Default value: 15
# http_timeout = 15

# (BoolOpt) Specify whether tenants are restricted from accessing network
# profiles belonging to other tenants.
# Default value: True, indicating other tenants cannot access network
# profiles belonging to a tenant.
#
# restrict_network_profiles = True

+ 0
- 76
etc/neutron/plugins/cisco/cisco_router_plugin.ini View File

@@ -1,76 +0,0 @@
[general]
#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
# backlog_processing_interval = 10

#(StrOpt) Name of the L3 admin tenant
# l3_admin_tenant = L3AdminTenant

#(StrOpt) Name of management network for hosting device configuration
# management_network = osn_mgmt_nw

#(StrOpt) Default security group applied on management port
# default_security_group = mgmt_sec_grp

#(IntOpt) Seconds of no status update until a cfg agent is considered down
# cfg_agent_down_time = 60

#(StrOpt) Path to templates for hosting devices
# templates_path = /opt/stack/data/neutron/cisco/templates

#(StrOpt) Path to config drive files for service VM instances
# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive

#(BoolOpt) Ensure that Nova is running before attempting to create any VM
# ensure_nova_running = True

[hosting_devices]
# Settings coupled to CSR1kv VM devices
# -------------------------------------
#(StrOpt) Name of Glance image for CSR1kv
# csr1kv_image = csr1kv_openstack_img

#(StrOpt) UUID of Nova flavor for CSR1kv
# csr1kv_flavor = 621

#(StrOpt) Plugging driver for CSR1kv
# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver

#(StrOpt) Hosting device driver for CSR1kv
# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver

#(StrOpt) Config agent router service driver for CSR1kv
# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver

#(StrOpt) Configdrive template file for CSR1kv
# csr1kv_configdrive_template = csr1kv_cfg_template

#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
# csr1kv_booting_time = 420

#(StrOpt) Username to use for CSR1kv configurations
# csr1kv_username = stack

#(StrOpt) Password to use for CSR1kv configurations
# csr1kv_password = cisco

[n1kv]
# Settings coupled to inter-working with N1kv plugin
# --------------------------------------------------
#(StrOpt) Name of N1kv port profile for management ports
# management_port_profile = osn_mgmt_pp

#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
# from VXLAN segmented networks).
# t1_port_profile = osn_t1_pp

#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
# from VLAN segmented networks).
# t2_port_profile = osn_t2_pp

#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
# for VXLAN segmented traffic).
# t1_network_profile = osn_t1_np

#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
# for VLAN segmented traffic).
# t2_network_profile = osn_t2_np

+ 27
- 1
etc/neutron/plugins/ml2/openvswitch_agent.ini View File

@@ -54,8 +54,28 @@
# ovsdb_connection = tcp:127.0.0.1:6640

# (StrOpt) OpenFlow interface to use.
# 'ovs-ofctl' is currently the only available choice.
# 'ovs-ofctl' or 'native'.
# of_interface = ovs-ofctl
#
# (IPOpt)
# Address to listen on for OpenFlow connections.
# Used only for 'native' driver.
# of_listen_address = 127.0.0.1
#
# (IntOpt)
# Port to listen on for OpenFlow connections.
# Used only for 'native' driver.
# of_listen_port = 6633
#
# (IntOpt)
# Timeout in seconds to wait for the local switch connecting the controller.
# Used only for 'native' driver.
# of_connect_timeout=30
#
# (IntOpt)
# Timeout in seconds to wait for a single OpenFlow request.
# Used only for 'native' driver.
# of_request_timeout=10

# (StrOpt) ovs datapath to use.
# 'system' is the default value and corresponds to the kernel datapath.
@@ -143,6 +163,12 @@
#
# extensions =

# (BoolOpt) Set or un-set the checksum on outgoing IP packet
# carrying GRE/VXLAN tunnel. The default value is False.
#
# tunnel_csum = False


[securitygroup]
# Firewall driver for realizing neutron security group function.
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver

+ 0
- 2
etc/neutron/rootwrap.d/ebtables.filters View File

@@ -8,6 +8,4 @@

[Filters]

# neutron/agent/linux/ebtables_driver.py
ebtables: CommandFilter, ebtables, root
ebtablesEnv: EnvFilter, ebtables, root, EBTABLES_ATOMIC_FILE=

+ 1
- 0
etc/neutron/rootwrap.d/openvswitch-plugin.filters View File

@@ -12,6 +12,7 @@
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
# NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root

+ 3
- 0
etc/policy.json View File

@@ -56,7 +56,9 @@
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",

"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
@@ -71,6 +73,7 @@
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",

+ 5
- 11
neutron/agent/common/ovs_lib.py View File

@@ -152,7 +152,7 @@ class OVSBridge(BaseOVS):
super(OVSBridge, self).__init__()
self.br_name = br_name
self.datapath_type = datapath_type
self.agent_uuid_stamp = '0x0'
self.agent_uuid_stamp = 0

def set_agent_uuid_stamp(self, val):
self.agent_uuid_stamp = val
@@ -195,15 +195,6 @@ class OVSBridge(BaseOVS):
def destroy(self):
self.delete_bridge(self.br_name)

def reset_bridge(self, secure_mode=False):
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.del_br(self.br_name))
txn.add(self.ovsdb.add_br(self.br_name,
datapath_type=self.datapath_type))
if secure_mode:
txn.add(self.ovsdb.set_fail_mode(self.br_name,
FAILMODE_SECURE))

def add_port(self, port_name, *interface_attr_tuples):
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.add_port(self.br_name, port_name))
@@ -299,7 +290,8 @@ class OVSBridge(BaseOVS):
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=p_const.VXLAN_UDP_PORT,
dont_fragment=True):
dont_fragment=True,
tunnel_csum=False):
attrs = [('type', tunnel_type)]
# TODO(twilson) This is an OrderedDict solely to make a test happy
options = collections.OrderedDict()
@@ -314,6 +306,8 @@ class OVSBridge(BaseOVS):
options['local_ip'] = local_ip
options['in_key'] = 'flow'
options['out_key'] = 'flow'
if tunnel_csum:
options['csum'] = str(tunnel_csum).lower()
attrs.append(('options', options))

return self.add_port(port_name, *attrs)

+ 21
- 16
neutron/agent/dhcp/agent.py View File

@@ -51,15 +51,16 @@ class DhcpAgent(manager.Manager):
"""
target = oslo_messaging.Target(version='1.0')

def __init__(self, host=None):
def __init__(self, host=None, conf=None):
super(DhcpAgent, self).__init__(host=host)
self.needs_resync_reasons = collections.defaultdict(list)
self.conf = cfg.CONF
self.conf = conf or cfg.CONF
self.cache = NetworkCache()
self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
ctx, self.conf.use_namespaces)
ctx, self.conf.use_namespaces,
self.conf.host)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
utils.ensure_dir(dhcp_dir)
@@ -136,11 +137,11 @@ class DhcpAgent(manager.Manager):
LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
{'net_id': network.id, 'action': action})

def schedule_resync(self, reason, network=None):
def schedule_resync(self, reason, network_id=None):
"""Schedule a resync for a given network and reason. If no network is
specified, resync all networks.
"""
self.needs_resync_reasons[network].append(reason)
self.needs_resync_reasons[network_id].append(reason)

@utils.synchronized('dhcp-agent')
def sync_state(self, networks=None):
@@ -149,7 +150,7 @@ class DhcpAgent(manager.Manager):
"""
only_nets = set([] if (not networks or None in networks) else networks)
LOG.info(_LI('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
pool = eventlet.GreenPool(self.conf.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())

try:
@@ -172,7 +173,11 @@ class DhcpAgent(manager.Manager):
LOG.info(_LI('Synchronizing state complete'))

except Exception as e:
self.schedule_resync(e)
if only_nets:
for network_id in only_nets:
self.schedule_resync(e, network_id)
else:
self.schedule_resync(e)
LOG.exception(_LE('Unable to sync network state.'))

@utils.exception_logger()
@@ -399,9 +404,9 @@ class DhcpPluginApi(object):

"""

def __init__(self, topic, context, use_namespaces):
def __init__(self, topic, context, use_namespaces, host):
self.context = context
self.host = cfg.CONF.host
self.host = host
self.use_namespaces = use_namespaces
target = oslo_messaging.Target(
topic=topic,
@@ -537,21 +542,21 @@ class NetworkCache(object):


class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None):
super(DhcpAgentWithStateReport, self).__init__(host=host)
def __init__(self, host=None, conf=None):
super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
'dhcp_driver': self.conf.dhcp_driver,
'use_namespaces': self.conf.use_namespaces,
'dhcp_lease_duration': self.conf.dhcp_lease_duration,
'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval
report_interval = self.conf.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(

+ 11
- 11
neutron/agent/dhcp_agent.py View File

@@ -28,20 +28,20 @@ from neutron.common import topics
from neutron import service as neutron_service


def register_options():
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
cfg.CONF.register_opts(dhcp_config.DHCP_OPTS)
cfg.CONF.register_opts(dhcp_config.DNSMASQ_OPTS)
cfg.CONF.register_opts(metadata_config.DRIVER_OPTS)
cfg.CONF.register_opts(metadata_config.SHARED_OPTS)
cfg.CONF.register_opts(interface.OPTS)
def register_options(conf):
config.register_interface_driver_opts_helper(conf)
config.register_use_namespaces_opts_helper(conf)
config.register_agent_state_opts_helper(conf)
conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
conf.register_opts(dhcp_config.DHCP_OPTS)
conf.register_opts(dhcp_config.DNSMASQ_OPTS)
conf.register_opts(metadata_config.DRIVER_OPTS)
conf.register_opts(metadata_config.SHARED_OPTS)
conf.register_opts(interface.OPTS)


def main():
register_options()
register_options(cfg.CONF)
common_config.init(sys.argv[1:])
config.setup_logging()
server = neutron_service.Service.create(

+ 137
- 29
neutron/agent/l2/extensions/qos.py View File

@@ -17,6 +17,7 @@ import abc
import collections

from oslo_concurrency import lockutils
from oslo_log import log as logging
import six

from neutron.agent.l2 import agent_extension
@@ -24,8 +25,12 @@ from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import exceptions
from neutron.i18n import _LW, _LI
from neutron import manager

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
@@ -35,36 +40,130 @@ class QosAgentDriver(object):
for applying QoS Rules on a port.
"""

# Each QoS driver should define the set of rule types that it supports, and
# corresponding handlers that have the following names:
#
# create_<type>
# update_<type>
# delete_<type>
#
# where <type> is one of VALID_RULE_TYPES
SUPPORTED_RULES = set()

@abc.abstractmethod
def initialize(self):
"""Perform QoS agent driver initialization.
"""

@abc.abstractmethod
def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.

:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
#TODO(QoS) we may want to provide default implementations of calling
#delete and then update
self._handle_update_create_rules('create', port, qos_policy)

@abc.abstractmethod
def update(self, port, qos_policy):
"""Apply QoS rules on port.

:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('update', port, qos_policy)

@abc.abstractmethod
def delete(self, port, qos_policy):
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.

:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
if qos_policy is None:
rule_types = self.SUPPORTED_RULES
else:
rule_types = set(
[rule.rule_type
for rule in self._iterate_rules(qos_policy.rules)])

for rule_type in rule_types:
self._handle_rule_delete(port, rule_type)

def _iterate_rules(self, rules):
for rule in rules:
rule_type = rule.rule_type
if rule_type in self.SUPPORTED_RULES:
yield rule
else:
LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
'%(rule_type)s; skipping'),
{'rule_id': rule.id, 'rule_type': rule_type})

def _handle_rule_delete(self, port, rule_type):
handler_name = "".join(("delete_", rule_type))
handler = getattr(self, handler_name)
handler(port)

def _handle_update_create_rules(self, action, port, qos_policy):
for rule in self._iterate_rules(qos_policy.rules):
if rule.should_apply_to_port(port):
handler_name = "".join((action, "_", rule.rule_type))
handler = getattr(self, handler_name)
handler(port, rule)
else:
LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
{'port': port, 'rule': rule.id})


class PortPolicyMap(object):
def __init__(self):
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_policies = {}
self.port_policies = {}

def get_ports(self, policy):
return self.qos_policy_ports[policy.id].values()

def get_policy(self, policy_id):
return self.known_policies.get(policy_id)

def update_policy(self, policy):
self.known_policies[policy.id] = policy

def has_policy_changed(self, port, policy_id):
return self.port_policies.get(port['port_id']) != policy_id

def get_port_policy(self, port):
policy_id = self.port_policies.get(port['port_id'])
if policy_id:
return self.get_policy(policy_id)

def set_port_policy(self, port, policy):
"""Attach a port to policy and return any previous policy on port."""
port_id = port['port_id']
old_policy = self.get_port_policy(port)
self.known_policies[policy.id] = policy
self.port_policies[port_id] = policy.id
self.qos_policy_ports[policy.id][port_id] = port
if old_policy and old_policy.id != policy.id:
del self.qos_policy_ports[old_policy.id][port_id]
return old_policy

def clean_by_port(self, port):
"""Detach port from policy and cleanup data we don't need anymore."""
port_id = port['port_id']
if port_id in self.port_policies:
del self.port_policies[port_id]
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
if not port_dict:
self._clean_policy_info(qos_policy_id)
return
raise exceptions.PortNotFound(port_id=port['port_id'])

def _clean_policy_info(self, qos_policy_id):
del self.qos_policy_ports[qos_policy_id]
del self.known_policies[qos_policy_id]


class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
@@ -79,9 +178,7 @@ class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
'neutron.qos.agent_drivers', driver_type)()
self.qos_driver.initialize()

# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_ports = set()
self.policy_map = PortPolicyMap()

registry.subscribe(self._handle_notification, resources.QOS_POLICY)
self._register_rpc_consumers(connection)
@@ -111,39 +208,50 @@ class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
Update events are handled in _handle_notification.
"""
port_id = port['port_id']
qos_policy_id = port.get('qos_policy_id')
port_qos_policy_id = port.get('qos_policy_id')
network_qos_policy_id = port.get('network_qos_policy_id')
qos_policy_id = port_qos_policy_id or network_qos_policy_id
if qos_policy_id is None:
self._process_reset_port(port)
return

#Note(moshele): if we have already seen this port and it already has
#the same policy applied, there is nothing to do.
if (port_id in self.known_ports and
port_id in self.qos_policy_ports[qos_policy_id]):
if not self.policy_map.has_policy_changed(port, qos_policy_id):
return

self.qos_policy_ports[qos_policy_id][port_id] = port
self.known_ports.add(port_id)
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
self.qos_driver.create(port, qos_policy)
if qos_policy is None:
LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
"%(port_id)s is not available on server, "
"it has been deleted. Skipping."),
{'qos_policy_id': qos_policy_id, 'port_id': port_id})
self._process_reset_port(port)
else:
old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
if old_qos_policy:
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
else:
self.qos_driver.create(port, qos_policy)

def delete_port(self, context, port):
self._process_reset_port(port)

def _process_update_policy(self, qos_policy):
for port_id, port in self.qos_policy_ports[qos_policy.id].items():
# TODO(QoS): for now, just reflush the rules on the port. Later, we
# may want to apply the difference between the rules lists only.
self.qos_driver.delete(port, None)
old_qos_policy = self.policy_map.get_policy(qos_policy.id)
for port in self.policy_map.get_ports(qos_policy):
#NOTE(QoS): for now, just reflush the rules on the port. Later, we
# may want to apply the difference between the old and
# new rule lists.
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
self.policy_map.update_policy(qos_policy)

def _process_reset_port(self, port):
port_id = port['port_id']
if port_id in self.known_ports:
self.known_ports.remove(port_id)
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
self.qos_driver.delete(port, None)
return
try:
self.policy_map.clean_by_port(port)
self.qos_driver.delete(port)
except exceptions.PortNotFound:
LOG.info(_LI("QoS extension did have no information about the "
"port %s that we were trying to reset"),
port['port_id'])

+ 9
- 3
neutron/agent/l3/agent.py View File

@@ -80,7 +80,8 @@ class L3PluginApi(object):
to update_ha_routers_states
1.5 - Added update_ha_routers_states
1.6 - Added process_prefix_update

1.7 - DVR support: new L3 plugin methods added.
- delete_agent_gateway_port
"""

def __init__(self, topic, host):
@@ -139,6 +140,12 @@ class L3PluginApi(object):
return cctxt.call(context, 'process_prefix_update',
subnets=prefix_update)

def delete_agent_gateway_port(self, context, fip_net):
"""Delete Floatingip_agent_gateway_port."""
cctxt = self.client.prepare(version='1.7')
return cctxt.call(context, 'delete_agent_gateway_port',
host=self.host, network_id=fip_net)


class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
@@ -517,10 +524,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
@periodic_task.periodic_task(spacing=1)
def periodic_sync_routers_task(self, context):
self.process_services_sync(context)
LOG.debug("Starting periodic_sync_routers_task - fullsync:%s",
self.fullsync)
if not self.fullsync:
return
LOG.debug("Starting fullsync periodic_sync_routers_task")

# self.fullsync is True at this point. If an exception -- caught or
# uncaught -- prevents setting it to False below then the next call

+ 1
- 0
neutron/agent/l3/config.py View File

@@ -37,6 +37,7 @@ OPTS = [
"running on a centralized node (or in single-host "
"deployments, e.g. devstack)")),
cfg.StrOpt('external_network_bridge', default='br-ex',
deprecated_for_removal=True,
help=_("Name of bridge used for external network "
"traffic.")),
cfg.IntOpt('metadata_port',

+ 2
- 4
neutron/agent/l3/dvr_edge_router.py View File

@@ -101,13 +101,11 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if not self.ex_gw_port:
return

sn_port = self.get_snat_port_for_internal_port(port)
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return

is_this_snat_host = ('binding:host_id' in self.ex_gw_port) and (
self.ex_gw_port['binding:host_id'] == self.host)
if not is_this_snat_host:
if not self._is_this_snat_host():
return

snat_interface = self._get_snat_int_device_name(sn_port['id'])

+ 12
- 1
neutron/agent/l3/dvr_local_router.py View File

@@ -137,6 +137,17 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.

# NOTE (Swami): Since we are deleting the namespace here we
# should be able to delete the floatingip agent gateway port
# for the provided external net since we don't need it anymore.
if self.fip_ns.agent_gateway_port:
LOG.debug('Removed last floatingip, so requesting the '
'server to delete Floatingip Agent Gateway port:'
'%s', self.fip_ns.agent_gateway_port)
self.agent.plugin_rpc.delete_agent_gateway_port(
self.agent.context,
self.fip_ns.agent_gateway_port['network_id'])
self.fip_ns.delete()
self.fip_ns = None

@@ -303,7 +314,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
if not self.ex_gw_port:
return

sn_port = self.get_snat_port_for_internal_port(port)
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return


+ 8
- 2
neutron/agent/l3/dvr_router_base.py View File

@@ -26,12 +26,18 @@ class DvrRouterBase(router.RouterInfo):
self.agent = agent
self.host = host

def process(self, agent):
super(DvrRouterBase, self).process(agent)
# NOTE: Keep a copy of the interfaces around for when they are removed
self.snat_ports = self.get_snat_interfaces()

def get_snat_interfaces(self):
return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])

def get_snat_port_for_internal_port(self, int_port):
def get_snat_port_for_internal_port(self, int_port, snat_ports=None):
"""Return the SNAT port for the given internal interface port."""
snat_ports = self.get_snat_interfaces()
if snat_ports is None:
snat_ports = self.get_snat_interfaces()
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports

+ 13
- 0
neutron/agent/l3/ha.py View File

@@ -122,10 +122,23 @@ class AgentMixin(object):
'possibly deleted concurrently.'), router_id)
return

self._configure_ipv6_ra_on_ext_gw_port_if_necessary(ri, state)
self._update_metadata_proxy(ri, router_id, state)
self._update_radvd_daemon(ri, state)
self.state_change_notifier.queue_event((router_id, state))

def _configure_ipv6_ra_on_ext_gw_port_if_necessary(self, ri, state):
# If ipv6 is enabled on the platform, ipv6_gateway config flag is
# not set and external_network associated to the router does not
# include any IPv6 subnet, enable the gateway interface to accept
# Router Advts from upstream router for default route.
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id']
if state == 'master' and ex_gw_port_id and ri.use_ipv6:
gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port)
if not ri.is_v6_gateway_set(gateway_ips):
interface_name = ri.get_external_device_name(ex_gw_port_id)
ri.driver.configure_ipv6_ra(ri.ns_name, interface_name)

def _update_metadata_proxy(self, ri, router_id, state):
if state == 'master':
LOG.debug('Spawning metadata proxy for router %s', router_id)

+ 2
- 5
neutron/agent/l3/ha_router.py View File

@@ -187,7 +187,7 @@ class HaRouter(router.RouterInfo):

def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
default_gw_rts = []
gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
gateway_ips = self._get_external_gw_ips(ex_gw_port)
for gw_ip in gateway_ips:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
@@ -197,9 +197,6 @@ class HaRouter(router.RouterInfo):
default_gw, gw_ip, interface_name))
instance.virtual_routes.gateway_routes = default_gw_rts

if enable_ra_on_gw:
self.driver.configure_ipv6_ra(self.ns_name, interface_name)

def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
extra_subnets = ex_gw_port.get('extra_subnets', [])
instance = self._get_keepalived_instance()
@@ -362,10 +359,10 @@ class HaRouter(router.RouterInfo):
interface_name)

def delete(self, agent):
super(HaRouter, self).delete(agent)
self.destroy_state_change_monitor(self.process_monitor)
self.ha_network_removed()
self.disable_keepalived()
super(HaRouter, self).delete(agent)

def process(self, agent):
super(HaRouter, self).process(agent)

+ 16
- 7
neutron/agent/l3/router_info.py View File

@@ -202,6 +202,9 @@ class RouterInfo(object):
def remove_floating_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)

def remove_external_gateway_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)

def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])

@@ -475,7 +478,6 @@ class RouterInfo(object):

def _get_external_gw_ips(self, ex_gw_port):
gateway_ips = []
enable_ra_on_gw = False
if 'subnets' in ex_gw_port:
gateway_ips = [subnet['gateway_ip']
for subnet in ex_gw_port['subnets']
@@ -485,11 +487,7 @@ class RouterInfo(object):
if self.agent_conf.ipv6_gateway:
# ipv6_gateway configured, use address for default route.
gateway_ips.append(self.agent_conf.ipv6_gateway)
else:
# ipv6_gateway is also not configured.
# Use RA for default route.
enable_ra_on_gw = True
return gateway_ips, enable_ra_on_gw
return gateway_ips

def _external_gateway_added(self, ex_gw_port, interface_name,
ns_name, preserve_ips):
@@ -501,7 +499,12 @@ class RouterInfo(object):
# will be added to the interface.
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])

gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
gateway_ips = self._get_external_gw_ips(ex_gw_port)
enable_ra_on_gw = False
if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
# There is no IPv6 gw_ip, use RouterAdvt for default route.
enable_ra_on_gw = True

self.driver.init_router_port(
interface_name,
ip_cidrs,
@@ -538,6 +541,12 @@ class RouterInfo(object):
def external_gateway_removed(self, ex_gw_port, interface_name):
LOG.debug("External gateway removed: port(%s), interface(%s)",
ex_gw_port, interface_name)
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
for ip_addr in ex_gw_port['fixed_ips']:
self.remove_external_gateway_ip(device,
common_utils.ip_to_cidr(
ip_addr['ip_address'],
ip_addr['prefixlen']))
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,

+ 1
- 1
neutron/agent/linux/async_process.py View File

@@ -50,7 +50,7 @@ class AsyncProcess(object):
>>> time.sleep(5)
>>> proc.stop()
>>> for line in proc.iter_stdout():
... print line
... print(line)
"""

def __init__(self, cmd, run_as_root=False, respawn_interval=None,

+ 29
- 2
neutron/agent/linux/dhcp.py View File

@@ -1030,10 +1030,18 @@ class DeviceManager(object):
# the following loop...
port = None

# Look for an existing DHCP for this network.
# Look for an existing DHCP port for this network.
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
# If using gateway IPs on this port, we can skip the
# following code, whose purpose is just to review and
# update the Neutron-allocated IP addresses for the
# port.
if self.driver.use_gateway_ips:
return port
# Otherwise break out, as we now have the DHCP port
# whose subnets and addresses we need to review.
break
else:
return None
@@ -1090,13 +1098,21 @@ class DeviceManager(object):
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Creating new one.',
{'device_id': device_id, 'network_id': network.id})

# Make a list of the subnets that need a unique IP address for
# this DHCP port.
if self.driver.use_gateway_ips:
unique_ip_subnets = []
else:
unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]

port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in dhcp_subnets])
fixed_ips=unique_ip_subnets)
return self.plugin.create_dhcp_port({'port': port_dict})

def setup_dhcp_port(self, network):
@@ -1168,6 +1184,17 @@ class DeviceManager(object):
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)

if self.driver.use_gateway_ips:
# For each DHCP-enabled subnet, add that subnet's gateway
# IP address to the Linux device for the DHCP port.
for subnet in network.subnets:
if not subnet.enable_dhcp:
continue
gateway = subnet.gateway_ip
if gateway:
net = netaddr.IPNetwork(subnet.cidr)
ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))

if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces):
ip_cidrs.append(METADATA_DEFAULT_CIDR)

+ 0
- 290
neutron/agent/linux/ebtables_driver.py View File

@@ -1,290 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Implement ebtables rules using linux utilities."""

import re

from retrying import retry

from oslo_config import cfg
from oslo_log import log as logging

from neutron.common import utils

ebtables_opts = [
cfg.StrOpt('ebtables_path',
default='$state_path/ebtables-',
help=_('Location of temporary ebtables table files.')),
]

CONF = cfg.CONF
CONF.register_opts(ebtables_opts)

LOG = logging.getLogger(__name__)

# Collection of regexes to parse ebtables output
_RE_FIND_BRIDGE_TABLE_NAME = re.compile(r'^Bridge table:[\s]*([a-z]+)$')
# get chain name, number of entries and policy name.
_RE_FIND_BRIDGE_CHAIN_INFO = re.compile(
r'^Bridge chain:[\s]*(.*),[\s]*entries:[\s]*[0-9]+,[\s]*'
r'policy:[\s]*([A-Z]+)$')
_RE_FIND_BRIDGE_RULE_COUNTERS = re.compile(
r',[\s]*pcnt[\s]*=[\s]*([0-9]+)[\s]*--[\s]*bcnt[\s]*=[\s]*([0-9]+)$')
_RE_FIND_COMMIT_STATEMENT = re.compile(r'^COMMIT$')
_RE_FIND_COMMENTS_AND_BLANKS = re.compile(r'^#|^$')
_RE_FIND_APPEND_RULE = re.compile(r'-A (\S+) ')

# Regexes to parse ebtables rule file input
_RE_RULES_FIND_TABLE_NAME = re.compile(r'^\*([a-z]+)$')
_RE_RULES_FIND_CHAIN_NAME = re.compile(r'^:(.*)[\s]+([A-Z]+)$')
_RE_RULES_FIND_RULE_LINE = re.compile(r'^\[([0-9]+):([0-9]+)\]')


def _process_ebtables_output(lines):
    """Convert raw ebtables listing output into restorable rule lines.

    Blank lines and comments are dropped; the remaining ebtables listing
    output is rewritten in the format ebtables accepts when setting rules.

    For example, if the raw ebtables list lines (input to this function) are:

        Bridge table: filter
        Bridge chain: INPUT, entries: 0, policy: ACCEPT
        Bridge chain: FORWARD, entries: 0, policy: ACCEPT
        Bridge chain: OUTPUT, entries: 0, policy: ACCEPT

    The output then will be:

        *filter
        :INPUT ACCEPT
        :FORWARD ACCEPT
        :OUTPUT ACCEPT
        COMMIT

    Key point: ebtables rules listing output is not the same as the rules
    format for setting new rules.
    """
    table = None
    current_chain = ''
    chain_decls = []
    rule_lines = []

    for line in lines:
        if _RE_FIND_COMMENTS_AND_BLANKS.search(line):
            continue
        # Rule lines end in ', pcnt = N -- bcnt = M'; turn them into
        # '[N:M] -A <chain> <rule body>' restore syntax.
        counters = _RE_FIND_BRIDGE_RULE_COUNTERS.search(line)
        if table and counters:
            rule_lines.append('[%s:%s] -A %s %s' %
                              (counters.group(1),
                               counters.group(2),
                               current_chain,
                               line[:counters.start()].strip()))
        chain_info = _RE_FIND_BRIDGE_CHAIN_INFO.search(line)
        if chain_info:
            chain_decls.append(':%s %s' % (chain_info.group(1),
                                           chain_info.group(2)))
            current_chain = chain_info.group(1)
            continue
        table_match = _RE_FIND_BRIDGE_TABLE_NAME.search(line)
        if table_match:
            table = '*%s' % table_match.group(1)
            continue
    return [table] + chain_decls + rule_lines + ['COMMIT']


def _match_rule_line(table, line):
    """Parse a '[pcnt:bcnt] -A ...' rule line into ebtables argument lists.

    Returns (table, parsed) where parsed is None when the line is not a
    rule line or no table has been seen yet.  When both counters are
    positive, a second '-C' (set-counters) variant of the rule is appended
    so the counters get restored too.
    """
    match = _RE_RULES_FIND_RULE_LINE.search(line)
    if not (table and match):
        return table, None
    append_args = line[match.end():].split()
    parsed = [(table, append_args)]
    pcnt, bcnt = match.group(1), match.group(2)
    if int(pcnt) > 0 and int(bcnt) > 0:
        counter_rule = _RE_FIND_APPEND_RULE.sub(
            r'-C \1 %s %s ', line[match.end() + 1:])
        parsed.append((table, (counter_rule % (pcnt, bcnt)).split()))
    return table, parsed


def _match_chain_name(table, tables, line):
    """Parse a ':CHAIN POLICY' line into ebtables arguments.

    User-defined chains (not listed in tables[table]) are created with
    '-N' and given their policy; built-in chains only get '-P'.
    Returns (table, result) where result is None when the line does not
    declare a chain or no table has been seen yet.
    """
    match = _RE_RULES_FIND_CHAIN_NAME.search(line)
    if not (table and match):
        return table, None
    chain, policy = match.group(1), match.group(2)
    if chain not in tables[table]:
        args = ['-N', chain, '-P', policy]
    else:
        args = ['-P', chain, policy]
    return table, (table, args)


def _match_table_name(table, line):
    """Parse a '*tablename' line starting a new table section.

    Returns (table, result): on a match, the newly named table together
    with an '--atomic-init' operation initializing it from the current
    kernel table; otherwise the unchanged table and None.
    """
    match = _RE_RULES_FIND_TABLE_NAME.search(line)
    if not match:
        return table, None
    new_table = match.group(1)
    return new_table, (new_table, ['--atomic-init'])


def _match_commit_statement(table, line):
    """Return an '--atomic-commit' operation for a COMMIT line, else None.

    A commit is only meaningful once a table section has been opened.
    """
    if table and _RE_FIND_COMMIT_STATEMENT.search(line):
        return (table, ['--atomic-commit'])
    return None


def _process_ebtables_input(lines):
    """Import text ebtables rules. Similar to iptables-restore.

    Was based on:
    http://sourceforge.net/p/ebtables/code/ci/
    3730ceb7c0a81781679321bfbf9eaa39cfcfb04e/tree/userspace/ebtables2/
    ebtables-save?format=raw

    The function prepares and returns a list of tuples, each tuple
    consisting of a table name and ebtables arguments.  The caller can
    then repeatedly call ebtables on that table with those arguments to
    get the rules applied.

    For example, this input:

        *filter
        :INPUT ACCEPT
        :neutron-nwfilter-INPUT ACCEPT
        [0:0] -A INPUT -j neutron-nwfilter-INPUT
        COMMIT

    ... produces this output:

        ('filter', ['--atomic-init'])
        ('filter', ['-P', 'INPUT', 'ACCEPT'])
        ('filter', ['-N', 'neutron-nwfilter-INPUT', '-P', 'ACCEPT'])
        ('filter', ['-A', 'INPUT', '-j', 'neutron-nwfilter-INPUT'])
        ('filter', ['--atomic-commit'])
    """
    # Built-in chains per kernel table; any other chain name is
    # user-defined and must be created with -N before use.
    builtin_chains = {'filter': ['INPUT', 'FORWARD', 'OUTPUT'],
                      'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING'],
                      'broute': ['BROUTING']}
    table = None

    ebtables_args = []
    for line in lines.splitlines():
        if _RE_FIND_COMMENTS_AND_BLANKS.search(line):
            continue
        # Each matcher either consumes the line (non-None result) or
        # leaves it for the next matcher in the chain.
        table, res = _match_rule_line(table, line)
        if res:
            ebtables_args.extend(res)
            continue
        table, res = _match_chain_name(table, builtin_chains, line)
        if res:
            ebtables_args.append(res)
            continue
        table, res = _match_table_name(table, line)
        if res:
            ebtables_args.append(res)
            continue
        res = _match_commit_statement(table, line)
        if res:
            ebtables_args.append(res)

    return ebtables_args


# Retry transient ebtables failures with exponential backoff: waits grow
# exponentially from 1s and are capped at 10s, and retrying gives up
# entirely once 10s have elapsed in total.
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000,
       stop_max_delay=10000)
def _cmd_retry(func, *args, **kwargs):
    """Invoke func(*args, **kwargs), retrying on failure.

    The @retry decorator re-runs the call with exponential backoff; see
    the comment in run_ebtables() for the known concurrency issue that
    motivates retrying.
    """
    return func(*args, **kwargs)


def run_ebtables(namespace, execute, table, args):
    """Execute the ebtables utility for one table, retrying on failure.

    :param namespace: optional network namespace to run the command in.
    :param execute: callable that actually runs the command line.
    :param table: ebtables table name to operate on.
    :param args: list of additional arguments passed to ebtables.
    :returns: whatever the execute callable returns.
    """
    cmd = ['ebtables', '-t', table]
    if CONF.ebtables_path:
        atomic_file = '%s%s' % (CONF.ebtables_path, table)
        cmd.extend(['--atomic-file', atomic_file])
    cmd.extend(args)
    if namespace:
        cmd = ['ip', 'netns', 'exec', namespace] + cmd
    # TODO(jbrendel): The root helper is used for every ebtables command,
    #                 but with an atomic file only the init and commit
    #                 commands strictly need root. However, the file
    #                 generated by the init command is readable and
    #                 writable by root only, so for now everything runs
    #                 as root.
    #
    # The execution is retried on failure. Known issue:
    # See bug: https://bugs.launchpad.net/nova/+bug/1316621
    # See patch: https://review.openstack.org/#/c/140514/3
    return _cmd_retry(execute, cmd, run_as_root=True)


def run_ebtables_multiple(namespace, execute, arg_list):
    """Run the ebtables utility once per element of arg_list.

    Like run_ebtables(), but invoked for every element of arg_list.
    Each element is a tuple of a table name and a list of ebtables
    arguments for that table.
    """
    for table_name, table_args in arg_list:
        run_ebtables(namespace, execute, table_name, table_args)


@utils.synchronized('ebtables', external=True)
def ebtables_save(execute, tables_names, namespace=None):
    """Generate text output of the ebtables rules.

    Based on:
    http://sourceforge.net/p/ebtables/code/ci/master/tree/userspace/ebtables2/
    ebtables-save?format=raw
    """
    all_lines = []
    for table in tables_names:
        # '-L --Lc' lists the rules together with their packet/byte counters.
        raw = run_ebtables(namespace, execute, table, ['-L', '--Lc'])
        all_lines.extend(_process_ebtables_output(raw.splitlines()))
    return '\n'.join(all_lines)


@utils.synchronized('ebtables', external=True)
def ebtables_restore(lines, execute, namespace=None):
    """Parse saved ebtables rule text and apply the rules."""
    parsed_args = _process_ebtables_input(lines)
    run_ebtables_multiple(namespace, execute, parsed_args)

+ 0
- 253
neutron/agent/linux/ebtables_manager.py View File

@@ -1,253 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Implement a manager for ebtables rules.

NOTE: The ebtables manager contains a lot of duplicated or very similar code
from the iptables manager. An option would have been to refactor the
iptables manager so that ebtables and iptables manager can share common
code. However, the iptables manager was considered too brittle and
in need of a larger rework or full replacement in the future.
Therefore, it was decided not to do any refactoring for now and to accept
the code duplication.

"""

import inspect
import os

from oslo_log import log as logging

from neutron.i18n import _LW


# Module-level logger named after this module.
LOG = logging.getLogger(__name__)


# Hard limit imposed by ebtables on chain name length.
MAX_CHAIN_LEN_EBTABLES = 31
# NOTE(jbrendel): ebtables supports chain names of up to 31 characters, and
#                 we add up to 12 characters to prefix_chain which is used
#                 as a prefix, so we limit it to 19 characters.
POSTROUTING_STR = '-POSTROUTING'
MAX_LEN_PREFIX_CHAIN = MAX_CHAIN_LEN_EBTABLES - len(POSTROUTING_STR)

# When stripping or calculating string lengths, sometimes a '-' which separates
# name components needs to be considered.
DASH_STR_LEN = 1


def binary_name():
    """Grab the name of the binary we're running in."""
    # The last entry of the interpreter stack is the outermost frame,
    # i.e. the script that started this process.
    outermost_frame = inspect.stack()[-1]
    return os.path.basename(outermost_frame[1])


def _get_prefix_chain(prefix_chain=None):
    """Return the chain prefix, truncated to the maximum allowed length.

    Falls back to the name of the running binary when no explicit prefix
    is supplied.
    """
    base = prefix_chain if prefix_chain else binary_name()
    return base[:MAX_LEN_PREFIX_CHAIN]


def get_chain_name(chain_name, wrap=True, prefix_chain=None):
    """Truncate a chain name so the resulting name fits ebtables limits.

    Wrapped chains are later rendered as '<prefix>-<name>', so the budget
    for the name itself is the ebtables maximum minus the prefix length
    and the separating dash.
    """
    if not wrap:
        return chain_name[:MAX_CHAIN_LEN_EBTABLES]
    prefix_len = len(_get_prefix_chain(prefix_chain)) + DASH_STR_LEN
    return chain_name[:MAX_CHAIN_LEN_EBTABLES - prefix_len]


class EbtablesRule(object):
    """A single ebtables rule bound to a (possibly wrapped) chain.

    You shouldn't need to use this class directly, it's only used by
    EbtablesManager.
    """

    def __init__(self, chain, rule, wrap=True, top=False,
                 prefix_chain=None):
        self.prefix_chain = _get_prefix_chain(prefix_chain)
        self.chain = get_chain_name(chain, wrap, prefix_chain)
        self.rule = rule
        self.wrap = wrap
        self.top = top

    def __eq__(self, other):
        return ((self.chain, self.rule, self.top, self.wrap) ==
                (other.chain, other.rule, other.top, other.wrap))

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        # Wrapped rules render their chain as '<prefix>-<chain>'.
        if self.wrap:
            chain = '%s-%s' % (self.prefix_chain, self.chain)
        else:
            chain = self.chain
        return '-A %s %s' % (chain, self.rule)

class EbtablesTable(object):
"""An ebtables table."""

def __init__(self, prefix_chain=None):
    """Set up empty rule and chain bookkeeping for one ebtables table."""
    # Rules currently defined in this table.
    self.rules = []
    # Rules queued for removal; iterated over in apply().
    self.rules_to_remove = []
    # Wrapped (prefix-qualified) chain names.
    self.chains = set()
    # Chain names used verbatim, without the prefix.
    self.unwrapped_chains = set()
    # Non-wrapped chains queued for removal; iterated over in apply().
    self.chains_to_remove = set()
    # Prefix used when wrapping chain names (defaults to the binary name).
    self.prefix_chain = _get_prefix_chain(prefix_chain)

def add_chain(self, name, wrap=True):
    """Add a named chain to the table.

    The chain name is wrapped to be unique for the component creating
    it, so different components of Neutron can safely create identically
    named chains without interfering with one another.

    At the moment, its wrapped name is <prefix chain>-<chain name>,
    so if neutron-server creates a chain named 'OUTPUT', it'll actually
    end up named 'neutron-server-OUTPUT'.
    """
    truncated = get_chain_name(name, wrap, self.prefix_chain)
    self._select_chain_set(wrap).add(truncated)

def _select_chain_set(self, wrap):
    """Return the set of wrapped or unwrapped chain names."""
    return self.chains if wrap else self.unwrapped_chains

def ensure_remove_chain(self, name, wrap=True):
    """Remove the chain if present, without logging when it is absent.

    This removal "cascades". All rules in the chain are removed, as are
    all rules in other chains that jump to it.
    """
    self.remove_chain(name, wrap=wrap, log_not_found=False)

def remove_chain(self, name, wrap=True, log_not_found=True):
    """Remove named chain.

    This removal "cascades". All rules in the chain are removed, as are
    all rules in other chains that jump to it.

    If the chain is not found then this is merely logged (unless
    log_not_found is False).
    """
    name = get_chain_name(name, wrap, self.prefix_chain)
    chain_set = self._select_chain_set(wrap)

    if name not in chain_set:
        if log_not_found:
            LOG.warn(_LW('Attempted to remove chain %s '
                         'which does not exist'), name)
        return

    chain_set.remove(name)

    if not wrap:
        # non-wrapped chains and rules need to be dealt with specially,
        # so we keep a list of them to be iterated over in apply()
        self.chains_to_remove.add(name)

    # first, add rules to remove that have a matching chain name
    self.rules_to_remove += [r for r in self.rules if r.chain == name]

    # next, remove rules from list that have a matching chain name
    self.rules = [r for r in self.rules if r.chain != name]

    if not wrap:
        jump_snippet = '-j %s' % name
        # next, add rules to remove that have a matching jump chain
        self.rules_to_remove += [r for r in self.rules
                                 if jump_snippet in r.rule]
    else:
        jump_snippet = '-j %s-%s' % (self.prefix_chain, name)
        # NOTE(review): wrapped jump rules are only filtered out of
        # self.rules below and, unlike the non-wrapped branch above,
        # are never queued in rules_to_remove -- confirm this asymmetry
        # is intended.

    # finally, remove rules from list that have a matching jump chain
    self.rules = [r for r in self.rules
                  if jump_snippet not in r.rule]

def add_rule(self, chain, rule, wrap=True, top=False):
    """Add a rule to the table.

    This is just like what you'd feed to ebtables, just without
    the '-A <chain name>' bit at the start.

    However, if you need to jump to one of your wrapped chains,
    prepend its name with a '$' which will ensure the wrapping
    is applied correctly.
    """
    chain = get_chain_name(chain, wrap, self.prefix_chain)
    if wrap and chain not in self.chains:
        raise LookupError(_('Unknown chain: %r') % chain)

    if '$' in rule:
        # Expand '$chain' references to their wrapped chain names.
        rule = ' '.join(self._wrap_target_chain(part)
                        for part in rule.split(' '))

    self.rules.append(
        EbtablesRule(chain, rule, wrap, top, self.prefix_chain))

def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.

However, if the rule jumps to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap, self.prefix_chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))