Browse Source

Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: I9c3cb40fd12c3e404aa1e585f76970e2ee49f4d8
tags/7.0.0.0rc1
Kyle Mestery 4 years ago
parent
commit
ec799c4589
100 changed files with 2342 additions and 2412 deletions
  1. +2
    -2
      doc/source/devref/callbacks.rst
  2. +485
    -335
      doc/source/devref/contribute.rst
  3. +1
    -0
      doc/source/devref/index.rst
  4. +26
    -1
      doc/source/devref/linuxbridge_agent.rst
  5. +35
    -0
      doc/source/devref/neutron_api.rst
  6. +4
    -0
      etc/dhcp_agent.ini
  7. +4
    -0
      etc/l3_agent.ini
  8. +4
    -0
      etc/metadata_agent.ini
  9. +0
    -28
      etc/neutron/plugins/ml2/linuxbridge_agent.ini
  10. +3
    -0
      etc/neutron/plugins/ml2/openvswitch_agent.ini
  11. +3
    -0
      etc/neutron/plugins/vmware/nsx.ini
  12. +10
    -0
      etc/neutron/plugins/vmware/policy/network-gateways.json
  13. +7
    -0
      etc/neutron/plugins/vmware/policy/routers.json
  14. +8
    -0
      etc/policy.json
  15. +2
    -0
      neutron/agent/common/config.py
  16. +52
    -19
      neutron/agent/common/ovs_lib.py
  17. +3
    -2
      neutron/agent/dhcp/agent.py
  18. +2
    -2
      neutron/agent/dhcp_agent.py
  19. +6
    -4
      neutron/agent/l3/agent.py
  20. +0
    -6
      neutron/agent/l3/dvr.py
  21. +12
    -5
      neutron/agent/l3/dvr_local_router.py
  22. +1
    -3
      neutron/agent/l3/ha_router.py
  23. +23
    -18
      neutron/agent/l3/namespace_manager.py
  24. +11
    -9
      neutron/agent/l3/router_info.py
  25. +2
    -2
      neutron/agent/l3_agent.py
  26. +42
    -1
      neutron/agent/linux/dhcp.py
  27. +32
    -4
      neutron/agent/linux/interface.py
  28. +53
    -17
      neutron/agent/linux/ip_lib.py
  29. +2
    -1
      neutron/agent/metadata/agent.py
  30. +28
    -10
      neutron/agent/ovsdb/native/commands.py
  31. +6
    -0
      neutron/agent/rpc.py
  32. +5
    -5
      neutron/api/api_common.py
  33. +1
    -1
      neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py
  34. +2
    -1
      neutron/api/v2/base.py
  35. +7
    -1
      neutron/api/v2/resource_helper.py
  36. +1
    -0
      neutron/common/constants.py
  37. +1
    -1
      neutron/common/rpc.py
  38. +7
    -7
      neutron/common/utils.py
  39. +105
    -0
      neutron/db/address_scope_db.py
  40. +19
    -7
      neutron/db/agents_db.py
  41. +1
    -1
      neutron/db/agentschedulers_db.py
  42. +3
    -7
      neutron/db/db_base_plugin_common.py
  43. +21
    -61
      neutron/db/db_base_plugin_v2.py
  44. +2
    -2
      neutron/db/dvr_mac_db.py
  45. +64
    -46
      neutron/db/ipam_backend_mixin.py
  46. +45
    -4
      neutron/db/ipam_non_pluggable_backend.py
  47. +6
    -6
      neutron/db/l3_dvrscheduler_db.py
  48. +36
    -0
      neutron/db/migration/alembic_migrations/versions/52c5312f6baf_address_scopes.py
  49. +1
    -1
      neutron/db/migration/alembic_migrations/versions/HEAD
  50. +4
    -4
      neutron/db/portsecurity_db_common.py
  51. +11
    -11
      neutron/db/securitygroups_rpc_base.py
  52. +138
    -0
      neutron/extensions/address_scope.py
  53. +4
    -2
      neutron/ipam/__init__.py
  54. +8
    -5
      neutron/ipam/utils.py
  55. +3
    -2
      neutron/manager.py
  56. +0
    -151
      neutron/openstack/common/eventlet_backdoor.py
  57. +0
    -147
      neutron/openstack/common/loopingcall.py
  58. +0
    -232
      neutron/openstack/common/periodic_task.py
  59. +0
    -507
      neutron/openstack/common/service.py
  60. +0
    -105
      neutron/openstack/common/systemd.py
  61. +0
    -150
      neutron/openstack/common/threadgroup.py
  62. +5
    -5
      neutron/plugins/brocade/NeutronPlugin.py
  63. +2
    -2
      neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py
  64. +1
    -17
      neutron/plugins/common/constants.py
  65. +1
    -1
      neutron/plugins/hyperv/agent/l2_agent.py
  66. +1
    -1
      neutron/plugins/ibm/agent/sdnve_neutron_agent.py
  67. +2
    -2
      neutron/plugins/ibm/sdnve_api.py
  68. +1
    -1
      neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py
  69. +3
    -3
      neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py
  70. +128
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py
  71. +16
    -14
      neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py
  72. +23
    -13
      neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
  73. +6
    -6
      neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
  74. +3
    -2
      neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py
  75. +3
    -1
      neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
  76. +32
    -18
      neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
  77. +7
    -4
      neutron/plugins/ml2/plugin.py
  78. +5
    -5
      neutron/plugins/ml2/rpc.py
  79. +3
    -3
      neutron/plugins/oneconvergence/plugin.py
  80. +12
    -6
      neutron/plugins/plumgrid/README
  81. +5
    -4
      neutron/service.py
  82. +4
    -4
      neutron/services/l3_router/l3_arista.py
  83. +3
    -3
      neutron/services/l3_router/l3_router_plugin.py
  84. +2
    -2
      neutron/services/l3_router/l3_sdnve.py
  85. +4
    -4
      neutron/services/metering/agents/metering_agent.py
  86. +1
    -0
      neutron/services/metering/metering_plugin.py
  87. +3
    -2
      neutron/services/provider_configuration.py
  88. +5
    -6
      neutron/tests/base.py
  89. +9
    -10
      neutron/tests/common/machine_fixtures.py
  90. +245
    -27
      neutron/tests/common/net_helpers.py
  91. +8
    -0
      neutron/tests/etc/policy.json
  92. +8
    -9
      neutron/tests/fullstack/config_fixtures.py
  93. +13
    -27
      neutron/tests/fullstack/fullstack_fixtures.py
  94. +2
    -2
      neutron/tests/fullstack/test_l3_agent.py
  95. +3
    -226
      neutron/tests/functional/agent/linux/helpers.py
  96. +0
    -34
      neutron/tests/functional/agent/linux/test_helpers.py
  97. +11
    -12
      neutron/tests/functional/agent/linux/test_iptables.py
  98. +100
    -0
      neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py
  99. +298
    -0
      neutron/tests/functional/agent/test_l2_ovs_agent.py
  100. +0
    -0
      neutron/tests/functional/agent/test_l3_agent.py

+ 2
- 2
doc/source/devref/callbacks.rst View File

@@ -69,7 +69,7 @@ do whatever they are supposed to do. In a callback-less world this would work li
C->my_random_very_difficult_to_remember_method_about_router_created()

If B and/or C change, things become sour. In a callback-based world, things become a lot
more uniform and straightward:
more uniform and straightforward:

::

@@ -319,7 +319,7 @@ Is the registry thread-safe?

Short answer is no: it is not safe to make mutations while callbacks are being called (more
details as to why can be found `here <https://hg.python.org/releasing/2.7.9/file/753a8f457ddc/Objects/dictobject.c#l937>`_).
A mutation could happen if a 'subscribe'/'unsuscribe' operation interleaves with the execution
A mutation could happen if a 'subscribe'/'unsubscribe' operation interleaves with the execution
of the notify loop. Albeit there is a possibility that things may end up in a bad state, the
registry works correctly under the assumption that subscriptions happen at the very beginning
of the life of the process and that the unsubscriptions (if any) take place at the very end.

+ 485
- 335
doc/source/devref/contribute.rst View File

@@ -1,6 +1,19 @@
Contributing new extensions to Neutron
======================================

**NOTE!**
---------

**Third-party plugins/drivers which do not start decomposition in Liberty will
be marked as deprecated, and they will be removed before the Mxxx-3
milestone.**

Read on for details ...


Introduction
------------

Neutron has a pluggable architecture, with a number of extension points.
This documentation covers aspects relevant to contributing new Neutron
v2 core (aka monolithic) plugins, ML2 mechanism drivers, and L3 service
@@ -16,22 +29,44 @@ within the OpenStack Networking project. If you are a developer who
wants to provide a Neutron-based solution without interacting with the
Neutron community, you are free to do so, but you can stop reading now,
as this guide is not for you.
In fact, from the Kilo release onwards, the Neutron core team propose that
additions to the codebase adopt a structure where the *monolithic plugins*,
*ML2 MechanismDrivers*, and *L3 service plugins* are integration-only
(called "vendor integration" hereinafter) to code that lives outside the
tree (called "vendor library" hereinafter); the same applies for any
vendor-specific agents. The only part that is to stay in the tree is the
agent 'main' (a small python file that imports agent code from the vendor
library and starts it). 'Outside the tree' can be anything that is publicly
available: it may be a stackforge repo for instance, a tarball, a pypi package,
etc. A plugin/drivers maintainer team self-governs in order to promote sharing,
reuse, innovation, and release of the 'out-of-tree' deliverable. It should not
be required for any member of the core team to be involved with this process,

Plugins and drivers for non-reference implementations are known as
"third-party" code. This includes code for supporting vendor products, as well
as code for supporting open-source networking implementations.

Before the Kilo release these plugins and drivers were included in the Neutron
tree. During the Kilo cycle the third-party plugins and drivers underwent the
first phase of a process called decomposition. During this phase, each plugin
and driver moved the bulk of its logic to a separate git repository, while
leaving a thin "shim" in the neutron tree together with the DB models and
migrations (and perhaps some config examples).

During the Liberty cycle the decomposition concept was taken to its conclusion
by allowing third-party code to exist entirely out of tree. Further extension
mechanisms have been provided to better support external plugins and drivers
that alter the API and/or the data model.

In the Mxxx cycle we will **require** all third-party code to be moved out of
the neutron tree completely.

'Outside the tree' can be anything that is publicly available: it may be a repo
on git.openstack.org for instance, a tarball, a pypi package, etc. A
plugin/drivers maintainer team self-governs in order to promote sharing, reuse,
innovation, and release of the 'out-of-tree' deliverable. It should not be
required for any member of the core team to be involved with this process,
although core members of the Neutron team can participate in whichever capacity
is deemed necessary to facilitate out-of-tree development.

Below, the following strategies will be documented:
This guide is aimed at you as the maintainer of code that integrates with
Neutron but resides in a separate repository.


Contribution Process
--------------------

If you want to extend OpenStack Networking with your technology, and you want
to do it within the visibility of the OpenStack project, follow the guidelines
and examples below. We'll describe best practices for:

* Design and Development;
* Testing and Continuous Integration;
@@ -40,105 +75,61 @@ Below, the following strategies will be documented:
* DevStack Integration;
* Documentation;

This document will then provide a working example on how to contribute
new additions to Neutron.
Once you have everything in place you may want to add your project to the list
of Neutron sub-projects. Submit a patch via a gerrit review to neutron to add
your project to ``doc/source/devref/sub_projects.rst``.


Design and Development
----------------------

Blueprint Spec Submission Strategy
Assuming you have a working repository, any development to your own repo does
not need any blueprint, specification or bugs against Neutron. However, if your
project is a part of the Neutron Stadium effort, you are expected to
participate in the principles of the Four Opens, meaning your design should be
done in the open. Thus, it is encouraged to file documentation for changes in
your own repository.

If your code is hosted on git.openstack.org then the gerrit review system is
automatically provided. Contributors should follow the review guidelines
similar to those of Neutron. However, you as the maintainer have the
flexibility to choose who can approve/merge changes in your own repo.

It is recommended (but not required, see `policies
<http://docs.openstack.org/developer/neutron/policies/thirdparty-ci.html>`_)
that you set up a third-party CI system. This will provide a vehicle for
checking the third-party code against Neutron changes. See `Testing and
Continuous Integration`_ below for more detailed recommendations.

Design documents can still be supplied in form of Restructured Text (RST)
documents, within the same third-party library repo. If changes to the common
Neutron code are required, an `RFE
<http://docs.openstack.org/developer/neutron/policies/blueprints.html#neutron-request-for-feature-enhancements>`_
may need to be filed. However every case is different and you are invited to
seek guidance from Neutron core reviewers about what steps to follow.


Testing and Continuous Integration
----------------------------------

Provided contributors adhere to the abovementioned development footprint
they should not be required to follow the spec process for changes that
only affect their vendor integration and library. New contributions can
simply be submitted for code review, with the proviso that adequate
documentation and 3rd CI party is supplied at the time of the code
submission. For tracking purposes, the review itself can be tagged
with a Launchpad bug report. The bug should be marked as wishlist to
avoid complicating tracking of Neutron's primary deliverables. Design
documents can still be supplied in form of RST documents, within the same
vendor library repo. If substantial change to the common Neutron code are
required, a spec that targets common Neutron code will be required, however
every case is different and a contributor is invited to seek guidance from
the Neutron core team as to what steps to follow, and whether a spec or
a bug report is more suited for what a contributor needs to deliver.

Once again, for submitting the integration module to the Neutron codebase,
no spec is required.

Development Strategy
--------------------
The following strategies are recommendations only, since third-party CI testing
is not an enforced requirement. However, these strategies are employed by the
majority of the plugin/driver contributors that actively participate in the
Neutron development community, since they have learned from experience how
quickly their code can fall out of sync with the rapidly changing Neutron core
code base.

* The following elements are suggested to be contributed in the tree
for plugins and drivers (called vendor integration hereinafter):

* Data models
* Extension definitions
* Configuration files
* Requirements file targeting vendor code

* Things that do not remain in the tree (called vendor library hereinafter):

* Vendor specific logic
* Associated unit tests

The idea here would be to provide in-tree the plugin/driver code that
implements an API, but have it delegate to out-of-tree code for
backend-specific interactions. The vendor integration will then typically
involve minor passthrough/parsing of parameters, minor handling of DB objects
as well as handling of responses, whereas the vendor library will do the
heavylifting and implement the vendor-specific logic. The boundary between
the in-tree layer and the out-of-tree one should be defined by the contributor
while asking these types of questions:

* If something changes in my backend, do I need to alter the integration
layer drastically? Clearly, the less impact there is, the better the
separation being achieved.
* If I expose vendor details (e.g. protocols, auth, etc.), can I easily swap
and replace the targeted backend (e.g. hardware with a newer version
being supplied) without affecting the integration too much? Clearly, the
more reusable the integration the better the separation.

As mentioned above, the vendor code *must* be available publicly, and a git
repository makes the most sense. By doing so, the module itself can be made
accessible using a pip requirements file. This file should not be confused
with the Neutron requirements file that lists all common dependencies. Instead
it should be a file 'requirements.txt' that is located in neutron/plugins/pluginXXX/,
whose content is something along the lines of 'my_plugin_xxx_library>=X.Y.Z'.
Vendors are responsible for ensuring that their library does not depend on
libraries conflicting with global requirements, but it could depend on
libraries not included in the global requirements. Just as in Neutron's
main requirements.txt, it will be possible to pin the version of the vendor
library.

For instance, a vendor integration module can become as simple as one that
performs only the following:

* Registering config options
* Registering the plugin class
* Registering the models
* Registering the extensions

Testing Strategy
----------------

The testing process will be as follows:

* No unit tests for the vendor integration of plugins and drivers are deemed
necessary. The expectation is that contributors would run unit test in their
own external library (e.g. in stackforge where Jenkins setup is for free).
For unit tests that validate the vendor library, it is the responsibility of
the vendor to choose what CI system they see fit to run them. There is no
need or requirement to use OpenStack CI resources if they do not want to.
Having said that, it may be useful to provide coverage for the shim layer in
the form of basic validation as done in `ODL <https://git.openstack.org/cgit/openstack/networking-odl/tree/networking_odl/tests/unit/ml2/test_mechanism_odl.py>`_ and `LBaaS A10 driver <https://git.openstack.org/cgit/openstack/neutron-lbaas/tree/neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v1.py>`_.

* 3rd Party CI will continue to validate vendor integration with Neutron via
functional testing. 3rd Party CI is a communication mechanism. This objective
of this mechanism is as follows:

* it communicates to plugin/driver contributors when someone has contributed
a change that is potentially breaking. It is then up to a given
contributor maintaining the affected plugin to determine whether the
failure is transient or real, and resolve the problem if it is.
* You should run unit tests in your own external library (e.g. on
git.openstack.org where Jenkins setup is for free).

* Your third-party CI should validate third-party integration with Neutron via
functional testing. The third-party CI is a communication mechanism. The
objective of this mechanism is as follows:

* it communicates to you when someone has contributed a change that
potentially breaks your code. It is then up to you maintaining the affected
plugin/driver to determine whether the failure is transient or real, and
resolve the problem if it is.
* it communicates to a patch author that they may be breaking a plugin/driver.
If they have the time/energy/relationship with the maintainer of the
plugin/driver in question, then they can (at their discretion) work to
@@ -146,69 +137,104 @@ The testing process will be as follow:
* it communicates to the community at large whether a given plugin/driver
is being actively maintained.
* A maintainer that is perceived to be responsive to failures in their
3rd party CI jobs is likely to generate community goodwill.

It is worth noting that if the vendor library is hosted on StackForge, due to
current openstack-infra limitations, it is not possible to have 3rd party CI systems
participating in the gate pipeline for the StackForge repo. This means that the only
validation provided during the merge process to the StackForge repo is through unit
tests. Post-merge hooks can still be exploited to provide 3rd party CI feedback, and
alert the contributor/reviewer of potential issues. As mentioned above, 3rd party CI
systems will continue to validate Neutron core commits. This will allow them to
detect when incompatible changes occur, whether they are in Neutron or in the vendor
library repo.

Review and Defect Management Strategies
---------------------------------------

The usual process applies to the code that is part of OpenStack Neutron. More
precisely:

* Bugs that affect vendor code can be filed against the Neutron integration,
if the integration code is at fault. Otherwise, the code maintainer may
decide to fix a bug without oversight, and update their requirements file
to target a new version of their vendor library. It makes sense to
require 3rd party CI for a given plugin/driver to pass when changing their
dependency before merging to any branch (i.e. both master and stable branches).
* Vendor specific code should follow the same review guidelines as any other
code in the tree. However, the maintainer has flexibility to choose who
can approve/merge changes in this repo.
third-party CI jobs is likely to generate community goodwill.

It is worth noting that if the plugin/driver repository is hosted on
git.openstack.org, due to current openstack-infra limitations, it is not
possible to have third-party CI systems participating in the gate pipeline
for the repo. This means that the only validation provided during the merge
process to the repo is through unit tests. Post-merge hooks can still be
exploited to provide third-party CI feedback, and alert you of potential
issues. As mentioned above, third-party CI systems will continue to validate
Neutron core commits. This will allow them to detect when incompatible
changes occur, whether they are in Neutron or in the third-party repo.


Defect Management
-----------------

Bugs affecting third-party code should *not* be filed in the Neutron project on
launchpad. Bug tracking can be done in any system you choose, but by creating a
third-party project in launchpad, bugs that affect both Neutron and your code
can be more easily tracked using launchpad's "also affects project" feature.

Security Issues
~~~~~~~~~~~~~~~

Here are some answers to how to handle security issues in your repo, taken
from `this openstack-dev mailing list message
<http://lists.openstack.org/pipermail/openstack-dev/2015-July/068617.html>`_:

- How should your security issues be managed?

The OpenStack Vulnerability Management Team (VMT) follows a `documented process
<https://security.openstack.org/vmt-process.html>`_ which can basically be
reused by any project-team when needed.

- Should the OpenStack security team be involved?

The OpenStack VMT directly oversees vulnerability reporting and disclosure for
a `subset of OpenStack source code repositories
<https://wiki.openstack.org/wiki/Security_supported_projects>`_. However they
are still quite happy to answer any questions you might have about
vulnerability management for your own projects even if they're not part of that
set. Feel free to reach out to the VMT in public or in private.

Also, the VMT is an autonomous subgroup of the much larger `OpenStack Security
project-team
<http://governance.openstack.org/reference/projects/security.html>`_. They're a
knowledgeable bunch and quite responsive if you want to get their opinions or
help with security-related issues (vulnerabilities or otherwise).

- Does a CVE need to be filed?

It can vary widely. If a commercial distribution such as Red Hat is
redistributing a vulnerable version of your software then they may assign one
anyway even if you don't request one yourself. Or the reporter may request one;
the reporter may even be affiliated with an organization who has already
assigned/obtained a CVE before they initiate contact with you.

- Do the maintainers need to publish OSSN or equivalent documents?

OpenStack Security Advisories (OSSA) are official publications of the OpenStack
VMT and only cover VMT-supported software. OpenStack Security Notes (OSSN) are
published by editors within the OpenStack Security project-team on more general
security topics and may even cover issues in non-OpenStack software commonly
used in conjunction with OpenStack, so it's at their discretion as to whether
they would be able to accommodate a particular issue with an OSSN.

However, these are all fairly arbitrary labels, and what really matters in the
grand scheme of things is that vulnerabilities are handled seriously, fixed
with due urgency and care, and announced widely -- not just on relevant
OpenStack mailing lists but also preferably somewhere with broader distribution
like the `Open Source Security mailing list
<http://oss-security.openwall.org/wiki/mailing-lists/oss-security>`_. The goal
is to get information on your vulnerabilities, mitigating measures and fixes
into the hands of the people using your software in a timely manner.

- Anything else to consider here?

The OpenStack VMT is in the process of trying to reinvent itself so that it can
better scale within the context of the "Big Tent." This includes making sure
the policy/process documentation is more consumable and reusable even by
project-teams working on software outside the scope of our charter. It's a work
in progress, and any input is welcome on how we can make this function well for
everyone.


Backport Management Strategies
------------------------------

As outlined in the `Spec proposal <http://specs.openstack.org/openstack/neutron-specs/specs/kilo/core-vendor-decomposition.html>`_
all new plugins and drivers will have to follow the contribution model
described here. As for existing plugins and drivers, no in-tree features can
be merged until some progress has been done to make the solution adhere to
this model. That said, there is the question of critical fixes and/or backports
to `stable branches <https://wiki.openstack.org/wiki/StableBranch>`_. The possible
scenarios are:

* The decomposition just completed, we are in the cycle (X) where the decomposition
initiated: in this case, the Neutron master branch no longer has the vendor
library code, but the stable branch still does. Backports via straight
cherry-picks may not be possible, or as easy, therefore a custom backport to
stable could be deemed acceptable to Neutron's stable branches (e.g. stable/X-1
and/or stable/X-2), as required.
* The decomposition is complete, we are in the next cycle where the
decomposition work completed (X+1): backports will be done to the stable branch
available of the vendor library (stable/X), and Neutron's stable branch
(stable/X-1), as outlined in the previous step.
* The decomposition is complete, we are in two or more cycles after the
decomposition work completed (X+2, or later). Backports will be done to the
stable branch(s) available of the vendor library (stable/X, stable/X+1).
* The decomposition is in progress: as long as the vendor code is still in
master, patches will need to go to master before a backport to stable.
Acceptance will be determined on the scope of changes (based on both the
amount of work and severity of the issue). In this case, the plugin or
driver maintainer will need to ensure that the fix gets applied to the
external repo, if necessary (to avoid missing it during the migration process).
* The decomposition has not started: in this case, depending on the issue,
review attention from core members is best effort, and although there is no
explicit rule to prevent them from merging to master, it is in the best interest
of the maintainer to avoid introducing or modifying existing code that will
ultimately be deprecated.
This section applies only to third-party maintainers who had code in the
Neutron tree during the Kilo and earlier releases. It will be obsolete once the
Kilo release is no longer supported.

If a change made to out-of-tree third-party code needs to be back-ported to
in-tree code in a stable branch, you may submit a review without a
corresponding master branch change. The change will be evaluated by core
reviewers for stable branches to ensure that the backport is justified and that
it does not affect Neutron core code stability.


DevStack Integration Strategies
-------------------------------
@@ -221,79 +247,34 @@ make sense depending on whether you are contributing a new or existing plugin or
driver.

If you are contributing a new plugin, the approach to choose should be based on
`Extras.d Hooks' externally hosted plugins <http://docs.openstack.org/developer/devstack/plugins.html#extras-d-hooks>`_.
With the extra.d hooks, the DevStack integration is colocated with the vendor integration
library, and it leads to the greatest level of flexibility when dealing with DevStack based
dev/test deployments.

Having said that, most Neutron plugins developed in the past likely already have
integration with DevStack in the form of `neutron_plugins <https://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/neutron_plugins>`_.
If the plugin is being decomposed in vendor integration plus vendor library, it would
be necessary to adjust the instructions provided in the neutron_plugin file to pull the
vendor library code as a new dependency. For instance, the instructions below:

::

INSTALL_FROM_REQUIREMENTS=$(trueorfalse True INSTALL_FROM_REQUIREMENTS)

if [[ "$INSTALL_FROM_REQUIREMENTS" == "False" ]]; then
git_clone $NEUTRON_LIB_REPO $NEUTRON_LIB_DIR $NEUTRON_LIB_BRANCH
setup_package $NEUTRON_LIB_DIR
else
# Retrieve the package from the vendor library's requirements.txt
plugin_package=$(cat $NEUTRON_LIB_REQUIREMENTS_FILE)
pip_install "$plugin_package"
fi

could be placed in 'neutron_plugin_configure_service', ahead of the service
configuration. An alternative could be under the `third_party section
<https://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/neutron_thirdparty>`_,
if available. This solution can be similarly exploited for both monolithic
plugins or ML2 mechanism drivers. The configuration of the plugin or driver itself can be
done by leveraging the extensibility mechanisms provided by `local.conf <http://docs.openstack.org/developer/devstack/configuration.html>`_. In fact, since the .ini file for the vendor plugin or driver lives
in the Neutron tree, it is possible to add the section below to local.conf:

::

[[post-config|$THE_FILE_YOU_NEED_TO_CUSTOMIZE]]

# Override your section config as you see fit
[DEFAULT]
verbose=True

Which in turn is going to edit the file with the options outlined in the post-config
section.

The above mentioned approach, albeit valid, has the shortcoming of depending on DevStack's
explicit support for the plugin installation and configuration, and the plugin maintainer
is strongly encouraged to revise the existing DevStack integration, in order to evolve it
in an extras.d hooks based approach.

One final consideration is worth making for 3rd party CI setups: if `Devstack Gate
<https://git.openstack.org/cgit/openstack-infra/devstack-gate>`_ is used, it does provide hook
functions that can be executed at specific times of the devstack-gate-wrap script run.
For example, the `Neutron Functional job <https://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/neutron.yaml>`_ uses them. For more details see `devstack-vm-gate-wrap.sh <https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh>`_.

Documentation Strategies
------------------------

It is the duty of the new contributor to provide working links that can be
referenced from the OpenStack upstream documentation.
#TODO(armax): provide more info, when available.

How-to
------

The how-to below assumes that the vendor library will be hosted on StackForge.
Stackforge lets you tap in the entire OpenStack CI infrastructure and can be
a great place to start from to contribute your new or existing driver/plugin.
The list of steps below is somewhat the tl;dr version of what you can find
on http://docs.openstack.org/infra/manual/creators.html. They are meant to
`Extras.d Hooks' externally hosted plugins
<http://docs.openstack.org/developer/devstack/plugins.html#extras-d-hooks>`_.
With the extra.d hooks, the DevStack integration is co-located with the
third-party integration library, and it leads to the greatest level of
flexibility when dealing with DevStack based dev/test deployments.

One final consideration is worth making for third-party CI setups: if `Devstack
Gate <https://git.openstack.org/cgit/openstack-infra/devstack-gate>`_ is used,
it does provide hook functions that can be executed at specific times of the
devstack-gate-wrap script run. For example, the `Neutron Functional job
<https://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/neutron.yaml>`_
uses them. For more details see `devstack-vm-gate-wrap.sh
<https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh>`_.


Project Initial Setup
---------------------

The how-to below assumes that the third-party library will be hosted on
git.openstack.org. This lets you tap in the entire OpenStack CI infrastructure
and can be a great place to start from to contribute your new or existing
driver/plugin. The list of steps below is a summarized version of what you can
find on http://docs.openstack.org/infra/manual/creators.html. They are meant to
be the bare minimum you have to complete in order to get you off the ground.

* Create a public repository: this can be a personal git.openstack.org repo or any
publicly available git repo, e.g. ``https://github.com/john-doe/foo.git``. This
would be a temporary buffer to be used to feed the StackForge one.
would be a temporary buffer to be used to feed the one on git.openstack.org.
* Initialize the repository: if you are starting afresh, you may *optionally*
want to use cookiecutter to get a skeleton project. You can learn how to use
cookiecutter on https://git.openstack.org/cgit/openstack-dev/cookiecutter.
@@ -301,104 +282,273 @@ be the bare minimum you have to complete in order to get you off the ground.
want to skip this step now, build the history first (next step), and come back
here to initialize the remainder of the repository with other files being
generated by the cookiecutter (like tox.ini, setup.cfg, setup.py, etc.).
* Building the history: if you are contributing an existing driver/plugin,
you may want to preserve the existing history. If not, you can go to the
next step. To import the history from an existing project this is what
you need to do:

* Clone a copy of the neutron repository to be manipulated.
* Go into the Neutron repo to be changed.
* Execute file split.sh, available in ./tools, and follow instructions.

::

git clone https://git.openstack.org/openstack/neutron.git
cd neutron
./tools/split.sh
# Sit and wait for a while, or grab a cup of your favorite drink

At this point you will have the project pruned of everything else but
the files you want to export, with their history. The next steps are:

* Check out stable branches for the project: even though stable branches
are not strictly necessary during the creation of the StackForge repository
(as outlined in the next step below), they do not hurt, and it is
recommended to keep them during the import process.
* Add a remote that points to the repository created before.
* (Optional) If the repository has already been initialized with
cookiecutter, you need to pull first; if not, you can either push
the existing commits/tags or apply and commit further changes to fix
up the structure of the repo the way you see fit.
* Finally, push commits and tags to the public repository. If you followed
these instructions step-by-step, you will have a source repository
that contains both a master and stable branches, as well as tags. Some
of these steps are outlined below:

::

git remote add <foo> https://github.com/john-doe/foo.git
git pull foo master # OPTIONAL, if foo is non-empty
git push --all foo && git push --tags foo

* Create a StackForge repository: for this you need the help of the OpenStack
infra team. It is worth noting that you only get one shot at creating the
StackForge repository. This is the time you get to choose whether you want
to start from a clean slate, or you want to import the repo created during
the previous step. In the latter case, you can do so by specifying the
upstream section for your project in project-config/gerrit/project.yaml.
Steps are documented on the
`Repository Creator's Guide <http://docs.openstack.org/infra/manual/creators.html>`_.
* Create a repository on git.openstack.org (see `Official Sub-Projects
<http://docs.openstack.org/developer/neutron/devref/sub_projects.html>`_). For
this you need the help of the OpenStack infra team. It is worth noting that
you only get one shot at creating the repository on git.openstack.org. This
is the time you get to choose whether you want to start from a clean slate,
or you want to import the repo created during the previous step. In the
latter case, you can do so by specifying the upstream section for your
project in project-config/gerrit/project.yaml. Steps are documented on the
`Repository Creator's Guide
<http://docs.openstack.org/infra/manual/creators.html>`_.
* Ask for a Launchpad user to be assigned to the core team created. Steps are
documented in
`this section <http://docs.openstack.org/infra/manual/creators.html#update-the-gerrit-group-members>`_.
* Fix, fix, fix: at this point you have an external base to work on. You
can develop against the new stackforge project, the same way you work
with any other OpenStack project: you have pep8, docs, and python27 CI
jobs that validate your patches when posted to Gerrit. For instance, one
thing you would need to do is to define an entry point for your plugin
or driver in your own setup.cfg similarly as to how it is done
`here <https://git.openstack.org/cgit/openstack/networking-odl/tree/setup.cfg#n31>`_.
documented in `this section
<http://docs.openstack.org/infra/manual/creators.html#update-the-gerrit-group-members>`_.
* Fix, fix, fix: at this point you have an external base to work on. You can
develop against the new git.openstack.org project, the same way you work with
any other OpenStack project: you have pep8, docs, and python27 CI jobs that
validate your patches when posted to Gerrit. For instance, one thing you
would need to do is to define an entry point for your plugin or driver in
your own setup.cfg similarly as to how it is done in the `setup.cfg for ODL
<https://git.openstack.org/cgit/openstack/networking-odl/tree/setup.cfg#n31>`_.
* Define an entry point for your plugin or driver in setup.cfg
* Create 3rd Party CI account: if you do not already have one, follow
instructions for
`3rd Party CI <http://docs.openstack.org/infra/system-config/third_party.html>`_ to get one.
* TODO(armax): ...


Decomposition progress chart
============================

The chart below captures the progress of the core-vendor-decomposition effort
for existing plugins and drivers at the time the decomp effort started. New
drivers and plugins are not required to be listed here. This chart is short
lived: once the effort is complete, this chart no longer needs to exist and
will be removed. The following aspects are captured:

* Name: the name of the project that implements a Neutron plugin or driver. The
name is an internal target for links that point to source code, etc.
* Plugins/Drivers: whether the source code contains a core (aka monolithic)
plugin, a set of ML2 drivers, and/or (service) plugins (or extensions) for
firewall, vpn, and load balancers.
* Launchpad: whether the project is managed through Launchpad.
* PyPI: whether the project deliverables are available through PyPI.
* State: a code to represent the current state of the decomposition. Possible
values are:

* [A] External repo available, no code decomposition
* [B] External repo available, partial code decomposition
* [C] External repo available, code decomposition is complete
* [D] Not deemed required. Driver is already bare-bone and decomposition
effort is not considered justified. Assessment may change in the
future.

Absence of an entry for an existing plugin or driver means no active effort
has been observed or potentially not required.
* Completed in: the release in which the effort is considered completed. Code
completion can be deemed as such, if there is no overlap/duplication between
what exists in the Neutron tree, and what exists in the vendor repo.

+-------------------------------+-----------------------+-----------+------------------+---------+--------------+
| Name | Plugins/Drivers | Launchpad | PyPI | State | Completed in |
+===============================+=======================+===========+==================+=========+==============+
| freescale-nscs | ml2,fw | no | no | [D] | |
+-------------------------------+-----------------------+-----------+------------------+---------+--------------+
* Create third-party CI account: if you do not already have one, follow
instructions for `third-party CI
<http://docs.openstack.org/infra/system-config/third_party.html>`_ to get
one.


Integrating with the Neutron system
-----------------------------------

(This section currently describes the goals and progress of the completion of
the decomposition work during the Liberty development cycle. The content here
will be updated as the work progresses. In its final form this section will be
merged with the previous section. When all existing plugins/drivers are fully
decomposed, this document will be a recipe for how to add a new Neutron plugin
or driver completely out-of-tree.)

For the Liberty cycle we aim to move all the existing third-party code out of
the Neutron tree. Each category of code and its removal plan is described
below.


Existing Shims
~~~~~~~~~~~~~~

Liberty Steps
+++++++++++++

The existing shims shall now be moved out of tree, together with any test
code. The entry points shall be moved as described below in `Entry Points`_.


Configuration Files
~~~~~~~~~~~~~~~~~~~

The ``data_files`` in the ``[files]`` section of ``setup.cfg`` of Neutron shall
not contain any third-party references. These shall be located in the same
section of the third-party repo's own ``setup.cfg`` file.

* Note: Care should be taken when naming sections in configuration files. When
the Neutron service or an agent starts, oslo.config loads sections from all
specified config files. This means that if a section [foo] exists in multiple
config files, duplicate settings will collide. It is therefore recommended to
prefix section names with a third-party string, e.g. [vendor_foo].

Liberty Steps
+++++++++++++

Third-party configuration files still in the neutron tree have no dependencies
and can simply be moved. The maintainers should add their configuration file(s)
to their repo and then remove them from neutron.

**ToDo: Inclusion in OpenStack documentation?**
Is there a recommended way to have third-party config options listed in the
configuration guide in docs.openstack.org?


Database Models and Migrations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A third-party repo may contain database models for its own tables. Although
these tables are in the Neutron database, they are independently managed
entirely within the third-party code. Third-party code shall **never** modify
neutron core tables in any way.

Each repo has its own alembic migration branch that adds, removes and modifies
its own tables in the neutron database schema.

* Note: Care should be taken when adding new tables. To prevent collision of
table names it is recommended to prefix them with a vendor/plugin string.

* Note: A third-party maintainer may opt to use a separate database for their
tables. This may complicate cases where there are foreign key constraints
across schemas for DBMS that do not support this well. Third-party maintainer
discretion advised.

The database tables owned by a third-party repo can have references to fields
in neutron core tables. However, the alembic branch for a plugin/driver repo
shall never update any part of a table that it does not own.

**Note: What happens when a referenced item changes?**

* **Q:** If a driver's table has a reference (for example a foreign key) to a
neutron core table, and the referenced item is changed in neutron, what
should you do?

* **A:** Fortunately, this should be an extremely rare occurrence. Neutron core
reviewers will not allow such a change unless there is a very carefully
thought-out design decision behind it. That design will include how to
address any third-party code affected. (This is another good reason why you
should stay actively involved with the Neutron developer community.)

The ``neutron-db-manage`` alembic wrapper script for neutron detects alembic
branches for installed third-party repos, and the upgrade command automatically
applies to all of them. A third-party repo must register its alembic migrations
at installation time. This is done by providing an entrypoint in setup.cfg as
follows:

For a third-party repo named ``networking-foo``, add the alembic_migrations
directory as an entrypoint in the ``neutron.db.alembic_migrations`` group::

[entry_points]
neutron.db.alembic_migrations =
networking-foo = networking_foo.db.migration:alembic_migrations

Liberty Steps
+++++++++++++

Each decomposed plugin/driver that has its own tables in the neutron database
should take these steps to move the models for the tables out of tree.

#. Add the models to the external repo.
#. Create a start migration for the repo's alembic branch. Note: it is
recommended to keep the migration file(s) in the same location in the
third-party repo as is done in the neutron repo,
i.e. ``networking_foo/db/migration/alembic_migrations/versions/*.py``
#. Remove the models from the neutron repo.
#. Add the names of the removed tables to ``DRIVER_TABLES`` in
``neutron/db/migration/alembic_migrations/external.py`` (this is used for
testing, see below).

**ToDo: neutron-db-manage autogenerate**
The alembic autogenerate command needs to support branches in external
repos. Bug #1471333 has been filed for this.


DB Model/Migration Testing
~~~~~~~~~~~~~~~~~~~~~~~~~~

Here is a `template functional test
<https://bugs.launchpad.net/neutron/+bug/1470678>`_ (TODO:Ann) third-party
maintainers can use to develop tests for model-vs-migration sync in their
repos. It is recommended that each third-party CI sets up such a test, and runs
it regularly against Neutron master.

Liberty Steps
+++++++++++++

The model_sync test will be updated to ignore the models that have been moved
out of tree. A ``DRIVER_TABLES`` list will be maintained in
``neutron/db/migration/alembic_migrations/external.py``.


Entry Points
~~~~~~~~~~~~

The `Python setuptools <https://pythonhosted.org/setuptools>`_ installs all
entry points for packages in one global namespace for an environment. Thus each
third-party repo can define its package's own ``[entry_points]`` in its own
``setup.cfg`` file.

For example, for the ``networking-foo`` repo::

[entry_points]
console_scripts =
neutron-foo-agent = networking_foo.cmd.eventlet.agents.foo:main
neutron.core_plugins =
foo_monolithic = networking_foo.plugins.monolithic.plugin:FooPluginV2
neutron.service_plugins =
foo_l3 = networking_foo.services.l3_router.l3_foo:FooL3ServicePlugin
neutron.ml2.type_drivers =
foo_type = networking_foo.plugins.ml2.drivers.foo:FooType
neutron.ml2.mechanism_drivers =
foo_ml2 = networking_foo.plugins.ml2.drivers.foo:FooDriver
neutron.ml2.extension_drivers =
foo_ext = networking_foo.plugins.ml2.drivers.foo:FooExtensionDriver

* Note: It is advisable to include ``foo`` in the names of these entry points to
avoid conflicts with other third-party packages that may get installed in the
same environment.


API Extensions
~~~~~~~~~~~~~~

Extensions can be loaded in two ways:

#. Use the ``append_api_extensions_path()`` library API. This method is defined
in ``neutron/api/extensions.py`` in the neutron tree.
#. Leverage the ``api_extensions_path`` config variable when deploying. See the
example config file ``etc/neutron.conf`` in the neutron tree where this
variable is commented.


Interface Drivers
~~~~~~~~~~~~~~~~~

Interface (VIF) drivers for the reference implementations are defined in
``neutron/agent/linux/interface.py``. Third-party interface drivers shall be
defined in a similar location within their own repo.

The entry point for the interface driver is a Neutron config option. It is up to
the installer to configure this item in the ``[default]`` section. For example::

[default]
interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver

**ToDo: Interface Driver port bindings.**
These are currently defined by the ``VIF_TYPES`` in
``neutron/extensions/portbindings.py``. We could make this config-driven
for agents. For Nova, selecting the VIF driver can be done outside of
Neutron (using the new `os-vif python library
<https://review.openstack.org/193668>`_?). Armando and Akihiro to discuss.


Rootwrap Filters
~~~~~~~~~~~~~~~~

If a third-party repo needs a rootwrap filter for a command that is not used by
Neutron core, then the filter shall be defined in the third-party repo.

For example, to add a rootwrap filters for commands in repo ``networking-foo``:

* In the repo, create the file:
``etc/neutron/rootwrap.d/foo.filters``

* In the repo's ``setup.cfg`` add the filters to data_files::

[files]
data_files =
etc/neutron/rootwrap.d =
etc/neutron/rootwrap.d/foo.filters


Extending python-neutronclient
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The maintainer of a third-party component may wish to add extensions to the
Neutron CLI client. Thanks to https://review.openstack.org/148318 this can now
be accomplished. See `Client Command Extensions
<client_command_extensions.html>`_.


Other repo-split items
~~~~~~~~~~~~~~~~~~~~~~

(These are still TBD.)

* Splitting policy.json? **ToDo** Armando will investigate.

* Generic instructions (or a template) for installing an out-of-tree plugin or
driver for Neutron. Possibly something for the networking guide, and/or a
template that plugin/driver maintainers can modify and include with their
package.


Decomposition Phase II Progress Chart
=====================================

TBD.

+ 1
- 0
doc/source/devref/index.rst View File

@@ -32,6 +32,7 @@ Programming HowTos and Tutorials

development.environment
contribute
neutron_api
sub_projects



+ 26
- 1
doc/source/devref/linuxbridge_agent.rst View File

@@ -1,2 +1,27 @@
===============================
L2 Networking with Linux Bridge
-------------------------------
===============================

This Agent uses the `Linux Bridge
<http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge>`_ to
provide L2 connectivity for VM instances running on the compute node to the
public network. A graphical illustration of the deployment can be found in
`OpenStack Admin Guide Linux Bridge
<http://docs.openstack.org/admin-guide-cloud/content/under_the_hood_linuxbridge.html>`_

In most common deployments, there is a compute and a network node. On both the
compute and the network node, the Linux Bridge Agent will manage virtual
switches, connectivity among them, and interaction via virtual ports with other
network components such as namespaces and underlying interfaces. Additionally,
on the compute node, the Linux Bridge Agent will manage security groups.

Three use cases and their packet flow are documented as follows:

1. `Legacy implementation with Linux Bridge
<http://docs.openstack.org/networking-guide/deploy_scenario1b.html>`_

2. `High Availability using L3HA with Linux Bridge
<http://docs.openstack.org/networking-guide/deploy_scenario3b.html>`_

3. `Provider networks with Linux Bridge
<http://docs.openstack.org/networking-guide/deploy_scenario4b.html>`_

+ 35
- 0
doc/source/devref/neutron_api.rst View File

@@ -0,0 +1,35 @@
Neutron public API
==================

Neutron main tree serves as a library for multiple subprojects that rely on
different modules from neutron.* namespace to accommodate their needs.
Specifically, advanced service repositories and open source or vendor
plugin/driver repositories do it.

Neutron modules differ in their API stability a lot, and there is no part of it
that is explicitly marked to be consumed by other projects.

That said, there are modules that other projects should definitely avoid relying on.

Specifically, no external repository should use anything located under
neutron.openstack.common.* import path. This code belongs to oslo-incubator
modules and is not meant to work for consumers other than neutron main tree
itself. (The only exception is made for advanced service repositories that are
tightly controlled by neutron community.) Long story short, if your repository
uses those modules, please switch to corresponding oslo libraries or use your
own copy of oslo-incubator files.


Breakages
---------

Neutron API is not very stable, and there are cases when a desired change in
neutron tree is expected to trigger breakage for one or more external
repositories under the neutron tent. Below you can find a list of known
incompatible changes that could or are known to trigger those breakages.

* change: oslo.service adopted.

- commit: 6e693fc91dd79cfbf181e3b015a1816d985ad02c
- solution: switch to using the oslo_service.* namespace; stop using ANY neutron.openstack.* contents.
- severity: low (plugins must not rely on that subtree).

+ 4
- 0
etc/dhcp_agent.ini View File

@@ -90,3 +90,7 @@
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

[AGENT]
# Log agent heartbeats from this DHCP agent
# log_agent_heartbeats = False

+ 4
- 0
etc/l3_agent.ini View File

@@ -122,3 +122,7 @@

# The advertisement interval in seconds
# ha_vrrp_advert_int = 2

[AGENT]
# Log agent heartbeats from this L3 agent
# log_agent_heartbeats = False

+ 4
- 0
etc/metadata_agent.ini View File

@@ -66,3 +66,7 @@ admin_password = %SERVICE_PASSWORD%
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5

[AGENT]
# Log agent heartbeats from this Metadata agent
# log_agent_heartbeats = False

+ 0
- 28
etc/neutron/plugins/ml2/linuxbridge_agent.ini View File

@@ -1,25 +1,3 @@
[vlans]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST change this to
# 'vlan' and configure network_vlan_ranges below in order for tenant
# networks to provide connectivity between hosts. Set to 'none' to
# disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = vlan

# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999

[linux_bridge]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
@@ -62,12 +40,6 @@
# Agent's polling interval in seconds
# polling_interval = 2

# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
# agents.
#
# rpc_support_old_agents = False
# Example: rpc_support_old_agents = True

# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
# SIGTERM. If value is set to 0, rpc timeout won't be changed.
#

+ 3
- 0
etc/neutron/plugins/ml2/openvswitch_agent.ini View File

@@ -58,6 +58,9 @@
# of_interface = ovs-ofctl

[agent]
# Log agent heartbeats from this OVS agent
# log_agent_heartbeats = False

# Agent's polling interval in seconds
# polling_interval = 2


+ 3
- 0
etc/neutron/plugins/vmware/nsx.ini View File

@@ -156,6 +156,9 @@
# lock management.
# locking_coordinator_url =

# (Optional) DHCP lease time
# dhcp_lease_time = 86400

[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version

+ 10
- 0
etc/neutron/plugins/vmware/policy/network-gateways.json View File

@@ -0,0 +1,10 @@
{
"create_network_gateway": "rule:admin_or_owner",
"update_network_gateway": "rule:admin_or_owner",
"delete_network_gateway": "rule:admin_or_owner",
"connect_network": "rule:admin_or_owner",
"disconnect_network": "rule:admin_or_owner",
"create_gateway_device": "rule:admin_or_owner",
"update_gateway_device": "rule:admin_or_owner",
"delete_gateway_device": "rule_admin_or_owner"
}

+ 7
- 0
etc/neutron/plugins/vmware/policy/routers.json View File

@@ -0,0 +1,7 @@
{
"create_router:external_gateway_info:enable_snat": "rule:admin_or_owner",
"create_router:distributed": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_or_owner",
"update_router:external_gateway_info:enable_snat": "rule:admin_or_owner",
"update_router:distributed": "rule:admin_or_owner"
}

+ 8
- 0
etc/policy.json View File

@@ -9,6 +9,7 @@
"shared_firewalls": "field:firewalls:shared=True",
"shared_firewall_policies": "field:firewall_policies:shared=True",
"shared_subnetpools": "field:subnetpools:shared=True",
"shared_address_scopes": "field:address_scopes:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",

@@ -23,6 +24,13 @@
"update_subnetpool": "rule:admin_or_owner",
"delete_subnetpool": "rule:admin_or_owner",

"create_address_scope": "",
"create_address_scope:shared": "rule:admin_only",
"get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
"update_address_scope": "rule:admin_or_owner",
"update_address_scope:shared": "rule:admin_only",
"delete_address_scope": "rule:admin_or_owner",

"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",

+ 2
- 0
neutron/agent/common/config.py View File

@@ -44,6 +44,8 @@ AGENT_STATE_OPTS = [
help=_('Seconds between nodes reporting state to server; '
'should be less than agent_down_time, best if it '
'is half or less than agent_down_time.')),
cfg.BoolOpt('log_agent_heartbeats', default=False,
help=_('Log agent heartbeats')),
]

INTERFACE_DRIVER_OPTS = [

+ 52
- 19
neutron/agent/common/ovs_lib.py View File

@@ -141,6 +141,12 @@ class BaseOVS(object):
return self.ovsdb.db_get(table, record, column).execute(
check_error=check_error, log_errors=log_errors)

def db_list(self, table, records=None, columns=None,
check_error=True, log_errors=True, if_exists=False):
return (self.ovsdb.db_list(table, records=records, columns=columns,
if_exists=if_exists).
execute(check_error=check_error, log_errors=log_errors))


class OVSBridge(BaseOVS):
def __init__(self, br_name):
@@ -319,11 +325,12 @@ class OVSBridge(BaseOVS):
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
port_info = self.db_list(
'Interface', columns=['name', 'external_ids', 'ofport'])
by_name = {x['name']: x for x in port_info}
for name in port_names:
external_ids = self.db_get_val("Interface", name, "external_ids",
check_error=True)
ofport = self.db_get_val("Interface", name, "ofport",
check_error=True)
external_ids = by_name[name]['external_ids']
ofport = by_name[name]['ofport']
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
@@ -341,10 +348,9 @@ class OVSBridge(BaseOVS):

def get_vif_port_to_ofport_map(self):
port_names = self.get_port_name_list()
cmd = self.ovsdb.db_list(
'Interface', port_names,
columns=['name', 'external_ids', 'ofport'], if_exists=True)
results = cmd.execute(check_error=True)
results = self.db_list(
'Interface', port_names, ['name', 'external_ids', 'ofport'],
if_exists=True)
port_map = {}
for r in results:
# fall back to basic interface name
@@ -359,10 +365,9 @@ class OVSBridge(BaseOVS):
def get_vif_port_set(self):
edge_ports = set()
port_names = self.get_port_name_list()
cmd = self.ovsdb.db_list(
'Interface', port_names,
columns=['name', 'external_ids', 'ofport'], if_exists=True)
results = cmd.execute(check_error=True)
results = self.db_list(
'Interface', port_names, ['name', 'external_ids', 'ofport'],
if_exists=True)
for result in results:
if result['ofport'] == UNASSIGNED_OFPORT:
LOG.warn(_LW("Found not yet ready openvswitch port: %s"),
@@ -400,11 +405,42 @@ class OVSBridge(BaseOVS):

"""
port_names = self.get_port_name_list()
cmd = self.ovsdb.db_list('Port', port_names, columns=['name', 'tag'],
if_exists=True)
results = cmd.execute(check_error=True)
results = self.db_list('Port', port_names, ['name', 'tag'],
if_exists=True)
return {p['name']: p['tag'] for p in results}

def get_vifs_by_ids(self, port_ids):
interface_info = self.db_list(
"Interface", columns=["name", "external_ids", "ofport"])
by_id = {x['external_ids'].get('iface-id'): x for x in interface_info}
intfs_on_bridge = self.ovsdb.list_ports(self.br_name).execute(
check_error=True)
result = {}
for port_id in port_ids:
result[port_id] = None
if (port_id not in by_id or
by_id[port_id]['name'] not in intfs_on_bridge):
LOG.info(_LI("Port %(port_id)s not present in bridge "
"%(br_name)s"),
{'port_id': port_id, 'br_name': self.br_name})
continue
pinfo = by_id[port_id]
if not self._check_ofport(port_id, pinfo):
continue
mac = pinfo['external_ids'].get('attached-mac')
result[port_id] = VifPort(pinfo['name'], pinfo['ofport'],
port_id, mac, self)
return result

@staticmethod
def _check_ofport(port_id, port_info):
if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
" positive integer"),
{'ofport': port_info['ofport'], 'vif': port_id})
return False
return True

def get_vif_port_by_id(self, port_id):
ports = self.ovsdb.db_find(
'Interface', ('external_ids', '=', {'iface-id': port_id}),
@@ -413,10 +449,7 @@ class OVSBridge(BaseOVS):
for port in ports:
if self.br_name != self.get_bridge_for_iface(port['name']):
continue
if port['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
" positive integer"),
{'ofport': port['ofport'], 'vif': port_id})
if not self._check_ofport(port_id, port):
continue
mac = port['external_ids'].get('attached-mac')
return VifPort(port['name'], port['ofport'], port_id, mac, self)

+ 3
- 2
neutron/agent/dhcp/agent.py View File

@@ -21,6 +21,7 @@ import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_utils import importutils

from neutron.agent.linux import dhcp
@@ -36,7 +37,6 @@ from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall

LOG = logging.getLogger(__name__)

@@ -548,7 +548,8 @@ class DhcpAgentWithStateReport(DhcpAgent):
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval

+ 2
- 2
neutron/agent/dhcp_agent.py View File

@@ -17,6 +17,7 @@
import sys

from oslo_config import cfg
from oslo_service import service

from neutron.agent.common import config
from neutron.agent.dhcp import config as dhcp_config
@@ -24,7 +25,6 @@ from neutron.agent.linux import interface
from neutron.agent.metadata import config as metadata_config
from neutron.common import config as common_config
from neutron.common import topics
from neutron.openstack.common import service
from neutron import service as neutron_service


@@ -49,4 +49,4 @@ def main():
topic=topics.DHCP_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport')
service.launch(server).wait()
service.launch(cfg.CONF, server).wait()

+ 6
- 4
neutron/agent/l3/agent.py View File

@@ -18,6 +18,8 @@ import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
@@ -47,8 +49,6 @@ from neutron.common import topics
from neutron import context as n_context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task

try:
from neutron_fwaas.services.firewall.agents.l3reference \
@@ -339,7 +339,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_LW("Info for router %s was not found. "
"Skipping router removal"), router_id)
"Performing router cleanup"), router_id)
self.namespaces_manager.ensure_router_cleanup(router_id)
return

registry.notify(resources.ROUTER, events.BEFORE_DELETE,
@@ -595,7 +596,8 @@ class L3NATAgentWithStateReport(L3NATAgent):
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'interface_driver': self.conf.interface_driver,
'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = self.conf.AGENT.report_interval

+ 0
- 6
neutron/agent/l3/dvr.py View File

@@ -18,7 +18,6 @@ from oslo_log import log as logging

from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces

LOG = logging.getLogger(__name__)

@@ -50,11 +49,6 @@ class AgentMixin(object):

return fip_ns

def _destroy_fip_namespace(self, ns):
ex_net_id = namespaces.get_id_from_ns_name(ns)
fip_ns = self.get_fip_ns(ex_net_id)
fip_ns.delete()

def get_ports_by_subnet(self, subnet_id):
return self.plugin_rpc.get_ports_by_subnet(self.context, subnet_id)


+ 12
- 5
neutron/agent/l3/dvr_local_router.py View File

@@ -84,7 +84,9 @@ class DvrLocalRouter(router.RouterInfo):
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
ip_rule.rule.add(ip=fixed_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
#Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
@@ -114,7 +116,9 @@ class DvrLocalRouter(router.RouterInfo):
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
ip_rule.rule.delete(ip=floating_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
self.fip_ns.deallocate_rule_priority(rule_pr)
#TODO(rajeev): Handle else case - exception/log?

@@ -258,7 +262,9 @@ class DvrLocalRouter(router.RouterInfo):
if is_add:
ns_ipd.route.add_gateway(gw_ip_addr,
table=snat_idx)
ns_ipr.rule.add(sn_port_cidr, snat_idx, snat_idx)
ns_ipr.rule.add(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
ns_ipwrapr.netns.execute(
['sysctl', '-w',
'net.ipv4.conf.%s.send_redirects=0' % sn_int])
@@ -266,8 +272,9 @@ class DvrLocalRouter(router.RouterInfo):
self._delete_gateway_device_if_exists(ns_ipd,
gw_ip_addr,
snat_idx)
ns_ipr.rule.delete(sn_port_cidr, snat_idx,
snat_idx)
ns_ipr.rule.delete(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
break
except Exception:
if is_add:

+ 1
- 3
neutron/agent/l3/ha_router.py View File

@@ -191,9 +191,7 @@ class HaRouter(router.RouterInfo):
for gw_ip in gateway_ips:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
default_gw = (n_consts.IPv4_ANY if
netaddr.IPAddress(gw_ip).version == 4 else
n_consts.IPv6_ANY)
default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version]
instance = self._get_keepalived_instance()
default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
default_gw, gw_ip, interface_name))

+ 23
- 18
neutron/agent/l3/namespace_manager.py View File

@@ -81,24 +81,7 @@ class NamespaceManager(object):
_ns_prefix, ns_id = self.get_prefix_and_id(ns)
if ns_id in self._ids_to_keep:
continue
if _ns_prefix == namespaces.NS_PREFIX:
ns = namespaces.RouterNamespace(ns_id,
self.agent_conf,
self.driver,
use_ipv6=False)
else:
ns = dvr_snat_ns.SnatNamespace(ns_id,
self.agent_conf,
self.driver,
use_ipv6=False)
try:
if self.metadata_driver:
# cleanup stale metadata proxy processes first
self.metadata_driver.destroy_monitored_metadata_proxy(
self.process_monitor, ns_id, self.agent_conf)
ns.delete()
except RuntimeError:
LOG.exception(_LE('Failed to destroy stale namespace %s'), ns)
self._cleanup(_ns_prefix, ns_id)

return True

@@ -131,3 +114,25 @@ class NamespaceManager(object):
LOG.exception(_LE('RuntimeError in obtaining namespace list for '
'namespace cleanup.'))
return set()

def ensure_router_cleanup(self, router_id):
    """Remove any namespaces left behind for the given router."""
    stale = (ns for ns in self.list_all() if ns.endswith(router_id))
    for ns_name in stale:
        prefix, ns_id = self.get_prefix_and_id(ns_name)
        self._cleanup(prefix, ns_id)

def _cleanup(self, ns_prefix, ns_id):
    """Destroy the router/SNAT namespace identified by prefix and id.

    Any monitored metadata proxy for the namespace is stopped first so
    that no orphaned proxy processes are left behind.
    """
    is_router_ns = ns_prefix == namespaces.NS_PREFIX
    ns_class = (namespaces.RouterNamespace if is_router_ns
                else dvr_snat_ns.SnatNamespace)
    ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False)
    try:
        if self.metadata_driver:
            # cleanup stale metadata proxy processes first
            self.metadata_driver.destroy_monitored_metadata_proxy(
                self.process_monitor, ns_id, self.agent_conf)
        ns.delete()
    except RuntimeError:
        LOG.exception(_LE('Failed to destroy stale namespace %s'), ns)

+ 11
- 9
neutron/agent/l3/router_info.py View File

@@ -291,7 +291,8 @@ class RouterInfo(object):
prefix=prefix)

ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name)
self.driver.init_router_port(
interface_name, ip_cidrs, namespace=ns_name)
for fixed_ip in fixed_ips:
ip_lib.send_ip_addr_adv_notif(ns_name,
interface_name,
@@ -456,14 +457,15 @@ class RouterInfo(object):
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])

gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
self.driver.init_l3(interface_name,
ip_cidrs,
namespace=ns_name,
gateway_ips=gateway_ips,
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips,
enable_ra_on_gw=enable_ra_on_gw,
clean_connections=True)
self.driver.init_router_port(
interface_name,
ip_cidrs,
namespace=ns_name,
gateway_ips=gateway_ips,
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips,
enable_ra_on_gw=enable_ra_on_gw,
clean_connections=True)
for fixed_ip in ex_gw_port['fixed_ips']:
ip_lib.send_ip_addr_adv_notif(ns_name,
interface_name,

+ 2
- 2
neutron/agent/l3_agent.py View File

@@ -17,6 +17,7 @@
import sys

from oslo_config import cfg
from oslo_service import service

from neutron.agent.common import config
from neutron.agent.l3 import config as l3_config
@@ -26,7 +27,6 @@ from neutron.agent.linux import interface
from neutron.agent.metadata import config as metadata_config
from neutron.common import config as common_config
from neutron.common import topics
from neutron.openstack.common import service
from neutron import service as neutron_service


@@ -51,4 +51,4 @@ def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'):
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager=manager)
service.launch(server).wait()
service.launch(cfg.CONF, server).wait()

+ 42
- 1
neutron/agent/linux/dhcp.py View File

@@ -434,6 +434,44 @@ class Dnsmasq(DhcpLocalProcess):
LOG.debug('Reloading allocations for network: %s', self.network.id)
self.device_manager.update(self.network, self.interface_name)

def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
"""Sort fixed_ips so that stateless IPv6 subnets appear first.

For example, If a port with v6 extra_dhcp_opts is on a network with
IPv4 and IPv6 stateless subnets. Then dhcp host file will have
below 2 entries for same MAC,

fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for IPv4 dhcp)
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for stateless IPv6 for v6 options)

dnsmasq internal details for processing host file entries
1) dnsmaq reads the host file from EOF.
2) So it first picks up stateless IPv6 entry,
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
3) But dnsmasq doesn't have sufficient checks to skip this entry and
pick next entry, to process dhcp IPv4 request.
4) So dnsmaq uses this this entry to process dhcp IPv4 request.
5) As there is no ip in this entry, dnsmaq logs "no address available"
and fails to send DHCPOFFER message.

As we rely on internal details of dnsmasq to understand and fix the
issue, Ihar sent a mail to dnsmasq-discuss mailing list
http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
009650.html

So If we reverse the order of writing entries in host file,
so that entry for stateless IPv6 comes first,
then dnsmasq can correctly fetch the IPv4 address.
"""
return sorted(
fixed_ips,
key=lambda fip: ((fip.subnet_id in v6_nets) and (
v6_nets[fip.subnet_id].ipv6_address_mode == (
constants.DHCPV6_STATELESS))),
reverse=True)

def _iter_hosts(self):
"""Iterate over hosts.

@@ -449,8 +487,11 @@ class Dnsmasq(DhcpLocalProcess):
"""
v6_nets = dict((subnet.id, subnet) for subnet in
self.network.subnets if subnet.ip_version == 6)

for port in self.network.ports:
for alloc in port.fixed_ips:
fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
v6_nets)
for alloc in fixed_ips:
# Note(scollins) Only create entries that are
# associated with the subnet being managed by this
# dhcp agent

+ 32
- 4
neutron/agent/linux/interface.py View File

@@ -78,14 +78,13 @@ class LinuxInterfaceDriver(object):
self.conf = conf

def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=[], gateway_ips=None, extra_subnets=[],
enable_ra_on_gw=False, clean_connections=False):
preserve_ips=[], gateway_ips=None,
clean_connections=False):
"""Set the L3 settings for the interface using data from the port.

ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
gateway_ips: For gateway ports, list of external gateway ip addresses
enable_ra_on_gw: Boolean to indicate configuring acceptance of IPv6 RA
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
"""
@@ -123,10 +122,39 @@ class LinuxInterfaceDriver(object):
for gateway_ip in gateway_ips or []:
device.route.add_gateway(gateway_ip)

def init_router_port(self,
device_name,
ip_cidrs,
namespace,
preserve_ips=None,
gateway_ips=None,
extra_subnets=None,
enable_ra_on_gw=False,
clean_connections=False):
"""Set the L3 settings for a router interface using data from the port.

ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
gateway_ips: For gateway ports, list of external gateway ip addresses
enable_ra_on_gw: Boolean to indicate configuring acceptance of IPv6 RA
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
extra_subnets: An iterable of cidrs to add as routes without address
"""
self.init_l3(device_name=device_name,
ip_cidrs=ip_cidrs,
namespace=namespace,
preserve_ips=preserve_ips or [],
gateway_ips=gateway_ips,
clean_connections=clean_connections)

if enable_ra_on_gw:
self.configure_ipv6_ra(namespace, device_name)

new_onlink_routes = set(s['cidr'] for s in extra_subnets)
device = ip_lib.IPDevice(device_name, namespace=namespace)

# Manage on-link routes (routes without an associated address)
new_onlink_routes = set(s['cidr'] for s in extra_subnets or [])
existing_onlink_routes = set(
device.route.list_onlink_routes(n_const.IP_VERSION_4) +
device.route.list_onlink_routes(n_const.IP_VERSION_6))

+ 53
- 17
neutron/agent/linux/ip_lib.py View File

@@ -22,6 +22,7 @@ from oslo_utils import excutils
import re

from neutron.agent.common import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.i18n import _LE

@@ -281,30 +282,54 @@ class IPRule(SubProcessBase):
class IpRuleCommand(IpCommandBase):
COMMAND = 'rule'

def _exists(self, ip, ip_version, table, rule_pr):
# Typical rule from 'ip rule show':
def _parse_line(self, ip_version, line):
# Typical rules from 'ip rule show':
# 4030201: from 1.2.3.4/24 lookup 10203040
# 1024: from all iif qg-c43b1928-48 lookup noscope

rule_pr = str(rule_pr) + ":"
for line in self._as_root([ip_version], ['show']).splitlines():
parts = line.split()
if parts and (parts[0] == rule_pr and
parts[2] == str(ip) and
parts[-1] == str(table)):
return True
parts = line.split()
if not parts:
return {}

return False
# Format of line is: "priority: <key> <value> ..."
settings = {k: v for k, v in zip(parts[1::2], parts[2::2])}
settings['priority'] = parts[0][:-1]

# Canonicalize some arguments
if settings.get('from') == "all":
settings['from'] = constants.IP_ANY[ip_version]
if 'lookup' in settings:
settings['table'] = settings.pop('lookup')

def add(self, ip, table, rule_pr):
return settings

def _exists(self, ip_version, **kwargs):
    """Return True if an 'ip rule' matching kwargs already exists.

    kwargs values are stringified before comparison because
    _parse_line yields string-valued settings.
    """
    wanted = {key: str(value) for key, value in kwargs.items()}
    output = self._as_root([ip_version], ['show'])
    return any(self._parse_line(ip_version, line) == wanted
               for line in output.splitlines())

def _make__flat_args_tuple(self, *args, **kwargs):
for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]):
args += kwargs_item
return tuple(args)

def add(self, ip, **kwargs):
ip_version = get_ip_version(ip)
if not self._exists(ip, ip_version, table, rule_pr):
args = ['add', 'from', ip, 'table', table, 'priority', rule_pr]
self._as_root([ip_version], tuple(args))

def delete(self, ip, table, rule_pr):
kwargs.update({'from': ip})

if not self._exists(ip_version, **kwargs):
args_tuple = self._make__flat_args_tuple('add', **kwargs)
self._as_root([ip_version], args_tuple)

def delete(self, ip, **kwargs):
ip_version = get_ip_version(ip)
args = ['del', 'table', table, 'priority', rule_pr]
self._as_root([ip_version], tuple(args))

# TODO(Carl) ip ignored in delete, okay in general?

args_tuple = self._make__flat_args_tuple('del', **kwargs)
self._as_root([ip_version], args_tuple)


class IpDeviceCommandBase(IpCommandBase):
@@ -634,6 +659,17 @@ class IpNeighCommand(IpDeviceCommandBase):
('show',
'dev', self.name))

def flush(self, ip_version, ip_address):
    """Remove matching entries from the neighbour cache (ARP or NDP).

    Pass the string 'all' as ip_address to flush every entry.

    :param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
    :param ip_address: The prefix selecting the neighbours to flush
    """
    flush_args = ('flush', 'to', ip_address)
    self._as_root([ip_version], flush_args)


class IpNetnsCommand(IpCommandBase):
COMMAND = 'netns'

+ 2
- 1
neutron/agent/metadata/agent.py View File

@@ -20,6 +20,7 @@ from neutronclient.v2_0 import client
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
import six.moves.urllib.parse as urlparse
import webob
@@ -34,7 +35,6 @@ from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LW
from neutron.openstack.common.cache import cache
from neutron.openstack.common import loopingcall

LOG = logging.getLogger(__name__)

@@ -289,6 +289,7 @@ class UnixDomainMetadataProxy(object):
'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
'nova_metadata_port': cfg.CONF.nova_metadata_port,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats,
},
'start_flag': True,
'agent_type': n_const.AGENT_TYPE_METADATA}

+ 28
- 10
neutron/agent/ovsdb/native/commands.py View File

@@ -351,24 +351,42 @@ class PortToBridgeCommand(BaseCommand):
class DbListCommand(BaseCommand):
def __init__(self, api, table, records, columns, if_exists):
super(DbListCommand, self).__init__(api)
self.requested_info = {'records': records, 'columns': columns,
'table': table}
self.table = self.api._tables[table]
self.columns = columns or self.table.columns.keys() + ['_uuid']
self.if_exists = if_exists
if records:
self.records = [
idlutils.row_by_record(self.api.idl, table, record).uuid
for record in records]
self.records = []
for record in records:
try:
self.records.append(idlutils.row_by_record(
self.api.idl, table, record).uuid)
except idlutils.RowNotFound:
if self.if_exists:
continue
raise
else:
self.records = self.table.rows.keys()

def run_idl(self, txn):
self.result = [
{
c: idlutils.get_column_value(self.table.rows[uuid], c)
for c in self.columns
}
for uuid in self.records
]
try:
self.result = [
{
c: idlutils.get_column_value(self.table.rows[uuid], c)
for c in self.columns
if not self.if_exists or uuid in self.table.rows
}
for uuid in self.records
]
except KeyError:
# NOTE(kevinbenton): this is converted to a RuntimeError for compat
# with the vsctl version. It might make more sense to change this
# to a RowNotFoundError in the future.
raise RuntimeError(_LE(
"Row removed from DB during listing. Request info: "
"Table=%(table)s. Columns=%(columns)s. "
"Records=%(records)s.") % self.requested_info)


class DbFindCommand(BaseCommand):

+ 6
- 0
neutron/agent/rpc.py View File

@@ -18,6 +18,7 @@ import itertools
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils

from neutron.common import constants
from neutron.common import rpc as n_rpc
@@ -72,6 +73,11 @@ class PluginReportStateAPI(object):

def report_state(self, context, agent_state, use_call=False):
cctxt = self.client.prepare()
# add unique identifier to a report
# that can be logged on server side.
# This create visible correspondence between events on
# the agent and on the server
agent_state['uuid'] = uuidutils.generate_uuid()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': timeutils.strtime(),

+ 5
- 5
neutron/api/api_common.py View File

@@ -14,11 +14,11 @@
# under the License.

import functools
import urllib

from oslo_config import cfg
from oslo_log import log as logging
import six
from six.moves.urllib import parse
from webob import exc

from neutron.common import constants
@@ -60,7 +60,7 @@ def get_previous_link(request, items, id_key):
marker = items[0][id_key]
params['marker'] = marker
params['page_reverse'] = True
return "%s?%s" % (request.path_url, urllib.urlencode(params))
return "%s?%s" % (request.path_url, parse.urlencode(params))


def get_next_link(request, items, id_key):
@@ -70,7 +70,7 @@ def get_next_link(request, items, id_key):
marker = items[-1][id_key]
params['marker'] = marker
params.pop('page_reverse', None)
return "%s?%s" % (request.path_url, urllib.urlencode(params))
return "%s?%s" % (request.path_url, parse.urlencode(params))


def get_limit_and_marker(request):
@@ -147,8 +147,8 @@ def get_sorts(request, attr_info):
'asc': constants.SORT_DIRECTION_ASC,
'desc': constants.SORT_DIRECTION_DESC})
raise exc.HTTPBadRequest(explanation=msg)
return zip(sort_keys,
[x == constants.SORT_DIRECTION_ASC for x in sort_dirs])
return list(zip(sort_keys,
[x == constants.SORT_DIRECTION_ASC for x in sort_dirs]))


def get_page_reverse(request):

+ 1
- 1
neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py View File

@@ -168,7 +168,7 @@ class DhcpAgentNotifyAPI(object):
# data is {'key' : 'value'} with only one key
if method_name not in self.VALID_METHOD_NAMES:
return
obj_type = data.keys()[0]
obj_type = list(data.keys())[0]
if obj_type not in self.VALID_RESOURCES:
return
obj_value = data[obj_type]

+ 2
- 1
neutron/api/v2/base.py View File

@@ -391,7 +391,8 @@ class Controller(object):
self._notifier.info(request.context,
self._resource + '.create.start',
body)
body = Controller.prepare_request_body(request.context, body, True,
body = Controller.prepare_request_body(request.context,
copy.deepcopy(body), True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]

+ 7
- 1
neutron/api/v2/resource_helper.py View File

@@ -14,6 +14,7 @@
# under the License.

from oslo_config import cfg
from oslo_log import log as logging

from neutron.api import extensions
from neutron.api.v2 import base
@@ -21,6 +22,8 @@ from neutron import manager
from neutron.plugins.common import constants
from neutron import quota

LOG = logging.getLogger(__name__)


def build_plural_mappings(special_mappings, resource_map):
"""Create plural to singular mapping for all resources.
@@ -68,6 +71,9 @@ def build_resource_info(plural_mappings, resource_map, which_service,
plugin = manager.NeutronManager.get_service_plugins()[which_service]
else:
plugin = manager.NeutronManager.get_plugin()
path_prefix = getattr(plugin, "path_prefix", "")
LOG.debug('Service %(service)s assigned prefix: %(prefix)s'
% {'service': which_service, 'prefix': path_prefix})
for collection_name in resource_map:
resource_name = plural_mappings[collection_name]
params = resource_map.get(collection_name, {})
@@ -85,7 +91,7 @@ def build_resource_info(plural_mappings, resource_map, which_service,
resource = extensions.ResourceExtension(
collection_name,
controller,
path_prefix=constants.COMMON_PREFIXES[which_service],
path_prefix=path_prefix,
member_actions=member_actions,
attr_map=params)
resources.append(resource)

+ 1
- 0
neutron/common/constants.py View File

@@ -74,6 +74,7 @@ IPv6_BITS = 128

IPv4_ANY = '0.0.0.0/0'
IPv6_ANY = '::/0'
IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY}

DHCP_RESPONSE_PORT = 68


+ 1
- 1
neutron/common/rpc.py View File

@@ -18,10 +18,10 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging import serializer as om_serializer
from oslo_service import service

from neutron.common import exceptions
from neutron import context
from neutron.openstack.common import service

<