
Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: Ic237c09131f6579f3df1a3a10ba1e5f7a3d42bde
Tag: tags/7.0.0.0rc1
Author: Kyle Mestery, 4 years ago
Commit: a7b91632fc
100 changed files with 2264 additions and 2228 deletions
  1. +102 -85  TESTING.rst
  2. +25 -3  doc/source/devref/client_command_extensions.rst
  3. +24 -1  doc/source/devref/contribute.rst
  4. +10 -0  doc/source/devref/development.environment.rst
  5. +31 -12  doc/source/devref/fullstack_testing.rst
  6. +9 -0  doc/source/devref/index.rst
  7. +23 -0  doc/source/devref/neutron_api.rst
  8. +54 -31  doc/source/devref/sub_projects.rst
  9. +11 -10  doc/source/policies/thirdparty-ci.rst
  10. +5 -0  etc/dhcp_agent.ini
  11. +0 -0  etc/neutron.conf
  12. +0 -6  etc/neutron/plugins/midonet/midonet.ini
  13. +6 -2  etc/neutron/plugins/ml2/linuxbridge_agent.ini
  14. +0 -100  etc/neutron/plugins/ml2/ml2_conf_arista.ini
  15. +1 -1  etc/neutron/plugins/vmware/nsx.ini
  16. +23 -31  neutron/agent/common/ovs_lib.py
  17. +5 -0  neutron/agent/dhcp/config.py
  18. +9 -0  neutron/agent/firewall.py
  19. +6 -0  neutron/agent/l3/agent.py
  20. +3 -0  neutron/agent/l3/dvr_local_router.py
  21. +104 -0  neutron/agent/l3/item_allocator.py
  22. +11 -70  neutron/agent/l3/link_local_allocator.py
  23. +3 -0  neutron/agent/l3/namespace_manager.py
  24. +20 -1  neutron/agent/linux/dhcp.py
  25. +89 -0  neutron/agent/linux/ip_conntrack.py
  26. +8 -0  neutron/agent/linux/ip_lib.py
  27. +95 -0  neutron/agent/linux/iptables_firewall.py
  28. +1 -1  neutron/agent/rpc.py
  29. +8 -3  neutron/agent/securitygroups_rpc.py
  30. +6 -0  neutron/api/rpc/handlers/dhcp_rpc.py
  31. +64 -16  neutron/api/rpc/handlers/l3_rpc.py
  32. +13 -1  neutron/api/v2/attributes.py
  33. +4 -1  neutron/callbacks/exceptions.py
  34. +4 -2  neutron/cmd/netns_cleanup.py
  35. +9 -5  neutron/cmd/sanity/checks.py
  36. +8 -0  neutron/common/constants.py
  37. +15 -0  neutron/common/exceptions.py
  38. +7 -0  neutron/common/ipv6_utils.py
  39. +4 -0  neutron/common/utils.py
  40. +17 -0  neutron/db/address_scope_db.py
  41. +1 -1  neutron/db/agentschedulers_db.py
  42. +8 -1  neutron/db/db_base_plugin_common.py
  43. +153 -5  neutron/db/db_base_plugin_v2.py
  44. +1 -1  neutron/db/flavors_db.py
  45. +14 -9  neutron/db/ipam_backend_mixin.py
  46. +5 -3  neutron/db/ipam_non_pluggable_backend.py
  47. +2 -2  neutron/db/ipam_pluggable_backend.py
  48. +12 -3  neutron/db/l3_db.py
  49. +4 -4  neutron/db/l3_dvr_db.py
  50. +39 -3  neutron/db/l3_hamode_db.py
  51. +4 -0  neutron/db/migration/alembic_migrations/external.py
  52. +2 -2  neutron/db/migration/alembic_migrations/versions/HEADS
  53. +36 -0  neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py
  54. +2 -2  neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py
  55. +35 -0  neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py
  56. +0 -1  neutron/db/migration/models/head.py
  57. +2 -0  neutron/db/models_v2.py
  58. +4 -5  neutron/db/securitygroups_db.py
  59. +14 -0  neutron/db/securitygroups_rpc_base.py
  60. +12 -4  neutron/extensions/address_scope.py
  61. +4 -0  neutron/extensions/l3.py
  62. +2 -1  neutron/extensions/portbindings.py
  63. +24 -4  neutron/ipam/subnet_alloc.py
  64. +4 -4  neutron/locale/de/LC_MESSAGES/neutron-log-info.po
  65. +177 -4  neutron/locale/es/LC_MESSAGES/neutron-log-info.po
  66. +18 -14  neutron/locale/fr/LC_MESSAGES/neutron-log-info.po
  67. +4 -4  neutron/locale/it/LC_MESSAGES/neutron-log-info.po
  68. +5 -5  neutron/locale/ja/LC_MESSAGES/neutron-log-info.po
  69. +4 -4  neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po
  70. +5 -5  neutron/locale/neutron-log-critical.pot
  71. +144 -133  neutron/locale/neutron-log-error.pot
  72. +105 -116  neutron/locale/neutron-log-info.pot
  73. +68 -56  neutron/locale/neutron-log-warning.pot
  74. +291 -336  neutron/locale/neutron.pot
  75. +4 -12  neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po
  76. +4 -16  neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po
  77. +4 -4  neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po
  78. +18 -1  neutron/plugins/cisco/n1kv/n1kv_client.py
  79. +5 -0  neutron/plugins/ml2/common/exceptions.py
  80. +0 -12  neutron/plugins/ml2/drivers/arista/README
  81. +0 -128  neutron/plugins/ml2/drivers/arista/config.py
  82. +0 -80  neutron/plugins/ml2/drivers/arista/db.py
  83. +0 -35  neutron/plugins/ml2/drivers/arista/exceptions.py
  84. +0 -470  neutron/plugins/ml2/drivers/arista/mechanism_arista.py
  85. +0 -0  neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py
  86. +5 -1  neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py
  87. +47 -7  neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
  88. +24 -0  neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py
  89. +16 -0  neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py
  90. +14 -3  neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
  91. +2 -2  neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
  92. +22 -20  neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
  93. +24 -16  neutron/plugins/ml2/managers.py
  94. +14 -7  neutron/plugins/ml2/plugin.py
  95. +7 -8  neutron/quota/resource.py
  96. +2 -2  neutron/scheduler/dhcp_agent_scheduler.py
  97. +10 -10  neutron/scheduler/l3_agent_scheduler.py
  98. +0 -0  neutron/server/__init__.py
  99. +0 -280  neutron/services/l3_router/l3_arista.py
  100. +0 -0  neutron/services/l3_router/l3_router_plugin.py

+102 -85  TESTING.rst

@@ -1,13 +1,36 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Testing Neutron
=============================================================
===============

Overview
--------

Neutron relies on different types of testing to ensure its quality, as
described below. In addition to in-tree testing, `Tempest`_ is
Neutron relies on unit, functional, fullstack and API tests to ensure its
quality, as described below. In addition to in-tree testing, `Tempest`_ is
responsible for validating Neutron's integration with other OpenStack
components, and `Rally`_ is responsible for benchmarking.
components via scenario tests, and `Rally`_ is responsible for benchmarking.

.. _Tempest: http://docs.openstack.org/developer/tempest/
.. _Rally: http://rally.readthedocs.org/en/latest/
@@ -16,45 +39,59 @@ Unit Tests
~~~~~~~~~~

Unit tests (neutron/test/unit/) are meant to cover as much code as
possible and should be executed without the service running. They are
designed to test the various pieces of the neutron tree to make sure
any new changes don't break existing functionality.
possible. They are designed to test the various pieces of the Neutron tree to
make sure any new changes don't break existing functionality. Unit tests have
no requirements nor make changes to the system they are running on. They use
an in-memory sqlite database to test DB interaction.

Functional Tests
~~~~~~~~~~~~~~~~

Functional tests (neutron/tests/functional/) are intended to
validate actual system interaction. Mocks should be used sparingly,
if at all. Care should be taken to ensure that existing system
validate actual system interaction. Mocks should be used sparingly,
if at all. Care should be taken to ensure that existing system
resources are not modified and that resources created in tests are
properly cleaned up.
properly cleaned up both on test success and failure.

Fullstack Tests
~~~~~~~~~~~~~~~

Fullstack tests (neutron/tests/fullstack/) target Neutron as a whole.
The test infrastructure itself manages the Neutron server and its agents.
Fullstack tests are a form of integration testing and fill a void between
unit/functional tests and Tempest. More information may be found
`here. <fullstack_testing.html>`_

API Tests
~~~~~~~~~

API tests (neutron/tests/api/) are intended to ensure the function
and stability of the Neutron API. As much as possible, changes to
and stability of the Neutron API. As much as possible, changes to
this path should not be made at the same time as changes to the code
to limit the potential for introducing backwards-incompatible changes.

Since API tests need to be able to target a deployed Neutron daemon
that is not necessarily test-managed, they should not depend on
controlling the runtime configuration of the target daemon. API tests
should be black-box - no assumptions should be made about
implementation. Only the contract defined by Neutron's REST API
to limit the potential for introducing backwards-incompatible changes,
although the same patch that introduces a new API should include an API
test.

Since API tests target a deployed Neutron daemon that is not test-managed,
they should not depend on controlling the runtime configuration
of the target daemon. API tests should be black-box - no assumptions should
be made about implementation. Only the contract defined by Neutron's REST API
should be validated, and all interaction with the daemon should be via
a REST client.

Development process
neutron/tests/api was copied from the Tempest project. The Tempest networking
API directory was frozen and any new tests belong to the Neutron repository.

Development Process
-------------------

It is expected that any new changes that are proposed for merge
come with tests for that feature or code area. Ideally any bugs
fixes that are submitted also have tests to prove that they stay
fixed! In addition, before proposing for merge, all of the
fixed! In addition, before proposing for merge, all of the
current tests should be passing.

Structure of the unit test tree
Structure of the Unit Test Tree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The structure of the unit test tree should match the structure of the
@@ -66,7 +103,7 @@ code tree, e.g. ::

Unit test modules should have the same path under neutron/tests/unit/
as the module they target has under neutron/, and their name should be
the name of the target module prefixed by `test_`. This requirement
the name of the target module prefixed by `test_`. This requirement
is intended to make it easier for developers to find the unit tests
for a given module.

@@ -83,26 +120,11 @@ tree is structured according to the above requirements: ::

./tools/check_unit_test_structure.sh

Where appropriate, exceptions can be added to the above script. If
code is not part of the neutron namespace, for example, it's probably
Where appropriate, exceptions can be added to the above script. If
code is not part of the Neutron namespace, for example, it's probably
reasonable to exclude their unit tests from the check.

Virtual environments
~~~~~~~~~~~~~~~~~~~~

Testing OpenStack projects, including Neutron, is made easier with `DevStack <https://git.openstack.org/cgit/openstack-dev/devstack>`_.

Create a machine (such as a VM or Vagrant box) running a distribution supported
by DevStack and install DevStack there. For example, there is a Vagrant script
for DevStack at https://github.com/bcwaldon/vagrant_devstack.

.. note::

If you prefer not to use DevStack, you can still check out source code on your local
machine and develop from there.


Running tests
Running Tests
-------------

There are three mechanisms for running tests: run_tests.sh, tox,
@@ -143,16 +165,11 @@ some rough edges when it comes to diagnosing errors and failures, and there is
no easy way to set a breakpoint in the Neutron code, and enter an
interactive debugging session while using testr.

It is also possible to use nose2's predecessor, `nose`_, to run the tests::

source .venv/bin/activate
pip install nose
nosetests

nose has one additional disadvantage over nose2 - it does not
understand the `load_tests protocol`_ introduced in Python 2.7. This
limitation will result in errors being reported for modules that
depend on load_tests (usually due to use of `testscenarios`_).
Note that nose2's predecessor, `nose`_, does not understand
`load_tests protocol`_ introduced in Python 2.7. This limitation will result in
errors being reported for modules that depend on load_tests
(usually due to use of `testscenarios`_). nose, therefore, is not supported,
while nose2 is.

.. _nose2: http://nose2.readthedocs.org/en/latest/index.html
.. _nose: https://nose.readthedocs.org/en/latest/index.html
@@ -167,7 +184,7 @@ environments for running test cases. It uses `Testr`_ for managing the running
of the test cases.

Tox handles the creation of a series of `virtualenvs`_ that target specific
versions of Python (2.6, 2.7, 3.3, etc).
versions of Python.

Testr handles the parallel execution of series of test cases as well as
the tracking of long-running tests and other things.
@@ -183,7 +200,7 @@ see this wiki page:
.. _virtualenvs: https://pypi.python.org/pypi/virtualenv

PEP8 and Unit Tests
===================
+++++++++++++++++++

Running pep8 and unit tests is as easy as executing this in the root
directory of the Neutron source code::
@@ -204,7 +221,7 @@ To run only the unit tests::
tox -e py27

Functional Tests
================
++++++++++++++++

To run functional tests that do not require sudo privileges or
specific-system dependencies::
@@ -215,23 +232,23 @@ To run all the functional tests, including those requiring sudo
privileges and system-specific dependencies, the procedure defined by
tools/configure_for_func_testing.sh should be followed.

IMPORTANT: configure_for_func_testing.sh relies on devstack to perform
extensive modification to the underlying host. Execution of the
IMPORTANT: configure_for_func_testing.sh relies on DevStack to perform
extensive modification to the underlying host. Execution of the
script requires sudo privileges and it is recommended that the
following commands be invoked only on a clean and disposeable VM. A
VM that has had devstack previously installed on it is also fine. ::
following commands be invoked only on a clean and disposeable VM.
A VM that has had DevStack previously installed on it is also fine. ::

git clone https://git.openstack.org/openstack-dev/devstack ../devstack
./tools/configure_for_func_testing.sh ../devstack -i
tox -e dsvm-functional

The '-i' option is optional and instructs the script to use devstack
to install and configure all of Neutron's package dependencies. It is
not necessary to provide this option if devstack has already been used
The '-i' option is optional and instructs the script to use DevStack
to install and configure all of Neutron's package dependencies. It is
not necessary to provide this option if DevStack has already been used
to deploy Neutron to the target host.

Fullstack Tests
===============
+++++++++++++++

To run all the full-stack tests, you may use: ::

@@ -239,7 +256,7 @@ To run all the full-stack tests, you may use: ::

Since full-stack tests often require the same resources and
dependencies as the functional tests, using the configuration script
tools/configure_for_func_testing.sh is advised (as described above).
tools/configure_for_func_testing.sh is advised (As described above).
When running full-stack tests on a clean VM for the first time, we
advise to run ./stack.sh successfully to make sure all Neutron's
dependencies are met. Full-stack based Neutron daemons produce logs to a
@@ -248,47 +265,47 @@ sub-folder in /tmp/fullstack-logs (for example, a test named
so that will be a good place to look if your test is failing.

API Tests
=========
+++++++++

To run the api tests, deploy tempest and neutron with devstack and
To run the api tests, deploy Tempest and Neutron with DevStack and
then run the following command: ::

tox -e api

If tempest.conf cannot be found at the default location used by
devstack (/opt/stack/tempest/etc) it may be necessary to set
DevStack (/opt/stack/tempest/etc) it may be necessary to set
TEMPEST_CONFIG_DIR before invoking tox: ::

export TEMPEST_CONFIG_DIR=[path to dir containing tempest.conf]
tox -e api


Running individual tests
------------------------
Running Individual Tests
~~~~~~~~~~~~~~~~~~~~~~~~

For running individual test modules or cases, you just need to pass
the dot-separated path to the module you want as an argument to it.
For running individual test modules, cases or tests, you just need to pass
the dot-separated path you want as an argument to it.

For executing a specific test case, specify the name of the test case
class separating it from the module path with a colon.
For example, the following would run only a single test or test case::

For example, the following would run only the JSONV2TestCase tests from
neutron/tests/unit/test_api_v2.py::

$ ./run_tests.sh neutron.tests.unit.test_api_v2.JSONV2TestCase
$ ./run_tests.sh neutron.tests.unit.test_manager
$ ./run_tests.sh neutron.tests.unit.test_manager.NeutronManagerTestCase
$ ./run_tests.sh neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded

or::

$ tox -e py27 neutron.tests.unit.test_api_v2.JSONV2TestCase
$ tox -e py27 neutron.tests.unit.test_manager
$ tox -e py27 neutron.tests.unit.test_manager.NeutronManagerTestCase
$ tox -e py27 neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded

Adding more tests
~~~~~~~~~~~~~~~~~
Coverage
--------

Neutron has a fast growing code base and there is plenty of areas that
need to be covered by unit and functional tests.
Neutron has a fast growing code base and there are plenty of areas that
need better coverage.

To get a grasp of the areas where tests are needed, you can check
current coverage by running::
current unit tests coverage by running::

$ ./run_tests.sh -c

@@ -296,7 +313,7 @@ Debugging
---------

By default, calls to pdb.set_trace() will be ignored when tests
are run. For pdb statements to work, invoke run_tests as follows::
are run. For pdb statements to work, invoke run_tests as follows::

$ ./run_tests.sh -d [test module path]

@@ -311,7 +328,7 @@ after a tox run and reused for debugging::
$ . .tox/venv/bin/activate
$ python -m testtools.run [test module path]

Tox packages and installs the neutron source tree in a given venv
Tox packages and installs the Neutron source tree in a given venv
on every invocation, but if modifications need to be made between
invocation (e.g. adding more pdb statements), it is recommended
that the source tree be installed in the venv in editable mode::
@@ -323,7 +340,7 @@ Editable mode ensures that changes made to the source tree are
automatically reflected in the venv, and that such changes are not
overwritten during the next tox run.

Post-mortem debugging
Post-mortem Debugging
~~~~~~~~~~~~~~~~~~~~~

Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure
@@ -341,7 +358,7 @@ with pdb::
$ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path]

References
==========
~~~~~~~~~~

.. [#pudb] PUDB debugger:
https://pypi.python.org/pypi/pudb

+25 -3  doc/source/devref/client_command_extensions.rst

@@ -1,9 +1,31 @@
=================================
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Client command extension support
=================================
================================

The client command extension adds support for extending the neutron client while
considering ease of creation.

The full document can be found in the python-neutronclient repository:
http://docs.openstack.org/developer/python-neutronclient/devref/client_command_extensions.html
http://docs.openstack.org/developer/python-neutronclient/devref/client_command_extensions.html

+24 -1  doc/source/devref/contribute.rst

@@ -1,3 +1,26 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Contributing new extensions to Neutron
======================================

@@ -546,6 +569,6 @@ Other repo-split items


Decomposition Phase II Progress Chart
=====================================
-------------------------------------

TBD.

+10 -0  doc/source/devref/development.environment.rst

@@ -15,6 +15,16 @@
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Setting Up a Development Environment
====================================


+31 -12  doc/source/devref/fullstack_testing.rst

@@ -1,9 +1,31 @@
==========================
Neutron Full Stack Testing
==========================
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Full Stack Testing
==================

Why?
====
----

The idea behind "fullstack" testing is to fill a gap between unit + functional
tests and Tempest. Tempest tests are expensive to run, difficult to run in
@@ -14,7 +36,7 @@ environment and provide a rapidly reproducible way to verify code as you're
still writing it.

How?
====
----

Full stack tests set up their own Neutron processes (Server & agents). They
assume a working Rabbit and MySQL server before the run starts. Instructions
@@ -44,7 +66,7 @@ interconnected.
.. image:: images/fullstack-multinode-simulation.png

When?
=====
-----

1) You'd like to test the interaction between Neutron components (Server
and agents) and have already tested each component in isolation via unit or
@@ -59,27 +81,24 @@ When?
agent during the test.

Short Term Goals
================
----------------

* Multinode & Stability:
- Interconnect the internal and external bridges
- Convert the L3 HA failover functional test to a full stack test
- Write a test for DHCP HA / Multiple DHCP agents per network
* Write DVR tests
* Write L3 HA tests
* Write additional L3 HA tests
* Write a test that validates L3 HA + l2pop integration after
https://bugs.launchpad.net/neutron/+bug/1365476 is fixed.
* Write a test that validates DVR + L3 HA integration after
https://bugs.launchpad.net/neutron/+bug/1365473 is fixed.

None of these tasks currently have owners. Feel free to send patches!

After these tests are merged, it should be fair to start asking contributors to
add full stack tests when appropriate in the patches themselves and not after
the fact as there will probably be something to copy/paste from.

Long Term Goals
===============
---------------

* Currently we configure the OVS agent with VLANs segmentation (Only because
it's easier). This allows us to validate most functionality, but we might

+9 -0  doc/source/devref/index.rst

@@ -15,6 +15,15 @@
License for the specific language governing permissions and limitations
under the License.

Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Developer Guide
===============


+23 -0  doc/source/devref/neutron_api.rst

@@ -1,3 +1,26 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Neutron public API
==================


+54 -31  doc/source/devref/sub_projects.rst

@@ -1,3 +1,26 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.


Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)


Official Sub-Projects
=====================

@@ -62,7 +85,7 @@ The official source of all repositories that exist under the Neutron project is:
http://governance.openstack.org/reference/projects/neutron.html

Affiliated projects
===================
~~~~~~~~~~~~~~~~~~~

This table shows the affiliated projects that integrate with Neutron,
in one form or another. These projects typically leverage the pluggable
@@ -131,7 +154,7 @@ repo but are summarized here to describe the functionality they provide.
+-------------------------------+-----------------------+

Functionality legend
--------------------
++++++++++++++++++++

- l2: a Layer 2 service;
- ml2: an ML2 mechanism driver;
@@ -145,7 +168,7 @@ Functionality legend
.. _networking-arista:

Arista
------
++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-arista
* Launchpad: https://launchpad.net/networking-arista
@@ -154,7 +177,7 @@ Arista
.. _networking-bagpipe-l2:

BaGPipe
-------
+++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-bagpipe-l2
* Launchpad: https://launchpad.net/bagpipe-l2
@@ -163,14 +186,14 @@ BaGPipe
.. _networking-bgpvpn:

BGPVPN
-------
++++++

* Git: https://git.openstack.org/cgit/openstack/networking-bgpvpn

.. _networking-bigswitch:

Big Switch Networks
-------------------
+++++++++++++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-bigswitch
* Pypi: https://pypi.python.org/pypi/bsnstacklib
@@ -178,7 +201,7 @@ Big Switch Networks
.. _networking-brocade:

Brocade
-------
+++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-brocade
* Launchpad: https://launchpad.net/networking-brocade
@@ -187,7 +210,7 @@ Brocade
.. _networking-cisco:

Cisco
-----
+++++

* Git: https://git.openstack.org/cgit/stackforge/networking-cisco
* Launchpad: https://launchpad.net/networking-cisco
@@ -196,7 +219,7 @@ Cisco
.. _dragonflow:

DragonFlow
----------
++++++++++

* Git: https://git.openstack.org/cgit/openstack/dragonflow
* Launchpad: https://launchpad.net/dragonflow
@@ -205,7 +228,7 @@ DragonFlow
.. _networking-edge-vpn:

Edge VPN
--------
++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-edge-vpn
* Launchpad: https://launchpad.net/edge-vpn
@@ -213,7 +236,7 @@ Edge VPN
.. _networking-fujitsu:

FUJITSU
-------
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-fujitsu
* Launchpad: https://launchpad.net/networking-fujitsu
@@ -222,7 +245,7 @@ FUJITSU
.. _networking-hyperv:

Hyper-V
-------
+++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-hyperv
* Launchpad: https://launchpad.net/networking-hyperv
@@ -231,7 +254,7 @@ Hyper-V
.. _group-based-policy:

Group Based Policy
------------------
++++++++++++++++++

* Git: https://git.openstack.org/cgit/stackforge/group-based-policy
* Launchpad: https://launchpad.net/group-based-policy
@@ -240,7 +263,7 @@ Group Based Policy
.. _networking-ibm:

IBM SDNVE
---------
+++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-ibm
* Launchpad: https://launchpad.net/networking-ibm
@@ -248,7 +271,7 @@ IBM SDNVE
.. _networking-l2gw:

L2 Gateway
----------
++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-l2gw
* Launchpad: https://launchpad.net/networking-l2gw
@@ -256,7 +279,7 @@ L2 Gateway
.. _networking-midonet:

MidoNet
-------
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-midonet
* Launchpad: https://launchpad.net/networking-midonet
@@ -265,7 +288,7 @@ MidoNet
.. _networking-mlnx:

Mellanox
--------
++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-mlnx
* Launchpad: https://launchpad.net/networking-mlnx
@@ -273,7 +296,7 @@ Mellanox
.. _networking-nec:

NEC
---
+++

* Git: https://git.openstack.org/cgit/stackforge/networking-nec
* Launchpad: https://launchpad.net/networking-nec
@@ -282,14 +305,14 @@ NEC
.. _nuage-openstack-neutron:

Nuage
-----
+++++

* Git: https://github.com/nuage-networks/nuage-openstack-neutron

.. _networking-odl:

OpenDayLight
------------
++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-odl
* Launchpad: https://launchpad.net/networking-odl
@@ -297,7 +320,7 @@ OpenDayLight
.. _networking-ofagent:

OpenFlow Agent (ofagent)
------------------------
++++++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-ofagent
* Launchpad: https://launchpad.net/networking-ofagent
@@ -306,7 +329,7 @@ OpenFlow Agent (ofagent)
.. _networking-ovn:

Open Virtual Network
--------------------
++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-ovn
* Launchpad: https://launchpad.net/networking-ovn
@@ -315,7 +338,7 @@ Open Virtual Network
.. _networking-ovs-dpdk:

Open DPDK
---------
+++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-ovs-dpdk
* Launchpad: https://launchpad.net/networking-ovs-dpdk
@@ -323,7 +346,7 @@ Open DPDK
.. _networking-plumgrid:

PLUMgrid
--------
++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-plumgrid
* Launchpad: https://launchpad.net/networking-plumgrid
@@ -332,7 +355,7 @@ PLUMgrid
.. _neutron-powervm:

PowerVM
-------
+++++++

* Git: https://git.openstack.org/cgit/stackforge/neutron-powervm
* Launchpad: https://launchpad.net/neutron-powervm
@@ -341,7 +364,7 @@ PowerVM
.. _networking-portforwarding:

PortForwarding
--------------
++++++++++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-portforwarding
* Launchpad: https://launchpad.net/networking-portforwarding
@@ -349,22 +372,22 @@ PortForwarding
.. _networking-sfc:

SFC
---
+++

* Git: https://git.openstack.org/cgit/openstack/networking-sfc

.. _networking-vsphere:

vSphere
-------
+++++++

* Git: https://git.openstack.org/cgit/stackforge/networking-vsphere
* Git: https://git.openstack.org/cgit/openstack/networking-vsphere
* Launchpad: https://launchpad.net/networking-vsphere

.. _vmware-nsx:

VMware NSX
----------
++++++++++

* Git: https://git.openstack.org/cgit/openstack/vmware-nsx
* Launchpad: https://launchpad.net/vmware-nsx
@@ -373,7 +396,7 @@ VMware NSX
.. _octavia:

Octavia
-------
+++++++

* Git: https://git.openstack.org/cgit/openstack/octavia
* Launchpad: https://launchpad.net/octavia

+11 -10  doc/source/policies/thirdparty-ci.rst

@@ -111,16 +111,17 @@ running.

At the root of the results - there should be the following:

console.html.gz - contains the output of stdout of the test run
local.conf / localrc - contains the setup used for this run
logs/
Logs must be a directory, which contains the following:

Log files for each screen session that DevStack creates and launches an
OpenStack component in
Test result files
testr_results.html.gz
tempest.txt.gz
* console.html.gz - contains the output of stdout of the test run
* local.conf / localrc - contains the setup used for this run
* logs - contains the output of detail test log of the test run

The above "logs" must be a directory, which contains the following:

* Log files for each screen session that DevStack creates and launches an
OpenStack component in
* Test result files
* testr_results.html.gz
* tempest.txt.gz

List of existing plugins and drivers
------------------------------------

+5 -0  etc/dhcp_agent.ini

@@ -68,6 +68,11 @@
# as forwarders.
# dnsmasq_dns_servers =

# Base log dir for dnsmasq logging. The log contains DHCP and DNS log
# information and is useful for debugging issues with either DHCP or DNS.
# If this section is null, disable dnsmasq log.
# dnsmasq_base_log_dir =

# Limit number of leases to prevent a denial-of-service.
# dnsmasq_lease_max = 16777216


+0 -0  etc/neutron.conf


+0 -6  etc/neutron/plugins/midonet/midonet.ini

@@ -11,9 +11,3 @@

# ID of the project that MidoNet admin user belongs to
# project_id = 77777777-7777-7777-7777-777777777777

# Virtual provider router ID
# provider_router_id = 00112233-0011-0011-0011-001122334455

# Path to midonet host uuid file
# midonet_host_uuid_path = /etc/midolman/host_uuid.properties

+6 -2  etc/neutron/plugins/ml2/linuxbridge_agent.ini

@@ -21,8 +21,12 @@
# (IntOpt) use specific TOS for vxlan interface protocol packets
# tos =
#
# (StrOpt) multicast group to use for broadcast emulation.
# This group must be the same on all the agents.
# (StrOpt) multicast group or group range to use for broadcast emulation.
# Specifying a range allows different VNIs to use different group addresses,
# reducing or eliminating spurious broadcast traffic to the tunnel endpoints.
# Ranges are specified by using CIDR notation. To reserve a unique group for
# each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8.
# This setting must be the same on all the agents.
# vxlan_group = 224.0.0.1
#
# (StrOpt) Local IP address to use for VXLAN endpoints (required)

+0 -100  etc/neutron/plugins/ml2/ml2_conf_arista.ini

@@ -1,100 +0,0 @@
# Defines configuration options specific for Arista ML2 Mechanism driver

[ml2_arista]
# (StrOpt) EOS IP address. This is required field. If not set, all
# communications to Arista EOS will fail
#
# eapi_host =
# Example: eapi_host = 192.168.0.1
#
# (StrOpt) EOS command API username. This is required field.
# if not set, all communications to Arista EOS will fail.
#
# eapi_username =
# Example: arista_eapi_username = admin
#
# (StrOpt) EOS command API password. This is required field.
# if not set, all communications to Arista EOS will fail.
#
# eapi_password =
# Example: eapi_password = my_password
#
# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
# ("node1.domain.com") or as short names ("node1"). This is
# optional. If not set, a value of "True" is assumed.
#
# use_fqdn =
# Example: use_fqdn = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# sync_interval =
# Example: sync_interval = 60
#
# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
# This is useful when multiple OpenStack/Neutron controllers are
# managing the same Arista HW clusters. Note that this name must
# match with the region name registered (or known) to keystone
# service. Authentication with Keysotne is performed by EOS.
# This is optional. If not set, a value of "RegionOne" is assumed.
#
# region_name =
# Example: region_name = RegionOne


[l3_arista]

# (StrOpt) primary host IP address. This is required field. If not set, all
# communications to Arista EOS will fail. This is the host where
# primary router is created.
#
# primary_l3_host =
# Example: primary_l3_host = 192.168.10.10
#
# (StrOpt) Primary host username. This is required field.
# if not set, all communications to Arista EOS will fail.
#
# primary_l3_host_username =
# Example: arista_primary_l3_username = admin
#
# (StrOpt) Primary host password. This is required field.
# if not set, all communications to Arista EOS will fail.
#
# primary_l3_host_password =
# Example: primary_l3_password = my_password
#
# (StrOpt) IP address of the second Arista switch paired as
# MLAG (Multi-chassis Link Aggregation) with the first.
# This is optional field, however, if mlag_config flag is set,
# then this is a required field. If not set, all
# communications to Arista EOS will fail. If mlag_config is set
# to False, then this field is ignored
#
# seconadary_l3_host =
# Example: seconadary_l3_host = 192.168.10.20
#
# (BoolOpt) Defines if Arista switches are configured in MLAG mode
# If yes, all L3 configuration is pushed to both switches
# automatically. If this flag is set, ensure that secondary_l3_host
# is set to the second switch's IP.
# This flag is Optional. If not set, a value of "False" is assumed.
#
# mlag_config =
# Example: mlag_config = True
#
# (BoolOpt) Defines if the router is created in default VRF or a
# a specific VRF. This is optional.
# If not set, a value of "False" is assumed.
#
# Example: use_vrf = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# l3_sync_interval =
# Example: l3_sync_interval = 60

+1 -1  etc/neutron/plugins/vmware/nsx.ini

@@ -39,7 +39,7 @@
# Name of the default interface name to be used on network-gateway. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified
# default_interface_name = breth0
# nsx_default_interface_name = breth0

# Reconnect connection to nsx if not used within this amount of time.
# conn_idle_timeout = 900

+23 -31  neutron/agent/common/ovs_lib.py

@@ -141,12 +141,6 @@ class BaseOVS(object):
return self.ovsdb.db_get(table, record, column).execute(
check_error=check_error, log_errors=log_errors)

def db_list(self, table, records=None, columns=None,
check_error=True, log_errors=True, if_exists=False):
return (self.ovsdb.db_list(table, records=records, columns=columns,
if_exists=if_exists).
execute(check_error=check_error, log_errors=log_errors))


class OVSBridge(BaseOVS):
def __init__(self, br_name):
@@ -326,20 +320,24 @@ class OVSBridge(BaseOVS):
"Exception: %(exception)s"),
{'cmd': args, 'exception': e})

def get_ports_attributes(self, table, columns=None, ports=None,
check_error=True, log_errors=True,
if_exists=False):
port_names = ports or self.get_port_name_list()
return (self.ovsdb.db_list(table, port_names, columns=columns,
if_exists=if_exists).
execute(check_error=check_error, log_errors=log_errors))

# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
port_info = self.db_list(
'Interface', columns=['name', 'external_ids', 'ofport'])
by_name = {x['name']: x for x in port_info}
for name in port_names:
if not by_name.get(name):
#NOTE(dprince): some ports (like bonds) won't have all
# these attributes so we skip them entirely
continue
external_ids = by_name[name]['external_ids']
ofport = by_name[name]['ofport']
port_info = self.get_ports_attributes(
'Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)
for port in port_info:
name = port['name']
external_ids = port['external_ids']
ofport = port['ofport']
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
@@ -356,9 +354,8 @@ class OVSBridge(BaseOVS):
return edge_ports

def get_vif_port_to_ofport_map(self):
port_names = self.get_port_name_list()
results = self.db_list(
'Interface', port_names, ['name', 'external_ids', 'ofport'],
results = self.get_ports_attributes(
'Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)
port_map = {}
for r in results:
@@ -373,9 +370,8 @@ class OVSBridge(BaseOVS):

def get_vif_port_set(self):
edge_ports = set()
port_names = self.get_port_name_list()
results = self.db_list(
'Interface', port_names, ['name', 'external_ids', 'ofport'],
results = self.get_ports_attributes(
'Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)
for result in results:
if result['ofport'] == UNASSIGNED_OFPORT:
@@ -413,22 +409,18 @@ class OVSBridge(BaseOVS):
in the "Interface" table queried by the get_vif_port_set() method.

"""
port_names = self.get_port_name_list()
results = self.db_list('Port', port_names, ['name', 'tag'],
if_exists=True)
results = self.get_ports_attributes(
'Port', columns=['name', 'tag'], if_exists=True)
return {p['name']: p['tag'] for p in results}

def get_vifs_by_ids(self, port_ids):
interface_info = self.db_list(
interface_info = self.get_ports_attributes(
"Interface", columns=["name", "external_ids", "ofport"])
by_id = {x['external_ids'].get('iface-id'): x for x in interface_info}
intfs_on_bridge = self.ovsdb.list_ports(self.br_name).execute(
check_error=True)
result = {}
for port_id in port_ids:
result[port_id] = None
if (port_id not in by_id or
by_id[port_id]['name'] not in intfs_on_bridge):
if port_id not in by_id:
LOG.info(_LI("Port %(port_id)s not present in bridge "
"%(br_name)s"),
{'port_id': port_id, 'br_name': self.br_name})
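
As a quick illustration of the consolidated helper introduced above, a minimal
sketch (the bridge name 'br-int' is only an example)::

    br = OVSBridge('br-int')
    # Fetch selected columns for every port on the bridge in a single
    # ovsdb db_list call; if_exists=True skips ports that vanish between
    # listing the names and querying the table.
    rows = br.get_ports_attributes(
        'Interface', columns=['name', 'external_ids', 'ofport'],
        if_exists=True)
    names_to_ofports = {r['name']: r['ofport'] for r in rows}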

+5 -0  neutron/agent/dhcp/config.py

@@ -54,6 +54,11 @@ DNSMASQ_OPTS = [
"This option is deprecated and "
"will be removed in a future release."),
deprecated_for_removal=True),
cfg.StrOpt('dnsmasq_base_log_dir',
help=_("Base log dir for dnsmasq logging. "
"The log contains DHCP and DNS log information and "
"is useful for debugging issues with either DHCP or "
"DNS. If this section is null, disable dnsmasq log.")),
cfg.IntOpt(
'dnsmasq_lease_max',
default=(2 ** 24),

+9 -0  neutron/agent/firewall.py

@@ -117,6 +117,11 @@ class FirewallDriver(object):
"""Update rules in a security group."""
raise NotImplementedError()

def security_group_updated(self, action_type, sec_group_ids,
device_id=None):
"""Called when a security group is updated."""
raise NotImplementedError()


class NoopFirewallDriver(FirewallDriver):
"""Noop Firewall Driver.
@@ -152,3 +157,7 @@ class NoopFirewallDriver(FirewallDriver):

def update_security_group_rules(self, sg_id, rules):
pass

def security_group_updated(self, action_type, sec_group_ids,
device_id=None):
pass

+6 -0  neutron/agent/l3/agent.py

@@ -538,6 +538,12 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
LOG.debug('Processing :%r', routers)
for r in routers:
ns_manager.keep_router(r['id'])
if r.get('distributed'):
# need to keep fip namespaces as well
ext_net_id = (r['external_gateway_info'] or {}).get(
'network_id')
if ext_net_id:
ns_manager.keep_ext_net(ext_net_id)
update = queue.RouterUpdate(r['id'],
queue.PRIORITY_SYNC_ROUTERS_TASK,
router=r,

+3 -0  neutron/agent/l3/dvr_local_router.py

@@ -17,6 +17,7 @@ import netaddr

from oslo_log import log as logging
from oslo_utils import excutils
import six

from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_router_base
@@ -206,6 +207,8 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
if isinstance(ip_cidr, six.text_type):
ip_cidr = ip_cidr.encode() # Needed for Python 3.x
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
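
The six/encode handling added above is only about feeding bytes to crc32 on
Python 3; a standalone sketch of that piece, outside any router context::

    import binascii

    import six

    def crc32_of_cidr(ip_cidr):
        # binascii.crc32() needs bytes on Python 3, so a text CIDR such as
        # u'2001:db8::/64' must be encoded first; masking with 0xffffffff
        # keeps the result unsigned on both Python 2.6 and 3.x.
        if isinstance(ip_cidr, six.text_type):
            ip_cidr = ip_cidr.encode()
        return binascii.crc32(ip_cidr) & 0xffffffff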

+104 -0  neutron/agent/l3/item_allocator.py

@@ -0,0 +1,104 @@
# Copyright 2015 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os


class ItemAllocator(object):
"""Manages allocation of items from a pool

Some of the allocations such as link local addresses used for routing
inside the fip namespaces need to persist across agent restarts to maintain
consistency. Persisting such allocations in the neutron database is
unnecessary and would degrade performance. ItemAllocator utilizes local
file system to track allocations made for objects of a given class.

The persistent datastore is a file. The records are one per line of
the format: key<delimiter>value. For example if the delimiter is a ','
(the default value) then the records will be: key,value (one per line)
"""

def __init__(self, state_file, ItemClass, item_pool, delimiter=','):
"""Read the file with previous allocations recorded.

See the note in the allocate method for more detail.
"""
self.ItemClass = ItemClass
self.state_file = state_file

self.allocations = {}

self.remembered = {}
self.pool = item_pool

for line in self._read():
key, saved_value = line.strip().split(delimiter)
self.remembered[key] = self.ItemClass(saved_value)

self.pool.difference_update(self.remembered.values())

def allocate(self, key):
"""Try to allocate an item of ItemClass type.

I expect this to work in all cases because I expect the pool size to be
large enough for any situation. Nonetheless, there is some defensive
programming in here.

Since the allocations are persisted, there is the chance to leak
allocations which should have been released but were not. This leak
could eventually exhaust the pool.

So, if a new allocation is needed, the code first checks to see if
there are any remembered allocations for the key. If not, it checks
the free pool. If the free pool is empty then it dumps the remembered
allocations to free the pool. This final desperate step will not
happen often in practice.
"""
if key in self.remembered:
self.allocations[key] = self.remembered.pop(key)
return self.allocations[key]

if not self.pool:
# Desperate times. Try to get more in the pool.
self.pool.update(self.remembered.values())
self.remembered.clear()
if not self.pool:
# More than 256 routers on a compute node!
raise RuntimeError("Cannot allocate item of type:"
" %s from pool using file %s"
% (self.ItemClass, self.state_file))

self.allocations[key] = self.pool.pop()
self._write_allocations()
return self.allocations[key]

def release(self, key):
self.pool.add(self.allocations.pop(key))
self._write_allocations()

def _write_allocations(self):
current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
current.extend(remembered)
self._write(current)

def _write(self, lines):
with open(self.state_file, "w") as f:
f.writelines(lines)

def _read(self):
if not os.path.exists(self.state_file):
return []
with open(self.state_file) as f:
return f.readlines()
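
A minimal usage sketch of the new class, using plain str as the item type; the
state file path and pool contents below are made up for illustration::

    pool = set(['idx-1', 'idx-2', 'idx-3'])
    allocator = ItemAllocator('/tmp/demo_allocations', str, pool)

    value = allocator.allocate('router-a')   # drawn from the pool, persisted
    allocator.release('router-a')            # returned to the pool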

+11 -70  neutron/agent/l3/link_local_allocator.py

@@ -13,7 +13,8 @@
# under the License.

import netaddr
import os

from neutron.agent.l3.item_allocator import ItemAllocator


class LinkLocalAddressPair(netaddr.IPNetwork):
@@ -26,7 +27,7 @@ class LinkLocalAddressPair(netaddr.IPNetwork):
netaddr.IPNetwork("%s/%s" % (self.broadcast, self.prefixlen)))


class LinkLocalAllocator(object):
class LinkLocalAllocator(ItemAllocator):
"""Manages allocation of link local IP addresses.

These link local addresses are used for routing inside the fip namespaces.
@@ -37,73 +38,13 @@ class LinkLocalAllocator(object):
Persisting these in the database is unnecessary and would degrade
performance.
"""
def __init__(self, state_file, subnet):
"""Read the file with previous allocations recorded.
See the note in the allocate method for more detail.
def __init__(self, data_store_path, subnet):
"""Create the necessary pool and item allocator
using ',' as the delimiter and LinkLocalAllocator as the
class type
"""
self.state_file = state_file
subnet = netaddr.IPNetwork(subnet)

self.allocations = {}

self.remembered = {}
for line in self._read():
key, cidr = line.strip().split(',')
self.remembered[key] = LinkLocalAddressPair(cidr)

self.pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
self.pool.difference_update(self.remembered.values())

def allocate(self, key):
"""Try to allocate a link local address pair.

I expect this to work in all cases because I expect the pool size to be
large enough for any situation. Nonetheless, there is some defensive
programming in here.

Since the allocations are persisted, there is the chance to leak
allocations which should have been released but were not. This leak
could eventually exhaust the pool.

So, if a new allocation is needed, the code first checks to see if
there are any remembered allocations for the key. If not, it checks
the free pool. If the free pool is empty then it dumps the remembered
allocations to free the pool. This final desperate step will not
happen often in practice.
"""
if key in self.remembered:
self.allocations[key] = self.remembered.pop(key)
return self.allocations[key]

if not self.pool:
# Desperate times. Try to get more in the pool.
self.pool.update(self.remembered.values())
self.remembered.clear()
if not self.pool:
# More than 256 routers on a compute node!
raise RuntimeError(_("Cannot allocate link local address"))

self.allocations[key] = self.pool.pop()
self._write_allocations()
return self.allocations[key]

def release(self, key):
self.pool.add(self.allocations.pop(key))
self._write_allocations()

def _write_allocations(self):
current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
current.extend(remembered)
self._write(current)

def _write(self, lines):
with open(self.state_file, "w") as f:
f.writelines(lines)

def _read(self):
if not os.path.exists(self.state_file):
return []
with open(self.state_file) as f:
return f.readlines()
pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
super(LinkLocalAllocator, self).__init__(data_store_path,
LinkLocalAddressPair,
pool)
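
With the refactoring above, the allocator is used the same way as before from
the caller's point of view; a brief sketch (path and subnet are illustrative
only)::

    lla = LinkLocalAllocator('/tmp/fip-linklocal.state', '169.254.64.0/26')
    pair = lla.allocate('some-router-id')   # a LinkLocalAddressPair (/31)
    lla.release('some-router-id')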

+3 -0  neutron/agent/l3/namespace_manager.py

@@ -95,6 +95,9 @@ class NamespaceManager(object):
def keep_router(self, router_id):
self._ids_to_keep.add(router_id)

def keep_ext_net(self, ext_net_id):
self._ids_to_keep.add(ext_net_id)

def get_prefix_and_id(self, ns_name):
"""Get the prefix and id from the namespace name.


+20 -1  neutron/agent/linux/dhcp.py

@@ -36,7 +36,7 @@ from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.common import utils as commonutils
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.i18n import _LI, _LW
from neutron.i18n import _LI, _LW, _LE

LOG = logging.getLogger(__name__)

@@ -379,6 +379,20 @@ class Dnsmasq(DhcpLocalProcess):
if self.conf.dhcp_broadcast_reply:
cmd.append('--dhcp-broadcast')

if self.conf.dnsmasq_base_log_dir:
try:
if not os.path.exists(self.conf.dnsmasq_base_log_dir):
os.makedirs(self.conf.dnsmasq_base_log_dir)
log_filename = os.path.join(
self.conf.dnsmasq_base_log_dir,
self.network.id, 'dhcp_dns_log')
cmd.append('--log-queries')
cmd.append('--log-dhcp')
cmd.append('--log-facility=%s' % log_filename)
except OSError:
LOG.error(_LE('Error while create dnsmasq base log dir: %s'),
self.conf.dnsmasq_base_log_dir)

return cmd

def spawn_process(self):
@@ -408,6 +422,11 @@ class Dnsmasq(DhcpLocalProcess):

def _release_lease(self, mac_address, ip, client_id):
"""Release a DHCP lease."""
if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
# Note(SridharG) dhcp_release is only supported for IPv4
# addresses. For more details, please refer to man page.
return

cmd = ['dhcp_release', self.interface_name, ip, mac_address]
if client_id:
cmd.append(client_id)

+89 -0  neutron/agent/linux/ip_conntrack.py

@@ -0,0 +1,89 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import netaddr
from oslo_log import log as logging

from neutron.agent.linux import utils as linux_utils
from neutron.i18n import _LE

LOG = logging.getLogger(__name__)


class IpConntrackManager(object):
"""Smart wrapper for ip conntrack."""

def __init__(self, execute=None, namespace=None):
self.execute = execute or linux_utils.execute
self.namespace = namespace

@staticmethod
def _generate_conntrack_cmd_by_rule(rule, namespace):
ethertype = rule.get('ethertype')
protocol = rule.get('protocol')
direction = rule.get('direction')
cmd = ['conntrack', '-D']
if protocol:
cmd.extend(['-p', str(protocol)])
cmd.extend(['-f', str(ethertype).lower()])
cmd.append('-d' if direction == 'ingress' else '-s')
cmd_ns = []
if namespace:
cmd_ns.extend(['ip', 'netns', 'exec', namespace])
cmd_ns.extend(cmd)
return cmd_ns

def _get_conntrack_cmds(self, device_info_list, rule, remote_ip=None):
conntrack_cmds = []
cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace)
ethertype = rule.get('ethertype')
for device_info in device_info_list:
zone_id = device_info.get('zone_id')
if not zone_id:
continue
ips = device_info.get('fixed_ips', [])
for ip in ips:
net = netaddr.IPNetwork(ip)
if str(net.version) not in ethertype:
continue
ip_cmd = [str(net.ip), '-w', zone_id]
if remote_ip and str(
netaddr.IPNetwork(remote_ip).version) in ethertype:
ip_cmd.extend(['-s', str(remote_ip)])
conntrack_cmds.append(cmd + ip_cmd)
return conntrack_cmds

def _delete_conntrack_state(self, device_info_list, rule, remote_ip=None):
conntrack_cmds = self._get_conntrack_cmds(device_info_list,
rule, remote_ip)
for cmd in conntrack_cmds:
try:
self.execute(cmd, run_as_root=True,
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(
_LE("Failed execute conntrack command %s"), str(cmd))

def delete_conntrack_state_by_rule(self, device_info_list, rule):
self._delete_conntrack_state(device_info_list, rule)

def delete_conntrack_state_by_remote_ips(self, device_info_list,
ethertype, remote_ips):
rule = {'ethertype': str(ethertype).lower(), 'direction': 'ingress'}
if remote_ips:
for remote_ip in remote_ips:
self._delete_conntrack_state(
device_info_list, rule, remote_ip)
else:
self._delete_conntrack_state(device_info_list, rule)
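To show what the command construction above amounts to, here is a standalone sketch of the conntrack -D invocation built for an ingress IPv4 TCP rule; the device and rule values are invented and this is not the module itself:

# Sketch of the command the manager builds for one device and one rule.
rule = {'ethertype': 'IPv4', 'protocol': 'tcp', 'direction': 'ingress'}
device = {'zone_id': '4097', 'fixed_ips': ['10.0.0.5']}   # hypothetical values

cmd = ['conntrack', '-D', '-p', rule['protocol'],
       '-f', rule['ethertype'].lower(),
       '-d',                          # '-d' because the direction is ingress
       device['fixed_ips'][0], '-w', device['zone_id']]
print(' '.join(cmd))
# conntrack -D -p tcp -f ipv4 -d 10.0.0.5 -w 4097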

+ 8
- 0
neutron/agent/linux/ip_lib.py View File

@@ -723,6 +723,14 @@ class IpNetnsCommand(IpCommandBase):
return False


def vxlan_in_use(segmentation_id, namespace=None):
"""Return True if VXLAN VNID is in use by an interface, else False."""
ip_wrapper = IPWrapper(namespace=namespace)
interfaces = ip_wrapper.netns.execute(["ip", "-d", "link", "list"],
check_exit_code=True)
return 'vxlan id %s ' % segmentation_id in interfaces


def device_exists(device_name, namespace=None):
"""Return True if the device exists in the namespace."""
try:
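A quick illustration of the vxlan_in_use check above: it is a plain substring match against the output of 'ip -d link list', with a trailing space so that VNID 1001 does not match 10010. The sample output below is fabricated for the example:

# Sketch: substring test against fabricated 'ip -d link list' output.
sample_output = (
    '14: vxlan-1001: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 ...\n'
    '    vxlan id 1001 group 224.0.0.1 dev eth0 port 0 0 ttl 1 ageing 300\n'
)
segmentation_id = 1001
print('vxlan id %s ' % segmentation_id in sample_output)   # True
print('vxlan id %s ' % 42 in sample_output)                # False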

+ 95
- 0
neutron/agent/linux/iptables_firewall.py View File

@@ -20,6 +20,7 @@ from oslo_log import log as logging
import six

from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager
@@ -56,6 +57,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
# TODO(majopela, shihanzhang): refactor out ipset to a separate
# driver composed over this one
self.ipset = ipset_manager.IpsetManager(namespace=namespace)
self.ipconntrack = ip_conntrack.IpConntrackManager(namespace=namespace)
# list of ports which have security groups
self.filtered_ports = {}
self.unfiltered_ports = {}
@@ -72,6 +74,9 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
self.pre_sg_members = None
self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset
self._enabled_netfilter_for_bridges = False
self.updated_rule_sg_ids = set()
self.updated_sg_members = set()
self.devices_with_updated_sg_members = collections.defaultdict(list)

def _enable_netfilter_for_bridges(self):
# we only need to set these values once, but it has to be when
@@ -102,6 +107,22 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
def ports(self):
return dict(self.filtered_ports, **self.unfiltered_ports)

def _update_remote_security_group_members(self, sec_group_ids):
for sg_id in sec_group_ids:
for device in self.filtered_ports.values():
if sg_id in device.get('security_group_source_groups', []):
self.devices_with_updated_sg_members[sg_id].append(device)

def security_group_updated(self, action_type, sec_group_ids,
device_ids=[]):
if action_type == 'sg_rule':
self.updated_rule_sg_ids.update(sec_group_ids)
elif action_type == 'sg_member':
if device_ids:
self.updated_sg_members.update(device_ids)
else:
self._update_remote_security_group_members(sec_group_ids)

def update_security_group_rules(self, sg_id, sg_rules):
LOG.debug("Update rules of security group (%s)", sg_id)
self.sg_rules[sg_id] = sg_rules
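For readability, the bookkeeping done by security_group_updated above boils down to something like the following simplified sketch (names trimmed, not the driver itself): 'sg_rule' updates queue group ids, while 'sg_member' updates queue either the affected devices or the groups whose remote members changed.

# Simplified sketch of the dispatch in security_group_updated.
updated_rule_sg_ids = set()
updated_sg_members = set()

def security_group_updated(action_type, sec_group_ids, device_ids=()):
    if action_type == 'sg_rule':
        updated_rule_sg_ids.update(sec_group_ids)
    elif action_type == 'sg_member':
        if device_ids:
            updated_sg_members.update(device_ids)
        # else: remember the devices that reference these remote groups

security_group_updated('sg_rule', {'sg-1'})
security_group_updated('sg_member', set(), {'port-a'})
print(updated_rule_sg_ids, updated_sg_members)   # {'sg-1'} {'port-a'}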
@@ -688,6 +709,79 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
if not sg_has_members:
del self.sg_members[sg_id]

def _find_deleted_sg_rules(self, sg_id):
del_rules = list()
for pre_rule in self.pre_sg_rules.get(sg_id, []):
if pre_rule not in self.sg_rules.get(sg_id, []):
del_rules.append(pre_rule)
return del_rules

def _find_devices_on_security_group(self, sg_id):
device_list = list()
for device in self.filtered_ports.values():
if sg_id in device.get('security_groups', []):
device_list.append(device)
return device_list

def _clean_deleted_sg_rule_conntrack_entries(self):
deleted_sg_ids = set()
for sg_id in self.updated_rule_sg_ids:
del_rules = self._find_deleted_sg_rules(sg_id)
if not del_rules:
continue
device_list = self._find_devices_on_security_group(sg_id)
for rule in del_rules:
self.ipconntrack.delete_conntrack_state_by_rule(
device_list, rule)
deleted_sg_ids.add(sg_id)
for id in deleted_sg_ids:
self.updated_rule_sg_ids.remove(id)

def _clean_updated_sg_member_conntrack_entries(self):
updated_device_ids = set()
for device in self.updated_sg_members:
sec_group_change = False
device_info = self.filtered_ports.get(device)
pre_device_info = self._pre_defer_filtered_ports.get(device)
if not (device_info and pre_device_info):
continue
for sg_id in pre_device_info.get('security_groups', []):
if sg_id not in device_info.get('security_groups', []):
sec_group_change = True
break
if not sec_group_change:
continue
for ethertype in [constants.IPv4, constants.IPv6]:
self.ipconntrack.delete_conntrack_state_by_remote_ips(
[device_info], ethertype, set())
updated_device_ids.add(device)
for id in updated_device_ids:
self.updated_sg_members.remove(id)

def _clean_deleted_remote_sg_members_conntrack_entries(self):
deleted_sg_ids = set()
for sg_id, devices in self.devices_with_updated_sg_members.items():
for ethertype in [constants.IPv4, constants.IPv6]:
pre_ips = self._get_sg_members(
self.pre_sg_members, sg_id, ethertype)
cur_ips = self._get_sg_members(
self.sg_members, sg_id, ethertype)
ips = (pre_ips - cur_ips)
if devices and ips:
self.ipconntrack.delete_conntrack_state_by_remote_ips(
devices, ethertype, ips)
deleted_sg_ids.add(sg_id)
for id in deleted_sg_ids:
self.devices_with_updated_sg_members.pop(id, None)

def _remove_conntrack_entries_from_sg_updates(self):
self._clean_deleted_sg_rule_conntrack_entries()
self._clean_updated_sg_member_conntrack_entries()
self._clean_deleted_remote_sg_members_conntrack_entries()

def _get_sg_members(self, sg_info, sg_id, ethertype):
return set(sg_info.get(sg_id, {}).get(ethertype, []))

def filter_defer_apply_off(self):
if self._defer_apply:
self._defer_apply = False
@@ -696,6 +790,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
self._setup_chains_apply(self.filtered_ports,
self.unfiltered_ports)
self.iptables.defer_apply_off()
self._remove_conntrack_entries_from_sg_updates()
self._remove_unused_security_group_info()
self._pre_defer_filtered_ports = None
self._pre_defer_unfiltered_ports = None
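As a hedged summary of the new cleanup path: once deferred iptables changes are applied, the three helpers run in sequence and flush conntrack state for rules, devices, and remote members that went away. The remote-member step is a set difference, roughly like this standalone sketch with invented data:

# Sketch: delete conntrack state for IPs that were in the group's membership
# before the update but are not any longer.
pre_sg_members = {'sg-1': {'IPv4': ['10.0.0.5', '10.0.0.6'], 'IPv6': []}}
sg_members = {'sg-1': {'IPv4': ['10.0.0.6'], 'IPv6': []}}

def get_sg_members(sg_info, sg_id, ethertype):
    return set(sg_info.get(sg_id, {}).get(ethertype, []))

stale_ips = (get_sg_members(pre_sg_members, 'sg-1', 'IPv4') -
             get_sg_members(sg_members, 'sg-1', 'IPv4'))
print(stale_ips)   # {'10.0.0.5'} -> conntrack entries for this peer are flushed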

+ 1
- 1
neutron/agent/rpc.py View File

@@ -80,7 +80,7 @@ class PluginReportStateAPI(object):
agent_state['uuid'] = uuidutils.generate_uuid()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': datetime.utcnow().isoformat(),
'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT),
}
method = cctxt.call if use_call else cctxt.cast
return method(context, 'report_state', **kwargs)
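The motivation for switching from isoformat() to an explicit strftime is that isoformat() omits the microseconds field when it is zero, so the reported timestamp width could vary. A small illustration; the exact value of the constant is an assumption here:

# Illustration: isoformat() drops '.ffffff' when microsecond == 0, while an
# explicit format keeps the field width stable for the server side.
from datetime import datetime

ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'   # assumed to match the constant

t = datetime(2015, 7, 1, 12, 0, 0)             # microsecond == 0
print(t.isoformat())                           # 2015-07-01T12:00:00
print(t.strftime(ISO8601_TIME_FORMAT))         # 2015-07-01T12:00:00.000000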

+ 8
- 3
neutron/agent/securitygroups_rpc.py View File

@@ -198,22 +198,25 @@ class SecurityGroupAgentRpc(object):
"rule updated %r"), security_groups)
self._security_group_updated(
security_groups,
'security_groups')
'security_groups',
'sg_rule')

def security_groups_member_updated(self, security_groups):
LOG.info(_LI("Security group "
"member updated %r"), security_groups)
self._security_group_updated(
security_groups,
'security_group_source_groups')
'security_group_source_groups',
'sg_member')

def _security_group_updated(self, security_groups, attribute):
def _security_group_updated(self, security_groups, attribute, action_type):
devices = []
sec_grp_set = set(security_groups)
for device in self.firewall.ports.values():
if sec_grp_set & set(device.get(attribute, [])):
devices.append(device['device'])
if devices:
self.firewall.security_group_updated(action_type, sec_grp_set)
if self.defer_refresh_firewall:
LOG.debug("Adding %s devices to the list of devices "
"for which firewall needs to be refreshed",
@@ -307,6 +310,8 @@ class SecurityGroupAgentRpc(object):
LOG.debug("Refreshing firewall for all filtered devices")
self.refresh_firewall()
else:
self.firewall.security_group_updated('sg_member', [],
updated_devices)
# If a device is both in new and updated devices
# avoid reprocessing it
updated_devices = ((updated_devices | devices_to_refilter) -
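To illustrate the filtering in _security_group_updated above: the agent collects the devices whose port dictionaries reference any of the updated groups under the given attribute. A self-contained sketch with invented port data:

# Sketch: pick the ports affected by a security group update by intersecting
# the updated group ids with each port's group list.
ports = {
    'port-a': {'device': 'tapA', 'security_groups': ['sg-1']},
    'port-b': {'device': 'tapB', 'security_groups': ['sg-2']},
}

def affected_devices(security_groups, attribute):
    sec_grp_set = set(security_groups)
    return [p['device'] for p in ports.values()
            if sec_grp_set & set(p.get(attribute, []))]

print(affected_devices(['sg-1'], 'security_groups'))   # ['tapA']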

+ 6
- 0
neutron/api/rpc/handlers/dhcp_rpc.py View File

@@ -26,6 +26,7 @@ from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron.i18n import _LW
from neutron import manager
@@ -157,6 +158,7 @@ class DhcpRpcCallback(object):
network['ports'] = plugin.get_ports(context, filters=filters)
return network

@db_api.retry_db_errors
def release_dhcp_port(self, context, **kwargs):
"""Release the port currently being used by a DHCP agent."""
host = kwargs.get('host')
@@ -169,6 +171,7 @@ class DhcpRpcCallback(object):
plugin = manager.NeutronManager.get_plugin()
plugin.delete_ports_by_device_id(context, device_id, network_id)

@db_api.retry_db_errors
def release_port_fixed_ip(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
host = kwargs.get('host')
@@ -203,6 +206,7 @@ class DhcpRpcCallback(object):
LOG.warning(_LW('Updating lease expiration is now deprecated. Issued '
'from host %s.'), host)

@db_api.retry_db_errors
@resource_registry.mark_resources_dirty
def create_dhcp_port(self, context, **kwargs):
"""Create and return dhcp port information.
@@ -224,6 +228,7 @@ class DhcpRpcCallback(object):
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context, port, 'create_port')

@db_api.retry_db_errors
def update_dhcp_port(self, context, **kwargs):
"""Update the dhcp port."""
host = kwargs.get('host')
@@ -233,5 +238,6 @@ class DhcpRpcCallback(object):
'from %(host)s.',
{'port': port,
'host': host})
port['port'][portbindings.HOST_ID] = host
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context, port, 'update_port')
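The decorator applied above comes from neutron.db.api, whose internals are not shown in this diff; the following is only a generic sketch of a retry-on-transient-DB-error wrapper under that assumption, not the actual implementation:

# Generic sketch of a retry decorator (illustrative only).
import functools
import time

class TransientDBError(Exception):
    """Stand-in for a retriable DB exception such as a deadlock."""

def retry_on_transient_db_errors(max_retries=3, delay=0.1):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except TransientDBError:
                    if attempt == max_retries - 1:
                        raise
                    time.sleep(delay * (attempt + 1))
        return wrapper
    return decorator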

+ 64
- 16
neutron/api/rpc/handlers/l3_rpc.py View File

@@ -23,6 +23,7 @@ from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils
from neutron import context as neutron_context
from neutron.db import api as db_api
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.i18n import _LE
@@ -43,7 +44,8 @@ class L3RpcCallback(object):
# 1.4 Added L3 HA update_router_state. This method was later removed,
# since it was unused. The RPC version was not changed
# 1.5 Added update_ha_routers_states
target = oslo_messaging.Target(version='1.5')
# 1.6 Added process_prefix_update to support IPv6 Prefix Delegation
target = oslo_messaging.Target(version='1.6')

@property
def plugin(self):
@@ -58,6 +60,7 @@ class L3RpcCallback(object):
plugin_constants.L3_ROUTER_NAT]
return self._l3plugin

@db_api.retry_db_errors
def sync_routers(self, context, **kwargs):
"""Sync routers according to filters to a specific agent.

@@ -104,33 +107,70 @@ class L3RpcCallback(object):
router.get('gw_port_host'),
p, router['id'])
else:
self._ensure_host_set_on_port(context, host,
router.get('gw_port'),
router['id'])
self._ensure_host_set_on_port(
context, host,
router.get('gw_port'),
router['id'],
ha_router_port=router.get('ha'))
for interface in router.get(constants.INTERFACE_KEY, []):
self._ensure_host_set_on_port(context, host,
interface, router['id'])
self._ensure_host_set_on_port(
context,
host,
interface,
router['id'],
ha_router_port=router.get('ha'))
interface = router.get(constants.HA_INTERFACE_KEY)
if interface:
self._ensure_host_set_on_port(context, host, interface,
router['id'])

def _ensure_host_set_on_port(self, context, host, port, router_id=None):
def _ensure_host_set_on_port(self, context, host, port, router_id=None,
ha_router_port=False):
if (port and host is not None and
(port.get('device_owner') !=
constants.DEVICE_OWNER_DVR_INTERFACE and
port.get(portbindings.HOST_ID) != host or
port.get(portbindings.VIF_TYPE) ==
portbindings.VIF_TYPE_BINDING_FAILED)):
# All ports, including ports created for SNAT'ing for
# DVR are handled here
try:
self.plugin.update_port(context, port['id'],
{'port': {portbindings.HOST_ID: host}})
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})

# Ports owned by non-HA routers are bound again if they're
# already bound but the router moved to another host.
if not ha_router_port:
# All ports, including ports created for SNAT'ing for
# DVR are handled here
try:
self.plugin.update_port(
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})
# Ports owned by HA routers should only be bound once, if
# they are unbound. These ports are moved when an agent reports
# that one of its routers moved to the active state.
else:
if not port.get(portbindings.HOST_ID):
active_host = (
self.l3plugin.get_active_host_for_ha_router(
context, router_id))
if active_host:
host = active_host
# If there is currently no active router instance (for
# example, it's a new router), the host that requested
# the routers (essentially a random host) will do. The
# port binding will be corrected when an active instance
# is elected.
try:
self.plugin.update_port(
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})
elif (port and
port.get('device_owner') ==
constants.DEVICE_OWNER_DVR_INTERFACE):
@@ -196,6 +236,7 @@ class L3RpcCallback(object):
filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
return self.plugin.get_ports(context, filters=filters)

@db_api.retry_db_errors
def get_agent_gateway_port(self, context, **kwargs):
"""Get Agent Gateway port for FIP.

@@ -224,3 +265,10 @@ class L3RpcCallback(object):

LOG.debug('Updating HA routers states on host %s: %s', host, states)
self.l3plugin.update_routers_states(context, states, host)

def process_prefix_update(self, context, **kwargs):
subnets = kwargs.get('subnets')

for subnet_id, prefix in subnets.items():
self.plugin.update_subnet(context, subnet_id,
{'subnet': {'cidr': prefix}})
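process_prefix_update simply walks a subnet-id-to-prefix mapping and pushes the new CIDRs through update_subnet. A hedged example of the payload shape; the ids and prefixes are invented:

# Hypothetical RPC payload: subnet id -> delegated IPv6 prefix; each entry
# becomes an update_subnet call with the new cidr.
subnets = {
    '1f3c9a2e-aaaa-bbbb-cccc-000000000001': '2001:db8:1234::/64',
    '1f3c9a2e-aaaa-bbbb-cccc-000000000002': '2001:db8:5678::/64',
}
for subnet_id, prefix in subnets.items():
    print('update_subnet(%s, cidr=%s)' % (subnet_id, prefix))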

+ 13
- 1
neutron/api/v2/attributes.py View File

@@ -367,6 +367,16 @@ def _validate_regex_or_none(data, valid_values=None):
return _validate_regex(data, valid_values)


def _validate_subnetpool_id(data, valid_values=None):
if data != constants.IPV6_PD_POOL_ID:
return _validate_uuid_or_none(data, valid_values)


def _validate_subnetpool_id_or_none(data, valid_values=None):
if data is not None:
return _validate_subnetpool_id(data, valid_values)
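The validator above special-cases the IPv6 prefix-delegation sentinel and otherwise falls back to the UUID check. A simplified standalone sketch of equivalent logic; the sentinel value and the UUID test here are assumptions, not the attributes module itself:

# Sketch: accept either a UUID-shaped subnetpool id or the PD sentinel.
import uuid

IPV6_PD_POOL_ID = 'prefix_delegation'   # assumed sentinel value

def validate_subnetpool_id(data):
    if data == IPV6_PD_POOL_ID:
        return None                      # valid: prefix-delegation placeholder
    try:
        uuid.UUID(data)
        return None                      # valid UUID
    except (ValueError, TypeError, AttributeError):
        return "'%s' is not a valid subnetpool id" % data

print(validate_subnetpool_id('prefix_delegation'))   # None
print(validate_subnetpool_id('not-a-uuid'))          # error message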