Browse Source

Retire stackforge/networking-bigswitch

tags/9.40.0
Monty Taylor 4 years ago
parent
commit
36c4dce515
76 changed files with 5 additions and 8492 deletions
  1. +0
    -7
      .coveragerc
  2. +0
    -30
      .gitignore
  3. +0
    -4
      .gitreview
  4. +0
    -125
      .pylintrc
  5. +0
    -4
      .testr.conf
  6. +0
    -16
      CONTRIBUTING.rst
  7. +0
    -32
      HACKING.rst
  8. +0
    -176
      LICENSE
  9. +0
    -14
      MANIFEST.in
  10. +5
    -9
      README.rst
  11. +0
    -233
      TESTING.rst
  12. +0
    -19
      bsnstacklib/__init__.py
  13. +0
    -0
      bsnstacklib/bsnlldp/__init__.py
  14. +0
    -342
      bsnstacklib/bsnlldp/bsnlldp.py
  15. +0
    -27
      bsnstacklib/hooks.py
  16. +0
    -0
      bsnstacklib/plugins/__init__.py
  17. +0
    -14
      bsnstacklib/plugins/bigswitch/README
  18. +0
    -0
      bsnstacklib/plugins/bigswitch/__init__.py
  19. +0
    -0
      bsnstacklib/plugins/bigswitch/agent/__init__.py
  20. +0
    -247
      bsnstacklib/plugins/bigswitch/agent/restproxy_agent.py
  21. +0
    -133
      bsnstacklib/plugins/bigswitch/config.py
  22. +0
    -0
      bsnstacklib/plugins/bigswitch/db/__init__.py
  23. +0
    -219
      bsnstacklib/plugins/bigswitch/db/consistency_db.py
  24. +0
    -52
      bsnstacklib/plugins/bigswitch/db/porttracker_db.py
  25. +0
    -24
      bsnstacklib/plugins/bigswitch/dhcp_driver.py
  26. +0
    -0
      bsnstacklib/plugins/bigswitch/extensions/__init__.py
  27. +0
    -140
      bsnstacklib/plugins/bigswitch/extensions/routerrule.py
  28. +0
    -336
      bsnstacklib/plugins/bigswitch/l3_router_plugin.py
  29. +0
    -996
      bsnstacklib/plugins/bigswitch/plugin.py
  30. +0
    -128
      bsnstacklib/plugins/bigswitch/routerrule_db.py
  31. +0
    -704
      bsnstacklib/plugins/bigswitch/servermanager.py
  32. +0
    -0
      bsnstacklib/plugins/bigswitch/tests/__init__.py
  33. +0
    -185
      bsnstacklib/plugins/bigswitch/tests/test_server.py
  34. +0
    -22
      bsnstacklib/plugins/bigswitch/vcsversion.py
  35. +0
    -48
      bsnstacklib/plugins/bigswitch/version.py
  36. +0
    -0
      bsnstacklib/plugins/ml2/__init__.py
  37. +0
    -0
      bsnstacklib/plugins/ml2/drivers/__init__.py
  38. +0
    -0
      bsnstacklib/plugins/ml2/drivers/mech_bigswitch/__init__.py
  39. +0
    -298
      bsnstacklib/plugins/ml2/drivers/mech_bigswitch/driver.py
  40. +0
    -14
      bsnstacklib/tests/__init__.py
  41. +0
    -19
      bsnstacklib/tests/unit/__init__.py
  42. +0
    -0
      bsnstacklib/tests/unit/bigswitch/__init__.py
  43. +0
    -46
      bsnstacklib/tests/unit/bigswitch/etc/restproxy.ini.test
  44. +0
    -2
      bsnstacklib/tests/unit/bigswitch/etc/ssl/ca_certs/README
  45. +0
    -2
      bsnstacklib/tests/unit/bigswitch/etc/ssl/combined/README
  46. +0
    -2
      bsnstacklib/tests/unit/bigswitch/etc/ssl/host_certs/README
  47. +0
    -182
      bsnstacklib/tests/unit/bigswitch/fake_server.py
  48. +0
    -34
      bsnstacklib/tests/unit/bigswitch/test_agent_scheduler.py
  49. +0
    -86
      bsnstacklib/tests/unit/bigswitch/test_base.py
  50. +0
    -93
      bsnstacklib/tests/unit/bigswitch/test_capabilities.py
  51. +0
    -323
      bsnstacklib/tests/unit/bigswitch/test_restproxy_agent.py
  52. +0
    -334
      bsnstacklib/tests/unit/bigswitch/test_restproxy_plugin.py
  53. +0
    -577
      bsnstacklib/tests/unit/bigswitch/test_router_db.py
  54. +0
    -49
      bsnstacklib/tests/unit/bigswitch/test_security_groups.py
  55. +0
    -674
      bsnstacklib/tests/unit/bigswitch/test_servermanager.py
  56. +0
    -251
      bsnstacklib/tests/unit/bigswitch/test_ssl.py
  57. +0
    -0
      bsnstacklib/tests/unit/ml2/__init__.py
  58. +0
    -0
      bsnstacklib/tests/unit/ml2/drivers/__init__.py
  59. +0
    -226
      bsnstacklib/tests/unit/ml2/drivers/test_bigswitch_mech.py
  60. +0
    -0
      doc/source/conf.py
  61. +0
    -0
      doc/source/contents.rst
  62. +0
    -114
      etc/neutron/plugins/bigswitch/restproxy.ini
  63. +0
    -3
      etc/neutron/plugins/bigswitch/ssl/ca_certs/README
  64. +0
    -6
      etc/neutron/plugins/bigswitch/ssl/host_certs/README
  65. +0
    -147
      etc/policy.json
  66. +0
    -8
      requirements.txt
  67. +0
    -260
      run_tests.sh
  68. +0
    -58
      setup.cfg
  69. +0
    -30
      setup.py
  70. +0
    -21
      test-requirements.txt
  71. +0
    -5
      tools/clean.sh
  72. +0
    -72
      tools/install_venv.py
  73. +0
    -172
      tools/install_venv_common.py
  74. +0
    -6
      tools/pretty_tox.sh
  75. +0
    -19
      tools/with_venv.sh
  76. +0
    -73
      tox.ini

+ 0
- 7
.coveragerc View File

@@ -1,7 +0,0 @@
[run]
branch = True
source = neutron
omit = neutron/tests/*,neutron/plugins/cisco/test/*,neutron/openstack/*

[report]
ignore_errors = True

+ 0
- 30
.gitignore View File

@@ -1,30 +0,0 @@
AUTHORS
build/*
build-stamp
ChangeLog
cover/
covhtml/
dist/
doc/build
*.DS_Store
*.pyc
bsnstacklib.egg-info/
neutron.egg-info/
neutron/vcsversion.py
neutron/versioninfo
pbr*.egg/
quantum.egg-info/
quantum/vcsversion.py
quantum/versioninfo
setuptools*.egg/
*.log
*.mo
*.sw?
*~
/.*
!/.coveragerc
!/.gitignore
!/.gitreview
!/.mailmap
!/.pylintrc
!/.testr.conf

+ 0
- 4
.gitreview View File

@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/networking-bigswitch.git

+ 0
- 125
.pylintrc View File

@@ -1,125 +0,0 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
#
# Note the 'openstack' below is intended to match only
# neutron.openstack.common. If we ever have another 'openstack'
# dirname, then we'll need to expand the ignore features in pylint :/
ignore=.git,tests,openstack

[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "I" Informational noise
locally-disabled,
# "E" Error for important programming issues (likely bugs)
access-member-before-definition,
bad-super-call,
maybe-no-member,
no-member,
no-method-argument,
no-self-argument,
no-value-for-parameter,
# "W" Warnings for stylistic problems or minor programming issues
abstract-method,
arguments-differ,
attribute-defined-outside-init,
bad-builtin,
bad-indentation,
broad-except,
dangerous-default-value,
deprecated-lambda,
duplicate-key,
expression-not-assigned,
fixme,
global-statement,
global-variable-not-assigned,
logging-not-lazy,
no-init,
non-parent-init-called,
protected-access,
redefined-builtin,
redefined-outer-name,
redefine-in-handler,
signature-differs,
star-args,
super-init-not-called,
unnecessary-lambda,
unnecessary-pass,
unpacking-non-sequence,
unreachable,
unused-argument,
unused-import,
unused-variable,
# "C" Coding convention violations
bad-continuation,
invalid-name,
missing-docstring,
old-style-class,
superfluous-parens,
# "R" Refactor recommendations
abstract-class-little-used,
abstract-class-not-used,
duplicate-code,
interface-not-implemented,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements

[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
# _ is used by our localization
additional-builtins=_

[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=

[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use openstack.common.jsonutils
json

[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems

[REPORTS]
# Tells whether to display a full report or only the messages
reports=no

+ 0
- 4
.testr.conf View File

@@ -1,4 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./bsnstacklib/tests/unit} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

+ 0
- 16
CONTRIBUTING.rst View File

@@ -1,16 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps documented at:

http://docs.openstack.org/infra/manual/developers.html#development-workflow

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

https://bugs.launchpad.net/neutron

+ 0
- 32
HACKING.rst View File

@@ -1,32 +0,0 @@
Neutron Style Commandments
==========================

- Step 1: Read the OpenStack Style Commandments
http://docs.openstack.org/developer/hacking/
- Step 2: Read on

Neutron Specific Commandments
-----------------------------

- [N319] Validate that debug level logs are not translated
- [N320] Validate that LOG messages, except debug ones, have translations
- [N321] Validate that jsonutils module is used instead of json
- [N322] Detect common errors with assert_called_once_with
- [N323] Enforce namespace-less imports for oslo libraries

Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

All unittest classes must ultimately inherit from testtools.TestCase. In the
Neutron test suite, this should be done by inheriting from
neutron.tests.base.BaseTestCase.

All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.

+ 0
- 176
LICENSE View File

@@ -1,176 +0,0 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.


+ 0
- 14
MANIFEST.in View File

@@ -1,14 +0,0 @@
include AUTHORS
include README.rst
include ChangeLog
include LICENSE
include neutron/db/migration/README
include neutron/db/migration/alembic.ini
include neutron/db/migration/alembic_migrations/script.py.mako
include neutron/db/migration/alembic_migrations/versions/README
recursive-include neutron/locale *

exclude .gitignore
exclude .gitreview

global-exclude *.pyc

+ 5
- 9
README.rst View File

@@ -1,11 +1,7 @@
Welcome!
========
This project is no longer maintained.

This library contains the components required to integrate an
OpenStack deployment with a Big Switch Networks fabric.
The contents of this repository are still available in the Git source code
management system. To see the contents of this repository before it reached
its end of life, please check out the previous commit with
"git checkout HEAD^1".


External Resources:
===================

Big Switch Networks Website: http://www.bigswitch.com

+ 0
- 233
TESTING.rst View File

@@ -1,233 +0,0 @@
Testing Neutron
=============================================================

Overview
--------

The unit tests (neutron/test/unit/) are meant to cover as much code as
possible and should be executed without the service running. They are
designed to test the various pieces of the neutron tree to make sure
any new changes don't break existing functionality.

The functional tests (neutron/tests/functional/) are intended to
validate actual system interaction. Mocks should be used sparingly,
if at all. Care should be taken to ensure that existing system
resources are not modified and that resources created in tests are
properly cleaned up.

Development process
-------------------

It is expected that any new changes that are proposed for merge
come with tests for that feature or code area. Ideally any bugs
fixes that are submitted also have tests to prove that they stay
fixed! In addition, before proposing for merge, all of the
current tests should be passing.

Virtual environments
~~~~~~~~~~~~~~~~~~~~

Testing OpenStack projects, including Neutron, is made easier with `DevStack <https://git.openstack.org/cgit/openstack-dev/devstack>`_.

Create a machine (such as a VM or Vagrant box) running a distribution supported
by DevStack and install DevStack there. For example, there is a Vagrant script
for DevStack at https://github.com/bcwaldon/vagrant_devstack.

.. note::

If you prefer not to use DevStack, you can still check out source code on your local
machine and develop from there.


Running unit tests
------------------

There are three mechanisms for running tests: run_tests.sh, tox,
and nose2. Before submitting a patch for review you should always
ensure all tests pass; a tox run is triggered by the Jenkins gate
executed on gerrit for each patch pushed for review.

With these mechanisms you can either run the tests in the standard
environment or create a virtual environment to run them in.

By default after running all of the tests, any pep8 errors
found in the tree will be reported.


With `run_tests.sh`
~~~~~~~~~~~~~~~~~~~

You can use the `run_tests.sh` script in the root source directory to execute
tests in a virtualenv::

./run_tests.sh -V


With `nose2`
~~~~~~~~~~~~

You can use `nose2`_ to run individual tests, as well as use for debugging
portions of your code::

source .venv/bin/activate
pip install nose2
nose2

There are disadvantages to running nose2 - the tests are run sequentially, so
race condition bugs will not be triggered, and the full test suite will
take significantly longer than tox & testr. The upside is that testr has
some rough edges when it comes to diagnosing errors and failures, and there is
no easy way to set a breakpoint in the Neutron code, and enter an
interactive debugging session while using testr.

It is also possible to use nose2's predecessor, `nose`_, to run the tests::

source .venv/bin/activate
pip install nose
nosetests

nose has one additional disadvantage over nose2 - it does not
understand the `load_tests protocol`_ introduced in Python 2.7. This
limitation will result in errors being reported for modules that
depend on load_tests (usually due to use of `testscenarios`_).

.. _nose2: http://nose2.readthedocs.org/en/latest/index.html
.. _nose: https://nose.readthedocs.org/en/latest/index.html
.. _load_tests protocol: https://docs.python.org/2/library/unittest.html#load-tests-protocol
.. _testscenarios: https://pypi.python.org/pypi/testscenarios/

With `tox`
~~~~~~~~~~

Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual
environments for running test cases. It uses `Testr`_ for managing the running
of the test cases.

Tox handles the creation of a series of `virtualenvs`_ that target specific
versions of Python (2.6, 2.7, 3.3, etc).

Testr handles the parallel execution of series of test cases as well as
the tracking of long-running tests and other things.

Running unit tests is as easy as executing this in the root directory of the
Neutron source code::

tox

To run functional tests that do not require sudo privileges or
specific-system dependencies::

tox -e functional

To run all the functional tests, including those requiring sudo
privileges and system-specific dependencies, the procedure defined by
tools/configure_for_func_testing.sh should be followed.

IMPORTANT: configure_for_func_testing.sh relies on devstack to perform
extensive modification to the underlying host. Execution of the
script requires sudo privileges and it is recommended that the
following commands be invoked only on a clean and disposable VM. A
VM that has had devstack previously installed on it is also fine. ::

git clone https://git.openstack.org/openstack-dev/devstack ../devstack
./tools/configure_for_func_testing.sh ../devstack -i
tox -e dsvm-functional

The '-i' option is optional and instructs the script to use devstack
to install and configure all of Neutron's package dependencies. It is
not necessary to provide this option if devstack has already been used
to deploy Neutron to the target host.

For more information on the standard Tox-based test infrastructure used by
OpenStack and how to do some common test/debugging procedures with Testr,
see this wiki page:

https://wiki.openstack.org/wiki/Testr

.. _Testr: https://wiki.openstack.org/wiki/Testr
.. _tox: http://tox.readthedocs.org/en/latest/
.. _virtualenvs: https://pypi.python.org/pypi/virtualenv


Running individual tests
~~~~~~~~~~~~~~~~~~~~~~~~

For running individual test modules or cases, you just need to pass
the dot-separated path to the module you want as an argument to it.

For executing a specific test case, specify the name of the test case
class separating it from the module path with a colon.

For example, the following would run only the JSONV2TestCase tests from
neutron/tests/unit/test_api_v2.py::

$ ./run_tests.sh neutron.tests.unit.test_api_v2.JSONV2TestCase

or::

$ tox -e py27 neutron.tests.unit.test_api_v2.JSONV2TestCase

Adding more tests
~~~~~~~~~~~~~~~~~

Neutron has a fast-growing code base and there are plenty of areas that
need to be covered by unit and functional tests.

To get a grasp of the areas where tests are needed, you can check
current coverage by running::

$ ./run_tests.sh -c

Debugging
---------

By default, calls to pdb.set_trace() will be ignored when tests
are run. For pdb statements to work, invoke run_tests as follows::

$ ./run_tests.sh -d [test module path]

It's possible to debug tests in a tox environment::

$ tox -e venv -- python -m testtools.run [test module path]

Tox-created virtual environments (venv's) can also be activated
after a tox run and reused for debugging::

$ tox -e venv
$ . .tox/venv/bin/activate
$ python -m testtools.run [test module path]

Tox packages and installs the neutron source tree in a given venv
on every invocation, but if modifications need to be made between
invocation (e.g. adding more pdb statements), it is recommended
that the source tree be installed in the venv in editable mode::

# run this only after activating the venv
$ pip install --editable .

Editable mode ensures that changes made to the source tree are
automatically reflected in the venv, and that such changes are not
overwritten during the next tox run.

Post-mortem debugging
~~~~~~~~~~~~~~~~~~~~~

Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure
that the debugger .post_mortem() method will be invoked on test failure::

$ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path]

Supported debuggers are pdb and pudb. Pudb is a full-screen, console-based
visual debugger for Python which lets you inspect variables, the stack,
and breakpoints in a very visual way, keeping a high degree of compatibility
with pdb::

$ ./.venv/bin/pip install pudb

$ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path]

References
==========

.. [#pudb] PUDB debugger:
https://pypi.python.org/pypi/pudb

+ 0
- 19
bsnstacklib/__init__.py View File

@@ -1,19 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import gettext


gettext.install('neutron', unicode=1)

+ 0
- 0
bsnstacklib/bsnlldp/__init__.py View File


+ 0
- 342
bsnstacklib/bsnlldp/bsnlldp.py View File

@@ -1,342 +0,0 @@
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import ctypes
from ctypes import c_byte
from ctypes import c_char_p
from ctypes import c_uint
from ctypes import c_uint16
from ctypes import c_uint32
from ctypes import c_ushort
from ctypes import c_void_p
from ctypes import cast
from ctypes import get_errno
from ctypes import pointer
from ctypes import POINTER
from ctypes import Structure
from ctypes import Union
import ctypes.util
import os
import socket
from socket import AF_INET
from socket import AF_INET6
from socket import inet_ntop
from subprocess import check_output
import time

# Standard LLDP multicast destination MAC address (IEEE 802.1AB).
LLDP_DST_MAC = "01:80:c2:00:00:0e"
# MAC-formatted value; presumably advertised in the System Description
# TLV -- confirm against the frame-building code.
SYSTEM_DESC = "5c:16:c7:00:00:04"
# EtherType identifying LLDP frames.
LLDP_ETHERTYPE = 0x88cc
# Chassis identifier string advertised for this host.
CHASSIS_ID = "Big Cloud Fabric"
# LLDP TTL TLV value, in seconds.
TTL = 120
# Default advertisement interval, in seconds.
INTERVAL = 10
# LLDP Chassis ID subtype 7: "locally assigned" (IEEE 802.1AB).
CHASSIS_ID_LOCALLY_ASSIGNED = 7
# LLDP Port ID subtype 1: "interface alias" (IEEE 802.1AB).
PORT_ID_INTERFACE_ALIAS = 1


class struct_sockaddr(Structure):
    # ctypes mirror of C's generic ``struct sockaddr``: the address
    # family plus opaque address bytes. Concrete layouts are read by
    # casting to the family-specific structures below.
    _fields_ = [
        ('sa_family', c_ushort),
        ('sa_data', c_byte * 14)]


class struct_sockaddr_in(Structure):
    # Partial ctypes mirror of ``struct sockaddr_in``: only the leading
    # fields up to the 4-byte IPv4 address are declared (the trailing
    # ``sin_zero`` padding is omitted, which is safe for read-only casts).
    _fields_ = [
        ('sin_family', c_ushort),
        ('sin_port', c_uint16),
        ('sin_addr', c_byte * 4)]


class struct_sockaddr_in6(Structure):
    # ctypes mirror of ``struct sockaddr_in6``; ``sin6_addr`` holds the
    # 16-byte IPv6 address that getfamaddr() decodes.
    _fields_ = [
        ('sin6_family', c_ushort),
        ('sin6_port', c_uint16),
        ('sin6_flowinfo', c_uint32),
        ('sin6_addr', c_byte * 16),
        ('sin6_scope_id', c_uint32)]


class union_ifa_ifu(Union):
    # ctypes mirror of the ``ifa_ifu`` union from <ifaddrs.h>: either the
    # broadcast address or the point-to-point destination address.
    _fields_ = [
        ('ifu_broadaddr', POINTER(struct_sockaddr)),
        ('ifu_dstaddr', POINTER(struct_sockaddr))]


class struct_ifaddrs(Structure):
    # Body assigned after the class statement because the struct is
    # self-referential (ifa_next points to another struct_ifaddrs).
    pass
struct_ifaddrs._fields_ = [
    ('ifa_next', POINTER(struct_ifaddrs)),
    ('ifa_name', c_char_p),
    ('ifa_flags', c_uint),
    ('ifa_addr', POINTER(struct_sockaddr)),
    ('ifa_netmask', POINTER(struct_sockaddr)),
    ('ifa_ifu', union_ifa_ifu),
    ('ifa_data', c_void_p)]

libc = ctypes.CDLL(ctypes.util.find_library('c'))


def ifap_iter(ifap):
    """Yield each struct_ifaddrs node of the linked list headed by *ifap*."""
    node = ifap.contents
    while True:
        yield node
        nxt = node.ifa_next
        if not nxt:
            return
        node = nxt.contents


def getfamaddr(sa):
    """Return (address family, printable address) for a generic sockaddr.

    Families other than AF_INET/AF_INET6 yield a None address.
    """
    family = sa.sa_family
    if family == AF_INET:
        sin = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
        return family, inet_ntop(family, sin.sin_addr)
    if family == AF_INET6:
        sin6 = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
        return family, inet_ntop(family, sin6.sin6_addr)
    return family, None


class NetworkInterface(object):
    """Book-keeping for one NIC: name, kernel index, per-family address."""

    def __init__(self, name):
        self.name = name
        # Kernel interface index as reported by if_nametoindex(3).
        self.index = libc.if_nametoindex(name)
        # Maps address family (AF_INET / AF_INET6) -> printable address.
        self.addresses = {}

    def __str__(self):
        return "{0} [index={1}, IPv4={2}, IPv6={3}]".format(
            self.name, self.index,
            self.addresses.get(AF_INET),
            self.addresses.get(AF_INET6))


def get_network_interfaces():
    """Enumerate NICs via libc getifaddrs(); return NetworkInterface values."""
    ifap = POINTER(struct_ifaddrs)()
    if libc.getifaddrs(pointer(ifap)) != 0:
        raise OSError(get_errno())
    try:
        interfaces = {}
        for entry in ifap_iter(ifap):
            nic = interfaces.get(entry.ifa_name)
            if not nic:
                nic = interfaces[entry.ifa_name] = NetworkInterface(
                    entry.ifa_name)
            family, addr = getfamaddr(entry.ifa_addr.contents)
            if addr:
                nic.addresses[family] = addr
        return interfaces.values()
    finally:
        # Always release the C-allocated list, even on parse errors.
        libc.freeifaddrs(ifap)


def parse_args():
    """Build and evaluate the command-line options for the LLDP sender."""
    parser = argparse.ArgumentParser()

    # Options that affect the LLDP frame contents.
    parser.add_argument("--network_interface")
    parser.add_argument("--system-name")
    parser.add_argument("--system-desc")

    # Options that affect daemon behaviour.
    parser.add_argument("-i", "--interval", type=int, default=0)
    parser.add_argument("-d", "--daemonize", action="store_true",
                        default=False)

    return parser.parse_args()


def validate_num_bits_of_int(int_value, num_bits, name=None):
    """Raise ValueError unless *int_value* fits in *num_bits* bits."""
    mask = (1 << num_bits) - 1
    if (int_value & mask) == int_value:
        return
    label = name if name else "The integer value"
    raise ValueError("%s must be %d-bit long. Given: %d (%s)"
                     % (label, num_bits, int_value, hex(int_value)))


def raw_bytes_of_hex_str(hex_str):
    """Decode a hex string (e.g. "01ff") into its raw bytes.

    Uses binascii.unhexlify instead of str.decode("hex"): behavior is
    identical on Python 2 (returns str) and it also works on Python 3,
    where the "hex" str codec no longer exists.
    """
    import binascii  # local import keeps the module import block unchanged
    return binascii.unhexlify(hex_str)


def raw_bytes_of_mac_str(mac_str):
    """Convert a colon-separated MAC string into its 6 raw bytes."""
    hex_digits = mac_str.replace(":", "")
    return raw_bytes_of_hex_str(hex_digits)


def raw_bytes_of_int(int_value, num_bytes, name=None):
    """Big-endian, zero-padded encoding of *int_value* in *num_bytes* bytes."""
    validate_num_bits_of_int(int_value, num_bytes * 8, name)
    # '%0*x' pads with zeros to two hex digits per output byte.
    hex_str = "%0*x" % (num_bytes * 2, int_value)
    return raw_bytes_of_hex_str(hex_str)


def get_mac_str(network_interface):
    """Read the interface's MAC address string from sysfs."""
    sysfs_path = "/sys/class/net/%s/address" % network_interface
    with open(sysfs_path) as addr_file:
        return addr_file.read().strip()


def lldp_ethertype():
    """The LLDP EtherType as 2 raw bytes."""
    ethertype = raw_bytes_of_int(LLDP_ETHERTYPE, 2, "LLDP ethertype")
    return ethertype


def validate_tlv_type(type_):
    """The TLV type field is 7 bits wide."""
    validate_num_bits_of_int(type_, 7, name="TLV type")


def validate_tlv_length(length):
    """The TLV length field is 9 bits wide."""
    validate_num_bits_of_int(length, 9, name="TLV length")


def tlv_1st_2nd_bytes_of(type_, length):
    """Pack TLV type (7 bits) and length (9 bits) into the 2 header bytes."""
    validate_tlv_type(type_)
    validate_tlv_length(length)
    header = (type_ << 9) | length
    return raw_bytes_of_int(header, 2, "First 2 bytes of TLV")


def tlv_of(type_, str_value):
    """Serialize one TLV: 2-byte type/length header followed by the value."""
    header = tlv_1st_2nd_bytes_of(type_, len(str_value))
    return header + str_value


def chassis_id_tlv_of(chassis_id, subtype=CHASSIS_ID_LOCALLY_ASSIGNED):
    """Chassis ID TLV (type 1): one subtype byte plus the identifier."""
    subtype_byte = raw_bytes_of_int(subtype, 1, "Chassis ID subtype")
    return tlv_of(1, subtype_byte + chassis_id)


def port_id_tlv_of(port_id, subtype=PORT_ID_INTERFACE_ALIAS):
    """Port ID TLV (type 2): one subtype byte plus the port identifier."""
    subtype_byte = raw_bytes_of_int(subtype, 1, "Port ID subtype")
    return tlv_of(2, subtype_byte + port_id)


def ttl_tlv_of(ttl_seconds):
    """TTL TLV (type 3): 2-byte advertisement lifetime in seconds."""
    ttl_bytes = raw_bytes_of_int(ttl_seconds, 2, "TTL (seconds)")
    return tlv_of(3, ttl_bytes)


def system_name_tlv_of(system_name):
    """System Name TLV (type 5)."""
    return tlv_of(5, system_name)


def system_desc_tlv_of(system_desc):
    """System Description TLV (type 6)."""
    return tlv_of(6, system_desc)


def end_tlv():
    """End Of LLDPDU TLV (type 0, empty value)."""
    return tlv_of(0, "")


def lldp_frame_of(chassis_id,
                  network_interface,
                  ttl,
                  system_name=None,
                  system_desc=None):
    """Build a complete LLDP ethernet frame as a raw byte string.

    The frame is the ethernet header (LLDP multicast destination, the
    interface's own MAC as source, the LLDP ethertype), the mandatory
    Chassis ID / Port ID / TTL TLVs, the optional system name and
    description TLVs, and the terminating End TLV.
    """
    frame_parts = [
        raw_bytes_of_mac_str(LLDP_DST_MAC),
        raw_bytes_of_mac_str(get_mac_str(network_interface)),
        lldp_ethertype(),
        chassis_id_tlv_of(chassis_id),
        port_id_tlv_of(network_interface),
        ttl_tlv_of(ttl),
    ]

    for value, tlv_builder in ((system_name, system_name_tlv_of),
                               (system_desc, system_desc_tlv_of)):
        if value is not None:
            frame_parts.append(tlv_builder(value))

    frame_parts.append(end_tlv())
    return "".join(frame_parts)


def daemonize():
    """Detach from the controlling terminal via the classic double fork.

    Do not use this code for daemonizing elsewhere: it is intentionally
    minimal (no chdir/umask/fd handling) and only good enough here.
    """
    if os.fork() != 0:
        # First parent exits.
        os._exit(os.EX_OK)

    os.setsid()

    if os.fork() != 0:
        # Second parent exits; the remaining child is not a session leader.
        os._exit(os.EX_OK)


def get_hostname():
    """Hostname reported in the LLDP System Name TLV."""
    hostname = socket.gethostname()
    return hostname


def get_phy_interfaces():
    """Best-effort detection of physical NICs attached to the OVS bridge.

    A NIC qualifies when it has no IPv4 address, appears in the
    ``ovs-vsctl show`` output, and ethtool reports a non-empty
    'Supported ports' list. Command failures simply exclude the NIC.
    """
    phy_nics = []
    for nic in get_network_interfaces():
        if nic.addresses.get(AF_INET):
            # Carries an IPv4 address -> not a fabric-facing port.
            continue
        try:
            ovs_out = check_output("sudo ovs-vsctl show | grep " + nic.name,
                                   shell=True)
            if nic.name not in ovs_out:
                continue
            ethtool_out = check_output("ethtool " + nic.name +
                                       " | grep 'Supported ports'",
                                       shell=True)
            if "[ ]" not in ethtool_out:
                # value is present in [ TP ], its not empty list
                phy_nics.append(nic.name)
        except Exception:
            # interface doesn't have supported ports list
            pass
    return phy_nics


def main():
    """Entry point: advertise LLDP frames on all physical NICs forever."""
    args = parse_args()
    if args.daemonize:
        daemonize()

    # Fix: the -i/--interval option was parsed but never used; the loop
    # always slept the hard-coded INTERVAL. Honor a positive value and
    # keep the historical pacing for the default (0).
    interval = args.interval if args.interval > 0 else INTERVAL

    def _generate_senders_frames(intfs):
        # One pre-built frame and one bound raw socket per interface.
        senders = []
        frames = []
        for intf in intfs:
            interface = intf.strip()
            frame = lldp_frame_of(chassis_id=CHASSIS_ID,
                                  network_interface=interface,
                                  ttl=TTL,
                                  system_name=get_hostname(),
                                  system_desc=SYSTEM_DESC)
            frames.append(frame)
            sender = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
            sender.bind((interface, 0))
            senders.append(sender)
        return senders, frames

    intfs = []
    senders, frames = [], []
    while True:
        if not intfs:
            # Keep retrying discovery until interfaces show up.
            intfs = get_phy_interfaces()
            senders, frames = _generate_senders_frames(intfs)
        for sender, frame in zip(senders, frames):
            sender.send(frame)
        time.sleep(interval)


if __name__ == "__main__":
    main()

+ 0
- 27
bsnstacklib/hooks.py View File

@@ -1,27 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys


def setup_hook(config):
    """Filter config parsed from a setup.cfg to inject our defaults."""
    metadata = config['metadata']
    requires = metadata.get('requires_dist', '').split('\n')
    # Windows additionally needs the WMI bindings.
    if sys.platform == 'win32':
        requires.extend(['pywin32', 'wmi'])
    metadata['requires_dist'] = "\n".join(requires)
    config['metadata'] = metadata

+ 0
- 0
bsnstacklib/plugins/__init__.py View File


+ 0
- 14
bsnstacklib/plugins/bigswitch/README View File

@@ -1,14 +0,0 @@
# Neutron REST Proxy Plug-in for Big Switch and Floodlight Controllers

This module provides a generic neutron plugin 'NeutronRestProxy' that
translates neutron function calls to authenticated REST requests (JSON supported)
to a set of redundant external network controllers.

It also keeps a local persistent store of neutron state that has been
setup using that API.

Currently the Floodlight OpenFlow Controller or the Big Switch Networks Controller
can be configured as external network controllers for this plugin.

For more details on this plugin, please refer to the following link:
http://www.openflowhub.org/display/floodlightcontroller/Neutron+REST+Proxy+Plugin

+ 0
- 0
bsnstacklib/plugins/bigswitch/__init__.py View File


+ 0
- 0
bsnstacklib/plugins/bigswitch/agent/__init__.py View File


+ 0
- 247
bsnstacklib/plugins/bigswitch/agent/restproxy_agent.py View File

@@ -1,247 +0,0 @@
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys
import time

import eventlet
# Green the blocking stdlib calls before any other module can bind to them.
eventlet.monkey_patch()

from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_service import loopingcall
from oslo_utils import excutils

from neutron.agent.common import ovs_lib
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config
from neutron.common import constants as q_const
from neutron.common import topics
from neutron import context as q_context
from neutron.extensions import securitygroup as ext_sg
from neutron.i18n import _LE, _LI

from bsnstacklib.plugins.bigswitch import config as pl_config

LOG = log.getLogger(__name__)


class IVSBridge(ovs_lib.OVSBridge):
    '''
    This class does not provide parity with OVS using IVS.
    It's only the bare minimum necessary to use IVS with this agent.
    '''
    def run_vsctl(self, args, check_error=False, log_fail_as_error=True):
        # Same calling convention as OVSBridge.run_vsctl, but shells out to
        # 'ivs-ctl' instead of 'ovs-vsctl'.
        full_args = ["ivs-ctl"] + args
        try:
            resp = utils.execute(full_args, run_as_root=True,
                                 return_stderr=True,
                                 log_fail_as_error=log_fail_as_error)
            # resp is (stdout, stderr): prefer stdout, fall back to stderr.
            return resp[0] or resp[1]
        except Exception as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if log_fail_as_error:
                    logfunc = LOG.error
                else:
                    logfunc = LOG.debug
                logfunc(_LE("Unable to execute %(cmd)s. "
                            "Exception: %(exception)s"),
                        {'cmd': full_args, 'exception': e})
                if not check_error:
                    # Caller did not ask for errors: swallow the exception
                    # (method then returns None) instead of re-raising.
                    ctxt.reraise = False

    def get_vif_port_set(self):
        # Set of all port names currently attached to the IVS instance.
        port_names = self.get_port_name_list()
        edge_ports = set(port_names)
        return edge_ports

    def get_vif_port_by_id(self, port_id):
        # IVS in nova uses hybrid method with last 14 chars of UUID
        name = 'qvo%s' % port_id[:14]
        if name in self.get_vif_port_set():
            return name
        # Returns False (not None) when the port is not on this host.
        return False

    def get_port_name_list(self):
        # Try native list-ports command first and then fallback to show
        # command.
        try:
            resp = self.run_vsctl(['list-ports'], True,
                                  log_fail_as_error=False).strip().splitlines()
            port_names = map(lambda x: x.strip(), resp)
        except RuntimeError:
            resp = self.run_vsctl(['show'], True)
            # get rid of stats and blank lines
            ports = filter(
                lambda x: 'packets=' not in x and x.strip(),
                resp.split('ivs:')[1].split('ports:')[1].splitlines())
            # Each remaining line looks like "<num> <name> ..."; take field 1.
            port_names = map(lambda x: x.strip().split(' ')[1], ports)
        LOG.debug("Ports on IVS: %s", port_names)
        return port_names


class FilterDeviceIDMixin(sg_rpc.SecurityGroupAgentRpc):
    """Security group agent that maps 'qvo' device names to ml2's 'tap'."""

    def prepare_devices_filter(self, device_ids):
        """Fetch security group data for *device_ids* and program firewall."""
        if not device_ids:
            return
        # use tap as a prefix because ml2 is hard-coded to expect that
        device_ids = [d.replace('qvo', 'tap') for d in device_ids]
        LOG.info(_LI("Preparing filters for devices %s"), device_ids)
        if self.use_enhanced_rpc:
            # Enhanced RPC returns rules plus group membership in one call.
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, list(device_ids))
            devices = devices_info['devices']
            security_groups = devices_info['security_groups']
            security_group_member_ips = devices_info['sg_member_ips']
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, list(device_ids))

        # defer_apply batches all firewall changes into a single apply.
        with self.firewall.defer_apply():
            for device in devices.values():
                # strip tap back off since prepare_port_filter will apply it
                device['device'] = device['device'].replace('tap', '')
                self.firewall.prepare_port_filter(device)
            if self.use_enhanced_rpc:
                LOG.debug("Update security group information for ports %s",
                          devices.keys())
                self._update_security_group_info(
                    security_groups, security_group_member_ips)


class RestProxyAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """L2 agent that polls the local switch and applies security groups."""

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, integ_br, polling_interval, vs='ovs'):
        """Wire up RPC, pick the bridge implementation, seed agent state.

        :param integ_br: name of the integration bridge to monitor
        :param polling_interval: seconds between port polling iterations
        :param vs: virtual switch type, 'ivs' or anything else for OVS
        """
        super(RestProxyAgent, self).__init__()
        self.polling_interval = polling_interval
        self._setup_rpc()
        self.sg_agent = FilterDeviceIDMixin(self.context, self.sg_plugin_rpc)
        if vs == 'ivs':
            self.int_br = IVSBridge(integ_br)
        else:
            self.int_br = ovs_lib.OVSBridge(integ_br)
        # use_call forces the first state report to be a blocking call.
        self.use_call = True
        self.agent_state = {
            'binary': 'neutron-bsn-agent',
            'host': cfg.CONF.host,
            'topic': q_const.L2_AGENT_TOPIC,
            'configurations': {},
            'agent_type': "BSN IVS Agent",
            'start_flag': True}

    def _report_state(self):
        # How many devices are likely used by a VM
        try:
            self.state_rpc.report_state(self.context,
                                        self.agent_state,
                                        self.use_call)
            # After the first successful report, switch to casts and drop
            # the start flag so the server doesn't treat us as restarted.
            self.use_call = False
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))

    def _setup_rpc(self):
        """Create the RPC clients/consumers and start the heartbeat loop."""
        self.topic = topics.AGENT
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.context = q_context.get_admin_context_without_session()
        self.endpoints = [self]
        # Subscribe to port and security-group update notifications.
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def port_update(self, context, **kwargs):
        """RPC callback: refresh the firewall when a local port changes."""
        LOG.debug("Port update received")
        port = kwargs.get('port')
        vif_port = self.int_br.get_vif_port_by_id(port['id'])
        if not vif_port:
            LOG.debug("Port %s is not present on this host.", port['id'])
            return

        LOG.debug("Port %s found. Refreshing firewall.", port['id'])
        if ext_sg.SECURITYGROUPS in port:
            self.sg_agent.refresh_firewall()

    def _update_ports(self, registered_ports):
        # Diff the bridge's current port set against the last-seen set;
        # returns None when nothing changed.
        ports = self.int_br.get_vif_port_set()
        if ports == registered_ports:
            return
        added = ports - registered_ports
        removed = registered_ports - ports
        return {'current': ports,
                'added': added,
                'removed': removed}

    def _process_devices_filter(self, port_info):
        # Apply / remove firewall filters for the diffed port sets.
        if 'added' in port_info:
            self.sg_agent.prepare_devices_filter(port_info['added'])
        if 'removed' in port_info:
            self.sg_agent.remove_devices_filter(port_info['removed'])

    def daemon_loop(self):
        """Poll for port changes forever, pacing by polling_interval."""
        ports = set()

        while True:
            start = time.time()
            try:
                port_info = self._update_ports(ports)
                if port_info:
                    LOG.debug("Agent loop has new device")
                    self._process_devices_filter(port_info)
                    ports = port_info['current']
            except Exception:
                # Keep the loop alive; the next iteration retries the diff.
                LOG.exception(_LE("Error in agent event loop"))

            # Sleep only the remainder of the interval.
            elapsed = max(time.time() - start, 0)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})


def main():
    """Agent entry point: load config, build the agent, loop forever."""
    config.init(sys.argv[1:])
    config.setup_logging()
    pl_config.register_config()

    agent_conf = cfg.CONF.RESTPROXYAGENT
    bsnagent = RestProxyAgent(agent_conf.integration_bridge,
                              agent_conf.polling_interval,
                              agent_conf.virtual_switch_type)
    bsnagent.daemon_loop()
    sys.exit(0)

if __name__ == "__main__":
    main()

+ 0
- 133
bsnstacklib/plugins/bigswitch/config.py View File

@@ -1,133 +0,0 @@
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
This module manages configuration options
"""

from oslo_config import cfg

from neutron.agent.common import config as agconfig
from neutron.common import utils
from neutron.extensions import portbindings

# Fix: several multi-line help strings were concatenated without a
# separating space ("Only oneserver", "wish todeploy") and one had a
# doubled space ("against  the"); corrected below.

# Controller connection / synchronization options ([RESTPROXY]).
restproxy_opts = [
    cfg.ListOpt('servers', default=['localhost:8800'],
                help=_("A comma separated list of Big Switch or Floodlight "
                       "servers and port numbers. The plugin proxies the "
                       "requests to the Big Switch/Floodlight server, "
                       "which performs the networking configuration. Only "
                       "one server is needed per deployment, but you may "
                       "wish to deploy multiple servers to support "
                       "failover.")),
    cfg.StrOpt('server_auth', secret=True,
               help=_("The username and password for authenticating against "
                      "the Big Switch or Floodlight controller.")),
    cfg.BoolOpt('server_ssl', default=True,
                help=_("If True, Use SSL when connecting to the Big Switch or "
                       "Floodlight controller.")),
    cfg.BoolOpt('ssl_sticky', default=True,
                help=_("Trust and store the first certificate received for "
                       "each controller address and use it to validate future "
                       "connections to that address.")),
    cfg.BoolOpt('no_ssl_validation', default=False,
                help=_("Disables SSL certificate validation for controllers")),
    cfg.BoolOpt('cache_connections', default=True,
                help=_("Re-use HTTP/HTTPS connections to the controller.")),
    cfg.StrOpt('ssl_cert_directory',
               default='/etc/neutron/plugins/bigswitch/ssl',
               help=_("Directory containing ca_certs and host_certs "
                      "certificate directories.")),
    cfg.BoolOpt('sync_data', default=False,
                help=_("Sync data on connect")),
    cfg.BoolOpt('auto_sync_on_failure', default=True,
                help=_("If neutron fails to create a resource because "
                       "the backend controller doesn't know of a dependency, "
                       "the plugin automatically triggers a full data "
                       "synchronization to the controller.")),
    cfg.IntOpt('consistency_interval', default=60,
               help=_("Time between verifications that the backend controller "
                      "database is consistent with Neutron. (0 to disable)")),
    cfg.IntOpt('server_timeout', default=10,
               help=_("Maximum number of seconds to wait for proxy request "
                      "to connect and complete.")),
    cfg.IntOpt('thread_pool_size', default=4,
               help=_("Maximum number of threads to spawn to handle large "
                      "volumes of port creations.")),
    cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(),
               deprecated_name='quantum_id',
               help=_("User defined identifier for this Neutron deployment")),
    cfg.BoolOpt('add_meta_server_route', default=True,
                help=_("Flag to decide if a route to the metadata server "
                       "should be injected into the VM")),
]
# Tenant router-rule options ([ROUTER]).
router_opts = [
    cfg.MultiStrOpt('tenant_default_router_rule', default=['*:any:any:permit'],
                    help=_("The default router rules installed in new tenant "
                           "routers. Repeat the config option for each rule. "
                           "Format is <tenant>:<source>:<destination>:<action>"
                           " Use an * to specify default for all tenants.")),
    cfg.IntOpt('max_router_rules', default=200,
               help=_("Maximum number of router rules")),
]
# Nova VIF-binding options ([NOVA]).
nova_opts = [
    cfg.StrOpt('vif_type', default='ivs',
               help=_("Virtual interface type to configure on "
                      "Nova compute nodes")),
]

VIF_TYPE_IVS = 'ivs'
VIF_TYPES = [
    portbindings.VIF_TYPE_UNBOUND,
    portbindings.VIF_TYPE_BINDING_FAILED,
    portbindings.VIF_TYPE_DISTRIBUTED,
    portbindings.VIF_TYPE_OVS,
    portbindings.VIF_TYPE_BRIDGE,
    portbindings.VIF_TYPE_OTHER,
    VIF_TYPE_IVS,
    'iovisor', 'vhostuser', 'dvs', '802.1qbg', '802.1qbh', 'hyperv',
    'midonet', 'ib_hostdev', 'hw_web', 'vrouter',
]

# Each VIF Type can have a list of nova host IDs that are fixed to that type
for i in VIF_TYPES:
    opt = cfg.ListOpt('node_override_vif_' + i, default=[],
                      help=_("Nova compute nodes to manually set VIF "
                             "type to %s") % i)
    nova_opts.append(opt)

# Add the vif types for reference later
nova_opts.append(cfg.ListOpt('vif_types',
                             default=VIF_TYPES,
                             help=_('List of allowed vif_type values.')))

# Security-group agent options ([RESTPROXYAGENT]).
agent_opts = [
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_('Name of integration bridge on compute '
                      'nodes used for security group insertion.')),
    cfg.IntOpt('polling_interval', default=5,
               help=_('Seconds between agent checks for port changes')),
    cfg.StrOpt('virtual_switch_type', default='ivs',
               help=_('Virtual switch type.'))
]


def register_config():
    """Register every Big Switch option group with oslo.config."""
    groups = ((restproxy_opts, "RESTPROXY"),
              (router_opts, "ROUTER"),
              (nova_opts, "NOVA"),
              (agent_opts, "RESTPROXYAGENT"))
    for opts, group in groups:
        cfg.CONF.register_opts(opts, group)
    # include for report_interval
    cfg.CONF.register_opts(agconfig.AGENT_STATE_OPTS, "AGENT")
    agconfig.register_root_helper(cfg.CONF)

+ 0
- 0
bsnstacklib/plugins/bigswitch/db/__init__.py View File


+ 0
- 219
bsnstacklib/plugins/bigswitch/db/consistency_db.py View File

@@ -1,219 +0,0 @@
# Copyright 2014, Big Switch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import re
import string
import time

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session
from oslo_log import log as logging
import sqlalchemy as sa

from neutron.i18n import _LI, _LW
from neutron.plugins.bigswitch.db import consistency_db

LOG = logging.getLogger(__name__)
# Maximum time in seconds to wait for a single record lock to be released
# NOTE: The total time waiting may exceed this if there are multiple servers
# waiting for the same lock
MAX_LOCK_WAIT_TIME = 15
# Re-export the upstream model so callers can import it from this module.
ConsistencyHash = consistency_db.ConsistencyHash


def setup_db():
    '''Helper to register models for unit tests'''
    if HashHandler._FACADE is None:
        HashHandler._FACADE = session.EngineFacade.from_config(
            cfg.CONF, sqlite_fk=True)
    engine = HashHandler._FACADE.get_engine()
    ConsistencyHash.metadata.create_all(engine)


def clear_db():
    '''Helper to unregister models and clear engine in unit tests'''
    facade = HashHandler._FACADE
    if not facade:
        return
    ConsistencyHash.metadata.drop_all(facade.get_engine())
    HashHandler._FACADE = None


class HashHandler(object):
    '''
    A wrapper object to keep track of the session between the read
    and the update operations.

    This class needs an SQL engine completely independent of the main
    neutron connection so rollbacks from consistency hash operations don't
    affect the parent sessions.

    Locking protocol: the hash column itself carries the lock by being
    prefixed with "LOCKED_BY[<random id>]". Ownership changes are made
    with compare-and-swap style UPDATEs so concurrent servers race safely.
    '''
    _FACADE = None

    def __init__(self, hash_id='1'):
        if HashHandler._FACADE is None:
            HashHandler._FACADE = session.EngineFacade.from_config(
                cfg.CONF, sqlite_fk=True)
        self.hash_id = hash_id
        self.session = HashHandler._FACADE.get_session(autocommit=True,
                                                       expire_on_commit=False)
        # Per-instance random ID identifies this server in the lock marker.
        self.random_lock_id = ''.join(random.choice(string.ascii_uppercase
                                                    + string.digits)
                                      for _ in range(10))
        self.lock_marker = 'LOCKED_BY[%s]' % self.random_lock_id

    def _get_current_record(self):
        # Returns the current ConsistencyHash row (fresh from DB) or None.
        with self.session.begin(subtransactions=True):
            res = (self.session.query(ConsistencyHash).
                   filter_by(hash_id=self.hash_id).first())
            if res:
                self.session.refresh(res)  # make sure latest is loaded from db
            return res

    def _insert_empty_hash_with_lock(self):
        # try to insert a new hash, return False on conflict
        try:
            with self.session.begin(subtransactions=True):
                res = ConsistencyHash(hash_id=self.hash_id,
                                      hash=self.lock_marker)
                self.session.add(res)
                return True
        except db_exc.DBDuplicateEntry:
            # another server created a new record at the same time
            return False

    def _optimistic_update_hash_record(self, old_record, new_hash):
        # Optimistic update strategy. Returns True if successful, else False.
        # The WHERE clause on the old hash value makes this a compare-and-swap.
        query = sa.update(ConsistencyHash.__table__).values(hash=new_hash)
        query = query.where(ConsistencyHash.hash_id == old_record.hash_id)
        query = query.where(ConsistencyHash.hash == old_record.hash)
        try:
            with self._FACADE.get_engine().begin() as conn:
                result = conn.execute(query)
        except db_exc.DBDeadlock:
            # mysql can encounter internal deadlocks servicing a query with
            # multiple where criteria. treat it the same as not being able
            # to update the record so it will be tried again
            return False
        # We need to check update row count in case another server is
        # doing this at the same time. Only one will succeed, the other will
        # not update any rows.
        return result.rowcount != 0

    def _get_lock_owner(self, record):
        # Extract the random lock ID from a "LOCKED_BY[...]" prefix, if any.
        matches = re.findall(r"^LOCKED_BY\[(\w+)\]", record)
        if not matches:
            return None
        return matches[0]

    def read_for_update(self):
        # An optimistic locking strategy with a timeout to avoid using a
        # consistency hash while another server is using it. This will
        # not return until a lock is acquired either normally or by stealing
        # it after an individual ID holds it for greater than
        # MAX_LOCK_WAIT_TIME.
        lock_wait_start = None
        last_lock_owner = None
        while True:
            res = self._get_current_record()
            if not res:
                # no current entry. try to insert to grab lock
                if not self._insert_empty_hash_with_lock():
                    # A failed insert after missing current record means
                    # a concurrent insert occured. Start process over to
                    # find the new record.
                    LOG.debug("Concurrent record inserted. Retrying.")
                    time.sleep(0.25)
                    continue
                # The empty hash was successfully inserted with our lock
                return ''

            current_lock_owner = self._get_lock_owner(res.hash)
            if not current_lock_owner:
                # no current lock. attempt to lock
                new = self.lock_marker + res.hash
                if not self._optimistic_update_hash_record(res, new):
                    # someone else beat us to it. restart process to wait
                    # for new lock ID to be removed
                    LOG.debug(
                        "Failed to acquire lock. Restarting lock wait. "
                        "Previous hash: %(prev)s. Attempted update: %(new)s",
                        {'prev': res.hash, 'new': new})
                    time.sleep(0.25)
                    continue
                # successfully got the lock
                return res.hash

            LOG.debug("This request's lock ID is %(this)s. "
                      "DB lock held by %(that)s",
                      {'this': self.random_lock_id,
                       'that': current_lock_owner})

            if current_lock_owner == self.random_lock_id:
                # no change needed, we already have the table lock due to
                # previous read_for_update call.
                # return hash with lock tag stripped off for use in a header
                return res.hash.replace(self.lock_marker, '')

            if current_lock_owner != last_lock_owner:
                # The owner changed since the last iteration, but it
                # wasn't to us. Reset the counter. Log if not
                # first iteration.
                if lock_wait_start:
                    LOG.debug("Lock owner changed from %(old)s to %(new)s "
                              "while waiting to acquire it.",
                              {'old': last_lock_owner,
                               'new': current_lock_owner})
                lock_wait_start = time.time()
                last_lock_owner = current_lock_owner
            if time.time() - lock_wait_start > MAX_LOCK_WAIT_TIME:
                # the lock has been held too long, steal it
                LOG.warning(_LW("Gave up waiting for consistency DB "
                                "lock, trying to take it. "
                                "Current hash is: %s"), res.hash)
                new_db_value = res.hash.replace(current_lock_owner,
                                                self.random_lock_id)
                if self._optimistic_update_hash_record(res, new_db_value):
                    # NOTE(review): new_db_value is never a substring of
                    # res.hash, so this replace() is a no-op and the value
                    # returned still carries the previous owner's
                    # LOCKED_BY[...] prefix — looks like a bug; confirm
                    # intent was to strip the lock marker instead.
                    return res.hash.replace(new_db_value, '')
                LOG.info(_LI("Failed to take lock. Another process updated "
                             "the DB first."))

    def clear_lock(self):
        # Remove this server's lock marker from the hash record, if present.
        LOG.debug("Clearing hash record lock of id %s", self.random_lock_id)
        with self.session.begin(subtransactions=True):
            res = (self.session.query(ConsistencyHash).
                   filter_by(hash_id=self.hash_id).first())
            if not res:
                LOG.warning(_LW("Hash record already gone, no lock to clear."))
                return
            if not res.hash.startswith(self.lock_marker):
                # if these are frequent the server is too slow
                LOG.warning(_LW("Another server already removed the lock. %s"),
                            res.hash)
                return
            res.hash = res.hash.replace(self.lock_marker, '')

    def put_hash(self, hash):
        # Store a new hash value (insert-or-update the single row).
        hash = hash or ''
        with self.session.begin(subtransactions=True):
            res = (self.session.query(ConsistencyHash).
                   filter_by(hash_id=self.hash_id).first())
            if res:
                res.hash = hash
            else:
                conhash = ConsistencyHash(hash_id=self.hash_id, hash=hash)
                self.session.merge(conhash)
        LOG.debug("Consistency hash for group %(hash_id)s updated "
                  "to %(hash)s", {'hash_id': self.hash_id, 'hash': hash})

+ 0
- 52
bsnstacklib/plugins/bigswitch/db/porttracker_db.py View File

@@ -1,52 +0,0 @@
# Copyright 2013, Big Switch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging

from neutron.api.v2 import attributes
from neutron.i18n import _LW

LOG = logging.getLogger(__name__)


def get_port_hostid(context, port_id):
    """Return the host bound to *port_id*, or False when no binding exists."""
    # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db
    # relational table generation until one of the functions is called.
    from neutron.db import portbindings_db
    with context.session.begin(subtransactions=True):
        binding = (context.session.query(portbindings_db.PortBindingPort)
                   .filter_by(port_id=port_id)
                   .first())
        if binding is None:
            return False
        return binding.host


def put_port_hostid(context, port_id, host):
    """Record (merge) the host binding for a port; no-op on bad input."""
    # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db
    # relational table generation until one of the functions is called.
    from neutron.db import portbindings_db
    if not attributes.is_attr_set(host):
        LOG.warning(_LW("No host_id in port request to track port location."))
        return
    if port_id == '':
        LOG.warning(_LW("Received an empty port ID for host_id '%s'"), host)
        return
    if host == '':
        LOG.debug("Received an empty host_id for port '%s'", port_id)
        return
    LOG.debug("Logging port %(port)s on host_id %(host)s",
              {'port': port_id, 'host': host})
    binding = portbindings_db.PortBindingPort(port_id=port_id, host=host)
    with context.session.begin(subtransactions=True):
        context.session.merge(binding)

+ 0
- 24
bsnstacklib/plugins/bigswitch/dhcp_driver.py View File

@@ -1,24 +0,0 @@
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.linux import dhcp


class DnsmasqWithMetaData(dhcp.Dnsmasq):
    """Dnsmasq DHCP driver that always provisions the metadata service."""

    @classmethod
    def should_enable_metadata(cls, conf, network):
        # Always True: the fabric router does not support hijacking the
        # metadata requests, so the DHCP agent must serve them itself.
        return True

+ 0
- 0
bsnstacklib/plugins/bigswitch/extensions/__init__.py View File


+ 0
- 140
bsnstacklib/plugins/bigswitch/extensions/routerrule.py View File

@@ -1,140 +0,0 @@
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging

from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception


LOG = logging.getLogger(__name__)


# Router Rules Exceptions
class InvalidRouterRules(nexception.InvalidInput):
    """A router rule failed format validation."""

    message = _("Invalid format for router rules: %(rule)s, %(reason)s")


class RulesExhausted(nexception.BadRequest):
    """A rules update would push the router past its rule quota."""

    message = _("Unable to complete rules update for %(router_id)s. "
                "The number of rules exceeds the maximum %(quota)s.")


def convert_to_valid_router_rules(data):
    """Validate and normalize router rules, raising InvalidInput on error.

    Each rule dict is normalized in place ('nexthops' is defaulted to a
    list, splitting '+'-joined strings when needed) and then validated.

    Example argument = [{'source': 'any', 'destination': 'any',
                         'action':'deny'},
                        {'source': '1.1.1.1/32', 'destination': 'external',
                         'action':'permit',
                         'nexthops': ['1.1.1.254', '1.1.1.253']}
                        ]
    """
    any_cidr = '0.0.0.0/0'
    wildcard_names = ['any', 'external']
    if not isinstance(data, list):
        emsg = _("Invalid data format for router rule: '%s'") % data
        LOG.debug(emsg)
        raise nexception.InvalidInput(error_message=emsg)
    _validate_uniquerules(data)
    required_keys = ['source', 'destination', 'action']
    validated = []
    for rule in data:
        hops = rule.get('nexthops', [])
        if not isinstance(hops, list):
            hops = hops.split('+')
        rule['nexthops'] = hops

        # 'any'/'external' stand in for the all-v4 CIDR during validation.
        src = any_cidr if rule['source'] in wildcard_names else rule['source']
        dst = (any_cidr if rule['destination'] in wildcard_names
               else rule['destination'])

        problems = [m for m in (
            attr._verify_dict_keys(required_keys, rule, False),
            attr._validate_subnet(dst),
            attr._validate_subnet(src),
            _validate_nexthops(rule['nexthops']),
            _validate_action(rule['action']),
        ) if m]
        if problems:
            LOG.debug(problems)
            raise nexception.InvalidInput(error_message=problems)
        validated.append(rule)
    return validated


def _validate_nexthops(nexthops):
seen = []
for ip in nexthops:
msg = attr._validate_ip_address(ip)
if ip in seen:
msg = _("Duplicate nexthop in rule '%s'") % ip
seen.append(ip)
if msg:
return msg


def _validate_action(action):
if action not in ['permit', 'deny']:
return _("Action must be either permit or deny."
" '%s' was provided") % action


def _validate_uniquerules(rules):
pairs = []
for r in rules:
if 'source' not in r or 'destination' not in r:
continue
pairs.append((r['source'], r['destination']))

if len(set(pairs)) != len(pairs):
error = _("Duplicate router rules (src,dst) found '%s'") % pairs
LOG.debug(error)
raise nexception.InvalidInput(error_message=error)


class Routerrule(object):
    """Descriptor for the 'router_rules' Neutron API extension."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Neutron Router Rule"

    @classmethod
    def get_alias(cls):
        """Short alias used in plugin supported_extension_aliases lists."""
        return "router_rules"

    @classmethod
    def get_description(cls):
        """One-line description of what the extension adds."""
        return "Router rule configuration for L3 router"

    @classmethod
    def get_namespace(cls):
        """XML namespace URL for the extension."""
        return "http://docs.openstack.org/ext/neutron/routerrules/api/v1.0"

    @classmethod
    def get_updated(cls):
        """Timestamp of the last extension definition change."""
        return "2013-05-23T10:00:00-00:00"

    def get_extended_resources(self, version):
        """Return extra attributes for API v2.0; nothing otherwise."""
        if version != "2.0":
            return {}
        return EXTENDED_ATTRIBUTES_2_0

# Attribute Map
# Extends the v2.0 'routers' resource with a 'router_rules' attribute.
# Rules may only be set via PUT (no POST), are normalized/validated by
# convert_to_valid_router_rules, and are visible in API responses.
EXTENDED_ATTRIBUTES_2_0 = {
    'routers': {
        'router_rules': {'allow_post': False, 'allow_put': True,
                         'convert_to': convert_to_valid_router_rules,
                         'is_visible': True,
                         'default': attr.ATTR_NOT_SPECIFIED},
    }
}

+ 0
- 336
bsnstacklib/plugins/bigswitch/l3_router_plugin.py View File

@@ -1,336 +0,0 @@
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Neutron L3 REST Proxy Plugin for Big Switch and Floodlight Controllers.

This plugin handles the L3 router calls for Big Switch Floodlight deployments.
It is intended to be used in conjunction with the Big Switch ML2 driver or the
Big Switch core plugin.
"""

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils

from neutron.api import extensions as neutron_extensions
from neutron.common import exceptions
from neutron.common import log
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.i18n import _LE
from neutron import manager
from neutron.plugins.common import constants

from bsnstacklib.plugins.bigswitch import extensions
from bsnstacklib.plugins.bigswitch import plugin as cplugin
from bsnstacklib.plugins.bigswitch import routerrule_db
from bsnstacklib.plugins.bigswitch import servermanager

# number of fields in a router rule string
ROUTER_RULE_COMPONENT_COUNT = 5
LOG = logging.getLogger(__name__)
# Decorator re-exported from the core plugin; presumably makes the request
# context available to the REST server pool — see cplugin for its contract.
put_context_in_serverpool = cplugin.put_context_in_serverpool
# Error text raised/logged when the BCF controller reports no floating-IP
# capability, making this L3 plugin unusable against that controller.
BCF_CAPABILITY_L3_PLUGIN_MISS_MATCH = ("BCF does "
                                       "not have floatingip capability, should not "
                                       "deploy BSN l3 router plugin")


class L3RestProxy(cplugin.NeutronRestProxyV2Base,
routerrule_db.RouterRule_db_mixin):

supported_extension_aliases = ["router", "router_rules"]
# This is a flag to tell that L3 plugin is BSN.
bsn = True

@staticmethod
def get_plugin_type():
return constants.L3_ROUTER_NAT

@staticmethod
def get_plugin_description():
return _("L3 Router Service Plugin for Big Switch fabric")

    def __init__(self):
        """Register Big Switch API extensions and acquire the server pool."""
        # Include the Big Switch Extensions path in the api_extensions
        # (must happen before super().__init__ so they are discoverable)
        neutron_extensions.append_api_extensions_path(extensions.__path__)
        super(L3RestProxy, self).__init__()
        # Shared singleton managing REST connections to the controllers.
        self.servers = servermanager.ServerPool.get_instance()

@put_context_in_serverpool
@log.log
def create_router(self, context, router):
self._warn_on_state_status(router['router'])

tenant_id = self._get_tenant_id_for_create(context, router["router"])

# set default router rules
rules = self._get_tenant_default_router_rules(tenant_id)
router['router']['router_rules'] = rules

with context.session.begin(subtransactions=True):
# create router in DB
new_router = super(L3RestProxy, self).create_router(context,
router)