Retire Packaging Deb project repos
This commit is part of a series to retire the Packaging Deb project. Step 2 is to remove all content from the project repos, replacing it with a README that explains where to find the ongoing work and how to recover the repo if it is needed again at some future point (as in https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: I161dc093726c3c8cce5a7057282743ca4908eb57
This commit is contained in:
parent
88541fa2f5
commit
449a1f66c9
@@ -1,7 +0,0 @@
[run]
branch = True
source = networking_odl
omit = networking_odl/tests/*

[report]
ignore_errors = True
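The deleted coverage configuration above is what the coverage job consumes. A minimal usage sketch, assuming the standard coverage.py/tox tooling described later in TESTING.rst (nothing here is specific to this commit)::

    tox -e cover              # run the unit tests under coverage with the [run] settings above
    coverage report           # honours ignore_errors from [report], if the data file is left in the tree
    coverage html -d covhtml/ # covhtml/ is one of the directories ignored by .gitignore below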
.gitignore (vendored), 32 lines removed
@@ -1,32 +0,0 @@
AUTHORS
build/*
build-stamp
ChangeLog
cover/
covhtml/
dist/
doc/build
*.DS_Store
*.pyc
etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample
networking_odl.egg-info/
networking_odl/vcsversion.py
networking_odl/versioninfo
pbr*.egg/
run_tests.err.log
run_tests.log
# Files created by the releasenotes build
releasenotes/build
setuptools*.egg/
subunit.log
*.mo
*.sw?
*~
.vagrant
/.*
!/.coveragerc
!/.gitignore
!/.gitreview
!/.mailmap
!/.pylintrc
!/.testr.conf
@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/networking-odl.git
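A gerrit stanza like the one above is what the git-review tool reads when submitting patches. A minimal sketch of the standard workflow (plain git-review usage, nothing repo-specific beyond the values shown)::

    pip install git-review
    git checkout -b my-topic
    # ...edit, git add, git commit...
    git review -s    # sets up the gerrit remote from the stanza above
    git review       # pushes the change to review.openstack.org:29418 for review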
.mailmap, 11 lines removed
@@ -1,11 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
lawrancejing <lawrancejing@gmail.com> <liuqing@windawn.com>
Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
Kun Huang <gareth@unitedstack.com> <academicgareth@gmail.com>
Zhenguo Niu <zhenguo@unitedstack.com> <Niu.ZGlinux@gmail.com>
Isaku Yamahata <isaku.yamahata@intel.com> <isaku.yamahata@gmail.com>
Isaku Yamahata <isaku.yamahata@intel.com> <yamahata@private.email.ne.jp>
Morgan Fainberg <morgan.fainberg@gmail.com> <m@metacloud.com>
.pylintrc, 112 lines removed
@@ -1,112 +0,0 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
#
ignore=.git,tests

[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
 import-error,
# "I" Informational noise
 locally-disabled,
# "E" Error for important programming issues (likely bugs)
 access-member-before-definition,
 no-member,
 no-method-argument,
 no-self-argument,
# "W" Warnings for stylistic problems or minor programming issues
 abstract-method,
 arguments-differ,
 attribute-defined-outside-init,
 bad-builtin,
 bad-indentation,
 broad-except,
 cyclic-import,
 dangerous-default-value,
 deprecated-lambda,
 expression-not-assigned,
 fixme,
 global-statement,
 no-init,
 non-parent-init-called,
 protected-access,
 redefined-builtin,
 redefined-outer-name,
 signature-differs,
 star-args,
 super-init-not-called,
 unpacking-non-sequence,
 unused-argument,
 unused-import,
 unused-variable,
# "C" Coding convention violations
 bad-continuation,
 invalid-name,
 missing-docstring,
 superfluous-parens,
# "R" Refactor recommendations
 abstract-class-little-used,
 abstract-class-not-used,
 duplicate-code,
 interface-not-implemented,
 no-self-use,
 too-few-public-methods,
 too-many-ancestors,
 too-many-arguments,
 too-many-branches,
 too-many-instance-attributes,
 too-many-lines,
 too-many-locals,
 too-many-public-methods,
 too-many-return-statements,
 too-many-statements

[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
# _ is used by our localization
additional-builtins=_

[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=

[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use oslo_serialization.jsonutils
 json

[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems

[REPORTS]
# Tells whether to display a full report or only the messages
reports=no
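For reference, the rcfile above is consumed by the usual pylint invocation; a short sketch (pylint itself is not touched by this commit)::

    pip install pylint
    pylint --rcfile=.pylintrc networking_odl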
@@ -1,8 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
             OS_LOG_CAPTURE=1 \
             ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./networking_odl/tests/unit} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
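testr expands the test_command template above when driving the suite. A rough sketch of the equivalent manual run, assuming testrepository is installed (this mirrors what tox sets up rather than adding anything new)::

    pip install testrepository
    testr init
    testr run            # expands test_command above; add --parallel for parallel workers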
@@ -1,13 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps documented at:
http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:
http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/networking-odl
HACKING.rst, 33 lines removed
@@ -1,33 +0,0 @@
Neutron Style Commandments
==========================

- Step 1: Read the OpenStack Style Commandments
  https://docs.openstack.org/hacking/latest/
- Step 2: Read on

Neutron Specific Commandments
-----------------------------

- [N319] Validate that debug level logs are not translated
- [N320] Validate that LOG messages, except debug ones, have translations
- [N321] Validate that jsonutils module is used instead of json
- [N322] We do not use @authors tags in source files. We have git to track
  authorship.
- [N323] Detect common errors with assert_called_once_with

Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

All unittest classes must ultimately inherit from testtools.TestCase. In the
Neutron test suite, this should be done by inheriting from
neutron.tests.base.BaseTestCase.

All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.
LICENSE, 176 lines removed
@@ -1,176 +0,0 @@
(The file contained the unmodified standard Apache License, Version 2.0, January 2004 text, Sections 1 through 9; see http://www.apache.org/licenses/ for the full text.)
README (new file), 14 lines added
@@ -0,0 +1,14 @@
This project is no longer maintained.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".

For ongoing work on maintaining OpenStack packages in the Debian
distribution, please see the Debian OpenStack packaging team at
https://wiki.debian.org/OpenStack/.

For any further questions, please email
openstack-dev@lists.openstack.org or join #openstack-dev on
Freenode.
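A short sketch of the recovery step the README describes, for anyone who needs the retired content back (plain git commands; the clone URL placeholder stands in for this repo's actual URL)::

    git clone <retired-repo-url>
    cd <retired-repo>
    git log --oneline -n 2    # the retirement commit and its parent (88541fa2f5)
    git checkout HEAD^1       # restores the tree as it was before retirement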
README.rst, 30 lines removed
@@ -1,30 +0,0 @@
==========================
Welcome to networking-odl!
==========================

.. Team and repository tags

.. image:: http://governance.openstack.org/badges/networking-odl.svg
    :target: http://governance.openstack.org/reference/tags/index.html

.. Change things from this point on

Summary
-------

OpenStack networking-odl is a library of drivers and plugins that integrates
the OpenStack Neutron API with the OpenDaylight backend. For example, it
provides an ML2 driver and an L3 plugin that relay OpenStack Neutron L2
and L3 resource API calls to the OpenDaylight backend.

To report or look up bugs in networking-odl, use:
https://bugs.launchpad.net/networking-odl

Any new code submission or proposal must follow the development
guidelines detailed in HACKING.rst; for further details see:
https://docs.openstack.org/networking-odl/latest/

The OpenDaylight homepage:
https://www.opendaylight.org/
TESTING.rst, 195 lines removed
@@ -1,195 +0,0 @@
Testing Networking-odl + neutron
================================

Overview
--------

The unit tests (networking_odl/tests/unit/) are meant to cover as much code as
possible and should be executed without the service running. They are
designed to test the various pieces of the neutron tree to make sure
any new changes don't break existing functionality.

# TODO (Manjeet): Update functional testing doc.

Development process
-------------------

It is expected that any new changes that are proposed for merge
come with tests for that feature or code area. Ideally any bug
fixes that are submitted also have tests to prove that they stay
fixed! In addition, before proposing for merge, all of the
current tests should be passing.

Virtual environments
~~~~~~~~~~~~~~~~~~~~

Testing OpenStack projects, including Neutron, is made easier with `DevStack <https://git.openstack.org/cgit/openstack-dev/devstack>`_.

Create a machine (such as a VM or Vagrant box) running a distribution supported
by DevStack and install DevStack there. For example, there is a Vagrant script
for DevStack at https://github.com/bcwaldon/vagrant_devstack.

.. note::

   If you prefer not to use DevStack, you can still check out source code on
   your local machine and develop from there.


Running unit tests
------------------

There are two mechanisms for running tests: tox and nose. Before submitting
a patch for review, you should always ensure all tests pass; a tox run is
triggered by the jenkins gate executed on gerrit for each patch pushed for
review.

With these mechanisms you can either run the tests in the standard
environment or create a virtual environment to run them in.

By default, after running all of the tests, any pep8 errors
found in the tree will be reported.


With `nose`
~~~~~~~~~~~

You can use `nose`_ to run individual tests, as well as for debugging
portions of your code::

    source .venv/bin/activate
    pip install nose
    nosetests

There are disadvantages to running nose - the tests are run sequentially, so
race condition bugs will not be triggered, and the full test suite will
take significantly longer than tox & testr. The upside is that testr has
some rough edges when it comes to diagnosing errors and failures, and there is
no easy way to set a breakpoint in the Neutron code and enter an
interactive debugging session while using testr.

.. _nose: https://nose.readthedocs.org/en/latest/index.html

With `tox`
~~~~~~~~~~

Networking-odl, like other OpenStack projects, uses `tox`_ for managing the virtual
environments for running test cases. It uses `Testr`_ for managing the running
of the test cases.

Tox handles the creation of a series of `virtualenvs`_ that target specific
versions of Python (2.6, 2.7, 3.3, etc).

Testr handles the parallel execution of series of test cases as well as
the tracking of long-running tests and other things.

Running unit tests is as easy as executing this in the root directory of the
Neutron source code::

    tox

Running the syntax and style checks::

    tox -e pep8

For more information on the standard Tox-based test infrastructure used by
OpenStack and how to do some common test/debugging procedures with Testr,
see this wiki page:
https://wiki.openstack.org/wiki/Testr

.. _Testr: https://wiki.openstack.org/wiki/Testr
.. _tox: http://tox.readthedocs.org/en/latest/
.. _virtualenvs: https://pypi.python.org/pypi/virtualenv

Tests can also be debugged by adding pdb breakpoints. Normally, if you add
a breakpoint and just run the tests with the normal flags, they will end up
failing. There is a debug environment you can use after adding pdb breakpoints
to the tests.

Set breakpoints in your test code and run::

    tox -e debug networking_odl.tests.unit.db.test_db.DbTestCase.test_validate_updates_same_object_uuid

The package oslotest was used to enable debugging in the tests. For more
information see the link:
https://docs.openstack.org/oslotest/latest/user/features.html


Running individual tests
~~~~~~~~~~~~~~~~~~~~~~~~

For running individual test modules or cases, you just need to pass
the dot-separated path to the module you want as an argument to it.

For executing a specific test case, specify the name of the test case
class separating it from the module path with a colon.

For example, the following would run only the Testodll3 tests from
networking_odl/tests/unit/l3/test_odl_l3.py ::

    $ tox -e py27 networking_odl.tests.unit.l3.test_l3_odl.Testodll3

Adding more tests
~~~~~~~~~~~~~~~~~

There might not be full coverage yet. New patches that add missing tests
are always welcome.

To get a grasp of the areas where tests are needed, you can check
current coverage by running::

    $ tox -e cover

Debugging
---------

It's possible to debug tests in a tox environment::

    $ tox -e venv -- python -m testtools.run [test module path]

Tox-created virtual environments (venv's) can also be activated
after a tox run and reused for debugging::

    $ tox -e venv
    $ . .tox/venv/bin/activate
    $ python -m testtools.run [test module path]

Tox packages and installs the neutron source tree in a given venv
on every invocation, but if modifications need to be made between
invocations (e.g. adding more pdb statements), it is recommended
that the source tree be installed in the venv in editable mode::

    # run this only after activating the venv
    $ pip install --editable .

Editable mode ensures that changes made to the source tree are
automatically reflected in the venv, and that such changes are not
overwritten during the next tox run.

Running functional tests
------------------------
Neutron defines different classes of test cases. One of them is the functional
test, which requires a pre-configured environment but is lighter than running
a full DevStack or OpenStack deployment.
For definitions of functional tests, please refer to:
https://docs.openstack.org/neutron/latest/contributor/index.html

A script is provided to set up the environment.
First, make sure you have the latest version of pip::

    # ensure you have the latest version of pip
    # for example on ubuntu
    $ sudo apt-get install python-pip
    $ sudo pip install --upgrade pip

And then run the functional tests as follows::

    # assuming devstack is set up with networking-odl
    $ cd networking-odl
    $ ./tools/configure_for_func_testing.sh /path/to/devstack
    $ tox -e dsvm-functional


For setting up devstack, please refer to the neutron documentation:

* https://wiki.openstack.org/wiki/NeutronDevstack
* https://docs.openstack.org/neutron/latest/contributor/index.html
* https://docs.openstack.org/neutron/latest/contributor/testing/testing.html
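Putting the pieces of the guide above together, a typical local edit-and-test loop looks roughly like this (a sketch that only combines commands already shown above)::

    tox -e pep8
    tox -e py27 networking_odl.tests.unit.l3.test_l3_odl.Testodll3
    tox -e venv
    . .tox/venv/bin/activate
    pip install --editable .
    python -m testtools.run networking_odl.tests.unit.l3.test_l3_odl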
@@ -1,186 +0,0 @@
======================
Enabling in Devstack
======================

1. Download DevStack

2. Copy the sample local.conf over::

     cp devstack/local.conf.example local.conf

3. Optionally, to manually configure this:

   Add this repo as an external repository::

     > cat local.conf
     [[local|localrc]]
     enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl

4. Optionally, to enable support for OpenDaylight L3 router functionality,
   add the below::

     > cat local.conf
     [[local|localrc]]
     ODL_L3=True

   .. note::

      This is only relevant when using old netvirt (ovsdb based, default).

5. If you need to route the traffic out of the box (e.g. br-ex), set
   ODL_PROVIDER_MAPPINGS to map the physical provider network to the device
   mapping, as shown below::

     > cat local.conf
     [[local|localrc]]
     ODL_L3=True
     ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2}    # for old netvirt (ovsdb based)
     ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth2} # for new netvirt (vpnservice based)

6. Optionally, to enable support for OpenDaylight with LBaaS V2, add this::

     > cat local.conf
     [[local|localrc]]
     enable_plugin neutron-lbaas http://git.openstack.org/openstack/neutron-lbaas
     enable_service q-lbaasv2
     NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:opendaylight:networking_odl.lbaas.driver_v2.OpenDaylightLbaasDriverV2:default"

7. Run ``stack.sh``

8. Note: In a multi-node devstack environment, for each compute node you will
   want to add this to the local.conf file::

     > cat local.conf
     [[local|localrc]]
     enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
     ODL_MODE=compute

9. Note: On a node using a release of Open vSwitch provided from a source
   other than your Linux distribution, you have to skip the OVS installation
   step by setting *SKIP_OVS_INSTALL=True* in your local.conf. For example,
   when stacking together with the `networking-ovs-dpdk
   <https://github.com/openstack/networking-ovs-dpdk/>`_ Neutron plug-in, to
   avoid conflicts between openvswitch and ovs-dpdk add this to
   the local.conf file::

     > cat local.conf
     [[local|localrc]]
     enable_plugin networking-ovs-dpdk http://git.openstack.org/openstack/networking-ovs-dpdk
     enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
     SKIP_OVS_INSTALL=True

10. Note: Optionally, to use the new netvirt implementation
    (netvirt-vpnservice-openstack), add the following to the local.conf file
    (only the allinone topology is currently supported by devstack, since tunnel
    endpoints are not automatically configured). For tunnel configurations
    after loading devstack, please refer to this guide
    https://wiki.opendaylight.org/view/Netvirt:_L2Gateway_HowTo#Configuring_Tunnels::

      > cat local.conf
      [[local|localrc]]
      ODL_NETVIRT_KARAF_FEATURE=odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-vpnservice-openstack
      ODL_BOOT_WAIT_URL=restconf/operational/network-topology:network-topology/ # Workaround since netvirt:1 no longer exists in DS!

11. Note: To enable Quality Of Service (QoS) with the OpenDaylight backend,
    add the following lines in neutron.conf::

      > in /etc/neutron/neutron.conf
      service_plugins = qos, odl-router

    Enable the qos extension driver in the ML2 configuration::

      > in /etc/neutron/plugins/ml2/ml2_conf.ini
      extensions_drivers = qos, port_security

    Then restart the neutron service (q-svc).


12. Note: legacy netvirt specific options

    - OVS conntrack support

      :variable: ODL_LEGACY_NETVIRT_CONNTRACK By default it's False for
                 compatibility and version requirements.

    - version requirement

      :ODL version: Boron release or later.
                    (ODL legacy netvirt support is from Beryllium, but
                    networking-odl devstack supports Boron+)

      :OVS version: 2.5 or later

    Enable OVS conntrack support::

      > cat local.conf
      [[local|localrc]]
      ODL_LEGACY_NETVIRT_CONNTRACK=True

13. Note: To enable Vlan Aware VMs (Trunk) with the OpenDaylight backend,
    make the following entries in local.conf::

      > cat local.conf
      [[local|localrc]]
      Q_SERVICE_PLUGIN_CLASSES=trunk

14. Enabling the L2Gateway backend for OpenDaylight

    - The package networking-l2gw must be installed as a pre-requisite.

      So include the following in your localrc (or local.conf)::

        enable_plugin networking-l2gw http://git.openstack.org/openstack/networking-l2gw
        enable_service l2gw_plugin
        NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.OpenDaylightL2gwDriver:default

    - Now stack up Devstack and after stacking completes, we are all set to use
      l2gateway-as-a-service with OpenDaylight.

15. Note: To enable Service Function Chaining support driven by networking-sfc,
    the following steps have to be taken:

    - local.conf should contain the following lines::

        # enable our plugin:
        enable_plugin networking-odl https://github.com/openstack/networking-odl.git

        # enable the networking-sfc plugin:
        enable_plugin networking-sfc https://github.com/openstack/networking-sfc.git

        # enable the odl-netvirt-sfc Karaf feature in OpenDaylight
        ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-sfc

        # enable the networking-sfc OpenDaylight driver pair
        [[post-config|$NEUTRON_CONF]]
        [sfc]
        drivers = odl_v2
        [flowclassifier]
        drivers = odl_v2

    - A special commit of Open vSwitch should be compiled and installed
      (containing compatible NSH OpenFlow support). This isn't
      done automatically by networking-odl or DevStack, so the user has to
      install it manually. Please follow the instructions in:
      https://wiki.opendaylight.org/view/Service_Function_Chaining:Main#Building_Open_vSwitch_with_VxLAN-GPE_and_NSH_support

    - Carbon is the recommended and latest version of OpenDaylight to use;
      you can specify it by adding the following to local.conf::

        ODL_RELEASE=carbon-snapshot-0.6

    - To clarify, OpenDaylight doesn't have to be running/installed before
      stacking with networking-odl (and it shouldn't). The networking-odl
      DevStack plugin will download and start OpenDaylight automatically.
      However, it will not fetch the correct Open vSwitch version, so the
      instructions above and the usage of ``SKIP_OVS_INSTALL`` are important.

16. To enable the BGPVPN driver with the OpenDaylight controller,
    include the following lines in your localrc (or local.conf)::

      enable_plugin networking-bgpvpn https://git.openstack.org/openstack/networking-bgpvpn.git

      [[post-config|$NETWORKING_BGPVPN_CONF]]
      [service_providers]
      service_provider=BGPVPN:OpenDaylight.networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver:default

    and then stack up your devstack.
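Pulling the separate snippets above together, a hypothetical minimal local.conf for an all-in-one node with L3 enabled might look like the following (values are illustrative, not defaults shipped by the plugin)::

    [[local|localrc]]
    enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
    ODL_L3=True
    ODL_PROVIDER_MAPPINGS=br-ex:eth2   # old ovsdb-based netvirt; use physnet1:eth2 for vpnservice netvirt
    # SKIP_OVS_INSTALL=True            # only when Open vSwitch comes from another source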
@@ -1,113 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

#
# This script is executed in the OpenStack CI job that runs DevStack + tempest.
# You can find the CI job configuration here:
#
# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/networking-odl.yaml
#

# TODO(yamahata): tempest tests are run serially at the moment and
# we're occasionally hitting the 120 min timeout. For now, as a workaround,
# lengthen the timeout a bit.
# In the near future (Ocata cycle), after migrating to the new ODL netvirt (conserves),
# parallel execution should be enabled and this workaround removed.
if [[ -z "${RALLY_SCENARIO}" && -z "${GRENADE_PLUGINRC}" ]] ; then
    export BUILD_TIMEOUT=180
    export DEVSTACK_GATE_TIMEOUT=$(expr $BUILD_TIMEOUT - $DEVSTACK_GATE_TIMEOUT_BUFFER)
fi

export OVERRIDE_ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key,mysql,n-api,n-cond,n-cpu,n-crt,n-obj,n-sch,q-dhcp,q-meta,q-svc,quantum,rabbit,placement-api,n-api-meta
if [ -z "${RALLY_SCENARIO}" ] ; then
    # Only include tempest if this is not a rally job, as running tempest in Rally is likely to cause failure
    export OVERRIDE_ENABLED_SERVICES=${OVERRIDE_ENABLED_SERVICES},tempest
fi

# NOTE(manjeets) To prevent creating the public network twice
if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] ; then

    # NOTE(manjeets) Temporarily disabling LM test due to bug 1643678
    # https://bugs.launchpad.net/networking-odl/+bug/1643678
    export DEVSTACK_LOCAL_CONFIG+=$'\n'"LIVE_MIGRATION_AVAILABLE=False"
    export DEVSTACK_LOCAL_CONFIG+=$'\n'"USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=False"
    # DEVSTACK_GATE_NEUTRON_DVR in devstack-gate sets Q_DVR_MODE as dvr_snat
    export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_DVR_MODE=legacy"
    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"Q_DVR_MODE=legacy"

    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"disable_all_services"
    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ENABLED_SERVICES=n-cpu,dstat,c-vol,c-bak,mysql,placement-client"
    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"RABBIT_HOST=\$SERVICE_HOST"
    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ODL_MODE=compute"
    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"enable_plugin networking-odl git://git.openstack.org/openstack/networking-odl"
    export DEVSTACK_SUBNODE_CONFIG+=$'\n'"LIBVIRT_TYPE=qemu"
fi

# Begin list of exclusions.
r="^(?!.*"

# exclude the slow tag (part of the default for 'full')
r="$r(?:.*\[.*\bslow\b.*\])"

# exclude things that just aren't enabled:
r="$r|(?:tempest\.api\.network\.admin\.test_quotas\.QuotasTest\.test_lbaas_quotas.*)"
r="$r|(?:tempest\.api\.network\.test_load_balancer.*)"
r="$r|(?:tempest\.scenario\.test_load_balancer.*)"
r="$r|(?:tempest\.api\.network\.admin\.test_load_balancer.*)"
r="$r|(?:tempest\.api\.network\.admin\.test_lbaas.*)"
r="$r|(?:tempest\.api\.network\.test_fwaas_extensions.*)"
r="$r|(?:tempest\.api\.network\.test_vpnaas_extensions.*)"
r="$r|(?:tempest\.api\.network\.test_metering_extensions.*)"
r="$r|(?:tempest\.thirdparty\.boto\.test_s3.*)"

# exclude stuff we're less likely to break because i'm impatient
r="$r|(?:tempest\.api\.identity.*)"
r="$r|(?:tempest\.api\.image.*)"
r="$r|(?:tempest\.api\.volume.*)"

# unsupported features
# ODL legacy netvirt doesn't support ipv6
r="$r|(?:tempest\.scenario\.test_network_v6\.TestGettingAddress.*)"

# Current list of failing tests that need to be triaged, have bugs filed, and
# fixed as appropriate.
# (none)

# TODO(yamahata): fix bugs and remove those tests from here
# BUG: https://bugs.launchpad.net/networking-odl/+bug/1642158
# legacy netvirt ignores admin-state-up state for network/port
r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_instance_port_admin_state.*)"
r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_router_admin_state.*)"

# BUG: https://bugs.launchpad.net/networking-odl/+bug/1643033
# stateful security group: conntracking needs to be enabled
r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_hotplug_nic.*)"
r="$r|(?:tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_cross_tenant_traffic.*)"
r="$r|(?:tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_port_security_disable_security_group.*)"

# BUG: https://bugs.launchpad.net/networking-odl/+bug/1656129
# excluding some tests temporarily
if [ -n "$DEVSTACK_GATE_GRENADE" ]; then
    # Disable some tempest tests temporarily on
    # the grenade job
    r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVolumes\.test_encrypted_cinder_volumes_cryptsetup.*)"
    r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVolumes\.test_encrypted_cinder_volumes_luks.*)"
    r="$r|(?:tempest\.scenario\.test_minimum_basic\.TestMinimumBasicScenario\.test_minimum_basic_scenario.*)"
fi

# End list of exclusions.
r="$r)"

# only run tempest.api/scenario/thirdparty tests (part of the default for 'full')
r="$r(tempest\.(api|scenario|thirdparty)).*$"

export DEVSTACK_GATE_TEMPEST_REGEX="$r"
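The exported regex is what the gate job eventually hands to tempest's test selection. Roughly, and only as an illustration of how such a regex is consumed (the exact devstack-gate invocation is outside this file)::

    cd /opt/stack/tempest
    tox -e all -- "$DEVSTACK_GATE_TEMPEST_REGEX"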
@ -1,369 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
# cleanup_opendaylight() - Remove residual data files, anything left over
|
||||
# from previous runs that a clean run would need to clean up
|
||||
function cleanup_opendaylight {
|
||||
# Wipe out the data, journal and snapshots directories ... grumble grumble grumble
|
||||
rm -rf $ODL_DIR/$ODL_NAME/{data,journal,snapshots}
|
||||
|
||||
# Remove existing logfiles
|
||||
if [[ -n "$LOGDIR" ]]; then
|
||||
rm -f "$LOGDIR/$ODL_KARAF_LOG_BASE*"
|
||||
fi
|
||||
if [[ -n "$SCREEN_LOGDIR" ]]; then
|
||||
rm -f "$SCREEN_LOGDIR/$ODL_KARAF_LOG_BASE*"
|
||||
fi
|
||||
rm -f "$DEST/logs/$ODL_KARAF_LOG_BASE*"
|
||||
|
||||
move_interface_addresses "outof_bridge"
|
||||
|
||||
unbind_opendaylight_controller
|
||||
}
|
||||
|
||||
|
||||
# configure_opendaylight() - Set config files, create data dirs, etc
|
||||
function configure_opendaylight {
|
||||
echo "Configuring OpenDaylight"
|
||||
|
||||
# The logging config file in ODL
|
||||
local ODL_LOGGING_CONFIG=${ODL_DIR}/${ODL_NAME}/etc/org.ops4j.pax.logging.cfg
|
||||
|
||||
# Add netvirt feature in Karaf, if it's not already there
|
||||
local ODLFEATUREMATCH=$(cat $ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg | \
|
||||
grep featuresBoot= | grep $ODL_NETVIRT_KARAF_FEATURE)
|
||||
if [ "$ODLFEATUREMATCH" == "" ]; then
|
||||
sed -i "/^featuresBoot=/ s/$/,$ODL_NETVIRT_KARAF_FEATURE/" \
|
||||
$ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg
|
||||
fi
|
||||
|
||||
# Move Jetty to $ODL_PORT
|
||||
local _ODLPORT=$(cat $ODL_DIR/$ODL_NAME/etc/jetty.xml | grep $ODL_PORT)
|
||||
if [ "$_ODLPORT" == "" ]; then
|
||||
sed -i "/\<Property name\=\"jetty\.port/ s/808./$ODL_PORT/" \
|
||||
$ODL_DIR/$ODL_NAME/etc/jetty.xml
|
||||
fi
|
||||
|
||||
# Configure conntrack for legacy netvirt
|
||||
if [[ "$ODL_LEGACY_NETVIRT_CONNTRACK" == "True" ]]; then
|
||||
NETVIRT_INIT_CONFIG_XML=$NETWORKING_ODL_DIR/devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml
|
||||
ODL_DATASTORE_INITIAL_CONFIG_DIR=${ODL_DIR}/${ODL_NAME}/etc/opendaylight/datastore/initial/config
|
||||
mkdir -p $ODL_DATASTORE_INITIAL_CONFIG_DIR
|
||||
cp --backup --force $NETVIRT_INIT_CONFIG_XML $ODL_DATASTORE_INITIAL_CONFIG_DIR/
|
||||
fi
|
||||
|
||||
# Configure L3 if the user wants it for NETVIRT_OVSDB
|
||||
# L3 is always enabled in NETVIRT_VPNSERVICE
|
||||
if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]] && [ "${ODL_L3}" == "True" ]; then
|
||||
# Configure L3 FWD if it's not there
|
||||
local L3FWD=$(cat $ODL_DIR/$ODL_NAME/etc/custom.properties | \
|
||||
grep ^ovsdb.l3.fwd.enabled)
|
||||
if [ "$L3FWD" == "" ]; then
|
||||
echo "ovsdb.l3.fwd.enabled=yes" >> $ODL_DIR/$ODL_NAME/etc/custom.properties
|
||||
fi
|
||||
|
||||
# Configure L3 GW MAC if it's not there
|
||||
local L3GW_MAC=$(cat $ODL_DIR/$ODL_NAME/etc/custom.properties | \
|
||||
grep ^ovsdb.l3gateway.mac)
|
||||
if [[ -z "$L3GW_MAC" && -n "$ODL_L3GW_MAC" ]]; then
|
||||
echo "ovsdb.l3gateway.mac=$ODL_L3GW_MAC" >> $ODL_DIR/$ODL_NAME/etc/custom.properties
|
||||
fi
|
||||
fi
|
||||
|
||||
# Remove existing logfiles
|
||||
local ODL_LOGDIR=$DEST/logs
|
||||
if [[ -n "$LOGDIR" ]]; then
|
||||
ODL_LOGDIR=$LOGDIR
|
||||
fi
|
||||
|
||||
rm -f "$ODL_LOGDIR/$ODL_KARAF_LOG_BASE*"
|
||||
# Log karaf output to a file
|
||||
_LF=$ODL_LOGDIR/$ODL_KARAF_LOG_NAME
|
||||
LF=$(echo $_LF | sed 's/\//\\\//g')
|
||||
# Soft link for easy consumption
|
||||
sudo mkdir -p "$ODL_LOGDIR"
|
||||
ln -sf $_LF "$ODL_LOGDIR/screen-karaf.log"
|
||||
if [[ -n $SCREEN_LOGDIR ]]; then
|
||||
ln -sf $_LF "$SCREEN_LOGDIR/screen-karaf.log"
|
||||
fi
|
||||
|
||||
# Change the karaf logfile
|
||||
# disable log rotation by setting max fiel size large enough
|
||||
sed -i -e "/^log4j\.appender\.out\.file/ s/.*/log4j\.appender\.out\.file\=$LF/" \
|
||||
-e "/^log4j\.appender\.out\.maxFileSize/ s/.*/log4j\.appender\.out\.maxFileSize\=1024GB/" \
|
||||
$ODL_DIR/$ODL_NAME/etc/org.ops4j.pax.logging.cfg
|
||||
|
||||
# Configure DEBUG logs for network virtualization in odl, if the user wants it
|
||||
if [ "${ODL_NETVIRT_DEBUG_LOGS}" == "True" ]; then
|
||||
local OVSDB_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | grep ^log4j.logger.org.opendaylight.ovsdb)
|
||||
if [ "${OVSDB_DEBUG_LOGS}" == "" ]; then
|
||||
echo 'log4j.logger.org.opendaylight.ovsdb = TRACE, out' >> $ODL_LOGGING_CONFIG
|
||||
echo 'log4j.logger.org.opendaylight.ovsdb.lib = INFO, out' >> $ODL_LOGGING_CONFIG
|
||||
echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.impl.NeutronL3Adapter = DEBUG, out' >> $ODL_LOGGING_CONFIG
|
||||
echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.impl.TenantNetworkManagerImpl = DEBUG, out' >> $ODL_LOGGING_CONFIG
|
||||
echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.services.arp.GatewayMacResolverService = DEBUG, out' >> $ODL_LOGGING_CONFIG
|
||||
echo 'log4j.logger.org.opendaylight.ovsdb.plugin.md.OvsdbInventoryManager = INFO, out' >> $ODL_LOGGING_CONFIG
|
||||
fi
|
||||
local ODL_NEUTRON_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | \
|
||||
grep ^log4j.logger.org.opendaylight.neutron)
|
||||
if [ "${ODL_NEUTRON_DEBUG_LOGS}" == "" ]; then
|
||||
echo 'log4j.logger.org.opendaylight.neutron = TRACE, out' >> $ODL_LOGGING_CONFIG
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# configure_neutron_opendaylight() - Set Neutron config files according to ODL settings
|
||||
function configure_neutron_odl {
|
||||
echo "Configuring ML2 for OpenDaylight"
|
||||
|
||||
# https://bugs.launchpad.net/neutron/+bug/1614766
|
||||
# Allow ovsdb_interface native by avoiding port conflict.
|
||||
if [[ -n "$ODL_OVSDB_ALTPORT" ]]; then
|
||||
iniset $NEUTRON_CONF OVS ovsdb_connection tcp:127.0.0.1:$ODL_OVSDB_ALTPORT
|
||||
iniset $NEUTRON_DHCP_CONF OVS ovsdb_connection tcp:127.0.0.1:$ODL_OVSDB_ALTPORT
|
||||
fi
|
||||
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl url=$ODL_ENDPOINT
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl username=$ODL_USERNAME
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl password=$ODL_PASSWORD
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl port_binding_controller=$ODL_PORT_BINDING_CONTROLLER
|
||||
if [[ -n "$ODL_TIMEOUT" ]]; then
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl timeout=$ODL_TIMEOUT
|
||||
fi
|
||||
# When it's not set, the default value is set by networking-odl
|
||||
if [[ -n "$ODL_HOSTCONF_URI" ]]; then
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl odl_hostconf_uri=$ODL_HOSTCONF_URI
|
||||
fi
|
||||
|
||||
# NOTE(mgkwill): ODL layer-3 and DHCP services currently lack support
|
||||
# for metadata. Enabling both native services also requires enabling
|
||||
# config drive to provide instances with metadata. If conventional DHCP agent
|
||||
# is used instead, configure it to provide instances with metadata.
|
||||
if is_service_enabled q-dhcp; then
|
||||
# Conventional DHCP agent must provide all metadata when ODL
|
||||
# layer-3 is enabled. The conventional DHCP agent will be forced
|
||||
# to provide metadata for all networks.
|
||||
iniset $Q_DHCP_CONF_FILE DEFAULT force_metadata True
|
||||
fi
|
||||
if [[ "$ODL_L3" == "True" ]]; then
|
||||
if is_service_enabled n-cpu; then
|
||||
iniset $NOVA_CONF DEFAULT force_config_drive True
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function configure_neutron_odl_lightweight_testing {
|
||||
echo "Configuring lightweight testing for OpenDaylight"
|
||||
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl enable_lightweight_testing=True
|
||||
}
|
||||
|
||||
# init_opendaylight() - Initialize databases, etc.
|
||||
function init_opendaylight {
|
||||
# clean up from previous (possibly aborted) runs
|
||||
# create required data files
|
||||
:
|
||||
}
|
||||
|
||||
|
||||
# install_opendaylight() - Collect source and prepare
|
||||
function install_opendaylight {
|
||||
echo "Installing OpenDaylight and dependent packages"
|
||||
if [[ "$ODL_USING_EXISTING_JAVA" != "True" ]]
|
||||
then
|
||||
if ! setup_java "${ODL_REQUIRED_JAVA_VERSION:-7}"; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Download OpenDaylight
|
||||
cd $ODL_DIR
|
||||
|
||||
if [[ "$OFFLINE" != "True" ]]; then
|
||||
wget -N $ODL_URL/$ODL_PKG
|
||||
fi
|
||||
unzip -u -o $ODL_PKG
|
||||
}
|
||||
|
||||
|
||||
# install_networking_odl() - Install the ML2 driver and other plugins/drivers
|
||||
function install_networking_odl {
|
||||
echo "Installing the Networking-ODL driver for OpenDaylight"
|
||||
setup_develop $NETWORKING_ODL_DIR
|
||||
}
|
||||
|
||||
|
||||
# install_opendaylight_compute() - Make sure OVS is installed
|
||||
function install_opendaylight_compute {
|
||||
if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then
|
||||
echo "Skipping OVS installation."
|
||||
else
|
||||
# packages are the same as for Neutron OVS agent
|
||||
_neutron_ovs_base_install_agent_packages
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# start_opendaylight() - Start running processes, including screen
|
||||
function start_opendaylight {
|
||||
echo "Starting OpenDaylight"
|
||||
|
||||
# Wipe out the data and journal directories ... grumble grumble grumble
|
||||
rm -rf $ODL_DIR/$ODL_NAME/{data,journal}
|
||||
|
||||
# The following variables are needed by the running karaf process.
|
||||
# See the "bin/setenv" file in the OpenDaylight distribution for
|
||||
# their individual meaning.
|
||||
setup_java_env
|
||||
export JAVA_MIN_MEM=$ODL_JAVA_MIN_MEM
|
||||
export JAVA_MAX_MEM=$ODL_JAVA_MAX_MEM
|
||||
export JAVA_MAX_PERM_MEM=$ODL_JAVA_MAX_PERM_MEM
|
||||
|
||||
# this is a forking process, just start it in the background
|
||||
$ODL_DIR/$ODL_NAME/bin/start
|
||||
|
||||
if [ -n "$ODL_BOOT_WAIT_URL" ]; then
|
||||
echo "Waiting for OpenDaylight to start via $ODL_BOOT_WAIT_URL ..."
|
||||
# Probe ODL restconf for netvirt until it is operational
|
||||
local testcmd="curl -o /dev/null --fail --silent --head -u \
|
||||
${ODL_USERNAME}:${ODL_PASSWORD} http://${ODL_MGR_HOST}:${ODL_PORT}/${ODL_BOOT_WAIT_URL}"
|
||||
test_with_retry "$testcmd" "OpenDaylight did not start after $ODL_BOOT_WAIT" \
|
||||
$ODL_BOOT_WAIT $ODL_RETRY_SLEEP_INTERVAL
|
||||
else
|
||||
echo "Waiting for OpenDaylight to start ..."
|
||||
# Sleep a bit to let OpenDaylight finish starting up
|
||||
sleep $ODL_BOOT_WAIT
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# stop_opendaylight() - Stop running processes (non-screen)
|
||||
function stop_opendaylight {
|
||||
# Stop the karaf container
|
||||
$ODL_DIR/$ODL_NAME/bin/stop
|
||||
}
|
||||
|
||||
|
||||
# cleanup_opendaylight_compute() - Remove all OVS ports, bridges and disconnects
|
||||
# controller from switch
|
||||
function cleanup_opendaylight_compute {
|
||||
# Remove the patch ports
|
||||
for port in $(sudo ovs-vsctl show | grep Port | awk '{print $2}' | cut -d '"' -f 2 | grep patch); do
|
||||
sudo ovs-vsctl del-port ${port}
|
||||
done
|
||||
|
||||
# remove all OVS ports that look like Neutron created ports
|
||||
for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
|
||||
sudo ovs-vsctl del-port ${port}
|
||||
done
|
||||
|
||||
# Remove all the vxlan ports
|
||||
for port in $(sudo ovs-vsctl list port | grep name | grep vxlan | awk '{print $3}' | cut -d '"' -f 2); do
|
||||
sudo ovs-vsctl del-port ${port}
|
||||
done
|
||||
|
||||
# Disconnect controller from switch
|
||||
unbind_opendaylight_controller
|
||||
|
||||
# remove all OVS bridges created by ODL
|
||||
for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BR} -e ${PUBLIC_BRIDGE}); do
|
||||
sudo ovs-vsctl del-br ${bridge}
|
||||
done
|
||||
}
|
||||
|
||||
# bind_opendaylight_controller() - set control manager to OVS
|
||||
function bind_opendaylight_controller {
|
||||
echo_summary "Initializing OpenDaylight"
|
||||
ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
|
||||
ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
|
||||
read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
|
||||
local ODL_MANAGERS_PARAM=()
|
||||
for manager in $(echo $ODL_OVS_MANAGERS | tr "," "\n"); do
|
||||
local manager_ip=$(gethostip -d ${manager})
|
||||
ODL_MANAGERS_PARAM=( "${ODL_MANAGERS_PARAM[@]}" "tcp:${manager_ip}:$ODL_MGR_PORT" )
|
||||
done
|
||||
# don't overwrite the already existing managers
|
||||
local ODL_MANAGERS_OLD=$(sudo ovs-vsctl get-manager)
|
||||
local ODL_MANAGERS=$(echo $ODL_MANAGERS_OLD ${ODL_MANAGERS_PARAM[@]} | tr ' ' '\n' | sort | uniq | tr '\n' ' ')
|
||||
sudo ovs-vsctl set-manager ${ODL_MANAGERS}
|
||||
if [[ -n "$PUBLIC_BRIDGE" ]]; then
|
||||
sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
|
||||
fi
|
||||
if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then
|
||||
sudo ovs-vsctl set Open_vSwitch $ovstbl \
|
||||
other_config:provider_mappings=$ODL_PROVIDER_MAPPINGS
|
||||
fi
|
||||
sudo ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ODL_LOCAL_IP
|
||||
# for pseudo agent port binding
|
||||
if [ "$ODL_PORT_BINDING_CONTROLLER" == "pseudo-agentdb-binding" ]; then
|
||||
ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS:---debug --noovs_dpdk}
|
||||
if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then
|
||||
ODL_OVS_HOSTCONFIGS_OPTIONS="${ODL_OVS_HOSTCONFIGS_OPTIONS} --bridge_mappings=${ODL_PROVIDER_MAPPINGS}"
|
||||
fi
|
||||
if [[ -n "$ODL_OVS_HOSTCONFIGS" ]]; then
|
||||
ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS} --ovs_hostconfigs="$ODL_OVS_HOSTCONFIGS"
|
||||
fi
|
||||
if [[ ! -f $NEUTRON_CONF ]]; then
|
||||
sudo neutron-odl-ovs-hostconfig $ODL_OVS_HOSTCONFIGS_OPTIONS
|
||||
else
|
||||
sudo neutron-odl-ovs-hostconfig --config-file=$NEUTRON_CONF $ODL_OVS_HOSTCONFIGS_OPTIONS
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# unbind_opendaylight_controller() - disconnect controller from switch and clear bridges
|
||||
function unbind_opendaylight_controller {
|
||||
sudo ovs-vsctl del-manager
|
||||
BRIDGES=$(sudo ovs-vsctl list-br)
|
||||
for bridge in $BRIDGES ; do
|
||||
sudo ovs-vsctl del-controller $bridge
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
function _configure_veth {
|
||||
ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 ||
|
||||
sudo ip link add $Q_PUBLIC_VETH_INT type veth \
|
||||
peer name $Q_PUBLIC_VETH_EX
|
||||
sudo ip link set $Q_PUBLIC_VETH_INT up
|
||||
sudo ip link set $Q_PUBLIC_VETH_EX up
|
||||
sudo ip addr flush dev $Q_PUBLIC_VETH_EX
|
||||
if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]]; then
|
||||
local OVSBR_EX=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1)
|
||||
sudo ovs-vsctl --may-exist add-port $OVSBR_EX $Q_PUBLIC_VETH_INT
|
||||
else
|
||||
sudo ovs-vsctl --may-exist add-port $OVS_BR $Q_PUBLIC_VETH_INT
|
||||
fi
|
||||
|
||||
local cidr_len=${FLOATING_RANGE#*/}
|
||||
sudo ip addr replace ${PUBLIC_NETWORK_GATEWAY}/$cidr_len dev $Q_PUBLIC_VETH_EX
|
||||
sudo ip route replace $FLOATING_RANGE dev $Q_PUBLIC_VETH_EX
|
||||
if [[ -n "$IPV6_PUBLIC_RANGE" ]] && [[ -n "$IPV6_PUBLIC_NETWORK_GATEWAY" ]] && [[ -n "$FIXED_RANGE_V6" ]] && [[ -n "$IPV6_ROUTER_GW_IP" ]]; then
|
||||
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
|
||||
sudo ip -6 addr replace ${IPV6_PUBLIC_NETWORK_GATEWAY}/$ipv6_cidr_len dev ${Q_PUBLIC_VETH_EX}
|
||||
sudo ip -6 route replace $IPV6_PUBLIC_RANGE dev $Q_PUBLIC_VETH_EX
|
||||
fi
|
||||
}
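# Net effect (illustrative summary, not an exact trace): $Q_PUBLIC_VETH_INT is
# plugged into the OVS bridge, while $Q_PUBLIC_VETH_EX keeps the host-side
# addressing ($PUBLIC_NETWORK_GATEWAY and the route for $FLOATING_RANGE).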
|
||||
|
||||
function _configure_opendaylight_l3_legacy_netvirt {
|
||||
wait_for_active_bridge $PUBLIC_BRIDGE $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT
|
||||
|
||||
if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then
|
||||
_configure_veth
|
||||
fi
|
||||
}
|
||||
|
||||
function _configure_opendaylight_l3_new_netvirt {
|
||||
if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then
|
||||
_configure_veth
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# configure_opendaylight_l3() - configure bridges for OpenDaylight L3 forwarding
|
||||
function configure_opendaylight_l3 {
|
||||
if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]]; then
|
||||
_configure_opendaylight_l3_legacy_netvirt
|
||||
else
|
||||
_configure_opendaylight_l3_new_netvirt
|
||||
fi
|
||||
}
|
@ -1 +0,0 @@
|
||||
syslinux-utils
|
@ -1 +0,0 @@
|
||||
syslinux
|
@ -1,159 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# functions - OpenDaylight driver utility functions
|
||||
|
||||
function _odl_nexus_path {
|
||||
local ODL_URL_PREFIX=$1
|
||||
echo "${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_SNAPSHOT_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf}"
|
||||
}
|
||||
|
||||
function _wget {
|
||||
local MAVENMETAFILE=$1
|
||||
local URL=$2
|
||||
local OFFLINE=$3
|
||||
|
||||
if [[ "$OFFLINE" == "True" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Remove stale MAVENMETAFILE for cases where you switch releases
|
||||
rm -f $MAVENMETAFILE
|
||||
|
||||
# Acquire the timestamp information from maven-metadata.xml
|
||||
wget -O $MAVENMETAFILE $URL
|
||||
}
|
||||
|
||||
function _xpath {
|
||||
local XPATH=$1
|
||||
local MAVENMETAFILE=$2
|
||||
local result=""
|
||||
if is_ubuntu; then
|
||||
install_package libxml-xpath-perl >/dev/null
|
||||
result=`xpath -e "$XPATH" $MAVENMETAFILE 2>/dev/null`
|
||||
elif [ "$os_VENDOR" = "Fedora" ]; then
|
||||
yum_install perl-XML-XPath >/dev/null
|
||||
result=`xpath -e "$XPATH" $MAVENMETAFILE 2>/dev/null`
|
||||
else
|
||||
yum_install perl-XML-XPath >/dev/null
|
||||
result=`xpath $MAVENMETAFILE "$XPATH" 2>/dev/null`
|
||||
fi
|
||||
echo $result
|
||||
}
|
||||
|
||||
# get snapshot version <major>.<minor> -> <major>.<minor>.<revision>
|
||||
function odl_snapshot_full_version {
|
||||
local ODL_DIR=$1
|
||||
local ODL_URL_PREFIX=$2
|
||||
local MAJOR_MINOR=$3
|
||||
local OFFLINE=$4
|
||||
|
||||
local MAVENMETAFILE=$ODL_DIR/maven-metadata-snapshot.xml
|
||||
local NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX)
|
||||
_wget $MAVENMETAFILE ${NEXUSPATH}/maven-metadata.xml $OFFLINE
|
||||
if [[ ! -r $MAVENMETAFILE ]]; then
|
||||
echo "$MAVENMETAFILE doesn't exist. Please try with OFFLINE=False and check internet connection to $NEXUSPATH"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$MAJOR_MINOR" == "latest" ]]; then
|
||||
local ODL_FULL_VERSION=$(_xpath "//latest/text()" $MAVENMETAFILE)
|
||||
else
|
||||
local ODL_FULL_VERSION=$(_xpath "//version[starts-with(text(), '$MAJOR_MINOR')][last()]/text()" $MAVENMETAFILE)
|
||||
fi
|
||||
ODL_FULL_VERSION=${ODL_FULL_VERSION/-SNAPSHOT/}
|
||||
echo $ODL_FULL_VERSION
|
||||
}
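# Illustrative usage (values assumed): resolve the newest revision of a
# <major>.<minor> snapshot series, e.g.
#   ODL_FULL_VERSION=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX 0.6 $OFFLINE)
# which might echo something like 0.6.2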
|
||||
|
||||
function _odl_export_snapshot_url_pkg {
|
||||
local ODL_DIR=$1
|
||||
local ODL_URL_PREFIX=$2
|
||||
local BUNDLEVERSION=$3
|
||||
local OFFLINE=$4
|
||||
local BUNDLE_TIMESTAMP=$5
|
||||
|
||||
local MAVENMETAFILE=$ODL_DIR/maven-metadata.xml
|
||||
local NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX)
|
||||
|
||||
if [ "$BUNDLE_TIMESTAMP" == "latest" ]; then
|
||||
# Get build information
|
||||
_wget $MAVENMETAFILE ${NEXUSPATH}/${BUNDLEVERSION}/maven-metadata.xml $OFFLINE
|
||||
BUNDLE_TIMESTAMP=$(_xpath "//snapshotVersion[extension='zip'][1]/value/text()" $MAVENMETAFILE)
|
||||
fi
|
||||
|
||||
export ODL_URL=${NEXUSPATH}/${BUNDLEVERSION}
|
||||
export ODL_PKG=distribution-karaf-${BUNDLE_TIMESTAMP}.zip
|
||||
}
|
||||
|
||||
function _odl_export_release_url_pkg {
|
||||
local ODL_URL_PREFIX=$1
|
||||
local BUNDLEVERSION=$2
|
||||
local NEXUSPATH="${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_RELEASE_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf}"
|
||||
|
||||
export ODL_URL=${NEXUSPATH}/${BUNDLEVERSION}
|
||||
export ODL_PKG=distribution-karaf-${BUNDLEVERSION}.zip
|
||||
}
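# For illustration: with BUNDLEVERSION=0.5.1-Boron-SR1 and the default
# repository layout this exports roughly
#   ODL_URL=${ODL_URL_PREFIX}/${ODL_URL_RELEASE_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf/0.5.1-Boron-SR1
#   ODL_PKG=distribution-karaf-0.5.1-Boron-SR1.zip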
|
||||
|
||||
function setup_opendaylight_package {
|
||||
if [[ -n "$ODL_SNAPSHOT_VERSION" ]]; then
|
||||
_odl_export_snapshot_url_pkg ${ODL_DIR} ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} ${OFFLINE} ${ODL_SNAPSHOT_VERSION}
|
||||
else
|
||||
_odl_export_release_url_pkg ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION}
|
||||
fi
|
||||
}
|
||||
|
||||
# Test if OpenDaylight is enabled
|
||||
function is_opendaylight_enabled {
|
||||
[[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
|
||||
return 1
|
||||
}
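# Example (illustrative): an ENABLED_SERVICES list containing odl-server or
# odl-compute makes the function above return 0 (enabled).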
|
||||
|
||||
|
||||
# Check that the bridge is up and running
|
||||
function wait_for_active_bridge {
|
||||
local BRIDGE=$1
|
||||
local SLEEP_INTERVAL=$2
|
||||
local MAX_WAIT=$3
|
||||
|
||||
echo "Waiting for bridge $BRIDGE to be available..."
|
||||
local testcmd="sudo ovs-vsctl list Bridge | grep $BRIDGE"
|
||||
test_with_retry "$testcmd" \
|
||||
"$BRIDGE did not become available in $MAX_WAIT seconds." \
|
||||
$MAX_WAIT $SLEEP_INTERVAL
|
||||
echo "Bridge $BRIDGE is available."
|
||||
}
|
||||
|
||||
# Move the public IP addresses to the OVS bridge on startup,
|
||||
# or back to the public interface on cleanup
|
||||
function move_interface_addresses {
|
||||
local direction=$1
|
||||
|
||||
if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then
|
||||
local VETH_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1)
|
||||
local PHYSICAL_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f2)
|
||||
|
||||
if [[ "$direction" == "into_bridge" ]]; then
|
||||
_move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" True False "inet"
|
||||
if _has_public_ipv6_address "$PHYSICAL_INTERFACE"; then
|
||||
_move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" False False "inet6"
|
||||
fi
|
||||
elif [[ "$direction" == "outof_bridge" ]]; then
|
||||
_move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False True "inet"
|
||||
if _has_public_ipv6_address "$VETH_INTERFACE"; then
|
||||
_move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False False "inet6"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Check that the interface has an IPv6 address which
|
||||
# is routable on the external network
|
||||
function _has_public_ipv6_address {
|
||||
local interface=$1
|
||||
local interface_public_ipv6_addresses=$(ip -f inet6 a s dev "$interface" | grep -c 'global')
|
||||
echo "$interface public IPv6 address count: $interface_public_ipv6_addresses"
|
||||
if [[ "$interface_public_ipv6_addresses" != 0 ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
@ -1,109 +0,0 @@
|
||||
[[local|localrc]]
|
||||
# This will fetch the latest ODL snapshot
|
||||
ODL_RELEASE=latest-snapshot
|
||||
|
||||
# Default is the V2 driver; uncomment the line below to use V1
|
||||
#ODL_V2DRIVER=False
|
||||
|
||||
# Default is pseudo-agentdb-binding
|
||||
#ODL_PORT_BINDING_CONTROLLER=
|
||||
|
||||
|
||||
# Set here which ODL openstack service provider to use
|
||||
# These are core ODL features
|
||||
ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs
|
||||
|
||||
# Set DLUX Karaf features needed for the ODL GUI at http://<ODL_IP>:8181/index.html
|
||||
ODL_NETVIRT_KARAF_FEATURE+=,odl-dluxapps-nodes,odl-dluxapps-topology,odl-dluxapps-yangui,odl-dluxapps-yangvisualizer
|
||||
|
||||
# Set L2 Karaf features needed for the ODL GUI at http://<ODL_IP>:8181/index.html
|
||||
ODL_NETVIRT_KARAF_FEATURE+=,odl-l2switch-switch,odl-l2switch-switch-ui,odl-ovsdb-hwvtepsouthbound-ui,odl-ovsdb-southbound-impl-ui,odl-netvirt-ui
|
||||
|
||||
# Set OpenFlow Karaf features needed for the ODL GUI at http://<ODL_IP>:8181/index.html
|
||||
ODL_NETVIRT_KARAF_FEATURE+=,odl-openflowplugin-flow-services-ui
|
||||
|
||||
# odl-netvirt-openstack is used for new netvirt
|
||||
ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-openstack
|
||||
|
||||
# optional feature neutron-logger to log changes of neutron yang models
|
||||
ODL_NETVIRT_KARAF_FEATURE+=,odl-neutron-logger
|
||||
|
||||
# Switch to using ODL's L3 implementation
|
||||
ODL_L3=True
|
||||
|
||||
# Set the host IP here. It should be on an externally reachable network; set
|
||||
# the parameter below to use an IP from a different network
|
||||
HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
|
||||
|
||||
# public network connectivity
|
||||
Q_USE_PUBLIC_VETH=True
|
||||
Q_PUBLIC_VETH_EX=veth-pub-ex
|
||||
Q_PUBLIC_VETH_INT=veth-pub-int
|
||||
ODL_PROVIDER_MAPPINGS=public:${Q_PUBLIC_VETH_INT}
|
||||
|
||||
# Enable debug logs for odl ovsdb
|
||||
ODL_NETVIRT_DEBUG_LOGS=True
|
||||
|
||||
#Q_USE_DEBUG_COMMAND=True
|
||||
|
||||
DEST=/opt/stack/
|
||||
# move DATA_DIR outside of DEST to keep DEST a bit cleaner
|
||||
DATA_DIR=/opt/stack/data
|
||||
|
||||
ADMIN_PASSWORD=password
|
||||
MYSQL_PASSWORD=${ADMIN_PASSWORD}
|
||||
RABBIT_PASSWORD=${ADMIN_PASSWORD}
|
||||
SERVICE_PASSWORD=${ADMIN_PASSWORD}
|
||||
SERVICE_TOKEN=supersecrettoken
|
||||
|
||||
enable_service dstat
|
||||
enable_service g-api
|
||||
enable_service g-reg
|
||||
enable_service key
|
||||
enable_service mysql
|
||||
enable_service n-api
|
||||
enable_service n-cond
|
||||
enable_service n-cpu
|
||||
enable_service n-crt
|
||||
enable_service n-novnc
|
||||
enable_service n-sch
|
||||
enable_service placement-api
|
||||
enable_service placement-client
|
||||
enable_service q-dhcp
|
||||
enable_service q-meta
|
||||
enable_service q-svc
|
||||
enable_service rabbit
|
||||
enable_service tempest
|
||||
|
||||
# These can be enabled if storage is needed for
|
||||
# any feature work or integration testing
|
||||
disable_service c-api
|
||||
disable_service c-vol
|
||||
disable_service c-sch
|
||||
|
||||
SKIP_EXERCISES=boot_from_volume,bundle,client-env,euca
|
||||
|
||||
# Screen console logs will capture service logs.
|
||||
SYSLOG=False
|
||||
SCREEN_LOGDIR=/opt/stack/new/screen-logs
|
||||
LOGFILE=/opt/stack/new/devstacklog.txt
|
||||
VERBOSE=True
|
||||
FIXED_RANGE=10.1.0.0/20
|
||||
FLOATING_RANGE=172.24.5.0/24
|
||||
PUBLIC_NETWORK_GATEWAY=172.24.5.1
|
||||
FIXED_NETWORK_SIZE=4096
|
||||
VIRT_DRIVER=libvirt
|
||||
|
||||
export OS_NO_CACHE=1
|
||||
|
||||
# Additional repositories that need to be cloned can be added here.
|
||||
#LIBS_FROM_GIT=
|
||||
|
||||
# Enable MySql Logging
|
||||
DATABASE_QUERY_LOGGING=True
|
||||
|
||||
# set this until all testing platforms have libvirt >= 1.2.11
|
||||
# see bug #1501558
|
||||
EBTABLES_RACE_FIX=True
|
||||
|
||||
enable_plugin networking-odl git://git.openstack.org/openstack/networking-odl
|
@ -1,5 +0,0 @@
|
||||
<netvirt-impl-config xmlns="urn:opendaylight:params:xml:ns:yang:netvirt:impl:config">
|
||||
<conntrack-enabled>
|
||||
true
|
||||
</conntrack-enabled>
|
||||
</netvirt-impl-config>
|
@ -1 +0,0 @@
|
||||
export ODL_BUNDLEVERSION='0.5.0-Boron'
|
@ -1 +0,0 @@
|
||||
export ODL_BUNDLEVERSION='0.5.1-Boron-SR1'
|
@ -1,77 +0,0 @@
|
||||
_XTRACE_ODL_RELEASE_COMMON=$(set +o | grep xtrace)
|
||||
set -o xtrace
|
||||
|
||||
_odl_release=$1
|
||||
if [[ "$_odl_release" =~ -snapshot ]]; then
|
||||
# <release name>-snapshot-<N>.<N>.<N> -> <N>.<N>.<N>-SNAPSHOT
|
||||
_odl_version=${_odl_release/[[:alpha:]]*-snapshot-/}
|
||||
if [[ "$_odl_release" == "latest-snapshot" ]]; then
|
||||
# get latest revision of snapshot
|
||||
_odl_version=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX "latest" $OFFLINE)
|
||||
# update ODL_RELEASE to prevent odl_snapshot_full_version from being called
|
||||
# every time networking-odl/devstack/plugin.sh is called by devstack
|
||||
# latest-snapshot -> latest-snapshot-<N>.<N>.<N>
|
||||
ODL_RELEASE=${ODL_RELEASE}-${_odl_version}
|
||||
elif [[ "${_odl_version}" =~ ^[[:digit:]]\.[[:digit:]]$ ]]; then
|
||||
# get latest revision of given major.minor
|
||||
# <major>.<minor> -> <major>.<minor>.<revision>
|
||||
_odl_version=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX $_odl_version $OFFLINE)
|
||||
# update ODL_RELEASE to prevent odl_snapshot_full_version from being called
|
||||
# every time networking-odl/devstack/plugin.sh is called by devstack
|
||||
# <release name>-snapshot-<N>.<N> -> <release name>-snapshot-<N>.<N>.<N>
|
||||
_odl_revision=${_odl_version/[[:digit:]]\.[[:digit:]]\./}
|
||||
ODL_RELEASE=${ODL_RELEASE}.${_odl_revision}
|
||||
fi
|
||||
_odl_bundleversion_default=${_odl_version}-SNAPSHOT
|
||||
export ODL_BUNDLEVERSION=${ODL_BUNDLEVERSION:-${_odl_bundleversion_default}}
|
||||
export ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest}
|
||||
else
|
||||
# <release name>-<N>.<N>.<N>[-SR<N>] -> <N>.<N>.<N>-<Release name>[-SR<N>]
|
||||
_name=$(echo ${_odl_release} | awk -F- '{print toupper(substr($1, 1, 1))substr($1, 2)}')
|
||||
_version=$(echo ${_odl_release} | awk -F- '{print $2}')
|
||||
_sr=$(echo ${_odl_release} | awk -F- '{print $3}')
|
||||
_odl_bundleversion_default=${_version}-${_name}
|
||||
if [[ -n $_sr ]]; then
|
||||
_odl_bundleversion_default=${_odl_bundleversion_default}-${_sr}
|
||||
fi
|
||||
export ODL_BUNDLEVERSION=${ODL_BUNDLEVERSION:-${_odl_bundleversion_default}}
|
||||
fi
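# Illustrative mappings implied by the logic above (release names taken from
# the list in settings.odl):
#   ODL_RELEASE=carbon-snapshot-0.6.2 -> ODL_BUNDLEVERSION=0.6.2-SNAPSHOT
#   ODL_RELEASE=boron-0.5.1-SR1       -> ODL_BUNDLEVERSION=0.5.1-Boron-SR1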
|
||||
|
||||
|
||||
# Java major version required to run OpenDaylight: 7, 8, ...
|
||||
# by default, ODL uses jdk 8 as of Boron
|
||||
export ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
|
||||
|
||||
# karaf distribution name of ODL to download
|
||||
export ODL_NAME=${ODL_NAME:-distribution-karaf-${ODL_BUNDLEVERSION}}
|
||||
|
||||
# The network virtualization older feature name (ovsdb based)
|
||||
export ODL_NETVIRT_KARAF_FEATURE_OVSDB=${ODL_NETVIRT_KARAF_FEATURE_OVSDB:-odl-ovsdb-openstack}
|
||||
|
||||
# The network virtualization newer feature name (vpnservice based)
|
||||
export ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE=${ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE:-odl-netvirt-openstack}
|
||||
|
||||
ODL_NETVIRT_KARAF_FEATURE_DEFAULT=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs
|
||||
# the new netvirt (vpnservice based) has been part of netvirt since the Boron release
|
||||
# odl-neutron-logger has been available since the Boron release
|
||||
case "$ODL_BUNDLEVERSION" in
|
||||
0.5.?-*)
|
||||
# 0.5.?-*
|
||||
ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE
|
||||
ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-logger
|
||||
;;
|
||||
*)
|
||||
# 0.6.?-* or later
|
||||
ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE
|
||||
ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-hostconfig-ovs
|
||||
ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-logger
|
||||
;;
|
||||
esac
|
||||
|
||||
# The network virtualization feature used by opendaylight loaded by Karaf
|
||||
export ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE:-$ODL_NETVIRT_KARAF_FEATURE_DEFAULT}
|
||||
|
||||
# The url that this version of ODL netvirt can use to know ODL is fully up
|
||||
export ODL_BOOT_WAIT_URL=${ODL_BOOT_WAIT_URL:-restconf/operational/network-topology:network-topology/topology/netvirt:1}
|
||||
|
||||
$_XTRACE_ODL_RELEASE_COMMON
|
@ -1,38 +0,0 @@
|
||||
# Override a few things here as early as we can
|
||||
|
||||
# We enable the OpenDaylight ML2 mechanism driver by default (v1 or v2,
|
||||
# depending on ODL_V2DRIVER). Note we are also enabling the logger driver, which is helpful for
|
||||
# debugging things on the Neutron side.
|
||||
if [[ "$ODL_V2DRIVER" == "True" ]]
|
||||
then
|
||||
V2_POSTFIX="_v2"
|
||||
else
|
||||
V2_POSTFIX=""
|
||||
fi
|
||||
|
||||
Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-"logger,opendaylight${V2_POSTFIX}"}
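# For illustration: with ODL_V2DRIVER=True this defaults to
# "logger,opendaylight_v2", otherwise to "logger,opendaylight".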
|
||||
|
||||
# This triggers the provisioning of L3 resources like routers and
|
||||
# external network, if not overridden.
|
||||
Q_L3_ENABLED=${Q_L3_ENABLED:-True}
|
||||
|
||||
# We have to disable the neutron L2 agent. OpenDaylight does not use the
|
||||
# L2 agent, it instead uses a combination of OpenFlow and OVSDB commands
|
||||
# to program OVS on each compute and network node host.
|
||||
disable_service q-agt
|
||||
|
||||
# If ODL_L3 is enabled, then we don't need the L3 agent and OpenDaylight
|
||||
# is going to act as the ML2's L3 service plugin.
|
||||
# NETVIRT_VPNSERVICE feature enables ODL L3 by default, so ODL_L3 is disregarded.
|
||||
if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE," ]] || [ "$ODL_L3" == "True" ];
|
||||
then
|
||||
disable_service q-l3
|
||||
ML2_L3_PLUGIN="${ML2_L3_PLUGIN:-odl-router${V2_POSTFIX}}"
|
||||
fi
|
||||
|
||||
# Bug workaround:
|
||||
# https://bugs.launchpad.net/neutron/+bug/1614766
|
||||
# ODL ovsdb listens to 6640 and
|
||||
# the neutron agent with the native OVSDB driver also uses 6640 to connect to ovsdb-server.
|
||||
# If the ODL server and the neutron agent run on the same box, an alternative port is needed.
|
||||
export ODL_OVSDB_ALTPORT=${ODL_OVSDB_ALTPORT:-6641}
|
@ -1,156 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# devstack/plugin.sh
|
||||
# Functions to control the configuration and operation of the OpenDaylight service
|
||||
|
||||
# Save trace setting
|
||||
_XTRACE_NETWORKING_ODL=$(set +o | grep xtrace)
|
||||
set +o xtrace
|
||||
|
||||
# OpenDaylight directories
|
||||
NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$DEST/networking-odl}
|
||||
ODL_DIR=$DEST/opendaylight
|
||||
|
||||
# Make sure $ODL_DIR exists
|
||||
mkdir -p $ODL_DIR
|
||||
|
||||
# Import utility functions
|
||||
source $TOP_DIR/functions
|
||||
source $NETWORKING_ODL_DIR/devstack/functions
|
||||
source $TOP_DIR/lib/neutron-legacy
|
||||
|
||||
# Import bridge data
|
||||
source $TOP_DIR/lib/neutron_plugins/ovs_base
|
||||
|
||||
# Import ODL settings
|
||||
source $NETWORKING_ODL_DIR/devstack/settings.odl
|
||||
if [ -r $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE ]; then
|
||||
source $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE
|
||||
fi
|
||||
source $NETWORKING_ODL_DIR/devstack/odl-releases/common $ODL_RELEASE
|
||||
|
||||
# Utilities functions for setting up Java
|
||||
source $NETWORKING_ODL_DIR/devstack/setup_java.sh
|
||||
|
||||
# Import Entry Points
|
||||
# -------------------
|
||||
source $NETWORKING_ODL_DIR/devstack/entry_points
|
||||
|
||||
# Restore xtrace
|
||||
$_XTRACE_NETWORKING_ODL
|
||||
|
||||
if [[ "$ODL_USING_EXISTING_JAVA" == "True" ]]; then
|
||||
echo 'Using installed java.'
|
||||
java -version || exit 1
|
||||
fi
|
||||
|
||||
# main loop
|
||||
if is_service_enabled odl-server; then
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
install_networking_odl
|
||||
setup_opendaylight_package
|
||||
install_opendaylight
|
||||
configure_opendaylight
|
||||
init_opendaylight
|
||||
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
configure_neutron_odl
|
||||
# This has to start before Neutron
|
||||
start_opendaylight
|
||||
elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
|
||||
if [[ "$1" == "unstack" && "$UNSTACK_KEEP_ODL" != "True" ]]; then
|
||||
stop_opendaylight
|
||||
cleanup_opendaylight
|
||||
fi
|
||||
|
||||
if [[ "$1" == "clean" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
fi
|
||||
|
||||
if is_service_enabled odl-compute; then
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
install_networking_odl
|
||||
install_opendaylight_compute
|
||||
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
if is_service_enabled nova; then
|
||||
create_nova_conf_neutron
|
||||
fi
|
||||
bind_opendaylight_controller
|
||||
sudo ovs-vsctl --may-exist add-br $OVS_BR
|
||||
wait_for_active_bridge $OVS_BR $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT
|
||||
|
||||
# L3 needs to be configured only for netvirt-ovsdb - in netvirt-vpnservice L3 is configured
|
||||
# by provider_mappings, and the provider mappings are added to br-int by default
|
||||
if [[ "${ODL_L3}" == "True" ]]; then
|
||||
configure_opendaylight_l3
|
||||
fi
|
||||
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
|
||||
# no-op
|
||||
:
|
||||
elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
|
||||
if [[ "$1" == "unstack" && "$UNSTACK_KEEP_ODL" != "True" ]]; then
|
||||
cleanup_opendaylight_compute
|
||||
fi
|
||||
|
||||
if [[ "$1" == "clean" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
fi
|
||||
|
||||
if is_service_enabled odl-neutron; then
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
install_networking_odl
|
||||
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
configure_neutron_odl
|
||||
elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
|
||||
if [[ "$1" == "unstack" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
|
||||
if [[ "$1" == "clean" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
fi
|
||||
|
||||
if is_service_enabled odl-lightweight-testing; then
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
install_networking_odl
|
||||
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
configure_neutron_odl
|
||||
configure_neutron_odl_lightweight_testing
|
||||
elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
|
||||
if [[ "$1" == "unstack" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
|
||||
if [[ "$1" == "clean" ]]; then
|
||||
# no-op
|
||||
:
|
||||
fi
|
||||
fi
|
||||
|
||||
# Tell emacs to use shell-script-mode
|
||||
## Local variables:
|
||||
## mode: shell-script
|
||||
## End:
|
@ -1,62 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -xe
|
||||
|
||||
GATE_DEST=$BASE/new
|
||||
DEVSTACK_PATH=$GATE_DEST/devstack
|
||||
|
||||
source $DEVSTACK_PATH/functions
|
||||
source $DEVSTACK_PATH/openrc admin admin
|
||||
|
||||
TEMPEST_CODE_DIR="$BASE/new/tempest"
|
||||
TEMPEST_DATA_DIR="$DATA_DIR/tempest"
|
||||
NETWORKING_ODL_DIR="${NETWORKING_ODL_DIR:-$BASE/new/networking-odl}"
|
||||
|
||||
owner=stack
|
||||
sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_CODE_DIR/etc"
|
||||
|
||||
cd $TEMPEST_CODE_DIR
|
||||
sudo chown -R $owner:stack $TEMPEST_CODE_DIR
|
||||
sudo mkdir -p "$TEMPEST_DATA_DIR"
|
||||
sudo chown -R $owner:stack $TEMPEST_DATA_DIR
|
||||
|
||||
function _odl_show_info {
|
||||
sudo ip address
|
||||
sudo ip link
|
||||
sudo ip route
|
||||
sudo ovsdb-client dump
|
||||
sudo ovs-vsctl show
|
||||
for br in $(sudo ovs-vsctl list-br); do
|
||||
echo "--- flows on $br ---"
|
||||
sudo ovs-ofctl --protocols OpenFlow13 dump-ports $br
|
||||
sudo ovs-ofctl --protocols OpenFlow13 dump-ports-desc $br
|
||||
sudo ovs-ofctl --protocols OpenFlow13 dump-flows $br
|
||||
done
|
||||
|
||||
openstack network list
|
||||
openstack port list
|
||||
openstack subnet list
|
||||
openstack router list
|
||||
|
||||
# ODL_USERNAME=admin
|
||||
# ODL_PASSWORD=admin
|
||||
# ODL_MGR_HOST=$SERVICE_HOST
|
||||
# ODL_PORT=8087
|
||||
# There is no good way to retrieve these from settings.odl at the moment
|
||||
curl --silent --user admin:admin "http://localhost:8087/restconf/config/neutron:neutron?prettyPrint=true"
|
||||
echo -e "\n"
|
||||
}
|
||||
|
||||
echo "Some pre-process info"
|
||||
_odl_show_info
|
||||
|
||||
echo "Running networking-odl test suite"
|
||||
set +e
|
||||
sudo -H -u $owner $sudo_env tox -eall -- "$DEVSTACK_GATE_TEMPEST_REGEX" --serial
|
||||
retval=$?
|
||||
set -e
|
||||
|
||||
echo "Some post-process info"
|
||||
_odl_show_info
|
||||
|
||||
return $retval
|
@ -1,129 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -xe
|
||||
|
||||
# Drop a token that marks the build as coming from openstack infra
|
||||
GATE_DEST=$BASE/new
|
||||
DEVSTACK_PATH=$GATE_DEST/devstack
|
||||
# for localrc_set
|
||||
source $DEVSTACK_PATH/inc/ini-config
|
||||
|
||||
case "$ODL_RELEASE_BASE" in
|
||||
latest-snapshot)
|
||||
ODL_RELEASE=latest-snapshot
|
||||
;;
|
||||
nitrogen-snapshot)
|
||||
ODL_RELEASE=nitrogen-snapshot-0.7
|
||||
;;
|
||||
carbon-snapshot)
|
||||
ODL_RELEASE=carbon-snapshot-0.6
|
||||
;;
|
||||
boron-snapshot)
|
||||
ODL_RELEASE=boron-snapshot-0.5
|
||||
;;
|
||||
*)
|
||||
echo "Unknown ODL release base: $ODL_RELEASE_BASE"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -z "$ODL_GATE_V2DRIVER" ]] && [[ -n "${RALLY_SCENARIO}" ]]; then
|
||||
ODL_GATE_V2DRIVER=v2driver
|
||||
fi
|
||||
case "$ODL_GATE_V2DRIVER" in
|
||||
v2driver)
|
||||
ODL_V2DRIVER=True
|
||||
;;
|
||||
v1driver|*)
|
||||
ODL_V2DRIVER=False
|
||||
;;
|
||||
esac
|
||||
|
||||
ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding
|
||||
|
||||
ODL_GATE_SERVICE_PROVIDER=${ODL_GATE_SERVICE_PROVIDER%-}
|
||||
if [[ -z "$ODL_GATE_SERVICE_PROVIDER" ]] && [[ -n "${RALLY_SCENARIO}" ]]; then
|
||||
ODL_GATE_SERVICE_PROVIDER=vpnservice
|
||||
fi
|
||||
|
||||
case "$ODL_GATE_SERVICE_PROVIDER" in
|
||||
vpnservice)
|
||||
ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack
|
||||
# $PUBLIC_PHYSICAL_NETWORK = public by default
|
||||
ODL_MAPPING_KEY=public
|
||||
;;
|
||||
netvirt|*)
|
||||
ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-ovsdb-openstack
|
||||
# $PUBLIC_BRIDGE = br-ex by default
|
||||
ODL_MAPPING_KEY=br-ex
|
||||
;;
|
||||
esac
|
||||
|
||||
ODL_NETVIRT_KARAF_FEATURE=$ODL_NETVIRT_KARAF_FEATURE,odl-neutron-logger
|
||||
case "$ODL_RELEASE_BASE" in
|
||||
carbon-snapshot|nitrogen-snapshot)
|
||||
ODL_NETVIRT_KARAF_FEATURE=$ODL_NETVIRT_KARAF_FEATURE,odl-neutron-hostconfig-ovs
|
||||
;;
|
||||
esac
|
||||
|
||||
localrc_file=$DEVSTACK_PATH/local.conf
|
||||
|
||||
localrc_set $localrc_file "IS_GATE" "True"
|
||||
|
||||
# Set here the ODL release to use for the Gate job
|
||||
localrc_set $localrc_file "ODL_RELEASE" "${ODL_RELEASE}"
|
||||
|
||||
# Set here which driver, v1 or v2 driver
|
||||
localrc_set $localrc_file "ODL_V2DRIVER" "${ODL_V2DRIVER}"
|
||||
|
||||
# Set timeout in seconds for http client to ODL neutron northbound
|
||||
localrc_set $localrc_file "ODL_TIMEOUT" "60"
|
||||
|
||||
# Set here which port binding controller
|
||||
localrc_set $localrc_file "ODL_PORT_BINDING_CONTROLLER" "${ODL_PORT_BINDING_CONTROLLER}"
|
||||
|
||||
# Set here which ODL openstack service provider to use
|
||||
localrc_set $localrc_file "ODL_NETVIRT_KARAF_FEATURE" "${ODL_NETVIRT_KARAF_FEATURE}"
|
||||
|
||||
# Switch to using ODL's L3 implementation
|
||||
localrc_set $localrc_file "ODL_L3" "True"
|
||||
|
||||
# Since localrc_set adds entries in reverse order, ODL_PROVIDER_MAPPINGS needs to be
|
||||
# set before the variables that depend on it
|
||||
|
||||
if [[ "$ODL_GATE_SERVICE_PROVIDER" == "vpnservice" ]]; then
|
||||
localrc_set $localrc_file "ODL_PROVIDER_MAPPINGS" "public:br-ex"
|
||||
localrc_set $localrc_file "PUBLIC_PHYSICAL_NETWORK" "public"
|
||||
localrc_set $localrc_file "PUBLIC_BRIDGE" "br-ex"
|
||||
localrc_set $localrc_file "Q_USE_PUBLIC_VETH" "False"
|
||||
else
|
||||
localrc_set $localrc_file "ODL_PROVIDER_MAPPINGS" "\${ODL_PROVIDER_MAPPINGS:-${ODL_MAPPING_KEY}:\${Q_PUBLIC_VETH_INT}}"
|
||||
localrc_set $localrc_file "Q_USE_PUBLIC_VETH" "True"
|
||||
localrc_set $localrc_file "Q_PUBLIC_VETH_EX" "veth-pub-ex"
|
||||
localrc_set $localrc_file "Q_PUBLIC_VETH_INT" "veth-pub-int"
|
||||
fi
|
||||
|
||||
# Enable debug logs for odl ovsdb
|
||||
localrc_set $localrc_file "ODL_NETVIRT_DEBUG_LOGS" "True"
|
||||
|
||||
localrc_set $localrc_file "RALLY_SCENARIO" "${RALLY_SCENARIO}"
|
||||
|
||||
# delete and recreate the networks to work around netvirt bugs:
|
||||
# https://bugs.opendaylight.org/show_bug.cgi?id=7456
|
||||
# https://bugs.opendaylight.org/show_bug.cgi?id=8133
|
||||
if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] || [[ "$ODL_GATE_SERVICE_PROVIDER" == "vpnservice" ]]; then
|
||||
cat <<EOF >> $DEVSTACK_PATH/local.sh
|
||||
#!/usr/bin/env bash
|
||||
|
||||
sudo ifconfig br-ex 172.24.5.1/24 up
|
||||
source $DEVSTACK_PATH/openrc admin
|
||||
openstack router unset --external-gateway router1
|
||||
openstack port list --router router1 -c ID -f value | xargs -I {} openstack router remove port router1 {}
|
||||
openstack router delete router1
|
||||
openstack subnet list | grep -e public -e private | cut -f2 -d'|' | xargs openstack subnet delete
|
||||
openstack network list | grep -e public -e private | cut -f2 -d'|' | xargs openstack network delete
|
||||
openstack network create public --external --provider-network-type=flat --provider-physical-network=public
|
||||
openstack subnet create --network=public --subnet-range=172.24.5.0/24 --gateway 172.24.5.1 public-subnet
|
||||
EOF
|
||||
chmod 755 $DEVSTACK_PATH/local.sh
|
||||
fi
|
@ -1,114 +0,0 @@
|
||||
# Devstack settings
|
||||
|
||||
# Each service you enable has the following meaning:
|
||||
# odl-neutron - Add this config flag if OpenDaylight controller and OpenStack
|
||||
# Controller are on different nodes.
|
||||
# odl-server - Add this config flag if OpenDaylight controller and OpenStack
|
||||
# Controller are on the same node.
|
||||
# odl-compute - Add this config flag for OpenStack Compute.
|
||||
#
|
||||
# odl-lightweight-testing - Add this config flag for testing neutron ODL ML2
|
||||
# driver and networking-odl without a real running
|
||||
# OpenDaylight instance
|
||||
#
|
||||
# NOTE: odl-server includes odl-neutron.
|
||||
#
|
||||
# An example of enabling all-in-one ODL is below.
|
||||
#enable_service odl-compute odl-server
|
||||
|
||||
# This can be overridden in the localrc file
|
||||
ODL_MODE=${ODL_MODE:-allinone}
|
||||
|
||||
# ODL_MODE is used to configure how devstack works with OpenDaylight. You
|
||||
# can configure this three ways:
|
||||
#
|
||||
# ODL_MODE=allinone
|
||||
# Use this mode if you want to run ODL in this devstack instance. Useful
|
||||
# for a single node deployment or on the control node of a multi-node
|
||||
# devstack environment.
|
||||
#
|
||||
# ODL_MODE=compute
|
||||
# Use this for the compute nodes of a multi-node devstack install.
|
||||
#
|
||||
# ODL_MODE=externalodl
|
||||
# This installs the neutron code for ODL, but does not attempt to
|
||||
# manage ODL in devstack. This is used for development environments
|
||||
# similar to the allinone case except where you are using bleeding edge ODL
|
||||
# which is not yet released, and thus don't want it managed by
|
||||
# devstack.
|
||||
#
|
||||
# ODL_MODE=lightweight-testing
|
||||
# Use this for testing neutron ML2 driver plus networking-odl without
|
||||
# a running OpenDaylight instance.
|
||||
#
|
||||
# ODL_MODE=manual
|
||||
# You're on your own here, and are enabling services outside the scope of
|
||||
# the ODL_MODE variable.
|
||||
|
||||
case $ODL_MODE in
|
||||
allinone)
|
||||
enable_service odl-server odl-compute
|
||||
;;
|
||||
externalodl)
|
||||
enable_service odl-neutron odl-compute
|
||||
;;
|
||||
compute)
|
||||
enable_service odl-compute
|
||||
;;
|
||||
lightweight-testing)
|
||||
enable_service odl-lightweight-testing
|
||||
;;
|
||||
manual)
|
||||
echo "Manual mode: Enabling services explicitly."
|
||||
;;
|
||||
esac
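# Illustrative local.conf override for a multi-node compute host (values are
# placeholders):
#   [[local|localrc]]
#   ODL_MODE=compute
#   ODL_MGR_IP=<controller node IP>
#   HOST_IP=<this node's IP>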
|
||||
|
||||
|
||||
IS_GATE=$(trueorfalse False IS_GATE)
|
||||
if [[ "$IS_GATE" == "True" ]]
|
||||
then
|
||||
NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$DEST/networking-odl}
|
||||
fi
|
||||
|
||||
# in tempest.conf
|
||||
# [networking-feature-enabled] api-extensions
|
||||
# api-extensions=all means every kind of extension is enabled regardless of
|
||||
# what the plugin supports. The ML2 plugin with the ODL driver supports only the
|
||||
# following extensions, not all of them. This list must be maintained as the ML2
|
||||
# plugin with the ODL driver gains support for more extensions.
|
||||
if [[ -z "$NETWORK_API_EXTENSIONS" ]]; then
|
||||
NETWORK_API_EXTENSIONS=address-scope
|
||||
NETWORK_API_EXTENSIONS+=,agent
|
||||
NETWORK_API_EXTENSIONS+=,allowed-address-pairs
|
||||
NETWORK_API_EXTENSIONS+=,binding
|
||||
NETWORK_API_EXTENSIONS+=,dhcp_agent_scheduler
|
||||
NETWORK_API_EXTENSIONS+=,dns-integration
|
||||
NETWORK_API_EXTENSIONS+=,dvr
|
||||
NETWORK_API_EXTENSIONS+=,ext-gw-mode
|
||||
NETWORK_API_EXTENSIONS+=,external-net
|
||||
NETWORK_API_EXTENSIONS+=,extra_dhcp_opt
|
||||
NETWORK_API_EXTENSIONS+=,extraroute
|
||||
NETWORK_API_EXTENSIONS+=,flavors
|
||||
NETWORK_API_EXTENSIONS+=,multi-provider
|
||||
NETWORK_API_EXTENSIONS+=,net-mtu
|
||||
NETWORK_API_EXTENSIONS+=,network-ip-availability
|
||||
NETWORK_API_EXTENSIONS+=,pagination
|
||||
NETWORK_API_EXTENSIONS+=,port-security
|
||||
NETWORK_API_EXTENSIONS+=,project-id
|
||||
NETWORK_API_EXTENSIONS+=,provider
|
||||
NETWORK_API_EXTENSIONS+=,qos
|
||||
NETWORK_API_EXTENSIONS+=,quotas
|
||||
NETWORK_API_EXTENSIONS+=,rbac-policies
|
||||
NETWORK_API_EXTENSIONS+=,router
|
||||
NETWORK_API_EXTENSIONS+=,router-interface-fip
|
||||
NETWORK_API_EXTENSIONS+=,security-group
|
||||
NETWORK_API_EXTENSIONS+=,service-type
|
||||
NETWORK_API_EXTENSIONS+=,sorting
|
||||
NETWORK_API_EXTENSIONS+=,standard-attr-description
|
||||
NETWORK_API_EXTENSIONS+=,standard-attr-revisions
|
||||
NETWORK_API_EXTENSIONS+=,standard-attr-timestamp
|
||||
NETWORK_API_EXTENSIONS+=,subnet_allocation
|
||||
NETWORK_API_EXTENSIONS+=,tag
|
||||
NETWORK_API_EXTENSIONS+=,timestamp_core
|
||||
NETWORK_API_EXTENSIONS+=,vlan-transparent
|
||||
fi
|
@ -1,134 +0,0 @@
|
||||
# Add here any global default values that apply for any ODL release
|
||||
# -----------------------------------------------------------------
|
||||
|
||||
# What release to use. Choices are:
|
||||
# https://wiki.opendaylight.org/view/Release_Plan
|
||||
#
|
||||
# latest-snapshot (master latest snapshot)
|
||||
# nitrogen-snapshot-0.7 (master latest nitrogen snapshot)
|
||||
# nitrogen-snapshot-0.7.0 (master)
|
||||
# carbon-snapshot-0.6 (stable/carbon latest carbon snapshot)
|
||||
# carbon-snapshot-0.6.2 (stable/carbon)
|
||||
# carbon-0.6.1-SR1
|
||||
# carbon-0.6.0
|
||||
# boron-snapshot-0.5 (stable/boron latest boron snapshot)
|
||||
# boron-snapshot-0.5.5 (stable/boron)
|
||||
# boron-0.5.2-SR4
|
||||
# boron-0.5.2-SR3
|
||||
# boron-0.5.2-SR2
|
||||
# boron-0.5.1-SR1
|
||||
# boron-0.5.0
|
||||
|
||||
ODL_RELEASE=${ODL_RELEASE:-latest-snapshot}
|
||||
|
||||
# The IP address of ODL. Set this in local.conf.
|
||||
|
||||
# Set ODL_MGR_HOST to ODL_MGR_IP if ODL_MGR_HOST is not set
|
||||
ODL_MGR_HOST=${ODL_MGR_HOST:-$ODL_MGR_IP}
|
||||
|
||||
# Set ODL_MGR_HOST to SERVICE_HOST if neither ODL_MGR_HOST nor ODL_MGR_IP is set
|
||||
ODL_MGR_HOST=${ODL_MGR_HOST:-$SERVICE_HOST}
|
||||
|
||||
# The list of IP addresses used as OVS manager, separated by a comma.
|
||||
# In non-clustering cases, this is normally the same as ODL_MGR_HOST. However,
|
||||
# for HA deployments the southbound portion to ODL is expected to
|
||||
# use the ip addresses of the ODL instances instead of a single vip. That
|
||||
# enables OVS to simultaneously connect to more than one ODL instance.
|
||||
# Example of expected format: ODL_OVS_MANAGERS=1.1.1.1,2.2.2.2,3.3.3.3
|
||||
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS:-$ODL_MGR_HOST}
|
||||
|
||||
# The default ODL port for Jetty to use
|
||||
# NOTE: We make this configurable because by default, ODL uses port 8080 for
|
||||
# Jetty, and this conflicts with swift which also uses port 8080.
|
||||
ODL_PORT=${ODL_PORT:-8087}
|
||||
|
||||
# The ODL endpoint URL
|
||||
ODL_ENDPOINT=${ODL_ENDPOINT:-http://${ODL_MGR_HOST}:${ODL_PORT}/controller/nb/v2/neutron}
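# With the defaults above this resolves to e.g.
#   http://$SERVICE_HOST:8087/controller/nb/v2/neutron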
|
||||
|
||||
# The ODL username
|
||||
ODL_USERNAME=${ODL_USERNAME:-admin}
|
||||
|
||||
# The ODL password
|
||||
ODL_PASSWORD=${ODL_PASSWORD:-admin}
|
||||
|
||||
# The http timeout in seconds for http client to ODL neutron northbound.
|
||||
# unset or empty string means default.
|
||||
ODL_TIMEOUT=${ODL_TIMEOUT:-""}
|
||||
|
||||
# use v2 type driver
|
||||
# this requires post mitaka
|
||||
ODL_V2DRIVER=${ODL_V2DRIVER:-True}
|
||||
|
||||
# The OpenDaylight URL PREFIX
|
||||
ODL_URL_PREFIX=${ODL_URL_PREFIX:-https://nexus.opendaylight.org}
|
||||
|
||||
# OpenDaylight snapshot & release repositories paths
|
||||
# Can be overridden in case you host proxy repositories which have a different directory structure than OpenDaylight's
|
||||
ODL_URL_SNAPSHOT_REPOSITORY_PATH=${ODL_URL_SNAPSHOT_REPOSITORY_PATH:-content/repositories/opendaylight.snapshot}
|
||||
ODL_URL_RELEASE_REPOSITORY_PATH=${ODL_URL_RELEASE_REPOSITORY_PATH:-content/repositories/opendaylight.release}
|
||||
|
||||
# How long (in seconds) to pause after ODL starts to let it complete booting
|
||||
ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-600}
|
||||
|
||||
# Enable conntrack support for legacy netvirt
|
||||
ODL_LEGACY_NETVIRT_CONNTRACK=${ODL_LEGACY_NETVIRT_CONNTRACK:-False}
|
||||
|
||||
# Enable OpenDaylight l3 forwarding
|
||||
ODL_L3=${ODL_L3:-False}
|
||||
|
||||
# If you need to route the traffic out of the box, set
|
||||
# ODL_PROVIDER_MAPPINGS to map br-ex as shown below. Note
|
||||
# This used to be accomplished via PUBLIC_BRIDGE, but that
|
||||
# is no longer necessary.
|
||||
#
|
||||
# The physical provider network to device mapping. Use this
|
||||
# to instruct ODL to map ports into specific bridges
|
||||
# Examples:
|
||||
# ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2}
|
||||
# ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth1,br-ex:eth2}
|
||||
|
||||
# MAC address for next hop gateway at external network
|
||||
ODL_L3GW_MAC=${ODL_L3GW_MAC:-''}
|
||||
|
||||
# Enable debug logs for odl ovsdb
|
||||
ODL_NETVIRT_DEBUG_LOGS=${ODL_NETVIRT_DEBUG_LOGS:-False}
|
||||
|
||||
# Karaf logfile information
|
||||
ODL_KARAF_LOG_DATE=$(date +%Y-%m-%d-%H%M%S)
|
||||
ODL_KARAF_LOG_BASE=${ODL_KARAF_LOG_BASE:-screen-karaf.log}
|
||||
ODL_KARAF_LOG_NAME=$ODL_KARAF_LOG_BASE.$ODL_KARAF_LOG_DATE
|
||||
|
||||
# The bridge to configure
|
||||
OVS_BR=${OVS_BR:-br-int}
|
||||
|
||||
# Use the existing ready java env
|
||||
ODL_USING_EXISTING_JAVA=${ODL_USING_EXISTING_JAVA:-False}
|
||||
|
||||
# Allow the min/max/perm Java memory to be configurable
|
||||
ODL_JAVA_MIN_MEM=${ODL_JAVA_MIN_MEM:-256m}
|
||||
ODL_JAVA_MAX_MEM=${ODL_JAVA_MAX_MEM:-512m}
|
||||
ODL_JAVA_MAX_PERM_MEM=${ODL_JAVA_MAX_PERM_MEM:-512m}
|
||||
|
||||
# Interval in test_with_retry calls
|
||||
ODL_RETRY_SLEEP_INTERVAL=${ODL_RETRY_SLEEP_INTERVAL:-5}
|
||||
|
||||
# Skip installation of distribution provided Open vSwitch
|
||||
SKIP_OVS_INSTALL=$(trueorfalse False SKIP_OVS_INSTALL)
|
||||
|
||||
# The ODL Restconf URL
|
||||
# URI to hostconfigs: empty for default value
|
||||
ODL_HOSTCONF_URI=${ODL_HOSTCONF_URI:-}
|
||||
|
||||
# Port binding controller
|
||||
# pseudo-agentdb-binding, legacy-port-binding
|
||||
# pseudo-agentdb-binding is supported by Boron or later
|
||||
ODL_PORT_BINDING_CONTROLLER=${ODL_PORT_BINDING_CONTROLLER:-pseudo-agentdb-binding}
|
||||
|
||||
# Snapshot version - allows using a specific version e.g. 0.5.0-20160719.101233-3643
|
||||
# latest: check the latest snapshot
|
||||
# specific version: the specific version of the snapshot
|
||||
# "": odl release
|
||||
ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-}
|
||||
|
||||
# Set to True to keep odl running after unstack
|
||||
UNSTACK_KEEP_ODL=${UNSTACK_KEEP_ODL:-False}
|
@ -1,207 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
ORACLE_JAVA_URL="http://download.oracle.com/otn-pub/java/jdk"
|
||||
ORACLE_JAVA7_URL="${ORACLE_JAVA7_URL:-$ORACLE_JAVA_URL/7u80-b15/jdk-7u80}"
|
||||
ORACLE_JAVA7_NAME="jdk1.7.0_80"
|
||||
ORACLE_JAVA8_URL="${ORACLE_JAVA8_URL:-$ORACLE_JAVA_URL/8u112-b15/jdk-8u112}"
|
||||
ORACLE_JAVA8_NAME="jdk1.8.0_112"
|
||||
|
||||
function setup_java {
|
||||
# Java version 8 is the latest stable one
|
||||
local VERSION="${1:-8}"
|
||||
|
||||
echo "Setup Java version: $VERSION"
|
||||
if test_java_version "$VERSION" && setup_java_env; then
|
||||
echo "Current Java version is already $VERSION."
|
||||
elif select_java "$VERSION"; then
|
||||
echo "Java version $VERSION has been selected."
|
||||
elif install_openjdk "$VERSION" && select_java "$VERSION"; then
|
||||
echo "OpenJDK version $VERSION has been installed and selected."
|
||||
elif install_other_java "$VERSION" && select_java "$VERSION"; then
|
||||
echo "Some Java version $VERSION has been installed and selected."
|
||||
else
|
||||
echo "ERROR: Unable to setup Java version $VERSION."
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
function setup_java_env {
|
||||
local JAVA_COMMAND="${1:-${JAVA:-java}}"
|
||||
|
||||
JAVA_LINK="$(which $JAVA_COMMAND)"
|
||||
if [[ "$JAVA_LINK" == "" ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
export JAVA="$(readlink -f $JAVA_LINK)"
|
||||
export JAVA_HOME=$(echo $JAVA | sed "s:/bin/java::" | sed "s:/jre::")
|
||||
if [ "$JAVA" != "$(readlink -f $(which java))" ]; then
|
||||
export PATH="$(dirname $JAVA):$PATH"
|
||||
if [ "$JAVA" != "$(readlink -f $(which java))" ]; then
|
||||
echo "Unable to set $JAVA as current."
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "JAVA is: $JAVA"
|
||||
echo "JAVA_HOME is: $JAVA_HOME"
|
||||
echo "Java version is:"
|
||||
$JAVA -version 2>&1
|
||||
}
|
||||
|
||||
function select_java {
|
||||
local VERSION="$1"
|
||||
local COMMAND
|
||||
|
||||
for COMMAND in $(list_java_commands); do
|
||||
if test_java_version "$VERSION" "$COMMAND"; then
|
||||
if setup_java_env "$COMMAND"; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo 'Required java version not found.'
|
||||
return 1
|
||||
}
|
||||
|
||||
function test_java_version {
|
||||
local EXPECTED_VERSION="'"*' version "1.'$1'.'*'"'"'"
|
||||
local COMMAND="${2:-${JAVA:-java}}"
|
||||
local ACTUAL_VERSION="'"$($COMMAND -version 2>&1 | head -n 1)"'"
|
||||
|
||||
if [[ $ACTUAL_VERSION == $EXPECTED_VERSION ]]; then
|
||||
echo "Found matching java version: $ACTUAL_VERSION"
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
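# For illustration: with VERSION=8 a first output line such as
#   java version "1.8.0_112"
# matches the expected glob and the function returns 0.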
|
||||
|
||||
if is_ubuntu; then
|
||||
# --- Ubuntu -------------------------------------------------------------
|
||||
|
||||
function list_java_commands {
|
||||
update-alternatives --list java
|
||||
}
|
||||
|
||||
function install_openjdk {
|
||||
local REQUIRED_VERSION="$1"
|
||||
apt_get install "openjdk-$REQUIRED_VERSION-jre-headless"
|
||||
}
|
||||
|
||||
function install_other_java {
|
||||
local VERSION="$1"
|
||||
local PPA_REPOSITORY="ppa:webupd8team/java"
|
||||
local JAVA_INSTALLER="oracle-java${VERSION}-installer"
|
||||
local JAVA_SET_DEFAULT="oracle-java${VERSION}-set-default"
|
||||
|
||||
# Accept installer license
|
||||
echo "$JAVA_INSTALLER" shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
|
||||
|
||||
# Remove all existing set-default versions
|
||||
apt_get remove oracle-java*-set-default
|
||||
if apt_get install $JAVA_INSTALLER ; then
|
||||
if apt_get install $JAVA_SET_DEFAULT ; then
|
||||
return 0 # Some PPA was already providing desired packages
|
||||
fi
|
||||
fi
|
||||
|
||||
# Add PPA only when package is not available
|
||||
if apt_get install software-properties-common; then
|
||||
# Pipe from echo to emulate a user key-press
|
||||
if echo | sudo -E add-apt-repository "$PPA_REPOSITORY"; then
|
||||
if apt_get update; then
|
||||
if apt_get install $JAVA_INSTALLER ; then
|
||||
if apt_get install $JAVA_SET_DEFAULT ; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Something has gone wrong!
|
||||
return 1
|
||||
}
|
||||
|
||||
else
|
||||
# --- Red Hat -------------------------------------------------------------
|
||||
|
||||
function list_java_commands {
|
||||
alternatives --display java 2>&1 | grep -v '^[[:space:]]' | awk '/[[:space:]]- priority[[:space:]]/{print $1}'
|
||||
}
|
||||
|
||||
function install_openjdk {
|
||||
local VERSION="$1"
|
||||
yum_install java-1.$VERSION.*-openjdk-headless
|
||||
}
|
||||
|
||||
function install_other_java {
|
||||
local VERSION="$1"
|
||||
|
||||
if [[ "$(uname -m)" == "x86_64" ]]; then
|
||||
local ARCH=linux-x64
|
||||
else
|
||||
local ARCH=linux-i586
|
||||
fi
|
||||
|
||||
if [[ "$VERSION" == "7" ]]; then
|
||||
ORIGIN=$ORACLE_JAVA7_URL
|
||||
TARGET=$ORACLE_JAVA7_NAME
|
||||
elif [[ "$VERSION" == "8" ]]; then
|
||||
ORIGIN=$ORACLE_JAVA8_URL
|
||||
TARGET=$ORACLE_JAVA8_NAME
|
||||
else
|
||||
echo "Unsupported Java version: $VERSION."
|
||||
return 1
|
||||
fi
|
||||
|
||||
local NEW_JAVA="/usr/java/$TARGET/jre/bin/java"
|
||||
if test_java_version "$VERSION" "$NEW_JAVA"; then
|
||||
if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
local EXT
|
||||
local WGET_OPTIONS="-c --no-check-certificate --no-cookies"
|
||||
local HEADER="Cookie: oraclelicense=accept-securebackup-cookie"
|
||||
|
||||
for EXT in "rpm" "tar.gz"; do
|
||||
local URL="$ORIGIN-$ARCH.$EXT"
|
||||
local PACKAGE="/tmp/$(basename $URL)"
|
||||
|
||||
if wget $WGET_OPTIONS --header "$HEADER" "$URL" -O "$PACKAGE"; then
|
||||
case "$EXT" in
|
||||
"rpm")
|
||||
sudo rpm -i "$PACKAGE"
|
||||
;;
|
||||
"tar.gz")
|
||||
sudo mkdir -p /usr/java && sudo tar -C /usr/java -xzf "$PACKAGE"
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported extension: $EXT"
|
||||
;;
|
||||
esac
|
||||
|
||||
if test_java_version "$VERSION" "$NEW_JAVA"; then
|
||||
if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Unable to register installed java."
|
||||
|
||||
else
|
||||
echo "Unable to download java archive: $URL"
|
||||
fi
|
||||
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
fi
|
@ -1,34 +0,0 @@
|
||||
register_project_for_upgrade networking-odl
|
||||
|
||||
# NOTE(manjeets) Workaround for bug 1648176 to upgrade
|
||||
# networking-odl before neutron
|
||||
UPGRADE_PROJECTS="networking-odl ${UPGRADE_PROJECTS/ networking-odl/}"
|
||||
|
||||
# Add karaf features to be enabled for ODL
|
||||
ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-mdsal-apidocs
|
||||
ODL_NETVIRT_KARAF_FEATURE+=,odl-l2switch-switch,odl-netvirt-openstack
|
||||
|
||||
# the base side should enable a recent stable release
|
||||
devstack_localrc base enable_plugin networking-odl http://github.com/openstack/networking-odl.git stable/ocata
|
||||
|
||||
devstack_localrc target enable_plugin networking-odl http://github.com/openstack/networking-odl.git
|
||||
|
||||
for w in base target; do
|
||||
devstack_localrc $w disable_service q-agt
|
||||
devstack_localrc $w disable_service q-l3
|
||||
devstack_localrc $w enable_service q-dhcp
|
||||
devstack_localrc $w enable_service q-meta
|
||||
devstack_localrc $w enable_service placement-api
|
||||
devstack_localrc $w enable_service placement-client
|
||||
devstack_localrc $w Q_PLUGIN=ml2
|
||||
devstack_localrc $w ODL_CONFIG_BRIDGES=True
|
||||
devstack_localrc $w ODL_L3=True
|
||||
devstack_localrc $w ODL_V2DRIVER=True
|
||||
devstack_localrc $w Q_ML2_PLUGIN_TYPE_DRIVERS=flat,vlan,gre,vxlan
|
||||
devstack_localrc $w Q_USE_PUBLIC_VETH=True
|
||||
devstack_localrc $w Q_PUBLIC_VETH_EX=veth-pub-ex
|
||||
devstack_localrc $w Q_PUBLIC_VETH_INT=veth-pub-int
|
||||
devstack_localrc $w ODL_RELEASE=carbon-snapshot-0.6
|
||||
devstack_localrc $w ODL_PROVIDER_MAPPINGS=public:${Q_PUBLIC_VETH_INT}
|
||||
devstack_localrc $w ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE}
|
||||
done
|
@ -1,23 +0,0 @@
|
||||
echo "*********************************************************************"
|
||||
echo "Begin $0"
|
||||
echo "*********************************************************************"
|
||||
|
||||
set -o xtrace
|
||||
|
||||
# Set for DevStack compatibility
|
||||
|
||||
source $GRENADE_DIR/grenaderc
|
||||
source $TARGET_DEVSTACK_DIR/stackrc
|
||||
|
||||
# Get functions from current DevStack
|
||||
source $TARGET_DEVSTACK_DIR/inc/python
|
||||
|
||||
NETWORKING_ODL_DIR="$TARGET_RELEASE_DIR/networking-odl"
|
||||
|
||||
setup_develop $NETWORKING_ODL_DIR
|
||||
|
||||
set +x
|
||||
set +o xtrace
|
||||
echo "*********************************************************************"
|
||||
echo "SUCCESS: End $0"
|
||||
echo "*********************************************************************"
|
@ -1,9 +0,0 @@
|
||||
====================
|
||||
Administration Guide
|
||||
====================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:glob:
|
||||
|
||||
*
|
@ -1,116 +0,0 @@
|
||||
Reference Architecture
|
||||
======================
|
||||
This document describes the minimal reference architecture needed to get OpenStack
|
||||
installed with OpenDaylight. Wherever possible, additional resources will be
|
||||
stated.
|
||||
|
||||
Cloud Composition
|
||||
-----------------
|
||||
The basic cloud will have 3 types of nodes:
|
||||
|
||||
* Controller Node - Runs OpenStack services and the ODL controller.
|
||||
* Network Node - Runs the DHCP agent, the metadata agent, and the L3 agent (for
|
||||
SNAT).
|
||||
* Compute Node - VMs live here.
|
||||
|
||||
Usually each of the first two node types runs as a cluster of three nodes to
|
||||
support HA. It's also possible to run the ODL controller on separate hardware
|
||||
from the OpenStack services, but this isn't mandatory.
|
||||
|
||||
The last node type can have as many nodes as scale requirements dictate.
|
||||
|
||||
Networking Requirements
|
||||
-----------------------
|
||||
There are several types of networks on the cloud, the most important for the
|
||||
reference architecture are:
|
||||
|
||||
* Management Network - This is the network used to communicate between the
|
||||
different management components, i.e. Nova controller to Nova agent, Neutron
|
||||
to ODL, ODL to OVS, etc.
|
||||
* External Network - This network provides VMs with external connectivity (i.e.
|
||||
internet) usually via virtual routers.
|
||||
* Data Network - This is the network used to connect the VMs to each other and
|
||||
to network resources such as virtual routers.
|
||||
|
||||
The Controller Nodes are usually connected only to the Management Network, unless
|
||||
they have an externally reachable IP on the External Network.
|
||||
|
||||
The other node types are connected to all the networks since ODL uses a
|
||||
distributed routing model so that each Compute Node hosts a "virtual router"
|
||||
responsible for connecting the VMs from that node to other networks (including
|
||||
the External Network).
|
||||
|
||||
This diagram illustrates how these nodes might be connected::
|
||||
|
||||
Controller Node
|
||||
+-----------------+
|
||||
| |
|
||||
+-----------+192.168.0.251 |
|
||||
| | |
|
||||
| +-----------------+
|
||||
|
|
||||
| Compute Node +----------------+
|
||||
| +---------------+ | Legend |
|
||||
| | | +----------------+
|
||||
+-----------+192.168.0.1 | | |
|
||||
| | | | --- Management |
|
||||
| +~~~~~~~~~+10.0.0.1 | | |
|
||||
| | | | | ~~~ Data |
|
||||
| | +=======+br-int | | |
|
||||
| | | | | | === External |
|
||||
| | | +---------------+ | |
|
||||
| | | +----------------+
|
||||
| | | Network Node
|
||||
| | | +-----------------+
|
||||
| | | | |
|
||||
+-----------+192.168.0.100 |
|
||||
| | | |
|
||||
+~~~~~~~~~+10.0.0.100 |
|
||||
| | |
|
||||
|=======+br-int |
|
||||
| | |
|
||||
| +-----------------+
|
||||
+----+---+
|
||||
| |
|
||||
| Router |
|
||||
| |
|
||||
+--------+
|
||||
|
||||
|
||||
Minimal Hardware Requirements
|
||||
-----------------------------
|
||||
The rule of thumb is the bigger the better: more RAM and more cores will
|
||||
translate to a better environment. For a POC environment the following is
|
||||
necessary:
|
||||
|
||||
Management Node
|
||||
~~~~~~~~~~~~~~~
|
||||
CPU: 2 cores
|
||||
|
||||
Memory: 8 GB
|
||||
|
||||
Storage: 100 GB
|
||||
|
||||
Network: 1 * 1 Gbps NIC
|
||||
|
||||
Network Node
|
||||
~~~~~~~~~~~~
|
||||
CPU: 2 cores
|
||||
|
||||
Memory: 2 GB
|
||||
|
||||
Storage: 50 GB
|
||||
|
||||
Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs
|
||||
|
||||
|
||||
Compute Node
|
||||
~~~~~~~~~~~~
|
||||
CPU: 2+ cores
|
||||
|
||||
Memory: 8+ GB
|
||||
|
||||
Storage: 100 GB
|
||||
|
||||
Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs
|
||||
|
@ -1,82 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.abspath('../..'))
|
||||
# -- General configuration ----------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
#'sphinx.ext.intersphinx',
|
||||
'openstackdocstheme',
|
||||
'oslo_config.sphinxext',
|
||||
]
|
||||
|
||||
# openstackdocstheme options
|
||||
repository_name = 'openstack/networking-odl'
|
||||
bug_project = 'networking-odl'
|
||||
bug_tag = 'doc'
|
||||
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
||||
# text edit cycles.
|
||||
# execute "export SPHINX_DEBUG=1" in your terminal to disable
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'networking-odl'
|
||||
copyright = u'2013, OpenStack Foundation'
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
add_module_names = True
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# -- Options for HTML output --------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. Major themes that come with
|
||||
# Sphinx are currently 'default' and 'sphinxdoc'.
|
||||
# html_theme_path = ["."]
|
||||
html_theme = 'openstackdocs'
|
||||
# html_static_path = ['static']
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = '%sdoc' % project
|
||||
|
||||
html_last_updated_fmt = '%Y-%m-%d %H:%M'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
latex_documents = [
|
||||
('index',
|
||||
'%s.tex' % project,
|
||||
u'%s Documentation' % project,
|
||||
u'OpenStack Foundation', 'manual'),
|
||||
]
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
#intersphinx_mapping = {'http://docs.python.org/': None}
|
@ -1,11 +0,0 @@
|
||||
|
||||
Configuration options
|
||||
=====================
|
||||
|
||||
Networking-odl uses the following configuration options
|
||||
in the Neutron server configuration, which is typically
|
||||
`/etc/neutron/neutron.conf`.
|
||||
|
||||
.. show-options::
|
||||
|
||||
ml2_odl
|
@ -1,4 +0,0 @@
|
||||
============
|
||||
Contributing
|
||||
============
|
||||
.. include:: ../../../CONTRIBUTING.rst
|
@ -1,89 +0,0 @@
|
||||
ODL Drivers Architecture
|
||||
========================
|
||||
|
||||
This document covers architectural concepts of the ODL drivers. Although
|
||||
'driver' is an ML2 term, it's used widely in ODL to refer to any
|
||||
implementation of APIs. Any mention of ML2 in this document is solely for
|
||||
reference purposes.
|
||||
|
||||
V1 Driver Overview
|
||||
------------------
|
||||
|
||||
The first driver version was a naive implementation which synchronously
|
||||
mirrored all calls to the ODL controller. For example, a create network request
|
||||
would first get written to the DB by Neutron's ML2 plugin, and then the ODL
|
||||
driver would send the request to POST the network to the ODL controller.
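As a rough illustration only, the synchronous pattern described above boils
down to something like the following sketch (the URL, credentials and payload
are assumptions made for the example, not the actual driver code)::

    # Hypothetical sketch of the V1 "mirror synchronously" pattern; the URL,
    # credentials and payload are illustrative assumptions only.
    import requests

    ODL_NETWORKS_URL = 'http://odl.example.com:8080/controller/nb/v2/neutron/networks'

    def create_network_postcommit(network):
        # Neutron has already committed the network to its own DB at this
        # point; the V1 driver now blocks until ODL answers the REST call.
        resp = requests.post(ODL_NETWORKS_URL,
                             auth=('admin', 'admin'),
                             json={'network': network},
                             timeout=10)
        # A 2xx status only means ODL accepted the request, not that the
        # action has actually been applied (one of the problems listed below).
        resp.raise_for_status()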
|
||||
|
||||
Although this implementation is simple, it has a few problems:
|
||||
|
||||
* ODL is not really synchronous, so if the REST call succeeds it doesn't mean
|
||||
the action really happened on ODL.
|
||||
* The "synchronous" call can be a bottleneck under load.
|
||||
* Upon failure the V1 driver would try to "full sync" the entire Neutron DB
|
||||
over on the next call, so the next call could take a very long time.
|
||||
* It doesn't really handle race conditions:
|
||||
|
||||
- For example, create subnet and then create port could be sent in parallel
|
||||
by the driver in an HA Neutron environment, causing the port creation to
|
||||
fail.
|
||||
- Full-sync could possibly recreate deleted resources if the deletion happens
|
||||
in parallel.
|
||||
|
||||
.. _v2_design:
|
||||
|
||||
V2 Driver Design
|
||||
----------------
|
||||
|
||||
The V2 driver set out to tackle the problems encountered in the V1 driver while
|
||||
maintaining feature parity.
|
||||
The major design concept of the V2 driver is *journaling* - instead of passing
|
||||
the calls directly to the ODL controller, they get registered
|
||||
in the journal table which keeps a sort of queue of the various operations that
|
||||
occurred on Neutron and should be mirrored to the controller.
|
||||
|
||||
The journal is processed mainly by a journaling thread which runs periodically
|
||||
and checks if the journal table has any entries in need of processing.
|
||||
Additionally the thread is triggered in the postcommit hook of the operation
|
||||
(where applicable).
|
||||
|
||||
If we take the example of create network again, after it gets stored in the
|
||||
Neutron DB by the ML2 plugin, the ODL driver stores a "journal entry"
|
||||
representing that operation and triggers the journaling thread to take care of
|
||||
the entry.
|
||||
|
||||
The journal entry is recorded in the pre-commit phase (whenever applicable) so
|
||||
that in case of a commit failure the journal entry gets aborted along with the
|
||||
original operation, and there's nothing extra needed.
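As a simplified illustration of why this needs no extra cleanup, the sketch
below (using SQLAlchemy with an in-memory SQLite database; the table layout is
an assumption, not the real schema) writes the resource row and the journal row
in one transaction, so they are committed or rolled back together::

    import sqlalchemy as sa
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Network(Base):                 # stand-in for the Neutron resource
        __tablename__ = 'networks'
        id = sa.Column(sa.String(36), primary_key=True)

    class JournalEntry(Base):            # simplified stand-in for the journal
        __tablename__ = 'journal'
        seqnum = sa.Column(sa.Integer, primary_key=True)
        object_uuid = sa.Column(sa.String(36))
        operation = sa.Column(sa.String(16))
        state = sa.Column(sa.String(16), default='pending')

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        with session.begin():
            # Both rows belong to the same transaction: if the commit fails,
            # the journal entry is discarded together with the network row.
            session.add(Network(id='net-1'))
            session.add(JournalEntry(object_uuid='net-1', operation='create'))

    with Session(engine) as session:
        print(session.query(JournalEntry).count())   # 1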
|
||||
|
||||
Journal Entry Lifecycle
|
||||
-----------------------
|
||||
|
||||
The first state in which a journal entry is created is the 'pending' state. In
|
||||
this state, the entry is awaiting a thread to pick it up and process it.
|
||||
Multiple threads can try to grab the same journal entry, but only one will
|
||||
succeed since the "selection" is done inside a 'select for update' clause.
|
||||
Special care is taken for GaleraDB since it reports a deadlock if more than
|
||||
one thread selects the same row simultaneously.
|
||||
|
||||
Once an entry has been selected it will be put into the 'processing' state
|
||||
which acts as a lock. This is done in the same transaction so that in case
|
||||
multiple threads try to "lock" the same entry only one of them will succeed.
|
||||
When the winning thread succeeds it will continue with processing the entry.
|
||||
|
||||
The first thing the thread does is check for dependencies - if the entry
|
||||
depends on another one to complete. If a dependency is found, the entry is put
|
||||
back into the queue and the thread moves on to the next entry.
|
||||
|
||||
When there are no dependencies for the entry, the thread analyzes the operation
|
||||
that occurred and performs the appropriate call to the ODL controller. The call
|
||||
is made to the correct resource or collection and the type of call (PUT, POST,
|
||||
DELETE) is determined by the operation type. At this point if the call was
|
||||
successful (i.e. got a 200 class HTTP code) the entry is marked 'completed'.
|
||||
|
||||
In case of a failure the thread determines if this is an expected failure (e.g.
|
||||
network connectivity issue) or an unexpected failure. For unexpected failures
|
||||
a counter is raised, so that a given entry won't be retried more than a given
|
||||
amount of times. Expected failures don't change the counter. If the counter
|
||||
exceeds the configured amount of retries, the entry is marked as 'failed'.
|
||||
Otherwise, the entry is marked back as 'pending' so that it can later be
|
||||
retried.
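The retry handling described above can be summarised by the following
self-contained sketch (the names and the retry limit are illustrative
assumptions, not the driver's actual code)::

    PENDING, PROCESSING, COMPLETED, FAILED = 'pending', 'processing', 'completed', 'failed'

    def next_state(call_succeeded, expected_failure, retry_count, max_retries):
        """Return (new_state, new_retry_count) for an entry in 'processing'."""
        if call_succeeded:              # got a 200-class HTTP code from ODL
            return COMPLETED, retry_count
        if expected_failure:            # e.g. connectivity issue: retry, counter unchanged
            return PENDING, retry_count
        retry_count += 1                # unexpected failure: raise the counter
        if retry_count > max_retries:
            return FAILED, retry_count
        return PENDING, retry_count     # will be retried later

    # A fourth unexpected failure with a limit of 3 retries marks the entry failed.
    print(next_state(False, False, retry_count=3, max_retries=3))   # ('failed', 4)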
|
@ -1,148 +0,0 @@
|
||||
Host Configuration
|
||||
==================
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
ODL runs in an agentless configuration. In this scenario, Host Configuration is
used to specify the physical host type and other configuration for the host
system. This information is populated by the cloud operator in OVSDB, in the
Open_vSwitch configuration data, as key/value pairs in the external_ids field.
It is then read by ODL and made available to networking-odl through a REST API.
Networking-odl populates this information into the agent_db in Neutron, where
it is used by the Neutron scheduler. This information is required for features
like port binding and router scheduling.
|
||||
|
||||
Refer to this link for detailed design for this feature.
|
||||
|
||||
https://docs.google.com/presentation/d/1kq0elysCDEmIWs3omTi5RoXTSBbrewn11Je2d26cI4M/edit?pref=2&pli=1#slide=id.g108988d1e3_0_6
|
||||
|
||||
Related ODL changes:
|
||||
|
||||
https://git.opendaylight.org/gerrit/#/c/36767/
|
||||
|
||||
https://git.opendaylight.org/gerrit/#/c/40143/
|
||||
|
||||
Host Configuration fields
|
||||
-------------------------
|
||||
|
||||
- **host-id**
|
||||
|
||||
This represents the host identification string. The string is stored in the
external_ids field with the key odl_os_hostconfig_hostid.
Refer to the Neutron configuration definition of the host field for details:
|
||||
|
||||
http://docs.openstack.org/kilo/config-reference/content/section_neutron.conf.html
|
||||
|
||||
- **host-type**
|
||||
|
||||
This field gives the type of the node. The value corresponds to agent_type in
agent_db. Example values are "ODL L2" and "ODL L3" for a compute and a network
node respectively. The same host can be configured with multiple
configurations and can therefore have L2, L3 and other configurations at the
same time. This string is populated by ODL based on the configurations
available on the host. See the example in the section below.
|
||||
|
||||
- **config**
|
||||
|
||||
This is the configuration data for the host type. Since the same node can
store multiple configurations, different external_ids key/value pairs are used
to store them. The external_ids keys of the form
odl_os_hostconfig_config_odl_XXXXXXXX store the different configurations.
The characters after the prefix odl_os_hostconfig_config_odl_ identify the
host type: ODL extracts these characters and stores them as the host-type
field. For example, the keys odl_os_hostconfig_config_odl_l2 and
odl_os_hostconfig_config_odl_l3 are used to provide L2 and L3 configurations
respectively, and ODL extracts "ODL L2" and "ODL L3" from them to populate the
host-type field.
|
||||
|
||||
The config is a JSON string. Some examples of config:
|
||||
|
||||
OVS configuration example::
|
||||
|
||||
    {"supported_vnic_types": [{
        "vnic_type": "normal",
        "vif_type": "ovs",
        "vif_details": {}
        }],
     "allowed_network_types": ["local", "gre", "vlan", "vxlan"],
     "bridge_mappings": {"physnet1": "br-ex"}
    }
|
||||
|
||||
OVS_DPDK configuration example::
|
||||
|
||||
    {"supported_vnic_types": [{
        "vnic_type": "normal",
        "vif_type": "vhostuser",
        "vif_details": {
            "uuid": "TEST_UUID",
            "has_datapath_type_netdev": true,
            "support_vhost_user": true,
            "port_prefix": "vhu_",
            # Assumption: /var/run mounted as tmpfs
            "vhostuser_socket_dir": "/var/run/openvswitch",
            "vhostuser_ovs_plug": true,
            "vhostuser_mode": "client",
            "vhostuser_socket": "/var/run/openvswitch/vhu_$PORT_ID"}
        }],
     "allowed_network_types": ["local", "gre", "vlan", "vxlan"],
     "bridge_mappings": {"physnet1": "br-ex"}
    }
|
||||
|
||||
VPP configuration example::
|
||||
|
||||
{ {"supported_vnic_types": [
|
||||
{"vnic_type": "normal",
|
||||
"vif_type": “vhostuser”,
|
||||
"vif_details": {
|
||||
"uuid": "TEST_UUID",
|
||||
"has_datapath_type_netdev": True,
|
||||
"support_vhost_user": True,
|
||||
"port_prefix": "socket_",
|
||||
"vhostuser_socket_dir": "/tmp",
|
||||
"vhostuser_ovs_plug": True,
|
||||
"vhostuser_mode": "server",
|
||||
"vhostuser_socket": "/tmp/socket_$PORT_ID"
|
||||
}}],
|
||||
"allowed_network_types": ["local", "vlan", "vxlan", "gre"],
|
||||
"bridge_mappings": {"physnet1": "br-ex"}}}
|
||||
|
||||
**Host Config URL**
|
||||
|
||||
URL: http://ip:odlport/restconf/operational/neutron:neutron/hostconfigs/
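For example, the host configurations can be read with a plain REST client; the
sketch below uses the Python requests library (the controller address, port and
the default admin/admin credentials are assumptions made for the example)::

    # Illustrative sketch only: fetch the hostconfigs that ODL exposes to
    # networking-odl. Address and credentials are assumptions.
    import requests

    url = 'http://odl.example.com:8181/restconf/operational/neutron:neutron/hostconfigs/'
    resp = requests.get(url, auth=('admin', 'admin'), timeout=10)
    resp.raise_for_status()
    for hostconfig in resp.json().get('hostconfigs', {}).get('hostconfig', []):
        print(hostconfig['host-id'], hostconfig['host-type'])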
|
||||
|
||||
**Commands to setup host config in OVSDB**
|
||||
::
|
||||
|
||||
export OVSUUID=$(ovs-vsctl get Open_vSwitch . _uuid)
|
||||
ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_hostid=test_host
|
||||
    ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_config_odl_l2='{"supported_vnic_types": [{"vnic_type": "normal", "vif_type": "ovs", "vif_details": {}}], "allowed_network_types": ["local"], "bridge_mappings": {"physnet1": "br-ex"}}'
|
||||
|
||||
Example for host configuration
|
||||
-------------------------------
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"hostconfigs": {
|
||||
"hostconfig": [
|
||||
{
|
||||
"host-id": "test_host1",
|
||||
"host-type": "ODL L2",
|
||||
"config":
|
||||
"{“supported_vnic_types”: [{
|
||||
“vnic_type”: “normal”,
|
||||
“vif_type”: “ovs”,
|
||||
“vif_details”: {}
|
||||
}]
|
||||
“allowed_network_types”: ["local", "gre", "vlan", "vxlan"],
|
||||
“bridge_mappings”: {“physnet1":"br-ex”}}"
|
||||
},
|
||||
{
|
||||
"host-id": "test_host2",
|
||||
"host-type": "ODL L3",
|
||||
"config": {}
|
||||
}]
|
||||
}
|
||||
}
|
@ -1,39 +0,0 @@
|
||||
Contributor Guide
|
||||
=================
|
||||
|
||||
In the Developer/Contributor Guide, you will find information on
networking-odl's lower-level design and implementation details.
We cover only the essentials that are specific to networking-odl and do not
repeat the Neutron devref here; for Neutron details, see:
https://docs.openstack.org/neutron/latest/contributor/index.html
|
||||
|
||||
For details regarding OpenStack Neutron's API:
|
||||
https://developer.openstack.org/api-ref/networking/
|
||||
|
||||
Contributor's Reference
|
||||
-----------------------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
testing
|
||||
drivers_architecture
|
||||
maintenance
|
||||
usage
|
||||
contributing
|
||||
specs/index
|
||||
|
||||
Tutorial
|
||||
--------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
quickstart.rst
|
||||
|
||||
|
||||
Networking OpenDaylight Internals
|
||||
---------------------------------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
hostconfig
|
@ -1,44 +0,0 @@
|
||||
Journal Maintenance
|
||||
===================
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
The V2 ODL driver is Journal based [#]_, which means that there's a journal of
|
||||
entries detailing the various operations done on a Neutron resource.
|
||||
The driver has a thread which is in charge of processing the journal of
|
||||
operations which entails communicating the operation forward to the ODL
|
||||
controller.
|
||||
|
||||
The journal entries can wind up in several states due to various reasons:
|
||||
|
||||
* PROCESSING - Stale lock left by a thread due to thread dying or other error
|
||||
* COMPLETED - After the operation is processed successfully
|
||||
* FAILED - If there was an unexpected error during the operation
|
||||
|
||||
These journal entries need to be dealt with appropriately, hence a maintenance
|
||||
thread was introduced that takes care of journal maintenance and other related
|
||||
tasks.
|
||||
This thread runs at a configurable interval and is HA-safe using a shared state
|
||||
kept in the DB.
|
||||
|
||||
Currently the maintenance thread performs:
|
||||
|
||||
* Stale lock release
|
||||
* Completed entries clean up
|
||||
* Failed entries are handled by the recovery mechanism
|
||||
* Full sync - detects when ODL is "tabula rasa" and syncs all the resources to it
|
||||
|
||||
Creating New Maintenance Operations
|
||||
-----------------------------------
|
||||
|
||||
Creating a new maintenance operation is as simple as writing a function
|
||||
that receives the database session object and registering it using a call to::
|
||||
|
||||
MaintenanceThread.register_operation
|
||||
|
||||
The best place to do so would be at the _start_maintenance_thread method of
|
||||
the V2 OpenDaylightMechanismDriver class.
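A minimal, hypothetical sketch of such an operation (the function body and
names are illustrative; only the general shape follows the description
above)::

    # Illustrative only: a maintenance operation is just a callable that
    # receives the database session and performs its periodic task.
    def cleanup_processed_rows(session):
        # e.g. delete rows in the 'completed' state older than some threshold.
        pass

    # Registered from OpenDaylightMechanismDriver._start_maintenance_thread,
    # assuming 'maintenance_thread' is the MaintenanceThread instance there:
    # maintenance_thread.register_operation(cleanup_processed_rows)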
|
||||
|
||||
.. [#] See :ref:`v2_design` for details.
|
||||
|
@ -1,219 +0,0 @@
|
||||
.. _quickstart:
|
||||
|
||||
=====================
|
||||
Developer Quick-Start
|
||||
=====================
|
||||
|
||||
This is a quick walkthrough to get you started developing code for
|
||||
networking-odl. This assumes you are already familiar with submitting code
|
||||
reviews to an OpenStack project.
|
||||
|
||||
.. seealso::
|
||||
|
||||
http://docs.openstack.org/infra/manual/developers.html
|
||||
|
||||
Setup Dev Environment
|
||||
=====================
|
||||
|
||||
Install OS-specific prerequisites::
|
||||
|
||||
# Ubuntu/Debian 14.04:
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y python-dev libssl-dev libxml2-dev curl \
|
||||
libmysqlclient-dev libxslt1-dev libpq-dev git \
|
||||
libffi-dev gettext build-essential
|
||||
|
||||
# CentOS/RHEL 7.2:
|
||||
sudo yum install -y python-devel openssl-devel mysql-devel curl \
|
||||
libxml2-devel libxslt-devel postgresql-devel git \
|
||||
libffi-devel gettext gcc
|
||||
|
||||
# openSUSE/SLE 12:
|
||||
sudo zypper --non-interactive install git libffi-devel curl \
|
||||
libmysqlclient-devel libopenssl-devel libxml2-devel \
|
||||
libxslt-devel postgresql-devel python-devel \
|
||||
gettext-runtime
|
||||
|
||||
Install pip::
|
||||
|
||||
curl -s https://bootstrap.pypa.io/get-pip.py | sudo python
|
||||
|
||||
Install common prerequisites::
|
||||
|
||||
sudo pip install virtualenv flake8 tox testrepository git-review
|
||||
|
||||
You may need to explicitly upgrade virtualenv if you've installed the one
|
||||
from your OS distribution and it is too old (tox will complain). You can
|
||||
upgrade it individually, if you need to::
|
||||
|
||||
sudo pip install -U virtualenv
|
||||
|
||||
Networking-odl source code should be pulled directly from git::
|
||||
|
||||
# from your home or source directory
|
||||
cd ~
|
||||
git clone https://git.openstack.org/openstack/networking-odl
|
||||
cd networking-odl
|
||||
|
||||
|
||||
For installation of networking-odl refer to :doc:`/install/index`.
|
||||
For testing refer to :doc:`Testing <testing>` guide.
|
||||
|
||||
Verifying Successful Installation
|
||||
==================================
|
||||
|
||||
There are some checks you can run quickly to verify that networking-odl
|
||||
has been installed successfully.
|
||||
|
||||
#. Neutron agents must be in the running state. If you are using the
   pseudo-agent for port binding, then the output of
   **openstack network agent list** should be something like::
|
||||
|
||||
ubuntu@ubuntu-14:~/devstack$ openstack network agent list
|
||||
+----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+
|
||||
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
|
||||
+----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+
|
||||
| 00628905-6550-43a5-9cda- | ODL L2 | ubuntu-14 | None | True | UP | neutron-odlagent- |
|
||||
| 175a309ea538 | | | | | | portbinding |
|
||||
| 37491134-df2a- | DHCP agent | ubuntu-14 | nova | True | UP | neutron-dhcp-agent |
|
||||
| 45ab-8373-e186154aebee | | | | | | |
|
||||
| 8e0e5614-4d68-4a42-aacb- | Metadata agent | ubuntu-14 | None | True | UP | neutron-metadata-agent |
|
||||
| d0a10df470fb | | | | | | |
|
||||
+----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+
|
||||
|
||||
The output of this command may vary depending on your environment
(for example, the hostname).
|
||||
|
||||
#. You can check that OpenDaylight is running by executing the following
   command::
|
||||
|
||||
ubuntu@ubuntu-14:~/devstack$ ps -eaf | grep opendaylight
|
||||
|
||||
|
||||
|
||||
Launching an Instance and Floating IP
======================================
|
||||
|
||||
#. Gather the parameters required for launching an instance. We need the
   flavor ID, image ID and network ID; the following command can be used to
   launch an instance::
|
||||
|
||||
openstack server create --flavor <flavor(m1.tiny)> --image \
|
||||
<image(cirros)> --nic net-id=<Network ID> --security-group \
|
||||
<security group(default)> --key-name <keyname(mykey)> \
|
||||
<server name(test-instance)>
|
||||
|
||||
For details on creating instances refer to [#third]_ and
|
||||
[#fourth]_.
|
||||
|
||||
#. Attaching a floating IP to the created server can be done with the following command::
|
||||
|
||||
openstack server add floating ip <INSTANCE_NAME_OR_ID(test-instance)> \
|
||||
<FLOATING_IP_ADDRESS(203.20.2.12)>
|
||||
|
||||
For details on attaching floating IPs refer to [#fifth]_.
|
||||
|
||||
|
||||
Useful Commands
|
||||
================
|
||||
|
||||
#. To verify the status, try the following command::
|
||||
|
||||
ubuntu@ubuntu-14:<Location of opendaylight directory>/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./karaf status
|
||||
|
||||
You should receive the following output::
|
||||
|
||||
Running ...
|
||||
|
||||
#. You can log in using the available client::
|
||||
|
||||
ubuntu@ubuntu-14:<Location of opendaylight directory>/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./client
|
||||
|
||||
You will receive output in the following format::
|
||||
|
||||
Logging in as karaf
|
||||
3877 [sshd-SshClient[6dbb137d]-nio2-thread-3] WARN org.apache.sshd.client.keyverifier.AcceptAllServerKeyVerifier - Server at [/0.0.0.0:8101, RSA, 56:41:48:1c:38:3b:73:a8:a5:96:8e:69:a5:4c:93:e0] presented unverified {} key: {}
|
||||
________ ________ .__ .__ .__ __
|
||||
\_____ \ ______ ____ ____ \______ \ _____ ___.__.| | |__| ____ | |___/ |_
|
||||
/ | \\____ \_/ __ \ / \ | | \\__ \< | || | | |/ ___\| | \ __\
|
||||
/ | \ |_> > ___/| | \| ` \/ __ \\___ || |_| / /_/ > Y \ |
|
||||
\_______ / __/ \___ >___| /_______ (____ / ____||____/__\___ /|___| /__|
|
||||
\/|__| \/ \/ \/ \/\/ /_____/ \/
|
||||
|
||||
Hit '<tab>' for a list of available commands
|
||||
and '[cmd] --help' for help on a specific command.
|
||||
Hit '<ctrl-d>' or type 'system:shutdown' or 'logout' to shutdown OpenDaylight.
|
||||
|
||||
Now you can run commands as needed, for example::
|
||||
|
||||
opendaylight-user@root>subnet-show
|
||||
No SubnetOpData configured.
|
||||
Following subnetId is present in both subnetMap and subnetOpDataEntry
|
||||
|
||||
|
||||
|
||||
Following subnetId is present in subnetMap but not in subnetOpDataEntry
|
||||
|
||||
Uuid [_value=2131f292-732d-4ba4-b74e-d70c07eceeb4]
|
||||
|
||||
Uuid [_value=7a03e5d8-3adb-4b19-b1ec-a26691a08f26]
|
||||
|
||||
Uuid [_value=7cd269ea-e06a-4aa3-bc11-697d71be4cbd]
|
||||
|
||||
Uuid [_value=6da591bc-6bba-4c8a-a12b-671265898c4f]
|
||||
|
||||
|
||||
Usage 1: To display subnetMaps for a given subnetId subnet-show --subnetmap [<subnetId>]
|
||||
|
||||
Usage 2: To display subnetOpDataEntry for a given subnetId subnet-show --subnetopdata [<subnetId>]
|
||||
|
||||
To get help on a specific command::
|
||||
|
||||
opendaylight-user@root>help feature
|
||||
COMMANDS
|
||||
info Shows information about selected feature.
|
||||
install Installs a feature with the specified name and version.
|
||||
list Lists all existing features available from the defined repositories.
|
||||
repo-add Add a features repository.
|
||||
repo-list Displays a list of all defined repositories.
|
||||
repo-refresh Refresh a features repository.
|
||||
repo-remove Removes the specified repository features service.
|
||||
uninstall Uninstalls a feature with the specified name and version.
|
||||
version-list Lists all versions of a feature available from the currently available repositories.
|
||||
|
||||
There are other helpful commands, for example log:tail, log:set and shutdown,
to tail the logs, set log levels and shut down OpenDaylight.
|
||||
|
||||
To check that the neutron bundles are installed::
|
||||
|
||||
opendaylight-user@root>feature:list -i | grep neutron
|
||||
odl-neutron-service | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API
|
||||
odl-neutron-northbound-api | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Northbound
|
||||
odl-neutron-spi | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API
|
||||
odl-neutron-transcriber | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Implementation
|
||||
odl-neutron-logger | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Logger
|
||||
|
||||
To check that the netvirt bundles are installed::
|
||||
|
||||
opendaylight-user@root>feature:list -i | grep netvirt
|
||||
odl-netvirt-api | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: api
|
||||
odl-netvirt-impl | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: impl
|
||||
odl-netvirt-openstack | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: OpenStack
|
||||
|
||||
|
||||
#. For exploring the APIs, the following links can be used::
|
||||
|
||||
API explorer:
|
||||
http://localhost:8080/apidoc/explorer
|
||||
|
||||
Karaf:
|
||||
http://localhost:8181/apidoc/explorer/index.html
|
||||
|
||||
Detailed information can be found in [#sixth]_.
|
||||
|
||||
.. rubric:: References
|
||||
|
||||
.. [#third] https://docs.openstack.org/mitaka/install-guide-rdo/launch-instance-selfservice.html
|
||||
.. [#fourth] https://docs.openstack.org/draft/install-guide-rdo/launch-instance.html
|
||||
.. [#fifth] https://docs.openstack.org/user-guide/cli-manage-ip-addresses.html
|
||||
.. [#sixth] https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf_API_Explorer
|
@ -1,33 +0,0 @@
|
||||
.. networking-odl specs documentation index
|
||||
|
||||
==============
|
||||
Specifications
|
||||
==============
|
||||
|
||||
Pike specs
|
||||
==========
|
||||
|
||||
.. toctree::
|
||||
:glob:
|
||||
:maxdepth: 1
|
||||
|
||||
pike/*
|
||||
|
||||
Ocata specs
|
||||
===========
|
||||
|
||||
.. toctree::
|
||||
:glob:
|
||||
:maxdepth: 1
|
||||
|
||||
ocata/*
|
||||
|
||||
Newton specs
|
||||
============
|
||||
|
||||
.. toctree::
|
||||
:glob:
|
||||
:maxdepth: 1
|
||||
|
||||
newton/*
|
||||
|
@ -1,104 +0,0 @@
|
||||
==========================================
|
||||
Quality of Service Driver for OpenDaylight
|
||||
==========================================
|
||||
|
||||
This spec describes the plan to implement quality of service driver for
|
||||
OpenDaylight Controller.
|
||||
|
||||
Problem Statement
|
||||
=================
|
||||
The OpenStack networking project (neutron [1]) has an extension plugin that
exposes an API for quality of service (QoS), which can also be implemented by
any backend networking service provider to support QoS. These APIs provide a
way to integrate OpenStack Neutron QoS with any of the backend QoS providers.
OpenDaylight will provide the backend for the existing neutron-QoS
functionality. A notification driver is needed to integrate the existing
OpenStack Neutron QoS API with the OpenDaylight backend.
|
||||
|
||||
Proposed Change
|
||||
===============
|
||||
This change will introduce a new notification driver in networking-odl that
|
||||
will take CRUD request data for QoS policies from OpenStack Neutron and notify
|
||||
the OpenDaylight controller about the respective operation.
|
||||
|
||||
Detailed Design
|
||||
===============
|
||||
Enabling the formal end-to-end integration between OpenStack QoS and
OpenDaylight requires a networking-odl QoS notification driver. The QoS driver
will act as a shim layer between OpenStack and OpenDaylight that will carry
out the following tasks:
|
||||
|
||||
#. After getting QoS policy request data from Neutron, it will log an
   operation request in the opendaylightjournal table.
|
||||
|
||||
#. The operation will be picked up from the opendaylightjournal table, and a
   REST call notifying the OpenDaylight server will be prepared and sent.
|
||||
|
||||
#. This request will be processed by the Neutron Northbound project in
   OpenDaylight, whose QoS models are based on the existing Neutron QoS
   plugin APIs (a rough sketch of such a call is shown after this list).
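A rough sketch of the notification step, using the Python requests library
(the URL, credentials and payload below are assumptions made for illustration;
the actual endpoint is defined by the OpenDaylight Neutron Northbound
project)::

    # Illustrative sketch only; URL, credentials and payload are assumptions.
    import requests

    ODL_QOS_URL = 'http://odl.example.com:8080/controller/nb/v2/neutron/qos/policies'

    def notify_policy_create(policy):
        # Called when a 'create policy' row is picked from opendaylightjournal.
        resp = requests.post(ODL_QOS_URL,
                             auth=('admin', 'admin'),
                             json={'policy': policy},
                             timeout=10)
        resp.raise_for_status()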
|
||||
|
||||
QoS providers in OpenDaylight can listen to these OpenDaylight Neutron
|
||||
Northbound QoS models and translate it to their specific yang models for QoS.
|
||||
The following diagram shows the high level integration between OpenStack and
|
||||
the OpenDaylight QoS provider::
|
||||
|
||||
+---------------------------------------------+
|
||||
| OpenStack Network Server (neutron qos) |
|
||||
| |
|
||||
| +---------------------+ |
|
||||
| | networking-odl | |
|
||||
| | | |
|
||||
| | +---------------| |
|
||||
| | | Notification | |
|
||||
| | | driver QoS | |
|
||||
+----------------------|----------------------+
|
||||
|
|
||||
| Rest Communication
|
||||
|
|
||||
OpenDaylight Controller |
|
||||
+-----------------------|------------+
|
||||
| +----------V----+ |
|
||||
| ODL | QoS Yang Model| |
|
||||
| Northbound | | |
|
||||
| (neutron) +---------------+ |
|
||||
| | |
|
||||
| | |
|
||||
| ODL +----V----+ |
|
||||
| Southbound | QoS | |
|
||||
| (neutron) +---------+ |
|
||||
+-----------------|------------------+
|
||||
|
|
||||
|
|
||||
+------------------------------------+
|
||||
| Network/OVS |
|
||||
| |
|
||||
+------------------------------------+
|
||||
|
||||
In the above diagram, the OpenDaylight components are shown just to understand
|
||||
the overall architecture, but it's out of scope of this spec's work items.
|
||||
This spec will only track progress related to networking-odl notification QoS
|
||||
driver work.
|
||||
|
||||
Dependencies
|
||||
============
|
||||
It has a dependency on OpenDaylight Neutron Northbound QoS yang models, but
|
||||
that is out of scope of this spec.
|
||||
|
||||
Impact
|
||||
======
|
||||
None
|
||||
|
||||
Assignee(s)
|
||||
===========
|
||||
|
||||
The following developers will be the initial contributors to the driver, but
we will be happy to have more contributors on board.
|
||||
|
||||
* Manjeet Singh Bhatia (manjeet.s.bhatia@intel.com, irc: manjeets)
|
||||
|
||||
References
|
||||
==========
|
||||
|
||||
* [1] https://docs.openstack.org/neutron/latest/contributor/internals/quality_of_service.html
|
||||
* [2] https://wiki.opendaylight.org/view/NeutronNorthbound:Main
|
@ -1,139 +0,0 @@
|
||||
=================================================
|
||||
Service Function Chaining Driver for OpenDaylight
|
||||
=================================================
|
||||
|
||||
This spec describes the plan to implement OpenStack networking-sfc[1] driver
|
||||
for OpenDaylight Controller.
|
||||
|
||||
Problem Statement
|
||||
===================
|
||||
OpenStack SFC project (networking-sfc [1]) exposes generic APIs[2] for Service
|
||||
Function Chaining (SFC) that can be implemented by any backend networking
|
||||
service provider to support SFC. These APIs provide a way to integrate
|
||||
OpenStack SFC with any of the backend SFC providers. OpenDaylight SFC project
|
||||
provides a very mature implementation of SFC [3], but currently there is no
|
||||
formal integration mechanism present to consume OpenDaylight as an SFC provider
|
||||
for networking-sfc.
|
||||
|
||||
Recently the Tacker project [4] was approved as an official project in
OpenStack, which opens many possibilities to realize NFV use cases (e.g. SFC)
using OpenStack as a platform. Providing a formal end-to-end integration
between OpenStack and OpenDaylight for the SFC use case will help NFV users
leverage OpenStack, Tacker and OpenDaylight as a solution. A POC for this
integration work has already been implemented [5][6] by Tim Rozet, but in
that POC, Tacker communicates directly with the OpenDaylight SFC and classifier
providers rather than going through the OpenStack SFC APIs (networking-sfc).
|
||||
|
||||
Proposed Change
|
||||
===============
|
||||
Implementation of this spec will introduce a networking-sfc[1] driver for
|
||||
OpenDaylight Controller in networking-odl project that will pass through
|
||||
the networking-sfc API's call to the OpenDaylight Controller.
|
||||
|
||||
Detailed Design
|
||||
===============
|
||||
Enabling the formal end-to-end integration between OpenStack SFC and
OpenDaylight requires an SFC driver for OpenDaylight. The ODL SFC driver will
act as a shim layer between OpenStack and OpenDaylight that will carry out the
following two main tasks:
||||
|
||||
* Translation of OpenStack SFC Classifier API to ODL SFC classifier yang
|
||||
models**.
|
||||
|
||||
* Translation of OpenStack SFC API's to OpenDaylight Neutron Northbound
|
||||
SFC models** [8].
|
||||
|
||||
** This work is not yet done; the OpenDaylight Neutron Northbound project
needs to come up with yang models for SFC classification/chains. These models
will be based on the existing networking-sfc APIs. This work is out of the
scope of networking-odl and will be carried out within the OpenDaylight
Neutron Northbound project.
|
||||
|
||||
SFC providers (E.g Net-Virt, GBP, SFC ) in OpenDaylight can listen to these
|
||||
OpenDaylight Neutron Northbound SFC models and translate it to their specific
|
||||
yang models for classification/sfc. The following diagram shows the high level
|
||||
integration between OpenStack and the OpenDaylight SFC provider::
|
||||
|
||||
+---------------------------------------------+
|
||||
| OpenStack Network Server (networking-sfc) |
|
||||
| +-------------------+ |
|
||||
| | networking-odl | |
|
||||
| | SFC Driver | |
|
||||
| +-------------------+ |
|
||||
+----------------------|----------------------+
|
||||
| REST Communication
|
||||
|
|
||||
-----------------------
|
||||
OpenDaylight Controller | |
|
||||
+-----------------------|-----------------------|---------------+
|
||||
| +----------v----+ +---v---+ |
|
||||
| Neutron | SFC Classifier| |SFC | Neutron |
|
||||
| Northbound | Models | |Models | Northbound|
|
||||
| Project +---------------+ +-------+ Project |
|
||||
| / \ | |
|
||||
| / \ | |
|
||||
| / \ | |
|
||||
| +-----V--+ +---V----+ +---V---+ |
|
||||
| |Net-Virt| ... | GBP | | SFC | ... |
|
||||
| +---------+ +--------+ +-------+ |
|
||||
+-----------|----------------|------------------|---------------+
|
||||
| | |
|
||||
| | |
|
||||
+-----------V----------------V------------------V---------------+
|
||||
| Network/OVS |
|
||||
| |
|
||||
+---------------------------------------------------------------+
|
||||
|
||||
In the above architecture, the opendaylight components are shown just to
|
||||
understand the overall architecture, but it's out of scope of this spec's
|
||||
work items. This spec will only track progress related to networking-odl
|
||||
OpenStack sfc driver work.
|
||||
|
||||
Given that the OpenStack SFC APIs are port-pair based and the OpenDaylight SFC
APIs are based on the IETF SFC yang models[8], there might be situations where
the translation requires API enhancements from OpenStack SFC. The
networking-sfc team is open to such enhancement requests provided they are
generic enough to be leveraged by other backend SFC providers[9]. This work
will leverage the POC work done by Tim [10] to come up with the first version
of the SFC driver.
|
||||
|
||||
Dependencies
|
||||
============
|
||||
It has a dependency on OpenDaylight Neutron Northbound SFC classifier and chain
|
||||
yang models, but that is out of scope of this spec.
|
||||
|
||||
Impact
|
||||
======
|
||||
None
|
||||
|
||||
Assignee(s)
|
||||
===========
|
||||
|
||||
The following developers will be the initial contributors to the driver, but
we will be happy to have more contributors on board.
|
||||
|
||||
* Anil Vishnoi (vishnoianil@gmail.com, irc: vishnoianil)
|
||||
* Tim Rozet (trozet@redhat.com, irc: trozet)
|
||||
|
||||
References
|
||||
==========
|
||||
|
||||
[1] https://docs.openstack.org/networking-sfc/latest/
|
||||
|
||||
[2] https://github.com/openstack/networking-sfc/blob/master/doc/source/api.rst
|
||||
|
||||
[3] https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
|
||||
|
||||
[4] https://wiki.openstack.org/wiki/Tacker
|
||||
|
||||
[5] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc
|
||||
|
||||
[6] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc_classifier
|
||||
|
||||
[7] https://tools.ietf.org/html/draft-ietf-netmod-acl-model-05
|
||||
|
||||
[8] https://wiki.opendaylight.org/view/NeutronNorthbound:Main
|
||||
|
||||
[9] http://eavesdrop.openstack.org/meetings/service_chaining/2016/service_chaining.2016-03-31-17.00.log.html
|
||||
|
||||
[10] https://github.com/trozet/tacker/blob/SFC_brahmaputra/tacker/sfc/drivers/opendaylight.py
|
@ -1,152 +0,0 @@
|
||||
..
|
||||
This work is licensed under a Creative Commons Attribution 3.0 Unported
|
||||
License.
|
||||
|
||||
http://creativecommons.org/licenses/by/3.0/legalcode
|
||||
|
||||
================
|
||||
Journal Recovery
|
||||
================
|
||||
|
||||
https://blueprints.launchpad.net/networking-odl/+spec/journal-recovery
|
||||
|
||||
Journal entries in the failed state need to be handled somehow. This spec will
|
||||
try to address the issue and propose a solution.
|
||||
|
||||
Problem Description
|
||||
===================
|
||||
|
||||
Currently there is no handling for Journal entries that reach the failed state.
|
||||
A journal entry can reach the failed state for several reasons, some of which
|
||||
are:
|
||||
|
||||
* Reached maximum failed attempts for retrying the operation.
|
||||
|
||||
* Inconsistency between ODL and the Neutron DB.
|
||||
|
||||
* For example: An update fails because the resource doesn't exist in ODL.
|
||||
|
||||
* Bugs that can lead to failure to sync up.
|
||||
|
||||
These entries would be left in the journal table forever, which is wasteful
since they take up space in DB storage and also affect the performance of the
journal table.
Although each entry has a negligible effect on its own, the impact of a large
number of such entries can become quite significant.
|
||||
|
||||
Proposed Change
|
||||
===============
|
||||
|
||||
A "journal recovery" routine will run as part of the current journal
|
||||
maintenance process.
|
||||
This routine will scan the journal table for rows in the "failed" state and
|
||||
will try to sync the resource for that entry.
|
||||
|
||||
The procedure can be best described by the following flow chart:
|
||||
|
||||
asciiflow::
|
||||
|
||||
+-----------------+
|
||||
| For each entry |
|
||||
| in failed state |
|
||||
+-------+---------+
|
||||
|
|
||||
+-------v--------+
|
||||
| Query resource |
|
||||
| on ODL (REST) |
|
||||
+-----+-----+----+
|
||||
| | +-----------+
|
||||
Resource | | Determine |
|
||||
exists +--Resource doesn't exist--> operation |
|
||||
| | type |
|
||||
+-----v-----+ +-----+-----+
|
||||
| Determine | |
|
||||
| operation | |
|
||||
| type | |
|
||||
+-----+-----+ |
|
||||
| +------------+ |
|
||||
+--Create------> Mark entry <--Delete--+
|
||||
| | completed | |
|
||||
| +----------^-+ Create/
|
||||
| | Update
|
||||
| | |
|
||||
| +------------+ | +-----v-----+
|
||||
+--Delete--> Mark entry | | | Determine |
|
||||
| | pending | | | parent |
|
||||
| +---------^--+ | | relation |
|
||||
| | | +-----+-----+
|
||||
+-----v------+ | | |
|
||||
| Compare to +--Different--+ | |
|
||||
| resource | | |
|
||||
| in DB +--Same------------+ |
|
||||
+------------+ |
|
||||
|
|
||||
+-------------------+ |
|
||||
| Create entry for <-----Has no parent------+
|
||||
| resource creation | |
|
||||
+--------^----------+ Has a parent
|
||||
| |
|
||||
| +---------v-----+
|
||||
+------Parent exists------+ Query parent |
|
||||
| on ODL (REST) |
|
||||
+---------+-----+
|
||||
+------------------+ |
|
||||
| Create entry for <---Parent doesn't exist--+
|
||||
| parent creation |
|
||||
+------------------+
|
||||
|
||||
For every error during the process the entry will remain in failed state but
|
||||
the error shouldn't stop processing of further entries.
|
||||
|
||||
|
||||
The implementation could be done in two phases where the parent handling is
|
||||
done in a second phase.
|
||||
For the first phase, if we detect an entry that is in the failed state for a
create/update operation and the resource doesn't exist on ODL, we create a new
"create resource" journal entry for the resource.
|
||||
|
||||
This proposal utilises the journal mechanism for its operation; the only
part that deviates from the standard mode of operation is when it queries ODL
directly. This direct query has to be done to get ODL's representation of the
resource.
|
||||
|
||||
Performance Impact
|
||||
------------------
|
||||
|
||||
The maintenance thread will have another task to handle. This can lead to
|
||||
longer processing time and even cause the thread to skip an iteration.
|
||||
This is not an issue since the maintenance thread runs in parallel and doesn't
|
||||
directly impact the responsiveness of the system.
|
||||
|
||||
Since most operations here involve I/O then CPU probably won't be impacted.
|
||||
|
||||
Network traffic would be impacted slightly since we will attempt to fetch the
resource each time from ODL and we might attempt to fetch its parent.
This is however negligible as we do this only for failed entries, which are
expected to appear rarely.
|
||||
|
||||
|
||||
Alternatives
|
||||
------------
|
||||
|
||||
The partial sync process could make this process obsolete (along with full
|
||||
sync), but it's a far more complicated and problematic process.
|
||||
It's better to start with this process which is more lightweight and doable
|
||||
and consider partial sync in the future.
|
||||
|
||||
|
||||
Assignee(s)
|
||||
===========
|
||||
|
||||
Primary assignee:
|
||||
mkolesni <mkolesni@redhat.com>
|
||||
|
||||
Other contributors:
|
||||
None
|
||||
|
||||
|
||||
References
|
||||
==========
|
||||
|
||||
https://goo.gl/IOMpzJ
|
||||
|
@ -1,129 +0,0 @@
|
||||
..
|
||||
This work is licensed under a Creative Commons Attribution 3.0 Unported
|
||||
License.
|
||||
|
||||
http://creativecommons.org/licenses/by/3.0/legalcode
|
||||
|
||||
================================
|
||||
Dependency Validations on Create
|
||||
================================
|
||||
|
||||
https://blueprints.launchpad.net/networking-odl/+spec/dep-validations-on-create
|
||||
|
||||
Right now V2 driver entry dependency validations happen when a journal entry is
|
||||
picked for processing. This spec proposes that this be moved to entry creation
|
||||
time, in order to have a clear understanding of the entry dependencies and
|
||||
conserve journal resources.
|
||||
|
||||
|
||||
Problem Description
|
||||
===================
|
||||
|
||||
Dependency validations are necessary in the V2 driver because each operation
|
||||
gets recorded in a journal entry and sent to ODL asynchronously. Thus, a
|
||||
consecutive operation might be sent to ODL before the first one finishes, while
|
||||
relying on the first operation.
|
||||
For example, when a subnet gets created it references a network, but if the
|
||||
network was created right before the subnet was then the subnet create
|
||||
shouldn't be sent over until the network create was sent.
|
||||
|
||||
Currently these checks are performed each time an entry is selected for
|
||||
processing - if the entry passes the dependency checks then it gets processed
|
||||
and if the dependency check fails (i.e. finds a previous unhandled entry that
|
||||
needs to execute before this one) then the entry gets sent back to the queue.
|
||||
|
||||
Generally this is not optimal for several reasons:
|
||||
* No clear indication of relations between the entries.
|
||||
|
||||
* The logic is hidden in the code and there's no good way to know why an
|
||||
entry fails a dependency check.
|
||||
* Difficult to debug in case of problems.
|
||||
* Difficult to spot phenomena such as a cyclic dependency.
|
||||
|
||||
* Wasted CPU effort.
|
||||
|
||||
* An entry can be checked multiple times for dependencies.
|
||||
* Lots of redundant DB queries to determine dependencies each time.
|
||||
|
||||
|
||||
Proposed Change
|
||||
===============
|
||||
|
||||
The proposed solution is to move the dependency calculation to entry creation
|
||||
time.
|
||||
|
||||
When a journal entry is created the dependency management system will calculate
|
||||
the dependencies on other entries (Similarly to how it does now) and if there
|
||||
are journal entries the new entry should depend on, their IDs will be inserted
|
||||
into a link table.
|
||||
|
||||
Thus, when the journal looks for an entry to pick up it will only look for
|
||||
entries that no other entry depends on by making sure there aren't any entries
|
||||
in the dependency table.
|
||||
|
||||
When a journal entry is done processing (either successfully or reaches failed
|
||||
state), the dependency links will be removed from the dependency table so that
|
||||
dependent rows can be processed.
|
||||
|
||||
The proposed table::
|
||||
|
||||
+------------------------+
|
||||
| odl_journal_dependency |
|
||||
+------------------------+
|
||||
| parent_id |
|
||||
| dependent_id |
|
||||
+------------------------+
|
||||
|
||||
The table columns will be foreign keys to the seqnum column in the journal
|
||||
table. The constraints will be defined as "ON DELETE CASCADE" so that when a
|
||||
journal entry is removed any possible rows will be removed as well.
|
||||
The primary key will be made from both columns of the table as this is a link
|
||||
table and not an actual entity.
|
||||
If we face DB performance issues (highly unlikely, since this table should
|
||||
normally have a very small amount of rows if any at all) then an index can be
|
||||
constructed on the dependent_id column.
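For illustration, the proposed table could be declared with SQLAlchemy roughly
as follows (the journal table name and the seqnum column follow the text
above; the column types are assumptions, and this is a sketch rather than the
final migration)::

    import sqlalchemy as sa

    metadata = sa.MetaData()

    # Assumes the journal table 'opendaylightjournal' with its 'seqnum'
    # column already exists in the same metadata/schema.
    odl_journal_dependency = sa.Table(
        'odl_journal_dependency', metadata,
        # Both columns reference the journal's seqnum and form the primary key;
        # ON DELETE CASCADE removes the links when a journal entry is deleted.
        sa.Column('parent_id', sa.BigInteger,
                  sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'),
                  primary_key=True),
        sa.Column('dependent_id', sa.BigInteger,
                  sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'),
                  primary_key=True),
    )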
|
||||
|
||||
The dependency management mechanism will locate parent entries for the given
|
||||
entry and will populate the table so that the parent entry's seqnum will be
|
||||
set as the parent_id, and the dependent entry id will be set as dependent_id.
|
||||
When the journal picks up an entry for processing it will condition it on not
|
||||
having any rows with the parent_id in the dependency table. This will ensure
|
||||
that dependent rows get handled after the parent rows have finished processing.
|
||||
|
||||
|
||||
Performance Considerations
|
||||
==========================
|
||||
|
||||
Generally the performance shouldn't be impacted as we're moving the part of
|
||||
code that does dependency calculations from the entry selection time to entry
|
||||
creation time. This will assure that dependency calculations happen only once
|
||||
per journal entry.
|
||||
|
||||
However, some simple benchmarks should be performed before & after the change:
|
||||
* Average Tempest run time.
|
||||
* Average CPU consumption on Tempest.
|
||||
* Full sync run time (Start to finish of all entries).
|
||||
|
||||
If performance suffers a severe degradation then we should consider
|
||||
alternative solutions.
|
||||
|
||||
|
||||
Questions
|
||||
=========
|
||||
|
||||
Q: Should entries in "failed" state block other entries?
|
||||
|
||||
A: Currently "failed" rows are not considered as blocking for dependency
|
||||
validations, but we might want to change this as it makes little sense to
|
||||
process a dependent entry that failed processing.
|
||||
|
||||
Q: How will this help debug-ability?
|
||||
|
||||
A: It will be easy to query the table contents at any time to figure out which
|
||||
entries depend on which other entries.
|
||||
|
||||
Q: How will we be able to spot cyclic dependencies?
|
||||
|
||||
A: Currently this isn't planned as part of the spec, but a DB query (or a
|
||||
series of them) can help determine if this problem exists.
|
||||
|
@ -1,210 +0,0 @@
|
||||
..
|
||||
This work is licensed under a Creative Commons Attribution 3.0 Unported
|
||||
License.
|
||||
|
||||
http://creativecommons.org/licenses/by/3.0/legalcode
|
||||
|
||||
======================================================================
|
||||
Neutron Port Allocation per Subnet for OpenDaylight DHCP Proxy Service
|
||||
======================================================================
|
||||
|
||||
This spec describes the proposal to allocate a Neutron DHCP Port just for
|
||||
use by OpenDaylight Controller on Subnets that are created or updated with
|
||||
enable-dhcp to True.
|
||||
|
||||
When the "controller-dhcp-enabled" configuration flag is set to true in the
OpenDaylight controller, these Neutron DHCP ports will be used by the
OpenDaylight controller to provide the DHCP service, instead of using the
subnet-gateway-ip as the DHCP server IP as it stands today.
|
||||
|
||||
The networking-odl driver is not aware of the above OpenDaylight controller
configuration parameter. When the controller-dhcp-enabled configuration flag
is set to false, the DHCP port will be created and destroyed without causing
any harm to either the OpenDaylight controller or the networking-odl driver.
|
||||
|
||||
Problem Statement
|
||||
=================
|
||||
|
||||
The DHCP service within OpenDaylight currently assumes the availability of the
subnet gateway IP address. The subnet gateway IP is not a mandatory parameter
for an OpenStack subnet, and so it might not be available from OpenStack
orchestration. This leaves the DHCP service in OpenDaylight unable to serve
DHCP offers to virtual endpoints requesting IP addresses, resulting in service
unavailability. Even if the subnet-gateway-ip is available on the subnet, it
is not a good design for OpenDaylight to hijack that IP address and use it as
the DHCP server IP address.
|
||||
|
||||
Problem - 1: L2 Deployment with 3PP gateway
|
||||
-------------------------------------------
|
||||
|
||||
There can be a deployment scenario in which an L2 network is created with no
distributed Router/VPN functionality. This deployment can have a separate
gateway for the network, such as a 3PP LB VM, which acts as a TCP termination
point and is configured with a default gateway IP. This means all inter-subnet
traffic is terminated on this VM, which takes the responsibility of forwarding
the traffic.
|
||||
|
||||
But the current DHCP service in OpenDaylight controller hijacks gateway IP
|
||||
address for serving DHCP discover/request messages. If the LB is up, this can
|
||||
continue to work, DHCP broadcasts will get hijacked by the OpenDaylight, and
|
||||
responses sent as PKT_OUTs with SIP = GW IP.
|
||||
|
||||
However, if the LB is down, and the VM ARPs for the same IP as part of a DHCP
|
||||
renew workflow, the ARP resolution can fail, due to which renew request will
|
||||
not be generated. This can cause the DHCP lease to lapse.
|
||||
|
||||
Problem - 2: Designated DHCP for SR-IOV VMs via HWVTEP
|
||||
------------------------------------------------------
|
||||
|
||||
In this deployment scenario, an L2 network is created with no distributed
Router/VPN functionality, and an HWVTEP is used for SR-IOV VMs. DHCP flood
requests from the SR-IOV VMs (DHCP discover and request during boot-up) are
flooded by the HWVTEP on the L2 broadcast domain and punted to the controller
by the designated vswitch. DHCP offers are sent as unicast responses from the
controller and are forwarded by the HWVTEP to the VM. DHCP renews can be
unicast requests, which the HWVTEP may forward to an external gateway VM
(3PP LB VM) as unicast packets. The designated vswitch will never receive
these packets and thus cannot punt them to the controller, so renews will
fail.
|
||||
|
||||
Proposed Change
|
||||
===============
|
||||
As part of the implementation of this spec, we are introducing a new
configuration parameter, 'create_opendaylight_dhcp_port', whose truth value
determines whether the dhcp-proxy-service within the openstack-odl framework
needs to be made functional. This service will be responsible for managing the
create/update/delete lifecycle of a new set of Neutron DHCP ports which will
be provisioned specifically for use by the OpenDaylight controller's existing
DHCP service module.
|
||||
|
||||
Detailed Design
|
||||
===============
|
||||
Introduce a driver config parameter (create_opendaylight_dhcp_port) to
determine whether the OpenDaylight-based DHCP service is being used. The
default setting for the parameter is false.
||||
|
||||
When 'create_opendaylight_dhcp_port' is set to True, it triggers the
networking-odl ML2 driver to hook into the OpenStack subnet resource lifecycle
and use that to manage a special DHCP port per subnet for OpenDaylight
controller use. These special DHCP ports will be shipped to the OpenDaylight
controller, so that the DHCP service within the OpenDaylight controller can
use them as DHCP server ports. Each port will be used to service DHCP requests
for virtual endpoints belonging to that subnet.
|
||||
|
||||
These special DHCP Ports (one per subnet), will carry unique device-id and
|
||||
device-owner values.
|
||||
|
||||
* device-owner(network:dhcp)
|
||||
* device-id(OpenDaylight-<subnet-id>)
|
||||
|
||||
The OpenDaylight DHCP service will also introduce a new config parameter,
controller-dhcp-mode, to indicate whether the above DHCP port should be used
for servicing DHCP requests. When that parameter is set to
use-odl-dhcp-neutron-port, it is recommended to enable the
create_opendaylight_dhcp_port flag for the networking-odl driver.
|
||||
|
||||
Alternative 1
|
||||
--------------
|
||||
The creation of Neutron OpenDaylight DHCP port will be invoked within the
|
||||
OpenDaylight mechanism Driver subnet-postcommit execution.
|
||||
|
||||
Any failures during the neutron dhcp port creation or allocation for the subnet
|
||||
should trigger failure of the subnet create operation with an appropriate
|
||||
failure message in logs. On success the subnet and port information will be
|
||||
persisted to the journal DB and will subsequently be synced with the OpenDaylight
|
||||
controller.
|
||||
|
||||
The plugin should initiate the removal of allocated dhcp neutron port at the
|
||||
time of subnet delete. The port removal will be handled in a subnet-delete-
|
||||
post-commit execution and any failure during this process should rollback the
|
||||
subnet delete operation. The subnet delete operation will be allowed only when
|
||||
all other VMs launched on this subnet are already removed as per existing
|
||||
Neutron behavior.
|
||||
|
||||
A subnet update operation configuring the DHCP state as enabled should allocate
|
||||
such a port if not previously allocated for the subnet. Similarly a subnet
|
||||
update operation configuring DHCP state to disabled should remove any
|
||||
previously allocated OpenDaylight DHCP neutron ports.
|
||||
|
||||
Since the invocation of create/delete port will be synchronous within subnet
|
||||
post-commit, a failure to create/delete port will result in an exception being
|
||||
thrown which makes the ML2 Plugin to fail the subnet operation and not alter
|
||||
Openstack DB.
|
||||
|
||||
Alternative 2
|
||||
-------------
|
||||
The OpenDaylight Neutron DHCP port creation/deletion is invoked asynchronously,
|
||||
driven by a journal entry callback for any Subnet resource state changes as
|
||||
part of create/update/delete. A generic journal callback mechanism to be
|
||||
implemented. Initial consumer of this callback would be the OpenDaylight
|
||||
DHCP proxy service but this could be used by other services in future.
|
||||
|
||||
The Neutron DHCP Port (for OpenDaylight use) creation is triggered when the
|
||||
subnet journal-entry is moved from PENDING to PROCESSING. On a failure of
|
||||
port-creation, the journal will be retained in PENDING state and the subnet
|
||||
itself won't be synced to the OpenDaylight controller. The journal-entry state
|
||||
is marked as COMPLETED only on successful port creation and successful
|
||||
synchronization of that subnet resource to OpenDaylight controller. The same
|
||||
behavior is applicable for subnet update and delete operations too.
|
||||
|
||||
The subnet create/update operation that allocates an OpenDaylight DHCP port
should always check whether a port exists and allocate a new port only if none
exists for the subnet.
|
||||
|
||||
Since the invocation of create/delete port will be within the journal callback
and asynchronous to subnet-postcommit, a failure to create/delete the port
will result in the created (or updated) subnet remaining in the PENDING state.
The next journal sync of this pending subnet will again retry the port
creation/deletion, and this cycle will continue until either the port
operation succeeds or the subnet is itself deleted by the orchestrating
tenant. This could result in a pile-up of PENDING journal entries for these
subnets when there is an unexpected failure in the create/delete DHCP port
operation. It is recommended not to keep retrying the port operation; instead,
failures will show up in OpenDaylight as DHCP offers/renews not being honored
by the DHCP service within the OpenDaylight controller for that subnet.
|
||||
|
||||
Recommended Alternative
|
||||
-----------------------
|
||||
|
||||
All of the following cases will need to be addressed by the design.
|
||||
|
||||
* Neutron server can crash after submitting information to DB but before
|
||||
invoking post-commit during a subnet create/update/delete operation. The
|
||||
dhcp-proxy-service should handle the DHCP port creation/deletion during
|
||||
such failures when the service is enabled.
|
||||
* A subnet update operation to disable-dhcp can be immediately followed by
|
||||
a subnet update operation to enable-dhcp, and such a situation should end up
|
||||
in creating the neutron-dhcp-port for consumption by OpenDaylight.
|
||||
* A subnet update operation to enable-dhcp can be immediately followed by a
|
||||
subnet update operation to disable-dhcp, and such a situation should end up
|
||||
in deleting the neutron-dhcp-port that was created for use by OpenDaylight.
|
||||
* A subnet update operation to enable-dhcp can be immediately followed by a
|
||||
subnet delete operation,and such a situation should end up deleting the
|
||||
neutron-dhcp-port that was about to be provided for use by OpenDaylight.
|
||||
* A subnet create operation (with dhcp enabled) can be immediately followed
|
||||
by a subnet update operation to disable-dhcp, and such a situation should
|
||||
end up in deleting the neutron-dhcp-port that was created for use by
|
||||
OpenDaylight.
|
||||
|
||||
The design in Alternative 2 handles the above cases best and is the approach we
|
||||
propose to pursue for this spec, as sketched below.
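A hedged illustration of that approach: derive the desired port state from the
subnet's current ``enable_dhcp`` flag at the time the journal entry is
processed, so back-to-back enable/disable updates converge on the correct
outcome. The helper below is illustrative, not the actual driver code.

.. code-block:: python

    # Illustrative only: reconcile the ODL DHCP port against the subnet's
    # *current* state at the time the journal entry is processed.

    def reconcile_dhcp_port(subnet, dhcp_ports):
        """Return the action taken: 'created', 'deleted' or 'noop'."""
        wants_port = subnet.get('enable_dhcp') and not subnet.get('deleted')
        has_port = subnet['id'] in dhcp_ports
        if wants_port and not has_port:
            dhcp_ports[subnet['id']] = {'subnet_id': subnet['id']}
            return 'created'
        if not wants_port and has_port:
            del dhcp_ports[subnet['id']]
            return 'deleted'
        return 'noop'


    ports = {}
    # enable-dhcp immediately followed by disable-dhcp: the later state wins.
    reconcile_dhcp_port({'id': 's1', 'enable_dhcp': True}, ports)   # 'created'
    reconcile_dhcp_port({'id': 's1', 'enable_dhcp': False}, ports)  # 'deleted'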
|
||||
|
||||
Dependencies
|
||||
============
|
||||
This feature depends on an enhancement to the OpenDaylight DHCP service, as per
|
||||
the spec in [1].
|
||||
|
||||
Impact
|
||||
======
|
||||
None
|
||||
|
||||
Assignee(s)
|
||||
===========
|
||||
|
||||
* Achuth Maniyedath (achuth.m@altencalsoftlabs.com)
|
||||
* Karthik Prasad(karthik.p@altencalsoftlabs.com)
|
||||
|
||||
References
|
||||
==========
|
||||
|
||||
* [1] OpenDaylight spec to cover this feature
|
||||
https://git.opendaylight.org/gerrit/#/c/52298/
|
@ -1 +0,0 @@
|
||||
.. include:: ../../../TESTING.rst
|
@ -1,7 +0,0 @@
|
||||
========
|
||||
Usage
|
||||
========
|
||||
|
||||
To use networking-odl in a project::
|
||||
|
||||
import networking_odl
|
@ -1,38 +0,0 @@
|
||||
.. cover title comes from README.rst
|
||||
|
||||
.. include:: ../../README.rst
|
||||
|
||||
Installation
|
||||
------------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
install/index
|
||||
|
||||
Configuration options
|
||||
---------------------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
configuration/index
|
||||
|
||||
Administration Guide
|
||||
--------------------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
admin/index
|
||||
|
||||
Contributor Guide
|
||||
-----------------
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
contributor/index
|
||||
|
||||
Indices and tables
|
||||
------------------
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`search`
|
||||
|
@ -1 +0,0 @@
|
||||
.. include:: ../../../devstack/README.rst
|
@ -1,8 +0,0 @@
|
||||
Installation Guide
|
||||
==================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
installation
|
||||
DevStack plugin <devstack>
|
@ -1,220 +0,0 @@
|
||||
.. _installation:
|
||||
|
||||
Installation
|
||||
============
|
||||
|
||||
The ``networking-odl`` repository includes integration with DevStack that
|
||||
enables creation of a simple OpenDaylight (ODL) development and test
|
||||
environment. This document discusses what is required for manual installation
|
||||
and for integration into a production OpenStack deployment tool, for conventional
|
||||
architectures that include the following types of nodes:
|
||||
|
||||
* Controller - Runs OpenStack control plane services such as REST APIs
|
||||
and databases.
|
||||
|
||||
* Network - Provides connectivity between provider (public) and project
|
||||
(private) networks. Services provided include layer-3 (routing), DHCP, and
|
||||
metadata agents. The layer-3 agent is optional. When using netvirt (vpnservice),
|
||||
the DHCP/metadata agents are also optional.
|
||||
|
||||
* Compute - Runs the hypervisor and layer-2 agent for the Networking
|
||||
service.
|
||||
|
||||
ODL Installation
|
||||
----------------
|
||||
|
||||
http://docs.opendaylight.org provides manuals and general documentation for ODL.
|
||||
|
||||
Review the following documentation regardless of install scenario:
|
||||
|
||||
* `ODL installation <http://docs.opendaylight.org/en/latest/getting-started-guide/installing_opendaylight.html>`_.
|
||||
|
||||
* `OpenDaylight with OpenStack <http://docs.opendaylight.org/en/latest/opendaylight-with-openstack/index.html>`_.
|
||||
|
||||
Choose and review one of the following installation scenarios:
|
||||
|
||||
* `GBP with OpenStack <http://docs.opendaylight.org/en/latest/opendaylight-with-openstack/openstack-with-gbp.html>`_.
|
||||
OpenDaylight Group Based Policy allows users to express network configuration
|
||||
in a declarative rather than imperative way. Often described as asking for
|
||||
"what you want", rather than "how you can do it", Group Based Policy achieves
|
||||
this by implementing an Intent System. The Intent System is a process around
|
||||
an intent driven data model and contains no domain specifics but is capable
|
||||
of addressing multiple semantic definitions of intent.
|
||||
|
||||
* `OVSDB with OpenStack <http://docs.opendaylight.org/en/latest/opendaylight-with-openstack/openstack-with-ovsdb.html>`_.
|
||||
OpenDaylight OVSDB allows users to take advantage of Network Virtualization
|
||||
using OpenDaylight SDN capabilities whilst utilizing OpenvSwitch. The stack
|
||||
includes a Neutron Northbound, a Network Virtualization layer, an OVSDB
|
||||
southbound plugin, and an OpenFlow southbound plugin.
|
||||
|
||||
* `VTN with OpenStack <http://docs.opendaylight.org/en/latest/opendaylight-with-openstack/openstack-with-vtn.html>`_.
|
||||
OpenDaylight Virtual Tenant Network (VTN) is an application that provides
|
||||
multi-tenant virtual network on an SDN controller. VTN Manager is
|
||||
implemented as one plugin to the OpenDaylight controller and provides a REST
|
||||
interface to create/update/delete VTN components. It provides an
|
||||
implementation of the OpenStack L2 Network Functions API.
|
||||
|
||||
Networking-odl Installation
|
||||
---------------------------
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# sudo pip install networking-odl
|
||||
|
||||
.. note::
|
||||
|
||||
pip needs to be installed before running the above command.
|
||||
|
||||
|
||||
Networking-odl Configuration
|
||||
----------------------------
|
||||
|
||||
All related neutron services need to be restarted after configuration change.
|
||||
|
||||
#. Configure the OpenStack neutron server. The neutron server integrates ODL as an
|
||||
ML2 driver. Edit the ``/etc/neutron/neutron.conf`` file:
|
||||
|
||||
* Enable the ML2 core plug-in.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[DEFAULT]
|
||||
...
|
||||
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
|
||||
|
||||
* (Optional) Enable the ODL L3 router. If the QoS feature is desired,
|
||||
append ``qos`` to ``service_plugins``.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[DEFAULT]
|
||||
...
|
||||
service_plugins = odl-router
|
||||
|
||||
|
||||
#. Configure the ML2 plug-in. Edit the
|
||||
``/etc/neutron/plugins/ml2/ml2_conf.ini`` file:
|
||||
|
||||
* Configure the ODL mechanism driver, network type drivers, self-service
|
||||
(tenant) network types, and optionally enable extension drivers.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[ml2]
|
||||
...
|
||||
mechanism_drivers = opendaylight_v2
|
||||
type_drivers = local,flat,vlan,vxlan
|
||||
tenant_network_types = vxlan
|
||||
extension_drivers = port_security, qos
|
||||
|
||||
.. note::
|
||||
|
||||
Enabling the ``qos`` extension driver is optional; it should be
|
||||
enabled if the ``qos`` service plugin is also enabled.
|
||||
|
||||
* Configure the vxlan range.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[ml2_type_vxlan]
|
||||
...
|
||||
vni_ranges = 1:1000
|
||||
|
||||
* Optionally, enable support for VLAN provider and self-service
|
||||
networks on one or more physical networks. If you specify only
|
||||
the physical network, only administrative (privileged) users can
|
||||
manage VLAN networks. Additionally specifying a VLAN ID range for
|
||||
a physical network enables regular (non-privileged) users to
|
||||
manage VLAN networks. The Networking service allocates the VLAN ID
|
||||
for each self-service network using the VLAN ID range for the
|
||||
physical network.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[ml2_type_vlan]
|
||||
...
|
||||
network_vlan_ranges = PHYSICAL_NETWORK:MIN_VLAN_ID:MAX_VLAN_ID
|
||||
|
||||
Replace ``PHYSICAL_NETWORK`` with the physical network name and
|
||||
optionally define the minimum and maximum VLAN IDs. Use a comma
|
||||
to separate each physical network.
|
||||
|
||||
For example, to enable support for administrative VLAN networks
|
||||
on the ``physnet1`` network and self-service VLAN networks on
|
||||
the ``physnet2`` network using VLAN IDs 1001 to 2000:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
network_vlan_ranges = physnet1,physnet2:1001:2000
|
||||
|
||||
* Enable security groups.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[securitygroup]
|
||||
...
|
||||
enable_security_group = true
|
||||
|
||||
* Configure ML2 ODL
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[ml2_odl]
|
||||
|
||||
...
|
||||
username = <ODL_USERNAME>
|
||||
password = <ODL_PASSWORD>
|
||||
url = http://<ODL_IP_ADDRESS>:<ODL_PORT>/controller/nb/v2/neutron
|
||||
port_binding_controller = pseudo-agentdb-binding
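To sanity-check these values, the same URL and credentials can be queried
directly. A minimal sketch using ``requests`` (the placeholders mirror the
configuration above; ``/networks`` is the neutron northbound collection):

.. code-block:: python

    import requests

    # Placeholders mirror the [ml2_odl] settings above.
    url = 'http://<ODL_IP_ADDRESS>:<ODL_PORT>/controller/nb/v2/neutron'
    resp = requests.get(url + '/networks',
                        auth=('<ODL_USERNAME>', '<ODL_PASSWORD>'),
                        timeout=10)
    resp.raise_for_status()  # a 2xx response means the URL and credentials work
    print(resp.json())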
|
||||
|
||||
|
||||
Compute/network nodes
|
||||
---------------------
|
||||
|
||||
Each compute/network node runs the OVS services. If compute/network nodes are
|
||||
already configured to run with the Neutron ML2 OVS driver, additional steps are
|
||||
necessary; refer to `OVSDB with OpenStack <http://docs.opendaylight.org/en/latest/
|
||||
opendaylight-with-openstack/openstack-with-ovsdb.html>`_ for details.
|
||||
|
||||
#. Install the ``openvswitch`` packages.
|
||||
|
||||
#. Start the OVS service.
|
||||
|
||||
Using the *systemd* unit:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# systemctl start openvswitch
|
||||
|
||||
Using the ``ovs-ctl`` script:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# /usr/share/openvswitch/scripts/ovs-ctl start
|
||||
|
||||
#. Configure OVS to use ODL as a manager.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ovs-vsctl set-manager tcp:${ODL_IP_ADDRESS}:6640
|
||||
|
||||
Replace ``ODL_IP_ADDRESS`` with the IP address of ODL controller node
|
||||
|
||||
#. Set host OVS configurations if port_binding_controller is pseudo-agent
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# sudo neutron-odl-ovs-hostconfig
|
||||
|
||||
#. Verify the OVS service.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ovs-vsctl show
|
||||
|
||||
.. note::
|
||||
|
||||
After changing the config files, you have to restart the neutron server.
|
||||
If you are using screen, it can be restarted directly from the q-svc
|
||||
window; otherwise you can use ``service neutron-server restart``, which may or
|
||||
may not work depending on the OS you are using.
|
@ -1,61 +0,0 @@
|
||||
# Configuration for the OpenDaylight MechanismDriver
|
||||
|
||||
[ml2_odl]
|
||||
# (StrOpt) OpenDaylight REST URL
|
||||
# If this is not set then no HTTP requests will be made.
|
||||
#
|
||||
# url =
|
||||
# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
|
||||
|
||||
# (StrOpt) Username for HTTP basic authentication to ODL.
|
||||
#
|
||||
# username =
|
||||
# Example: username = admin
|
||||
|
||||
# (StrOpt) Password for HTTP basic authentication to ODL.
|
||||
#
|
||||
# password =
|
||||
# Example: password = admin
|
||||
|
||||
# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
|
||||
# This is an optional parameter, default value is 10 seconds.
|
||||
#
|
||||
# timeout = 10
|
||||
# Example: timeout = 15
|
||||
|
||||
# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
|
||||
# This is an optional parameter, default value is 30 minutes.
|
||||
#
|
||||
# session_timeout = 30
|
||||
# Example: session_timeout = 60
|
||||
|
||||
# (IntOpt) Timeout in seconds for the V2 driver thread to fire off
|
||||
# another thread run through the journal database.
|
||||
#
|
||||
# sync_timeout = 10
|
||||
# Example: sync_timeout = 10
|
||||
|
||||
# (IntOpt) Number of times to retry a journal transaction before
|
||||
# marking it 'failed'.
|
||||
#
|
||||
# retry_count = 5
|
||||
# Example: retry_count = 5
|
||||
|
||||
# (IntOpt) (V2 driver) Journal maintenance operations interval in seconds.
|
||||
#
|
||||
# maintenance_interval = 300
|
||||
# Example: maintenance_interval = 30
|
||||
|
||||
# (IntOpt) (V2 driver) Time to keep completed rows in seconds.
|
||||
# Completed rows retention will be checked every maintenance_interval by the
|
||||
# cleanup thread.
|
||||
# To disable completed rows deletion value should be -1
|
||||
#
|
||||
# completed_rows_retention = 600
|
||||
# Example: completed_rows_retention = 30
|
||||
|
||||
# (IntOpt) (V2 driver) Timeout in seconds to wait before marking a processing
|
||||
# row back to pending state.
|
||||
#
|
||||
# processing_timeout = 100
|
||||
# Example: processing_timeout = 200
|
143
etc/policy.json
143
etc/policy.json
@ -1,143 +0,0 @@
|
||||
{
|
||||
"context_is_admin": "role:admin",
|
||||
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
|
||||
"context_is_advsvc": "role:advsvc",
|
||||
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
|
||||
"admin_only": "rule:context_is_admin",
|
||||
"regular_user": "",
|
||||
"shared": "field:networks:shared=True",
|
||||
"shared_firewalls": "field:firewalls:shared=True",
|
||||
"external": "field:networks:router:external=True",
|
||||
"default": "rule:admin_or_owner",
|
||||
|
||||
"create_subnet": "rule:admin_or_network_owner",
|
||||
"get_subnet": "rule:admin_or_owner or rule:shared",
|
||||
"update_subnet": "rule:admin_or_network_owner",
|
||||
"delete_subnet": "rule:admin_or_network_owner",
|
||||
|
||||
"create_network": "",
|
||||
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
|
||||
"get_network:router:external": "rule:regular_user",
|
||||
"get_network:segments": "rule:admin_only",
|
||||
"get_network:provider:network_type": "rule:admin_only",
|
||||
"get_network:provider:physical_network": "rule:admin_only",
|
||||
"get_network:provider:segmentation_id": "rule:admin_only",
|
||||
"get_network:queue_id": "rule:admin_only",
|
||||
"create_network:shared": "rule:admin_only",
|
||||
"create_network:router:external": "rule:admin_only",
|
||||
"create_network:segments": "rule:admin_only",
|
||||
"create_network:provider:network_type": "rule:admin_only",
|
||||
"create_network:provider:physical_network": "rule:admin_only",
|
||||
"create_network:provider:segmentation_id": "rule:admin_only",
|
||||
"update_network": "rule:admin_or_owner",
|
||||
"update_network:segments": "rule:admin_only",
|
||||
"update_network:shared": "rule:admin_only",
|
||||
"update_network:provider:network_type": "rule:admin_only",
|
||||
"update_network:provider:physical_network": "rule:admin_only",
|
||||
"update_network:provider:segmentation_id": "rule:admin_only",
|
||||
"update_network:router:external": "rule:admin_only",
|
||||
"delete_network": "rule:admin_or_owner",
|
||||
|
||||
"create_port": "",
|
||||
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:binding:host_id": "rule:admin_only",
|
||||
"create_port:binding:profile": "rule:admin_only",
|
||||
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
|
||||
"get_port:queue_id": "rule:admin_only",
|
||||
"get_port:binding:vif_type": "rule:admin_only",
|
||||
"get_port:binding:vif_details": "rule:admin_only",
|
||||
"get_port:binding:host_id": "rule:admin_only",
|
||||
"get_port:binding:profile": "rule:admin_only",
|
||||
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
|
||||
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:binding:host_id": "rule:admin_only",
|
||||
"update_port:binding:profile": "rule:admin_only",
|
||||
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
|
||||
|
||||
"get_router:ha": "rule:admin_only",
|
||||
"create_router": "rule:regular_user",
|
||||
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
|
||||
"create_router:distributed": "rule:admin_only",
|
||||
"create_router:ha": "rule:admin_only",
|
||||
"get_router": "rule:admin_or_owner",
|
||||
"get_router:distributed": "rule:admin_only",
|
||||
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
|
||||
"update_router:distributed": "rule:admin_only",
|
||||
"update_router:ha": "rule:admin_only",
|
||||
"delete_router": "rule:admin_or_owner",
|
||||
|
||||
"add_router_interface": "rule:admin_or_owner",
|
||||
"remove_router_interface": "rule:admin_or_owner",
|
||||
|
||||
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
|
||||
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
|
||||
|
||||
"create_firewall": "",
|
||||
"get_firewall": "rule:admin_or_owner",
|
||||
"create_firewall:shared": "rule:admin_only",
|
||||
"get_firewall:shared": "rule:admin_only",
|
||||
"update_firewall": "rule:admin_or_owner",
|
||||
"update_firewall:shared": "rule:admin_only",
|
||||
"delete_firewall": "rule:admin_or_owner",
|
||||
|
||||
"create_firewall_policy": "",
|
||||
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
|
||||
"create_firewall_policy:shared": "rule:admin_or_owner",
|
||||
"update_firewall_policy": "rule:admin_or_owner",
|
||||
"delete_firewall_policy": "rule:admin_or_owner",
|
||||
|
||||
"create_firewall_rule": "",
|
||||
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
|
||||
"update_firewall_rule": "rule:admin_or_owner",
|
||||
"delete_firewall_rule": "rule:admin_or_owner",
|
||||
|
||||
"create_qos_queue": "rule:admin_only",
|
||||
"get_qos_queue": "rule:admin_only",
|
||||
|
||||
"update_agent": "rule:admin_only",
|
||||
"delete_agent": "rule:admin_only",
|
||||
"get_agent": "rule:admin_only",
|
||||
|
||||
"create_dhcp-network": "rule:admin_only",
|
||||
"delete_dhcp-network": "rule:admin_only",
|
||||
"get_dhcp-networks": "rule:admin_only",
|
||||
"create_l3-router": "rule:admin_only",
|
||||
"delete_l3-router": "rule:admin_only",
|
||||
"get_l3-routers": "rule:admin_only",
|
||||
"get_dhcp-agents": "rule:admin_only",
|
||||
"get_l3-agents": "rule:admin_only",
|
||||
"get_loadbalancer-agent": "rule:admin_only",
|
||||
"get_loadbalancer-pools": "rule:admin_only",
|
||||
|
||||
"create_floatingip": "rule:regular_user",
|
||||
"create_floatingip:floating_ip_address": "rule:admin_only",
|
||||
"update_floatingip": "rule:admin_or_owner",
|
||||
"delete_floatingip": "rule:admin_or_owner",
|
||||
"get_floatingip": "rule:admin_or_owner",
|
||||
|
||||
"create_network_profile": "rule:admin_only",
|
||||
"update_network_profile": "rule:admin_only",
|
||||
"delete_network_profile": "rule:admin_only",
|
||||
"get_network_profiles": "",
|
||||
"get_network_profile": "",
|
||||
"update_policy_profiles": "rule:admin_only",
|
||||
"get_policy_profiles": "",
|
||||
"get_policy_profile": "",
|
||||
|
||||
"create_metering_label": "rule:admin_only",
|
||||
"delete_metering_label": "rule:admin_only",
|
||||
"get_metering_label": "rule:admin_only",
|
||||
|
||||
"create_metering_label_rule": "rule:admin_only",
|
||||
"delete_metering_label_rule": "rule:admin_only",
|
||||
"get_metering_label_rule": "rule:admin_only",
|
||||
|
||||
"get_service_provider": "rule:regular_user",
|
||||
"get_lsn": "rule:admin_only",
|
||||
"create_lsn": "rule:admin_only"
|
||||
}
|
@ -1,24 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import gettext
|
||||
|
||||
import six
|
||||
|
||||
|
||||
if six.PY2:
|
||||
gettext.install('networking_odl', unicode=1)
|
||||
else:
|
||||
gettext.install('networking_odl')
|
@ -1,40 +0,0 @@
|
||||
# Copyright 2016 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""oslo.i18n integration module.
|
||||
|
||||
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
|
||||
|
||||
"""
|
||||
|
||||
import oslo_i18n
|
||||
|
||||
DOMAIN = "networking_odl"
|
||||
|
||||
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
|
||||
|
||||
# The translation function using the well-known name "_"
|
||||
_ = _translators.primary
|
||||
|
||||
# The contextual translation function using the name "_C"
|
||||
# requires oslo.i18n >=2.1.0
|
||||
_C = _translators.contextual_form
|
||||
|
||||
# The plural translation function using the name "_P"
|
||||
# requires oslo.i18n >=2.1.0
|
||||
_P = _translators.plural_form
|
||||
|
||||
|
||||
def get_available_languages():
|
||||
return oslo_i18n.get_available_languages(DOMAIN)
|
@ -1,116 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import helpers as log_helpers
|
||||
from oslo_log import log as logging
|
||||
|
||||
from networking_bgpvpn.neutron.extensions import bgpvpn as bgpvpn_ext
|
||||
from networking_bgpvpn.neutron.services.service_drivers import driver_api
|
||||
from neutron_lib.api.definitions import bgpvpn as bgpvpn_const
|
||||
|
||||
from networking_odl.common import constants as odl_const
|
||||
from networking_odl.common import postcommit
|
||||
from networking_odl.journal import full_sync
|
||||
from networking_odl.journal import journal
|
||||
|
||||
|
||||
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
BGPVPN_RESOURCES = {
|
||||
odl_const.ODL_BGPVPN: odl_const.ODL_BGPVPNS,
|
||||
odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION:
|
||||
odl_const.ODL_BGPVPN_NETWORK_ASSOCIATIONS,
|
||||
|
||||
odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION:
|
||||
odl_const.ODL_BGPVPN_ROUTER_ASSOCIATIONS
|
||||
}
|
||||
|
||||
|
||||
@postcommit.add_postcommit('bgpvpn', 'net_assoc', 'router_assoc')
|
||||
class OpenDaylightBgpvpnDriver(driver_api.BGPVPNDriver):
|
||||
|
||||
"""OpenDaylight BGPVPN Driver
|
||||
|
||||
This code is the backend implementation for the OpenDaylight BGPVPN
|
||||
driver for Openstack Neutron.
|
||||
"""
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def __init__(self, service_plugin):
|
||||
LOG.info("Initializing OpenDaylight BGPVPN v2 driver")
|
||||
super(OpenDaylightBgpvpnDriver, self).__init__(service_plugin)
|
||||
self.journal = journal.OpenDaylightJournalThread()
|
||||
full_sync.register(bgpvpn_const.LABEL, BGPVPN_RESOURCES)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create_bgpvpn_precommit(self, context, bgpvpn):
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
bgpvpn['id'], odl_const.ODL_CREATE, bgpvpn)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update_bgpvpn_precommit(self, context, bgpvpn):
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete_bgpvpn_precommit(self, context, bgpvpn):
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
bgpvpn['id'], odl_const.ODL_DELETE, [])
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create_net_assoc_precommit(self, context, net_assoc):
|
||||
our_bgpvpn = None
|
||||
bgpvpns = self.get_bgpvpns(context)
|
||||
for bgpvpn in bgpvpns:
|
||||
# ODL only allows a network to be associated with one BGPVPN
|
||||
if bgpvpn['id'] == net_assoc['bgpvpn_id']:
|
||||
our_bgpvpn = bgpvpn
|
||||
else:
|
||||
if bgpvpn['networks'] and (net_assoc['network_id'] in
|
||||
bgpvpn['networks']):
|
||||
raise bgpvpn_ext.BGPVPNNetworkAssocExistsAnotherBgpvpn(
|
||||
driver="OpenDaylight V2",
|
||||
network=net_assoc['network_id'],
|
||||
bgpvpn=bgpvpn['id'])
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
our_bgpvpn['id'], odl_const.ODL_UPDATE, our_bgpvpn)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete_net_assoc_precommit(self, context, net_assoc):
|
||||
bgpvpn = self.get_bgpvpn(context, net_assoc['bgpvpn_id'])
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create_router_assoc_precommit(self, context, router_assoc):
|
||||
associated_routers = self.get_router_assocs(context,
|
||||
router_assoc['bgpvpn_id'])
|
||||
for assoc_router in associated_routers:
|
||||
if(router_assoc["router_id"] != assoc_router["router_id"]):
|
||||
raise bgpvpn_ext.BGPVPNMultipleRouterAssocNotSupported(
|
||||
driver="OpenDaylight V2")
|
||||
bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete_router_assoc_precommit(self, context, router_assoc):
|
||||
bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
|
||||
journal.record(context, odl_const.ODL_BGPVPN,
|
||||
bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)
|
@ -1,137 +0,0 @@
|
||||
#
|
||||
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
from oslo_log import log
|
||||
import requests
|
||||
from requests import auth
|
||||
import six
|
||||
|
||||
from ceilometer.i18n import _
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class _Base(object):
|
||||
"""Base class of OpenDaylight REST APIs Clients."""
|
||||
|
||||
@abc.abstractproperty
|
||||
def base_url(self):
|
||||
"""Returns base url for each REST API."""
|
||||
|
||||
def __init__(self, client):
|
||||
self.client = client
|
||||
|
||||
def get_statistics(self):
|
||||
return self.client.request(self.base_url)
|
||||
|
||||
|
||||
class OpenDaylightRESTAPIFailed(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class SwitchStatisticsAPIClient(_Base):
|
||||
"""OpenDaylight Switch Statistics REST API Client
|
||||
|
||||
Base URL:
|
||||
{endpoint}/flow-capable-switches
|
||||
"""
|
||||
|
||||
base_url = '/flow-capable-switches'
|
||||
|
||||
|
||||
class Client(object):
|
||||
|
||||
def __init__(self, conf, endpoint, params):
|
||||
self.switch_statistics = SwitchStatisticsAPIClient(self)
|
||||
self._endpoint = endpoint
|
||||
self.conf = conf
|
||||
|
||||
self._req_params = self._get_req_params(params)
|
||||
self.session = requests.Session()
|
||||
|
||||
def _get_req_params(self, params):
|
||||
req_params = {
|
||||
'headers': {
|
||||
'Accept': 'application/json'
|
||||
},
|
||||
'timeout': self.conf.http_timeout,
|
||||
}
|
||||
|
||||
auth_way = params.get('auth')
|
||||
if auth_way in ['basic', 'digest']:
|
||||
user = params.get('user')
|
||||
password = params.get('password')
|
||||
|
||||
if auth_way == 'basic':
|
||||
auth_class = auth.HTTPBasicAuth
|
||||
else:
|
||||
auth_class = auth.HTTPDigestAuth
|
||||
|
||||
req_params['auth'] = auth_class(user, password)
|
||||
return req_params
|
||||
|
||||
def _log_req(self, url):
|
||||
|
||||
curl_command = ['REQ: curl -i -X GET', '"%s"' % (url)]
|
||||
|
||||
if 'auth' in self._req_params:
|
||||
auth_class = self._req_params['auth']
|
||||
if isinstance(auth_class, auth.HTTPBasicAuth):
|
||||
curl_command.append('--basic')
|
||||
else:
|
||||
curl_command.append('--digest')
|
||||
|
||||
curl_command.append('--user "%s":"***"' % auth_class.username)
|
||||
|
||||
for name, value in six.iteritems(self._req_params['headers']):
|
||||
curl_command.append('-H "%s: %s"' % (name, value))
|
||||
|
||||
LOG.debug(' '.join(curl_command))
|
||||
|
||||
@staticmethod
|
||||
def _log_res(resp):
|
||||
|
||||
dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version,
|
||||
resp.status_code,
|
||||
resp.reason)]
|
||||
dump.extend('%s: %s\n' % (k, v)
|
||||
for k, v in six.iteritems(resp.headers))
|
||||
dump.append('\n')
|
||||
if resp.content:
|
||||
dump.extend([resp.content, '\n'])
|
||||
|
||||
LOG.debug(''.join(dump))
|
||||
|
||||
def _http_request(self, url):
|
||||
if self.conf.debug:
|
||||
self._log_req(url)
|
||||
resp = self.session.get(url, **self._req_params)
|
||||
if self.conf.debug:
|
||||
self._log_res(resp)
|
||||
if resp.status_code // 100 != 2:
|
||||
raise OpenDaylightRESTAPIFailed(
|
||||
_('OpenDaylight API returned %(status)s %(reason)s') %
|
||||
{'status': resp.status_code, 'reason': resp.reason})
|
||||
|
||||
return resp.json()
|
||||
|
||||
def request(self, path):
|
||||
|
||||
url = self._endpoint + path
|
||||
return self._http_request(url)
|
@ -1,296 +0,0 @@
|
||||
#
|
||||
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log
|
||||
from six.moves.urllib import parse as urlparse
|
||||
|
||||
from ceilometer.network.statistics import driver
|
||||
from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
INT64_MAX_VALUE = (2 ** 64 / 2 - 1)
|
||||
|
||||
|
||||
class OpenDaylightDriver(driver.Driver):
|
||||
"""Driver of network info collector from OpenDaylight.
|
||||
|
||||
This driver uses resources in "/etc/ceilometer/polling.yaml".
|
||||
Resource requires below conditions:
|
||||
|
||||
* resource is url
|
||||
* scheme is "opendaylight.v2"
|
||||
|
||||
This driver can be configured via query parameters.
|
||||
Supported parameters:
|
||||
|
||||
* scheme:
|
||||
The scheme of request url to OpenDaylight REST API endpoint.
|
||||
(default http)
|
||||
* auth:
|
||||
Auth strategy of http.
|
||||
This parameter can be set to basic or digest. (default None)
|
||||
* user:
|
||||
This is the username used by auth. (default None)
|
||||
* password:
|
||||
This is the password used by auth. (default None)
|
||||
|
||||
e.g.::
|
||||
|
||||
opendaylight.v2://127.0.0.1:8080/controller/statistics
|
||||
?auth=basic&user=admin&password=admin&scheme=http
|
||||
|
||||
In this case, the driver send request to below URLs:
|
||||
|
||||
http://127.0.0.1:8080/controller/statistics/flow-capable-switches
|
||||
|
||||
Example JSON response from OpenDaylight
|
||||
{
|
||||
flow_capable_switches: [{
|
||||
packet_in_messages_received: 501,
|
||||
packet_out_messages_sent: 300,
|
||||
ports: 1,
|
||||
flow_datapath_id: 55120148545607,
|
||||
tenant_id: ADMIN_ID,
|
||||
switch_port_counters: [{
|
||||
bytes_received: 1000,
|
||||
bytes_sent: 1000,
|
||||
duration: 600,
|
||||
packets_internal_received: 100,
|
||||
packets_internal_sent: 200,
|
||||
packets_received: 100,
|
||||
packets_received_drop: 0,
|
||||
packets_received_error: 0,
|
||||
packets_sent: 100,
|
||||
port_id: 4,
|
||||
tenant_id: PORT_1_TENANT_ID,
|
||||
uuid: PORT_1_ID
|
||||
}],
|
||||
table_counters: [{
|
||||
flow_count: 90,
|
||||
table_id: 0
|
||||
}]
|
||||
}]
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _get_int_sample(key, statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
if key not in statistic:
|
||||
return None
|
||||
value = int(statistic[key])
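# Counters outside the signed 64-bit range are treated as bogus and
# clamped to 0 before being reported.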
|
||||
if not (0 <= value <= INT64_MAX_VALUE):
|
||||
value = 0
|
||||
return value, resource_id, resource_meta, tenant_id
|
||||
|
||||
def _prepare_cache(self, endpoint, params, cache):
|
||||
|
||||
if 'network.statistics.opendaylight_v2' in cache:
|
||||
return cache['network.statistics.opendaylight_v2']
|
||||
|
||||
data = {}
|
||||
|
||||
odl_params = {}
|
||||
if 'auth' in params:
|
||||
odl_params['auth'] = params['auth'][0]
|
||||
if 'user' in params:
|
||||
odl_params['user'] = params['user'][0]
|
||||
if 'password' in params:
|
||||
odl_params['password'] = params['password'][0]
|
||||
cs = client.Client(self.conf, endpoint, odl_params)
|
||||
|
||||
try:
|
||||
# get switch statistics
|
||||
data['switch'] = cs.switch_statistics.get_statistics()
|
||||
except Exception:
|
||||
LOG.exception('Request failed to connect to OpenDaylight'
|
||||
' with NorthBound REST API')
|
||||
|
||||
cache['network.statistics.opendaylight_v2'] = data
|
||||
|
||||
return data
|
||||
|
||||
def get_sample_data(self, meter_name, parse_url, params, cache):
|
||||
|
||||
extractor = self._get_extractor(meter_name)
|
||||
if extractor is None:
|
||||
# The way to get this meter is not implemented in this driver, or
|
||||
# the OpenDaylight REST API has no API to get this meter.
|
||||
return None
|
||||
|
||||
iter = self._get_iter(meter_name)
|
||||
if iter is None:
|
||||
# The way to get this meter is not implemented in this driver, or
|
||||
# the OpenDaylight REST API has no API to get this meter.
|
||||
return None
|
||||
|
||||
parts = urlparse.ParseResult(params.get('scheme', ['http'])[0],
|
||||
parse_url.netloc,
|
||||
parse_url.path,
|
||||
None,
|
||||
None,
|
||||
None)
|
||||
endpoint = urlparse.urlunparse(parts)
|
||||
|
||||
data = self._prepare_cache(endpoint, params, cache)
|
||||
|
||||
samples = []
|
||||
if data:
|
||||
for sample in iter(extractor, data):
|
||||
if sample is not None:
|
||||
# set controller name to resource_metadata
|
||||
sample[2]['controller'] = 'OpenDaylight_V2'
|
||||
samples.append(sample)
|
||||
|
||||
return samples
|
||||
|
||||
def _get_iter(self, meter_name):
|
||||
if meter_name == 'switch' or meter_name == 'switch.ports':
|
||||
return self._iter_switch
|
||||
elif meter_name.startswith('switch.table'):
|
||||
return self._iter_table
|
||||
elif meter_name.startswith('switch.port'):
|
||||
return self._iter_switch_port
|
||||
elif meter_name.startswith('port'):
|
||||
return self._iter_port
|
||||
|
||||
def _get_extractor(self, meter_name):
|
||||
if (meter_name == 'switch.port' or
|
||||
meter_name.startswith('switch.port.')):
|
||||
meter_name = meter_name.split('.', 1)[1]
|
||||
method_name = '_' + meter_name.replace('.', '_')
|
||||
return getattr(self, method_name, None)
|
||||
|
||||
@staticmethod
|
||||
def _iter_switch(extractor, data):
|
||||
for switch in data['switch']['flow_capable_switches']:
|
||||
yield (extractor(switch, str(switch['flow_datapath_id']),
|
||||
{}, switch['tenant_id']))
|
||||
|
||||
@staticmethod
|
||||
def _switch(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return 1, resource_id, resource_meta, tenant_id
|
||||
|
||||
@staticmethod
|
||||
def _switch_ports(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'ports', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _iter_switch_port(extractor, data):
|
||||
for switch in data['switch']['flow_capable_switches']:
|
||||
if 'switch_port_counters' in switch:
|
||||
switch_id = str(switch['flow_datapath_id'])
|
||||
tenant_id = switch['tenant_id']
|
||||
for port_statistic in switch['switch_port_counters']:
|
||||
port_id = port_statistic['port_id']
|
||||
resource_id = '%s:%d' % (switch_id, port_id)
|
||||
resource_meta = {'switch': switch_id,
|
||||
'port_number_on_switch': port_id}
|
||||
if 'uuid' in port_statistic:
|
||||
neutron_port_id = port_statistic['uuid']
|
||||
resource_meta['neutron_port_id'] = neutron_port_id
|
||||
yield extractor(port_statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _iter_port(extractor, data):
|
||||
resource_meta = {}
|
||||
for switch in data['switch']['flow_capable_switches']:
|
||||
if 'switch_port_counters' in switch:
|
||||
for port_statistic in switch['switch_port_counters']:
|
||||
if 'uuid' in port_statistic:
|
||||
resource_id = port_statistic['uuid']
|
||||
yield extractor(port_statistic,
|
||||
resource_id, resource_meta,
|
||||
port_statistic['tenant_id'])
|
||||
|
||||
@staticmethod
|
||||
def _port(statistic, resource_id, resource_meta, tenant_id):
|
||||
return 1, resource_id, resource_meta, tenant_id
|
||||
|
||||
@staticmethod
|
||||
def _port_uptime(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'duration', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _port_receive_packets(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'packets_received', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _port_transmit_packets(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'packets_sent', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _port_receive_bytes(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'bytes_received', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _port_transmit_bytes(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'bytes_sent', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _port_receive_drops(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'packets_received_drop', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _port_receive_errors(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'packets_received_error', statistic,
|
||||
resource_id, resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _iter_table(extractor, data):
|
||||
for switch_statistic in data['switch']['flow_capable_switches']:
|
||||
if 'table_counters' in switch_statistic:
|
||||
switch_id = str(switch_statistic['flow_datapath_id'])
|
||||
tenant_id = switch_statistic['tenant_id']
|
||||
for table_statistic in switch_statistic['table_counters']:
|
||||
resource_meta = {'switch': switch_id}
|
||||
resource_id = ("%s:table:%d" %
|
||||
(switch_id, table_statistic['table_id']))
|
||||
yield extractor(table_statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
||||
|
||||
@staticmethod
|
||||
def _switch_table_active_entries(statistic, resource_id,
|
||||
resource_meta, tenant_id):
|
||||
return OpenDaylightDriver._get_int_sample(
|
||||
'flow_count', statistic, resource_id,
|
||||
resource_meta, tenant_id)
|
@ -1,473 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2016 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
"""
|
||||
Command line script to set host OVS configurations (it requires ovsctl)
|
||||
|
||||
Examples:
|
||||
NOTE: bash accepts new line characters between quotes
|
||||
|
||||
To give a full custom json
|
||||
|
||||
python set_ovs_hostconfigs.py --ovs_hostconfigs='{
|
||||
"ODL L2": {
|
||||
"allowed_network_types":
|
||||
["local","vlan", "vxlan","gre"],
|
||||
"bridge_mappings": {"physnet1":"br-ex"}
|
||||
"supported_vnic_types": [
|
||||
{
|
||||
"vnic_type":"normal",
|
||||
"vif_type":"ovs",
|
||||
"vif_details":{}
|
||||
}
|
||||
]
|
||||
},
|
||||
"ODL L3": {}
|
||||
}'
|
||||
|
||||
To make sure to use system data path (Kernel)
|
||||
|
||||
python set_ovs_hostconfigs.py --noovs_dpdk
|
||||
|
||||
To make sure to use user space data path (vhostuser)
|
||||
|
||||
python set_ovs_hostconfigs.py --ovs_dpdk
|
||||
|
||||
To give bridge mappings
|
||||
|
||||
python set_ovs_hostconfigs.py --bridge_mappings=physnet1:br-ex,physnet2:br-eth0
|
||||
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from networking_odl._i18n import _
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
USERSPACE_DATAPATH_TYPES = ['netdev', 'dpdkvhostuser']
|
||||
|
||||
COMMAND_LINE_OPTIONS = [
|
||||
|
||||
cfg.ListOpt(
|
||||
'allowed_network_types',
|
||||
default=['local', 'vlan', 'vxlan', 'gre'],
|
||||
help=_("""
|
||||
Specifies the allowed network types, given as a comma-separated list of
|
||||
types.
|
||||
|
||||
Default: --allowed_network_types=local,vlan,vxlan,gre
|
||||
""")),
|
||||
|
||||
cfg.DictOpt(
|
||||
'bridge_mappings',
|
||||
default={},
|
||||
help=_("""
|
||||
Comma-separated list of <physical_network>:<bridge> tuples mapping
|
||||
physical network names to the agent's node-specific Open vSwitch
|
||||
bridge names to be used for flat and VLAN networks. The length of
|
||||
bridge names should be no more than 11. Each bridge must exist, and
|
||||
should have a physical network interface configured as a port. All
|
||||
physical networks configured on the server should have mappings to
|
||||
appropriate bridges on each agent.
|
||||
|
||||
Note: If you remove a bridge from this mapping, make sure to
|
||||
disconnect it from the integration bridge as it won't be managed by
|
||||
the agent anymore.
|
||||
|
||||
Default: --bridge_mappings=
|
||||
""")),
|
||||
|
||||
cfg.StrOpt(
|
||||
'datapath_type',
|
||||
choices=['system', 'netdev', 'dpdkvhostuser'],
|
||||
default=None,
|
||||
help=_("""
|
||||
It specifies the OVS data path to use.
|
||||
|
||||
If this value is given then --ovs_dpdk will be ignored.
|
||||
If neither this option nor --ovs_dpdk is given, a valid value for the
|
||||
current host will be used.
|
||||
|
||||
Choices: --datapath_type=
|
||||
--datapath_type=system # kernel data path
|
||||
--datapath_type=netdev # userspace data path
|
||||
--datapath_type=dpdkvhostuser # userspace data path
|
||||
|
||||
Default: --datapath_type=netdev # if support is detected
|
||||
--datapath_type=system # in all other cases
|
||||
""")),
|
||||
|
||||
cfg.BoolOpt(
|
||||
'debug',
|
||||
default=False,
|
||||
help=_("""
|
||||
It shows debugging information.
|
||||
|
||||
Default: --nodebug
|
||||
""")),
|
||||
|
||||
cfg.StrOpt(
|
||||
'host',
|
||||
default=socket.gethostname(), # pylint: disable=no-member
|
||||
help=_("""
|
||||
It specifies the host name of the target machine.
|
||||
|
||||
Default: --host=$HOSTNAME # running machine host name
|
||||
""")),
|
||||
|
||||
cfg.IPOpt(
|
||||
'local_ip',
|
||||
help=_("""
|
||||
IP address of local overlay (tunnel) network end-point.
|
||||
It accepts either an IPv4 or IPv6 address that resides on one
|
||||
of the host network interfaces. The IP version of this
|
||||
value must match the value of the 'overlay_ip_version'
|
||||
option in the ML2 plug-in configuration file on the Neutron
|
||||
server node(s).
|
||||
|
||||
Default: local_ip=
|
||||
""")),
|
||||
|
||||
cfg.BoolOpt(
|
||||
'ovs_dpdk',
|
||||
default=None,
|
||||
help=_("""
|
||||
It uses the user-space type of virtual interface (vhostuser) instead of
|
||||
the system-based one (ovs).
|
||||
|
||||
If this option is not specified, vhostuser support is detected on the
|
||||
running host and used when available.
|
||||
|
||||
NOTE: if --datapath_type is given then this option is ignored.
|
||||
|
||||
Default:
|
||||
""")),
|
||||
|
||||
cfg.StrOpt(
|
||||
'ovs_hostconfigs',
|
||||
help=_("""
|
||||
Gives a pre-made host configuration for OpenDaylight as a JSON
|
||||
string.
|
||||
|
||||
NOTE: when specified, all other options are ignored!
|
||||
|
||||
An entry should look like:
|
||||
--ovs_hostconfigs='{
|
||||
"ODL L2": {
|
||||
"allowed_network_types":
|
||||
["local","vlan", "vxlan","gre"],
|
||||
"bridge_mappings": {"physnet1":"br-ex"}
|
||||
"supported_vnic_types": [
|
||||
{
|
||||
"vnic_type":"normal",
|
||||
"vif_type":"ovs",
|
||||
"vif_details":{}
|
||||
}
|
||||
]
|
||||
},
|
||||
"ODL L3": {}
|
||||
}'
|
||||
|
||||
Default: --ovs_hostconfigs=
|
||||
""")),
|
||||
|
||||
cfg.StrOpt(
|
||||
'vhostuser_mode',
|
||||
choices=['client', 'server'],
|
||||
default='client',
|
||||
help=_("""
|
||||
It specifies the OVS VHostUser mode.
|
||||
|
||||
Choices: --vhostuser_mode=client
|
||||
--vhostuser_mode=server
|
||||
|
||||
Default: --vhostuser_mode=client
|
||||
""")),
|
||||
|
||||
cfg.BoolOpt(
|
||||
'vhostuser_ovs_plug',
|
||||
default=True,
|
||||
help=_("""
|
||||
Enable VHostUser OVS Plug.
|
||||
|
||||
Default: --vhostuser_ovs_plug
|
||||
""")),
|
||||
|
||||
cfg.StrOpt(
|
||||
'vhostuser_port_prefix',
|
||||
choices=['vhu', 'socket'],
|
||||
default='vhu',
|
||||
help=_("""
|
||||
VHostUser socket port prefix.
|
||||
|
||||
Choices: --vhostuser_port_prefix=vhu
|
||||
--vhostuser_port_prefix=socket
|
||||
|
||||
Default: --vhostuser_port_prefix=vhu
|
||||
""")),
|
||||
|
||||
cfg.StrOpt(
|
||||
'vhostuser_socket_dir',
|
||||
default='/var/run/openvswitch',
|
||||
help=_("""
|
||||
OVS VHostUser socket directory.
|
||||
|
||||
Default: --vhostuser_socket_dir=/var/run/openvswitch
|
||||
""")),
|
||||
]
|
||||
|
||||
|
||||
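# Captured at import time so setup_conf()/main() can default to the
# command-line arguments of the current process.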
DEFAULT_COMMAND_LINE_OPTIONS = tuple(sys.argv[1:])
|
||||
|
||||
|
||||
def set_ovs_extid_hostconfigs(conf, ovs_vsctl):
|
||||
if conf.ovs_hostconfigs:
|
||||
json_str = conf.ovs_hostconfigs.replace("\'", "\"")
|
||||
LOG.debug("SET-HOSTCONFIGS: JSON String %s", json_str)
|
||||
hostconfigs = jsonutils.loads(json_str)
|
||||
|
||||
else:
|
||||
uuid = ovs_vsctl.uuid()
|
||||
userspace_datapath_types = ovs_vsctl.userspace_datapath_types()
|
||||
hostconfigs = _hostconfigs_from_conf(
|
||||
conf=conf, uuid=uuid,
|
||||
userspace_datapath_types=userspace_datapath_types)
|
||||
|
||||
ovs_vsctl.set_host_name(conf.host)
|
||||
for name in sorted(hostconfigs):
|
||||
ovs_vsctl.set_host_config(name, hostconfigs[name])
|
||||
|
||||
# for new netvirt
|
||||
if conf.local_ip:
|
||||
ovs_vsctl.set_local_ip(conf.local_ip)
|
||||
if conf.bridge_mappings:
|
||||
provider_mappings = ",".join(
|
||||
"{}:{}".format(k, v) for k, v in conf.bridge_mappings.items())
|
||||
ovs_vsctl.set_provider_mappings(provider_mappings)
|
||||
|
||||
|
||||
def _hostconfigs_from_conf(conf, uuid, userspace_datapath_types):
|
||||
vif_type = _vif_type_from_conf(
|
||||
conf=conf, userspace_datapath_types=userspace_datapath_types)
|
||||
datapath_type = conf.datapath_type or (
|
||||
'system' if vif_type == 'ovs' else userspace_datapath_types[0])
|
||||
vif_details = _vif_details_from_conf(
|
||||
conf=conf, uuid=uuid, vif_type=vif_type)
|
||||
|
||||
return {
|
||||
"ODL L2": {
|
||||
"allowed_network_types": conf.allowed_network_types,
|
||||
"bridge_mappings": conf.bridge_mappings,
|
||||
"datapath_type": datapath_type,
|
||||
"supported_vnic_types": [
|
||||
{
|
||||
"vif_details": vif_details,
|
||||
"vif_type": vif_type,
|
||||
"vnic_type": "normal",
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _vif_type_from_conf(conf, userspace_datapath_types):
|
||||
|
||||
# take vif_type from datapath_type ------------------------------------
|
||||
if conf.datapath_type:
|
||||
# take it from datapath_type
|
||||
if conf.datapath_type in USERSPACE_DATAPATH_TYPES:
|
||||
if conf.datapath_type not in userspace_datapath_types:
|
||||
LOG.warning(
|
||||
"Using user space data path type '%s' even if no "
|
||||
"support was detected.", conf.datapath_type)
|
||||
return 'vhostuser'
|
||||
else:
|
||||
return 'ovs'
|
||||
|
||||
# take vif_type from ovs_dpdk -----------------------------------------
|
||||
if conf.ovs_dpdk is True:
|
||||
if userspace_datapath_types:
|
||||
return 'vhostuser'
|
||||
|
||||
raise ValueError(_(
|
||||
"--ovs_dpdk option was specified but the 'netdev' datapath_type "
|
||||
"was not enabled. "
|
||||
"To override use option --datapath_type=netdev"))
|
||||
|
||||
elif conf.ovs_dpdk is False:
|
||||
return 'ovs'
|
||||
|
||||
# take detected dtype -------------------------------------------------
|
||||
if userspace_datapath_types:
|
||||
return 'vhostuser'
|
||||
|
||||
return 'ovs'
|
||||
|
||||
|
||||
def _vif_details_from_conf(conf, uuid, vif_type):
|
||||
host_addresses = [conf.local_ip or conf.host]
|
||||
if vif_type == 'ovs':
|
||||
# OVS legacy mode
|
||||
return {"uuid": uuid,
|
||||
"host_addresses": host_addresses,
|
||||
"has_datapath_type_netdev": False,
|
||||
"support_vhost_user": False}
|
||||
|
||||
elif vif_type == 'vhostuser':
|
||||
# enable VHOSTUSER
|
||||
return {"uuid": uuid,
|
||||
"host_addresses": host_addresses,
|
||||
"has_datapath_type_netdev": True,
|
||||
"support_vhost_user": True,
|
||||
"port_prefix": conf.vhostuser_port_prefix,
|
||||
"vhostuser_socket_dir": conf.vhostuser_socket_dir,
|
||||
"vhostuser_ovs_plug": conf.vhostuser_ovs_plug,
|
||||
"vhostuser_mode": conf.vhostuser_mode,
|
||||
"vhostuser_socket": os.path.join(
|
||||
conf.vhostuser_socket_dir,
|
||||
conf.vhostuser_port_prefix + '$PORT_ID')}
|
||||
|
||||
|
||||
def setup_conf(args=None):
|
||||
"""setup cmdline options."""
|
||||
|
||||
if args is None:
|
||||
args = DEFAULT_COMMAND_LINE_OPTIONS
|
||||
|
||||
conf = cfg.ConfigOpts()
|
||||
if '-h' in args or '--help' in args:
|
||||
# Prints out the script documentation.
|
||||
print(__doc__)
|
||||
|
||||
conf.register_cli_opts(COMMAND_LINE_OPTIONS)
|
||||
conf(args=args)
|
||||
return conf
|
||||
|
||||
|
||||
class OvsVsctl(object):
|
||||
"""Wrapper class for ovs-vsctl command tool
|
||||
|
||||
"""
|
||||
|
||||
COMMAND = 'ovs-vsctl'
|
||||
TABLE = 'Open_vSwitch'
|
||||
|
||||
_uuid = None
|
||||
|
||||
def uuid(self):
|
||||
uuid = self._uuid
|
||||
if uuid is None:
|
||||
self._uuid = uuid = self._get('.', '_uuid')
|
||||
return uuid
|
||||
|
||||
_datapath_types = None
|
||||
|
||||
def datapath_types(self):
|
||||
datapath_types = self._datapath_types
|
||||
if datapath_types is None:
|
||||
try:
|
||||
datapath_types = self._get('.', 'datapath_types')
|
||||
except subprocess.CalledProcessError:
|
||||
datapath_types = 'system'
|
||||
self._datapath_types = datapath_types
|
||||
return datapath_types
|
||||
|
||||
_userspace_datapath_types = None
|
||||
|
||||
def userspace_datapath_types(self):
|
||||
userspace_datapath_types = self._userspace_datapath_types
|
||||
if userspace_datapath_types is None:
|
||||
datapath_types = self.datapath_types()
|
||||
userspace_datapath_types = tuple(
|
||||
datapath_type
|
||||
for datapath_type in USERSPACE_DATAPATH_TYPES
|
||||
if datapath_types.find(datapath_type) >= 0)
|
||||
self._userspace_datapath_types = userspace_datapath_types
|
||||
return userspace_datapath_types
|
||||
|
||||
def set_host_name(self, host_name):
|
||||
self._set_external_ids('odl_os_hostconfig_hostid', host_name)
|
||||
|
||||
def set_host_config(self, name, value):
|
||||
self._set_external_ids(
|
||||
name='odl_os_hostconfig_config_' + name.lower().replace(' ', '_'),
|
||||
value=jsonutils.dumps(value))
|
||||
|
||||
def set_local_ip(self, local_ip):
|
||||
self._set_other_config("local_ip", local_ip)
|
||||
|
||||
def set_provider_mappings(self, provider_mappings):
|
||||
self._set_other_config("provider_mappings", provider_mappings)
|
||||
|
||||
# --- implementation details ----------------------------------------------
|
||||
|
||||
def _set_external_ids(self, name, value):
|
||||
# Refer below for ovs ext-id strings
|
||||
# https://review.openstack.org/#/c/309630/
|
||||
value = 'external_ids:{}={}'.format(name, value)
|
||||
self._set(record=self.uuid(), value=value)
|
||||
|
||||
def _set_other_config(self, name, value):
|
||||
value = 'other_config:{}={}'.format(name, value)
|
||||
self._set(record=self.uuid(), value=value)
|
||||
|
||||
def _get(self, record, name):
|
||||
return self._execute('get', self.TABLE, record, name)
|
||||
|
||||
def _set(self, record, value):
|
||||
self._execute('set', self.TABLE, record, value)
|
||||
|
||||
def _execute(self, *args):
|
||||
command_line = (self.COMMAND,) + args
|
||||
LOG.info(
|
||||
"SET-HOSTCONFIGS: Executing cmd: %s", ' '.join(command_line))
|
||||
return subprocess.check_output(command_line).strip()
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Main."""
|
||||
|
||||
conf = setup_conf(args)
|
||||
|
||||
if os.geteuid() != 0:
|
||||
LOG.error('Root permissions are required to configure ovsdb.')
|
||||
return 1
|
||||
|
||||
try:
|
||||
set_ovs_extid_hostconfigs(conf=conf, ovs_vsctl=OvsVsctl())
|
||||
|
||||
except Exception as ex: # pylint: disable=broad-except
|
||||
LOG.error("Fatal error: %s", ex, exc_info=conf.debug)
|
||||
return 1
|
||||
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
exit(main())
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
python set_ovs_hostconfigs.py --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}'
|
@ -1,98 +0,0 @@
|
||||
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
|
||||
from neutron_lib.callbacks import events
|
||||
from neutron_lib.callbacks import registry
|
||||
from neutron_lib.callbacks import resources
|
||||
from oslo_log import log as logging
|
||||
|
||||
from networking_odl.common import constants as odl_const
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural'))
|
||||
_RESOURCE_MAPPING = {
|
||||
resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS),
|
||||
resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE,
|
||||
odl_const.ODL_SG_RULES),
|
||||
}
|
||||
_OPERATION_MAPPING = {
|
||||
events.PRECOMMIT_CREATE: odl_const.ODL_CREATE,
|
||||
events.PRECOMMIT_UPDATE: odl_const.ODL_UPDATE,
|
||||
events.PRECOMMIT_DELETE: odl_const.ODL_DELETE,
|
||||
events.AFTER_CREATE: odl_const.ODL_CREATE,
|
||||
events.AFTER_UPDATE: odl_const.ODL_UPDATE,
|
||||
events.AFTER_DELETE: odl_const.ODL_DELETE,
|
||||
}
|
||||
|
||||
|
||||
class OdlSecurityGroupsHandler(object):
|
||||
|
||||
def __init__(self, precommit, postcommit):
|
||||
assert postcommit is not None
|
||||
self._precommit = precommit
|
||||
self._postcommit = postcommit
|
||||
self._subscribe()
|
||||
|
||||
def _subscribe(self):
|
||||
if self._precommit is not None:
|
||||
for event in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE):
|
||||
registry.subscribe(self.sg_callback_precommit,
|
||||
resources.SECURITY_GROUP, event)
|
||||
registry.subscribe(self.sg_callback_precommit,
|
||||
resources.SECURITY_GROUP_RULE, event)
|
||||
registry.subscribe(
|
||||
self.sg_callback_precommit, resources.SECURITY_GROUP,
|
||||
events.PRECOMMIT_UPDATE)
|
||||
|
||||
for event in (events.AFTER_CREATE, events.AFTER_DELETE):
|
||||
registry.subscribe(self.sg_callback_postcommit,
|
||||
resources.SECURITY_GROUP, event)
|
||||
registry.subscribe(self.sg_callback_postcommit,
|
||||
resources.SECURITY_GROUP_RULE, event)
|
||||
|
||||
registry.subscribe(self.sg_callback_postcommit,
|
||||
resources.SECURITY_GROUP, events.AFTER_UPDATE)
|
||||
|
||||
def _sg_callback(self, callback, resource, event, trigger, **kwargs):
|
||||
context = kwargs['context']
|
||||
res = kwargs.get(resource)
|
||||
res_id = kwargs.get("%s_id" % resource)
|
||||
if res_id is None:
|
||||
res_id = res.get('id')
|
||||
odl_res_type = _RESOURCE_MAPPING[resource]
|
||||
|
||||
odl_ops = _OPERATION_MAPPING[event]
|
||||
odl_res_dict = None if res is None else {odl_res_type.singular: res}
|
||||
|
||||
LOG.debug("Calling sync_from_callback with ODL_OPS (%(odl_ops)s) "
|
||||
"ODL_RES_TYPE (%(odl_res_type)s) RES_ID (%(res_id)s) "
|
||||
"ODL_RES_DICT (%(odl_res_dict)s) KWARGS (%(kwargs)s)",
|
||||
{'odl_ops': odl_ops, 'odl_res_type': odl_res_type,
|
||||
'res_id': res_id, 'odl_res_dict': odl_res_dict,
|
||||
'kwargs': kwargs})
|
||||
|
||||
copy_kwargs = kwargs.copy()
|
||||
copy_kwargs.pop('context')
|
||||
callback(context, odl_ops, odl_res_type, res_id, odl_res_dict,
|
||||
**copy_kwargs)
|
||||
|
||||
def sg_callback_precommit(self, resource, event, trigger, **kwargs):
|
||||
self._sg_callback(self._precommit, resource, event, trigger, **kwargs)
|
||||
|
||||
def sg_callback_postcommit(self, resource, event, trigger, **kwargs):
|
||||
self._sg_callback(self._postcommit, resource, event, trigger, **kwargs)
|
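# Illustrative sketch (not part of the original module): using the mapping
# tables above, an AFTER_CREATE callback for a security group is translated
# into the ('create', security_group) pair that _sg_callback() hands to the
# pre/postcommit callables.
def _example_sg_event_translation():
    odl_res_type = _RESOURCE_MAPPING[resources.SECURITY_GROUP]
    odl_ops = _OPERATION_MAPPING[events.AFTER_CREATE]
    # odl_res_type.singular == odl_const.ODL_SG ('security_group')
    # odl_ops == odl_const.ODL_CREATE ('create')
    return odl_ops, odl_res_type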
@ -1,162 +0,0 @@
|
||||
# Copyright (c) 2014 Red Hat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import threading
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
import requests
|
||||
from requests import sessions
|
||||
|
||||
from networking_odl.common import constants as odl_const
|
||||
from networking_odl.common import utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
|
||||
|
||||
|
||||
class OpenDaylightRestClient(object):
|
||||
@staticmethod
|
||||
def _check_opt(url):
|
||||
if not url:
|
||||
raise cfg.RequiredOptError('url', cfg.OptGroup('ml2_odl'))
|
||||
required_opts = ('url', 'username', 'password')
|
||||
for opt in required_opts:
|
||||
if not getattr(cfg.CONF.ml2_odl, opt):
|
||||
raise cfg.RequiredOptError(opt, cfg.OptGroup('ml2_odl'))
|
||||
|
||||
@classmethod
|
||||
def create_client(cls, url=None):
|
||||
if cfg.CONF.ml2_odl.enable_lightweight_testing:
|
||||
LOG.debug("ODL lightweight testing is enabled, "
|
||||
"returning a OpenDaylightLwtClient instance")
|
||||
|
||||
# Have to import it here, otherwise we create a dependency loop
|
||||
from networking_odl.common import lightweight_testing as lwt
|
||||
cls = lwt.OpenDaylightLwtClient
|
||||
|
||||
url = url or cfg.CONF.ml2_odl.url
|
||||
cls._check_opt(url)
|
||||
return cls(
|
||||
url,
|
||||
cfg.CONF.ml2_odl.username,
|
||||
cfg.CONF.ml2_odl.password,
|
||||
cfg.CONF.ml2_odl.timeout)
|
||||
|
||||
def __init__(self, url, username, password, timeout):
|
||||
super(OpenDaylightRestClient, self).__init__()
|
||||
self.url = url
|
||||
self.timeout = timeout
|
||||
self.session = sessions.Session()
|
||||
self.session.auth = (username, password)
|
||||
|
||||
def get_resource(self, resource_type, resource_id):
|
||||
response = self.get(utils.make_url_object(resource_type) + '/' +
|
||||
resource_id)
|
||||
if response.status_code == requests.codes.not_found:
|
||||
return None
|
||||
|
||||
return self._check_response(response).json()
|
||||
|
||||
def get(self, urlpath='', data=None):
|
||||
return self.request('get', urlpath, data)
|
||||
|
||||
def put(self, urlpath='', data=None):
|
||||
return self.request('put', urlpath, data)
|
||||
|
||||
def delete(self, urlpath='', data=None):
|
||||
return self.request('delete', urlpath, data)
|
||||
|
||||
def request(self, method, urlpath='', data=None):
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
url = '/'.join([self.url, urlpath])
|
||||
LOG.debug(
|
||||
"Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)",
|
||||
{'method': method, 'url': url, 'data': data})
|
||||
return self.session.request(
|
||||
method, url=url, headers=headers, data=data, timeout=self.timeout)
|
||||
|
||||
def sendjson(self, method, urlpath, obj):
|
||||
"""Send json to the OpenDaylight controller."""
|
||||
data = jsonutils.dumps(obj, indent=2) if obj else None
|
||||
try:
|
||||
return self._check_response(
|
||||
self.request(method, urlpath, data))
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("REST request ( %(method)s ) to "
|
||||
"url ( %(urlpath)s ) is failed. "
|
||||
"Request body : [%(body)s] service",
|
||||
{'method': method,
|
||||
'urlpath': urlpath,
|
||||
'body': obj})
|
||||
|
||||
def send_request(self, operation, service_type, object_type, data):
|
||||
"""Wrapper method for sendjson()"""
|
||||
obj_id = data['id']
|
||||
base_path = service_type + '/' + object_type + 's'
|
||||
if operation == odl_const.ODL_DELETE:
|
||||
urlpath = base_path + '/' + obj_id
|
||||
self.try_delete(urlpath)
|
||||
return
|
||||
elif operation == odl_const.ODL_CREATE:
|
||||
urlpath = base_path
|
||||
method = 'post'
|
||||
elif operation == odl_const.ODL_UPDATE:
|
||||
urlpath = base_path + '/' + obj_id
|
||||
method = 'put'
|
||||
self.sendjson(method, urlpath, {object_type: data})
|
||||
|
||||
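# Illustrative mapping (not part of the original class) of how send_request()
# above turns a journal operation into an HTTP call; the service type, object
# type and ID below are hypothetical:
#
#   send_request('create', 'lbaas', 'pool', {'id': 'abc', ...})
#       -> POST   lbaas/pools        body {"pool": {...}}
#   send_request('update', 'lbaas', 'pool', {'id': 'abc', ...})
#       -> PUT    lbaas/pools/abc    body {"pool": {...}}
#   send_request('delete', 'lbaas', 'pool', {'id': 'abc', ...})
#       -> DELETE lbaas/pools/abc    (via try_delete, a 404 is ignored)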
def try_delete(self, urlpath):
|
||||
response = self.delete(urlpath)
|
||||
if response.status_code == requests.codes.not_found:
|
||||
# The resource has already been removed; ignore the 404 gracefully
|
||||
LOG.debug("%(urlpath)s doesn't exist", {'urlpath': urlpath})
|
||||
return False
|
||||
|
||||
self._check_response(response)
|
||||
return True
|
||||
|
||||
def _check_response(self, response):
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.HTTPError as error:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.debug("Exception from ODL: %(e)s %(text)s",
|
||||
{'e': error, 'text': response.text}, exc_info=1)
|
||||
else:
|
||||
LOG.debug("Got response:\n"
|
||||
"(%(response)s)", {'response': response.text})
|
||||
return response
|
||||
|
||||
|
||||
class OpenDaylightRestClientGlobal(object):
|
||||
"""ODL Rest client as global variable
|
||||
|
||||
The creation of OpenDaylightRestClient has to be delayed until the
configuration options have been loaded.
|
||||
"""
|
||||
def __init__(self):
|
||||
super(OpenDaylightRestClientGlobal, self).__init__()
|
||||
self._lock = threading.Lock()
|
||||
self._client = None
|
||||
|
||||
def get_client(self):
|
||||
with self._lock:
|
||||
if self._client is None:
|
||||
self._client = OpenDaylightRestClient.create_client()
|
||||
return self._client
|
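# Illustrative usage sketch (not part of the original module); it assumes the
# [ml2_odl] url/username/password options have already been loaded, otherwise
# create_client() raises cfg.RequiredOptError on first use.
def _example_global_client_usage():
    client_holder = OpenDaylightRestClientGlobal()
    client = client_holder.get_client()           # created on first call
    assert client is client_holder.get_client()   # same instance afterwards
    return client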
@ -1,73 +0,0 @@
|
||||
# Copyright (c) 2014 Red Hat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from networking_odl._i18n import _
|
||||
|
||||
|
||||
odl_opts = [
|
||||
cfg.StrOpt('url',
|
||||
help=_("HTTP URL of OpenDaylight REST interface.")),
|
||||
cfg.StrOpt('username',
|
||||
help=_("HTTP username for authentication.")),
|
||||
cfg.StrOpt('password', secret=True,
|
||||
help=_("HTTP password for authentication.")),
|
||||
cfg.IntOpt('timeout', default=10,
|
||||
help=_("HTTP timeout in seconds.")),
|
||||
cfg.IntOpt('session_timeout', default=30,
|
||||
help=_("Tomcat session timeout in minutes.")),
|
||||
cfg.IntOpt('sync_timeout', default=10,
|
||||
help=_("(V2 driver) Sync thread timeout in seconds.")),
|
||||
cfg.IntOpt('retry_count', default=5,
|
||||
help=_("(V2 driver) Number of times to retry a row "
|
||||
"before failing.")),
|
||||
cfg.IntOpt('maintenance_interval', default=300,
|
||||
help=_("(V2 driver) Journal maintenance operations interval "
|
||||
"in seconds.")),
|
||||
cfg.IntOpt('completed_rows_retention', default=600,
|
||||
help=_("(V2 driver) Time to keep completed rows in seconds."
|
||||
"Completed rows retention will be checked every "
|
||||
"maintenance_interval by the cleanup thread."
|
||||
"To disable completed rows deletion "
|
||||
"value should be -1")),
|
||||
cfg.BoolOpt('enable_lightweight_testing',
|
||||
default=False,
|
||||
help=_('Test without real ODL.')),
|
||||
cfg.StrOpt('port_binding_controller',
|
||||
default='pseudo-agentdb-binding',
|
||||
help=_('Name of the controller to be used for port binding.')),
|
||||
cfg.IntOpt('processing_timeout', default=100,
|
||||
help=_("(V2 driver) Time in seconds to wait before a "
|
||||
"processing row is marked back to pending.")),
|
||||
cfg.StrOpt('odl_hostconf_uri',
|
||||
help=_("Path for ODL host configuration REST interface"),
|
||||
default="/restconf/operational/neutron:neutron/hostconfigs"),
|
||||
cfg.IntOpt('restconf_poll_interval', default=30,
|
||||
help=_("Poll interval in seconds for getting ODL hostconfig")),
|
||||
cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False,
|
||||
help=_('Enable websocket for pseudo-agent-port-binding.')),
|
||||
cfg.IntOpt('odl_features_retry_interval', default=5,
|
||||
help=_("Wait this many seconds before retrying the odl features"
|
||||
" fetch")),
|
||||
cfg.ListOpt('odl_features', item_type=str,
|
||||
help='A list of features supported by ODL')
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(odl_opts, "ml2_odl")
|
||||
|
||||
|
||||
def list_opts():
|
||||
return [('ml2_odl', odl_opts)]
|
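# Illustrative sketch (not part of the original module): once this module is
# imported, the options above are available under cfg.CONF.ml2_odl. url,
# username and password have no default and must be supplied by the deployer;
# the other values fall back to the defaults declared above.
def _example_read_ml2_odl_options():
    conf = cfg.CONF.ml2_odl
    return {
        'url': conf.url,                    # None unless configured
        'timeout': conf.timeout,            # 10
        'port_binding_controller': conf.port_binding_controller,
    }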
@ -1,85 +0,0 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
ODL_NETWORK = 'network'
|
||||
ODL_NETWORKS = 'networks'
|
||||
ODL_SUBNET = 'subnet'
|
||||
ODL_SUBNETS = 'subnets'
|
||||
ODL_PORT = 'port'
|
||||
ODL_PORTS = 'ports'
|
||||
ODL_SG = 'security_group'
|
||||
ODL_SGS = 'security_groups'
|
||||
ODL_SG_RULE = 'security_group_rule'
|
||||
ODL_SG_RULES = 'security_group_rules'
|
||||
ODL_ROUTER = 'router'
|
||||
ODL_ROUTERS = 'routers'
|
||||
ODL_FLOATINGIP = 'floatingip'
|
||||
ODL_FLOATINGIPS = 'floatingips'
|
||||
|
||||
ODL_LOADBALANCER = 'loadbalancer'
|
||||
ODL_LOADBALANCERS = 'loadbalancers'
|
||||
ODL_LISTENER = 'listener'
|
||||
ODL_LISTENERS = 'listeners'
|
||||
ODL_POOL = 'pool'
|
||||
ODL_POOLS = 'pools'
|
||||
ODL_MEMBER = 'member'
|
||||
ODL_MEMBERS = 'members'
|
||||
ODL_HEALTHMONITOR = 'healthmonitor'
|
||||
ODL_HEALTHMONITORS = 'healthmonitors'
|
||||
|
||||
ODL_QOS = 'qos'
|
||||
ODL_QOS_POLICY = 'policy'
|
||||
ODL_QOS_POLICIES = 'policies'
|
||||
|
||||
ODL_SFC = 'sfc'
|
||||
ODL_SFC_FLOW_CLASSIFIER = 'flowclassifier'
|
||||
ODL_SFC_FLOW_CLASSIFIERS = 'flowclassifiers'
|
||||
ODL_SFC_PORT_PAIR = 'portpair'
|
||||
ODL_SFC_PORT_PAIRS = 'portpairs'
|
||||
ODL_SFC_PORT_PAIR_GROUP = 'portpairgroup'
|
||||
ODL_SFC_PORT_PAIR_GROUPS = 'portpairgroups'
|
||||
ODL_SFC_PORT_CHAIN = 'portchain'
|
||||
ODL_SFC_PORT_CHAINS = 'portchains'
|
||||
|
||||
ODL_TRUNK = 'trunk'
|
||||
ODL_TRUNKS = 'trunks'
|
||||
|
||||
ODL_L2GATEWAY = 'l2_gateway'
|
||||
ODL_L2GATEWAYS = 'l2_gateways'
|
||||
ODL_L2GATEWAY_CONNECTION = 'l2gateway_connection'
|
||||
ODL_L2GATEWAY_CONNECTIONS = 'l2_gateway_connections'
|
||||
|
||||
ODL_BGPVPN = 'bgpvpn'
|
||||
ODL_BGPVPNS = 'bgpvpns'
|
||||
ODL_BGPVPN_NETWORK_ASSOCIATION = 'bgpvpn_network_association'
|
||||
ODL_BGPVPN_NETWORK_ASSOCIATIONS = 'bgpvpn_network_associations'
|
||||
ODL_BGPVPN_ROUTER_ASSOCIATION = 'bgpvpn_router_association'
ODL_BGPVPN_ROUTER_ASSOCIATIONS = 'bgpvpn_router_associations'
|
||||
|
||||
ODL_ML2_MECH_DRIVER_V1 = "opendaylight"
|
||||
ODL_ML2_MECH_DRIVER_V2 = "opendaylight_v2"
|
||||
|
||||
ODL_CREATE = 'create'
|
||||
ODL_UPDATE = 'update'
|
||||
ODL_DELETE = 'delete'
|
||||
|
||||
# Constants for journal operation states
|
||||
PENDING = 'pending'
|
||||
PROCESSING = 'processing'
|
||||
FAILED = 'failed'
|
||||
COMPLETED = 'completed'
|
||||
|
||||
# dict to store url mappings
|
||||
RESOURCE_URL_MAPPINGS = {ODL_QOS_POLICY: "%s/%s" % (ODL_QOS, ODL_QOS_POLICIES)}
|
@ -1,178 +0,0 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron_lib import constants as n_const
|
||||
|
||||
from networking_odl.common import constants as odl_const
|
||||
from networking_odl.common import utils as odl_utils
|
||||
|
||||
|
||||
# NOTE(yamahata): With neutron's keystone v3 support, tenant_id is being
# renamed to project_id. In order to keep compatibility, populate both
# 'project_id' and 'tenant_id'.
# For details refer to
# https://specs.openstack.org/openstack/neutron-specs/specs/newton/moving-to-keystone-v3.html
|
||||
def _populate_project_id_and_tenant_id(resource_dict):
|
||||
# NOTE(yamahata): l3 plugin passes data as dependency_list as python list
|
||||
# delete_router, delete_floatingip
|
||||
if not isinstance(resource_dict, dict):
|
||||
return
|
||||
|
||||
project_id = resource_dict.get('project_id',
|
||||
resource_dict.get('tenant_id'))
|
||||
if project_id is not None:
|
||||
# NOTE(yamahata): project_id can be "" (empty string)
|
||||
resource_dict.setdefault('project_id', project_id)
|
||||
resource_dict.setdefault('tenant_id', project_id)
|
||||
|
||||
|
||||
def _filter_unmapped_null(resource_dict, unmapped_keys):
|
||||
# NOTE(yamahata): bug work around
|
||||
# https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
|
||||
# Null-value for an unmapped element causes next mapped
|
||||
# collection to contain a null value
|
||||
# JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
|
||||
#
|
||||
# Java Object:
|
||||
# class Root {
|
||||
# Collection<String> mappedCollection = new ArrayList<String>;
|
||||
# }
|
||||
#
|
||||
# Result:
|
||||
# mappedCollection contains one element: null
|
||||
#
|
||||
# TODO(yamahata): update along side with neutron and ODL
|
||||
# add when neutron adds more extensions
|
||||
# delete when ODL neutron northbound supports it
|
||||
# TODO(yamahata): do same thing for other resources
|
||||
keys_to_del = [key for key in unmapped_keys
|
||||
if resource_dict.get(key) is None]
|
||||
if keys_to_del:
|
||||
odl_utils.try_del(resource_dict, keys_to_del)
|
||||
|
||||
|
||||
_NETWORK_UNMAPPED_KEYS = ['qos_policy_id']
|
||||
_SUBNET_UNMAPPED_KEYS = ['segment_id', 'subnetpool_id']
|
||||
_PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name',
|
||||
'port_security_enabled', 'qos_policy_id']
|
||||
|
||||
|
||||
def _filter_network_create(network):
|
||||
odl_utils.try_del(network, ['status', 'subnets'])
|
||||
_filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
|
||||
|
||||
|
||||
def _filter_network_update(network):
|
||||
odl_utils.try_del(network, ['id', 'status', 'subnets',
|
||||
'tenant_id', 'project_id'])
|
||||
_filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
|
||||
|
||||
|
||||
def _filter_subnet_create(subnet):
|
||||
_filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS)
|
||||
|
||||
|
||||
def _filter_subnet_update(subnet):
|
||||
odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
|
||||
'allocation_pools', 'tenant_id', 'project_id'])
|
||||
_filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS)
|
||||
|
||||
|
||||
def _filter_port_create(port):
|
||||
"""Filter out port attributes not required for a create."""
|
||||
odl_utils.try_del(port, ['status'])
|
||||
_filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
|
||||
|
||||
|
||||
def _filter_port_update(port):
|
||||
"""Filter out port attributes for an update operation."""
|
||||
odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id',
|
||||
'project_id'])
|
||||
_filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
|
||||
|
||||
|
||||
def _filter_router_update(router):
|
||||
"""Filter out attributes for an update operation."""
|
||||
odl_utils.try_del(router, ['id', 'tenant_id', 'project_id', 'status'])
|
||||
|
||||
|
||||
# neutron has multiple ICMPv6 names
|
||||
# https://bugs.launchpad.net/tempest/+bug/1671366
|
||||
# REVISIT(yamahata): once neutron upstream is fixed to store unified form,
|
||||
# this can be removed.
|
||||
_ICMPv6_NAMES = (
|
||||
n_const.PROTO_NAME_ICMP,
|
||||
n_const.PROTO_NAME_IPV6_ICMP,
|
||||
n_const.PROTO_NAME_IPV6_ICMP_LEGACY,
|
||||
)
|
||||
|
||||
|
||||
def _sgrule_scrub_icmpv6_name(sgrule):
|
||||
if (sgrule.get('ethertype') == n_const.IPv6 and
|
||||
sgrule.get('protocol') in _ICMPv6_NAMES):
|
||||
sgrule['protocol'] = n_const.PROTO_NAME_IPV6_ICMP_LEGACY
|
||||
|
||||
|
||||
# ODL Boron neutron northbound knows the following protocol names,
# so it is safe to pass them through unchanged.
|
||||
_ODL_KNOWN_PROTOCOL_NAMES = (
|
||||
n_const.PROTO_NAME_TCP,
|
||||
n_const.PROTO_NAME_UDP,
|
||||
n_const.PROTO_NAME_ICMP,
|
||||
n_const.PROTO_NAME_IPV6_ICMP_LEGACY,
|
||||
)
|
||||
|
||||
|
||||
def _sgrule_scrub_unknown_protocol_name(protocol):
|
||||
"""Convert unknown protocol name to actual interger.
|
||||
|
||||
OpenDaylight does't want to keep catching up list of protocol names.
|
||||
So networking-odl converts unknown protcol name into integer
|
||||
"""
|
||||
if protocol in _ODL_KNOWN_PROTOCOL_NAMES:
|
||||
return protocol
|
||||
if protocol in n_const.IP_PROTOCOL_MAP:
|
||||
return n_const.IP_PROTOCOL_MAP[protocol]
|
||||
return protocol
|
||||
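# Illustrative examples (not part of the original module) of the conversion
# performed above: names ODL already understands pass through unchanged, any
# other name known to neutron is replaced by its IANA protocol number.
#
#   _sgrule_scrub_unknown_protocol_name('tcp')   -> 'tcp'
#   _sgrule_scrub_unknown_protocol_name('vrrp')  -> 112
#   _sgrule_scrub_unknown_protocol_name('94')    -> '94'  (already numeric)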
|
||||
|
||||
# TODO(yamahata): used by mech_driver.
|
||||
# make this private when v1 mech_driver is removed
|
||||
def filter_security_group_rule(sg_rule):
|
||||
_sgrule_scrub_icmpv6_name(sg_rule)
|
||||
if sg_rule.get('protocol'):
|
||||
sg_rule['protocol'] = _sgrule_scrub_unknown_protocol_name(
|
||||
sg_rule['protocol'])
|
||||
|
||||
|
||||
_FILTER_MAP = {
|
||||
(odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create,
|
||||
(odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update,
|
||||
(odl_const.ODL_SUBNET, odl_const.ODL_CREATE): _filter_subnet_create,
|
||||
(odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update,
|
||||
(odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create,
|
||||
(odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update,
|
||||
(odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update,
|
||||
(odl_const.ODL_SG_RULE, odl_const.ODL_CREATE): filter_security_group_rule,
|
||||
(odl_const.ODL_SG_RULE, odl_const.ODL_UPDATE): filter_security_group_rule,
|
||||
}
|
||||
|
||||
|
||||
def filter_for_odl(object_type, operation, data):
|
||||
"""Filter out the attributed before sending the data to ODL"""
|
||||
filter_key = (object_type, operation)
|
||||
if filter_key in _FILTER_MAP:
|
||||
_FILTER_MAP[filter_key](data)
|
||||
_populate_project_id_and_tenant_id(data)
|
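# Illustrative example (not part of the original module): filtering a port
# update in place. The dict below is a trimmed-down, hypothetical port.
def _example_filter_port_update():
    port = {'id': 'abc', 'status': 'ACTIVE', 'tenant_id': 'tenant-1',
            'name': 'port-1', 'qos_policy_id': None}
    filter_for_odl(odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
    # 'id', 'status' and 'tenant_id' are dropped by _filter_port_update(),
    # the null 'qos_policy_id' is removed by _filter_unmapped_null(), and
    # with no project/tenant id left there is nothing to re-populate.
    return port  # -> {'name': 'port-1'}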
@ -1,178 +0,0 @@
|
||||
# Copyright (c) 2015 Intel Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
import requests
|
||||
import six
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from networking_odl._i18n import _
|
||||
from networking_odl.common import client
|
||||
from networking_odl.common import constants as odl_const
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
OK = requests.codes.ok
|
||||
NO_CONTENT = requests.codes.no_content
|
||||
NOT_ALLOWED = requests.codes.not_allowed
|
||||
NOT_FOUND = requests.codes.not_found
|
||||
BAD_REQUEST = requests.codes.bad_request
|
||||
|
||||
|
||||
class OpenDaylightLwtClient(client.OpenDaylightRestClient):
|
||||
"""Lightweight testing client"""
|
||||
|
||||
lwt_dict = {odl_const.ODL_NETWORKS: {},
|
||||
odl_const.ODL_SUBNETS: {},
|
||||
odl_const.ODL_PORTS: {},
|
||||
odl_const.ODL_SGS: {},
|
||||
odl_const.ODL_SG_RULES: {},
|
||||
odl_const.ODL_LOADBALANCERS: {},
|
||||
odl_const.ODL_LISTENERS: {},
|
||||
odl_const.ODL_POOLS: {},
|
||||
odl_const.ODL_MEMBERS: {},
|
||||
odl_const.ODL_HEALTHMONITORS: {}}
|
||||
|
||||
@classmethod
|
||||
def _make_response(cls, status_code=OK, content=None):
|
||||
"""Only supports 'content-type': 'application/json'"""
|
||||
response = requests.models.Response()
|
||||
response.status_code = status_code
|
||||
if content:
|
||||
response.raw = six.BytesIO(
|
||||
jsonutils.dumps(content).encode('utf-8'))
|
||||
|
||||
return response
|
||||
|
||||
@classmethod
|
||||
def _get_resource_id(cls, urlpath):
|
||||
# resource ID is the last element of urlpath
|
||||
return str(urlpath).rsplit('/', 1)[-1]
|
||||
|
||||
@classmethod
|
||||
def post(cls, resource_type, resource_dict, urlpath, resource_list):
|
||||
"""No ID in URL, elements in resource_list must have ID"""
|
||||
|
||||
if resource_list is None:
|
||||
raise ValueError(_("resource_list can not be None"))
|
||||
|
||||
for resource in resource_list:
|
||||
if resource['id'] in resource_dict:
|
||||
LOG.debug("%s %s already exists", resource_type,
|
||||
resource['id'])
|
||||
response = cls._make_response(NOT_ALLOWED)
|
||||
raise requests.exceptions.HTTPError(response=response)
|
||||
|
||||
resource_dict[resource['id']] = deepcopy(resource)
|
||||
|
||||
return cls._make_response(NO_CONTENT)
|
||||
|
||||
@classmethod
|
||||
def put(cls, resource_type, resource_dict, urlpath, resource_list):
|
||||
|
||||
resource_id = cls._get_resource_id(urlpath)
|
||||
|
||||
if resource_list is None:
|
||||
raise ValueError(_("resource_list can not be None"))
|
||||
|
||||
if resource_id and len(resource_list) != 1:
|
||||
LOG.debug("Updating %s with multiple resources", urlpath)
|
||||
response = cls._make_response(BAD_REQUEST)
|
||||
raise requests.exceptions.HTTPError(response=response)
|
||||
|
||||
for resource in resource_list:
|
||||
res_id = resource_id or resource['id']
|
||||
if res_id in resource_dict:
|
||||
resource_dict[res_id].update(deepcopy(resource))
|
||||
else:
|
||||
LOG.debug("%s %s does not exist", resource_type, res_id)
|
||||
response = cls._make_response(NOT_FOUND)
|
||||
raise requests.exceptions.HTTPError(response=response)
|
||||
|
||||
return cls._make_response(NO_CONTENT)
|
||||
|
||||
@classmethod
|
||||
def delete(cls, resource_type, resource_dict, urlpath, resource_list):
|
||||
|
||||
if resource_list is None:
|
||||
resource_id = cls._get_resource_id(urlpath)
|
||||
id_list = [resource_id]
|
||||
else:
|
||||
id_list = [res['id'] for res in resource_list]
|
||||
|
||||
for res_id in id_list:
|
||||
removed = resource_dict.pop(res_id, None)
|
||||
if removed is None:
|
||||
LOG.debug("%s %s does not exist", resource_type, res_id)
|
||||
response = cls._make_response(NOT_FOUND)
|
||||
raise requests.exceptions.HTTPError(response=response)
|
||||
|
||||
return cls._make_response(NO_CONTENT)
|
||||
|
||||
@classmethod
|
||||
def get(cls, resource_type, resource_dict, urlpath, resource_list=None):
|
||||
|
||||
resource_id = cls._get_resource_id(urlpath)
|
||||
|
||||
if resource_id:
|
||||
resource = resource_dict.get(resource_id)
|
||||
if resource is None:
|
||||
LOG.debug("%s %s does not exist", resource_type, resource_id)
|
||||
response = cls._make_response(NOT_FOUND)
|
||||
raise requests.exceptions.HTTPError(response=response)
|
||||
else:
|
||||
# When getting single resource, return value is a dict
|
||||
r_list = {resource_type[:-1]: deepcopy(resource)}
|
||||
return cls._make_response(OK, r_list)
|
||||
|
||||
r_list = [{resource_type[:-1]: deepcopy(res)}
|
||||
for res in resource_dict.values()]
|
||||
|
||||
return cls._make_response(OK, r_list)
|
||||
|
||||
def sendjson(self, method, urlpath, obj=None):
|
||||
"""Lightweight testing without ODL"""
|
||||
|
||||
if '/' not in urlpath:
|
||||
urlpath += '/'
|
||||
|
||||
resource_type = str(urlpath).split('/', 1)[0]
|
||||
resource_type = resource_type.replace('-', '_')
|
||||
|
||||
resource_dict = self.lwt_dict.get(resource_type)
|
||||
|
||||
if resource_dict is None:
|
||||
LOG.debug("Resource type %s is not supported", resource_type)
|
||||
response = self._make_response(NOT_FOUND)
|
||||
raise requests.exceptions.HTTPError(response=response)
|
||||
|
||||
func = getattr(self, str(method).lower())
|
||||
|
||||
resource_list = None
|
||||
if obj:
|
||||
# If obj is not None, it can only have one entry
|
||||
assert len(obj) == 1, "Obj can only have one entry"
|
||||
|
||||
key, resource_list = list(obj.items())[0]
|
||||
|
||||
if not isinstance(resource_list, list):
|
||||
# Need to transform resource_list to a real list, i.e. [res]
|
||||
resource_list = [resource_list]
|
||||
|
||||
return func(resource_type, resource_dict, urlpath, resource_list)
|
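# Illustrative usage sketch (not part of the original module): with
# enable_lightweight_testing set, create_client() returns this class and
# sendjson() only updates the in-memory lwt_dict instead of calling ODL.
# The network used below is hypothetical.
def _example_lwt_roundtrip(lwt_client):
    lwt_client.sendjson('post', 'networks',
                        {'network': {'id': 'net-1', 'name': 'demo'}})
    response = lwt_client.sendjson('get', 'networks/net-1')
    return response.json()  # -> {'network': {'id': 'net-1', 'name': 'demo'}}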
@ -1,112 +0,0 @@
|
||||
# Copyright (c) 2017 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import itertools
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from requests import exceptions
|
||||
|
||||
from networking_odl.common import client as odl_client
|
||||
from networking_odl.common import utils
|
||||
|
||||
|
||||
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
OPERATIONAL_PORT_STATUS = 'operational-port-status'
|
||||
|
||||
feature_set = set()
|
||||
|
||||
|
||||
def init():
|
||||
'''Initialize odl_features.

Try to load the feature set from configuration first, and fall back
to fetching it from ODL via a REST call.
'''
|
||||
|
||||
global feature_set
|
||||
feature_set = None
|
||||
|
||||
if cfg.CONF.ml2_odl.odl_features is not None:
|
||||
feature_set = set(cfg.CONF.ml2_odl.odl_features)
|
||||
return
|
||||
|
||||
wait_interval = cfg.CONF.ml2_odl.odl_features_retry_interval
|
||||
|
||||
for times_tried in itertools.count():
|
||||
feature_set = _fetch_features()
|
||||
if feature_set is not None:
|
||||
break
|
||||
LOG.warning('Failed to retrieve ODL features, attempt %i', times_tried)
|
||||
time.sleep(wait_interval)
|
||||
|
||||
|
||||
def has(feature):
|
||||
return feature in feature_set
|
||||
|
||||
|
||||
def deinit():
|
||||
'''Set odl_features back to its pre-initialized state.'''
|
||||
global feature_set
|
||||
feature_set = set()
|
||||
|
||||
|
||||
def _load_features(json):
|
||||
"""parse and save features from json"""
|
||||
features = json['features']
|
||||
if 'feature' not in features:
|
||||
return
|
||||
|
||||
LOG.info('Retrieved ODL features %s', features)
|
||||
response = set()
|
||||
for feature in features['feature']:
|
||||
response.add(feature['service-provider-feature'].split(':')[1])
|
||||
return response
|
||||
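# Illustrative example (not part of the original module) of the payload shape
# _load_features() expects and what it extracts; the feature string below is
# hypothetical but follows the '<module>:<feature>' form split on ':' above.
#
#   _load_features({'features': {'feature': [
#       {'service-provider-feature':
#        'neutron-extensions:operational-port-status'}]}})
#   -> {'operational-port-status'}
#
# A 'features' dict without a 'feature' list makes the function return None.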
|
||||
|
||||
def _fetch_features():
|
||||
'''Fetch the list of features declared by ODL.
|
||||
|
||||
This function should be called once during initialization
|
||||
'''
|
||||
|
||||
path = 'restconf/operational/neutron:neutron/neutron:features'
|
||||
features_url = utils.get_odl_url(path)
|
||||
|
||||
client = odl_client.OpenDaylightRestClient.create_client(features_url)
|
||||
try:
|
||||
response = client.request('get')
|
||||
except exceptions.ConnectionError:
|
||||
LOG.error("Error connecting to ODL to retrieve features",
|
||||
exc_info=True)
|
||||
return None
|
||||
|
||||
if response.status_code == 400:
|
||||
LOG.debug('ODL does not support feature negotiation')
|
||||
return set()
|
||||
|
||||
if response.status_code == 404:
|
||||
LOG.debug('No features configured')
|
||||
return set()
|
||||
|
||||
if response.status_code != 200:
|
||||
LOG.warning('error fetching features: %i',
|
||||
response.status_code)
|
||||
return None
|
||||
|
||||
return _load_features(response.json())
|
@ -1,71 +0,0 @@
|
||||
# Copyright (c) 2017 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import types
|
||||
|
||||
from oslo_log import helpers as log_helpers
|
||||
import six
|
||||
|
||||
|
||||
def _build_func(client_method):
|
||||
@log_helpers.log_method_call
|
||||
def f(self, *args, **kwargs):
|
||||
self.journal.set_sync_event()
|
||||
|
||||
f.__name__ = client_method
|
||||
return f
|
||||
|
||||
|
||||
def _unboundmethod(func, cls):
|
||||
if six.PY3:
|
||||
# python 3.x doesn't have unbound methods
|
||||
func.__qualname__ = cls.__qualname__ + '.' + func.__name__ # PEP 3155
|
||||
return func
|
||||
|
||||
# python 2.x
|
||||
return types.MethodType(func, None, cls)
|
||||
|
||||
|
||||
def _get_method_name(op, resource):
|
||||
return op + '_' + resource + '_postcommit'
|
||||
|
||||
|
||||
def _build_method(cls, resource):
|
||||
# add methods like the following:
|
||||
#
|
||||
# @log_helpers.log_method_call
|
||||
# def <method>_<resource>_postcommit(self, *args, **kwargs):
|
||||
# self.journal.set_sync_event()
|
||||
|
||||
operations = ['create', 'update', 'delete']
|
||||
for op in operations:
|
||||
client_method = _get_method_name(op, resource)
|
||||
if hasattr(cls, client_method) and client_method not in cls.__dict__:
|
||||
f = _build_func(client_method)
|
||||
unbound = _unboundmethod(f, cls)
|
||||
setattr(cls, client_method, unbound)
|
||||
|
||||
|
||||
def _build_methods(cls, *resources):
|
||||
for resource in resources:
|
||||
_build_method(cls, resource)
|
||||
|
||||
|
||||
def add_postcommit(*args):
|
||||
def postcommit(cls):
|
||||
_build_methods(cls, *args)
|
||||
return cls
|
||||
|
||||
return postcommit
|
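# Illustrative usage sketch (not part of the original module): the decorator
# fills in any <op>_<resource>_postcommit hook that is declared on a base
# class but not overridden in the decorated class, so that each generated
# method simply calls self.journal.set_sync_event(). The driver below is
# hypothetical.
#
#   @add_postcommit('network', 'subnet', 'port')
#   class ExampleV2Driver(SomeBaseDriverWithPostcommitHooks):
#       ...
#
# ExampleV2Driver then gains create_network_postcommit(),
# update_subnet_postcommit(), delete_port_postcommit(), and so on.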
@ -1,51 +0,0 @@
|
||||
# Copyright (c) 2014 Red Hat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from networking_odl.common import constants as odl_const
|
||||
|
||||
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
|
||||
|
||||
|
||||
def try_del(d, keys):
|
||||
"""Ignore key errors when deleting from a dictionary."""
|
||||
for key in keys:
|
||||
try:
|
||||
del d[key]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
def make_url_object(object_type):
|
||||
obj_pl = odl_const.RESOURCE_URL_MAPPINGS.get(object_type, None)
|
||||
if obj_pl is None:
|
||||
obj_pl = neutronify(object_type + 's')
|
||||
return obj_pl
|
||||
|
||||
|
||||
# TODO(manjeets) consolidate this method with make_url_object
|
||||
def neutronify(name):
|
||||
"""Adjust the resource name for use with Neutron's API"""
|
||||
return name.replace('_', '-')
|
||||
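# Illustrative examples (not part of the original module) of the URL helpers
# above:
#
#   neutronify('security_group_rule')          -> 'security-group-rule'
#   make_url_object('security_group_rule')     -> 'security-group-rules'
#   make_url_object(odl_const.ODL_QOS_POLICY)  -> 'qos/policies'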
|
||||
|
||||
def get_odl_url(path=''):
|
||||
'''Make a URL for some ODL resource (path)'''
|
||||
purl = urlparse.urlsplit(cfg.CONF.ml2_odl.url)
|
||||
features_url = urlparse.urlunparse((
|
||||
purl.scheme, purl.netloc, path, '', '', ''))
|
||||
return features_url
|
@ -1,331 +0,0 @@
|
||||
# Copyright (c) 2017 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
import threading
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
from requests import codes
|
||||
from requests import exceptions
|
||||
import websocket
|
||||
|
||||
from networking_odl._i18n import _
|
||||
from networking_odl.common import client as odl_client
|
||||
|
||||
|
||||
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
ODL_OPERATIONAL_DATASTORE = "OPERATIONAL"
|
||||
ODL_CONFIGURATION_DATASTORE = "CONFIGURATION"
|
||||
ODL_NOTIFICATION_SCOPE_BASE = "BASE"
|
||||
ODL_NOTIFICATION_SCOPE_ONE = "ONE"
|
||||
ODL_NOTIFICATION_SCOPE_SUBTREE = "SUBTREE"
|
||||
|
||||
ODL_WEBSOCKET_DISCONNECTED = "ODL_WEBSOCKET_DISCONNECTED"
|
||||
ODL_WEBSOCKET_CONNECTING = "ODL_WEBSOCKET_CONNECTING"
|
||||
ODL_WEBSOCKET_CONNECTED = "ODL_WEBSOCKET_CONNECTED"
|
||||
|
||||
|
||||
class OpenDaylightWebsocketClient(object):
|
||||
"""Thread for the OpenDaylight Websocket """
|
||||
|
||||
def __init__(self, odl_rest_client, path, datastore, scope, leaf_node_only,
|
||||
packet_handler, timeout, status_cb=None):
|
||||
self.odl_rest_client = odl_rest_client
|
||||
self.path = path
|
||||
self.datastore = datastore
|
||||
self.scope = scope
|
||||
self.leaf_node_only = leaf_node_only
|
||||
self.packet_handler = packet_handler
|
||||
self.timeout = timeout
|
||||
self.exit_websocket_thread = False
|
||||
self.status_cb = status_cb
|
||||
self.current_status = ODL_WEBSOCKET_DISCONNECTED
|
||||
self._odl_sync_thread = self.start_odl_websocket_thread()
|
||||
|
||||
@classmethod
|
||||
def odl_create_websocket(cls, odl_url, path, datastore, scope,
|
||||
packet_handler, status_cb=None,
|
||||
leaf_node_only=False):
|
||||
"""Create a websocket connection with ODL.
|
||||
|
||||
This method will create a websocket client based on path,
|
||||
datastore and scope params. On data recv from websocket
|
||||
packet_handler callback is called. status_cb callback can be
|
||||
provided if notifications are required for socket status
|
||||
changes
|
||||
"""
|
||||
|
||||
if odl_url is None:
|
||||
LOG.error("invalid odl url", exc_info=True)
|
||||
raise ValueError(_("Invalid ODL URL"))
|
||||
|
||||
odl_rest_client = odl_client.OpenDaylightRestClient.create_client(
|
||||
odl_url)
|
||||
return cls(
|
||||
odl_rest_client, path, datastore, scope, leaf_node_only,
|
||||
packet_handler, cfg.CONF.ml2_odl.timeout, status_cb
|
||||
)
|
||||
|
||||
def start_odl_websocket_thread(self):
|
||||
# Start the websocket thread
|
||||
LOG.debug("starting a new websocket thread")
|
||||
odl_websocket_thread = threading.Thread(
|
||||
name='websocket',
|
||||
target=self.run_websocket_thread)
|
||||
odl_websocket_thread.start()
|
||||
return odl_websocket_thread
|
||||
|
||||
def set_exit_flag(self, value=True):
|
||||
# set flag to exit
|
||||
self.exit_websocket_thread = value
|
||||
|
||||
def run_websocket_thread(self, exit_after_run=False):
|
||||
# TBD connections are persistent so there is really no way to know
|
||||
# when it is a "first connection". We need to wait for the
|
||||
# dis/reconnect logic to be able to know this
|
||||
first_connection = True
|
||||
ws = None
|
||||
while not self.exit_websocket_thread:
|
||||
if exit_after_run:
|
||||
# Permanently waiting thread model breaks unit tests
|
||||
# Adding this arg to exit after one run for unit tests
|
||||
self.set_exit_flag()
|
||||
# connect if necessary
|
||||
if ws is None:
|
||||
try:
|
||||
ws = self._connect_ws()
|
||||
except ValueError:
|
||||
LOG.error("websocket irrecoverable error ")
|
||||
return
|
||||
if ws is None:
|
||||
time.sleep(cfg.CONF.ml2_odl.restconf_poll_interval)
|
||||
continue
|
||||
# read off the websocket
|
||||
try:
|
||||
data = ws.recv()
|
||||
if not data:
|
||||
LOG.warning("websocket received 0 bytes")
|
||||
continue
|
||||
except websocket.WebSocketTimeoutException:
|
||||
continue
|
||||
except websocket.WebSocketConnectionClosedException:
|
||||
# per websocket-client, "If remote host closed the connection
|
||||
# or some network error happened"
|
||||
LOG.warning("websocket connection closed or IO error",
|
||||
exc_info=True)
|
||||
self._close_ws(ws)
|
||||
ws = None
|
||||
continue
|
||||
except Exception:
|
||||
# Connection closed trigger reconnection
|
||||
LOG.error("websocket unexpected exception, "
|
||||
"closing and restarting...", exc_info=True)
|
||||
# TODO(rsood): Websocket reconnect can cause race conditions
|
||||
self._close_ws(ws)
|
||||
ws = None
|
||||
continue
|
||||
|
||||
# Call handler for data received
|
||||
try:
|
||||
self.packet_handler(data, first_connection)
|
||||
first_connection = False
|
||||
except Exception:
|
||||
LOG.error("Error in packet_handler callback",
|
||||
exc_info=True)
|
||||
|
||||
self._close_ws(ws)
|
||||
|
||||
def _set_websocket_status(self, status):
|
||||
try:
|
||||
if self.status_cb:
|
||||
self.status_cb(status)
|
||||
except Exception:
|
||||
LOG.error("Error in status_cb", exc_info=True)
|
||||
|
||||
def _subscribe_websocket(self):
|
||||
"""ODL Websocket change notification subscription"""
|
||||
# Check ODL URL for details on this process
|
||||
# https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf:Change_event_notification_subscription#rpc_create-data-change-event-subscription # noqa: E501 # pylint: disable=line-too-long
|
||||
|
||||
# Invoke rpc create-data-change-event-subscription
|
||||
ws_create_dce_subs_url = ("restconf/operations/sal-remote:"
|
||||
"create-data-change-event-subscription")
|
||||
odl_subscription_data = {'input': {
|
||||
'path': self.path,
|
||||
'sal-remote-augment:datastore': self.datastore,
|
||||
'sal-remote-augment:scope': self.scope,
|
||||
'sal-remote-augment:notification-output-type': 'JSON'
|
||||
}}
|
||||
try:
|
||||
response = self.odl_rest_client.sendjson('post',
|
||||
ws_create_dce_subs_url,
|
||||
odl_subscription_data)
|
||||
response.raise_for_status()
|
||||
except exceptions.ConnectionError:
|
||||
LOG.error("cannot connect to the opendaylight controller")
|
||||
return None
|
||||
except exceptions.HTTPError as e:
|
||||
# restconf returns 400 on operation when path is not available
|
||||
if e.response.status_code == codes.bad_request:
|
||||
LOG.debug("response code bad_request (400)"
|
||||
"check path for websocket connection")
|
||||
raise ValueError(_("bad_request (http400),check path."))
|
||||
else:
|
||||
LOG.warning("websocket connection failed",
|
||||
exc_info=True)
|
||||
return None
|
||||
except Exception:
|
||||
LOG.error("websocket subscription failed", exc_info=True)
|
||||
return None
|
||||
|
||||
# Subscribing to stream. Returns websocket URL to listen to
|
||||
ws_dce_subs_url = "restconf/streams/stream/"
|
||||
try:
|
||||
stream_name = response.json()
|
||||
stream_name = stream_name['output']['stream-name']
|
||||
url = ws_dce_subs_url + stream_name
|
||||
if self.leaf_node_only:
|
||||
url += "?odl-leaf-nodes-only=true"
|
||||
response = self.odl_rest_client.get(url)
|
||||
response.raise_for_status()
|
||||
stream_url = response.headers['location']
|
||||
LOG.debug("websocket stream URL: %s", stream_url)
|
||||
return stream_url
|
||||
except exceptions.ConnectionError:
|
||||
LOG.error("cannot connect to the opendaylight controller")
|
||||
return None
|
||||
except exceptions.HTTPError as e:
|
||||
# restconf returns 404 on operation when there is no entry
|
||||
if e.response.status_code == codes.not_found:
|
||||
LOG.debug("response code not_found (404)"
|
||||
"unable to websocket connection url")
|
||||
raise ValueError(_("bad_request (http400),check path"))
|
||||
else:
|
||||
LOG.warning("websocket connection failed")
|
||||
return None
|
||||
except ValueError:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("websocket subscribe got invalid stream name")
|
||||
except KeyError:
|
||||
LOG.error("websocket subscribe got bad stream data")
|
||||
raise ValueError(_("websocket subscribe bad stream data"))
|
||||
except Exception:
|
||||
LOG.error("websocket subscription failed", exc_info=True)
|
||||
return None
|
||||
|
||||
def _socket_create_connection(self, stream_url):
|
||||
ws = None
|
||||
try:
|
||||
ws = websocket.create_connection(stream_url,
|
||||
timeout=self.timeout)
|
||||
except ValueError:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("websocket create connection invalid URL")
|
||||
except websocket.WebSocketBadStatusException:
|
||||
LOG.error("webSocket bad status exception", exc_info=True)
|
||||
return None
|
||||
except Exception:
|
||||
LOG.exception("websocket create connection failed",
|
||||
exc_info=True)
|
||||
return None
|
||||
if ws is None or not ws.connected:
|
||||
LOG.error("websocket create connection unsuccessful")
|
||||
return None
|
||||
|
||||
LOG.debug("websocket connection established")
|
||||
return ws
|
||||
|
||||
def _connect_ws(self):
|
||||
self._set_websocket_status(ODL_WEBSOCKET_CONNECTING)
|
||||
stream_url = self._subscribe_websocket()
|
||||
if stream_url is None:
|
||||
return None
|
||||
# A delay here causes websocket notifications to be lost (ODL Bug 8299)
|
||||
ws = self._socket_create_connection(stream_url)
|
||||
if ws is not None:
|
||||
self._set_websocket_status(ODL_WEBSOCKET_CONNECTED)
|
||||
return ws
|
||||
|
||||
def _close_ws(self, ws):
|
||||
LOG.debug("closing websocket")
|
||||
try:
|
||||
if ws is not None:
|
||||
ws.close()
|
||||
except Exception:
|
||||
LOG.error("Error while closing websocket", exc_info=True)
|
||||
self._set_websocket_status(ODL_WEBSOCKET_DISCONNECTED)
|
||||
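# Illustrative usage sketch (not part of the original module): a hypothetical
# consumer subscribing to operational hostconfig changes. The odl_url, the
# restconf path and the handler below are examples only; the real callers
# live in the pseudo-agentdb port-binding code.
def _example_create_websocket(odl_url):
    def _on_data(payload, is_first_connection):
        for event in EventDataParser.get_item(payload):
            operation, path, data = event.get_fields()
            # react to created/updated/deleted notifications here

    return OpenDaylightWebsocketClient.odl_create_websocket(
        odl_url,
        path="/neutron:neutron/neutron:hostconfigs",
        datastore=ODL_OPERATIONAL_DATASTORE,
        scope=ODL_NOTIFICATION_SCOPE_SUBTREE,
        packet_handler=_on_data)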
|
||||
|
||||
class EventDataParser(object):
|
||||
"""Helper class to parse websocket notification data"""
|
||||
|
||||
NOTIFICATION_TAG = 'notification'
|
||||
DC_NOTIFICATION_TAG = 'data-changed-notification'
|
||||
DC_EVENT_TAG = 'data-change-event'
|
||||
OPERATION_DELETE = 'deleted'
|
||||
OPERATION_CREATE = 'created'
|
||||
OPERATION_UPDATE = 'updated'
|
||||
|
||||
def __init__(self, item):
|
||||
self.item = item
|
||||
|
||||
@classmethod
|
||||
def get_item(cls, payload):
|
||||
try:
|
||||
data = jsonutils.loads(payload)
|
||||
except ValueError:
|
||||
LOG.warning("invalid websocket notification")
|
||||
return
|
||||
try:
|
||||
dn_events = (data[cls.NOTIFICATION_TAG]
|
||||
[cls.DC_NOTIFICATION_TAG]
|
||||
[cls.DC_EVENT_TAG])
|
||||
|
||||
if not isinstance(dn_events, list):
|
||||
dn_events = [dn_events]
|
||||
|
||||
for e in dn_events:
|
||||
yield cls(e)
|
||||
except KeyError:
|
||||
LOG.warning("invalid JSON for websocket notification")
|
||||
|
||||
def get_fields(self):
|
||||
return (self.get_operation(),
|
||||
self.get_path(),
|
||||
self.get_data())
|
||||
|
||||
def get_path(self):
|
||||
return self.item.get('path')
|
||||
|
||||
def get_data(self):
|
||||
return self.item.get('data')
|
||||
|
||||
def get_operation(self):
|
||||
return self.item.get('operation')
|
||||
|
||||
@staticmethod
|
||||
def extract_field(text, key):
|
||||
pattern = r'\[' + key + r'=(.*?)\]'
|
||||
match = re.search(pattern, text)
|
||||
if match:
|
||||
return match.group(1)
|
||||
|
||||
return None
|
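# Illustrative example (not part of the original module): parsing a trimmed,
# hypothetical data-change notification as delivered on the ODL stream.
def _example_parse_notification():
    payload = jsonutils.dumps({
        'notification': {
            'data-changed-notification': {
                'data-change-event': {
                    'operation': 'created',
                    'path': '/neutron:neutron/hostconfigs/hostconfig'
                            '[host-id=overcloud-novacompute-0]',
                    'data': {'hostconfig': {}},
                }}}})
    for event in EventDataParser.get_item(payload):
        operation, path, data = event.get_fields()
        host_id = EventDataParser.extract_field(path, 'host-id')
        # operation == 'created', host_id == 'overcloud-novacompute-0'
        return operation, host_id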
@ -1,227 +0,0 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import datetime
|
||||
|
||||
from sqlalchemy import asc
|
||||
from sqlalchemy import func
|
||||
from sqlalchemy import or_
|
||||
from sqlalchemy.orm import aliased
|
||||
|
||||
from networking_odl.common import constants as odl_const
|
||||
from networking_odl.db import models
|
||||
|
||||
from neutron.db import api as db_api
|
||||
|
||||
from oslo_db import api as oslo_db_api
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_pending_or_processing_ops(session, object_uuid, operation=None):
|
||||
q = session.query(models.OpenDaylightJournal).filter(
|
||||
or_(models.OpenDaylightJournal.state == odl_const.PENDING,
|
||||
models.OpenDaylightJournal.state == odl_const.PROCESSING),
|
||||
models.OpenDaylightJournal.object_uuid == object_uuid)
|
||||
|
||||
if operation:
|
||||
if isinstance(operation, (list, tuple)):
|
||||
q = q.filter(models.OpenDaylightJournal.operation.in_(operation))
|
||||
else:
|
||||
q = q.filter(models.OpenDaylightJournal.operation == operation)
|
||||
|
||||
return q.all()
|
||||
|
||||
|
||||
def get_pending_delete_ops_with_parent(session, object_type, parent_id):
|
||||
rows = session.query(models.OpenDaylightJournal).filter(
|
||||
or_(models.OpenDaylightJournal.state == odl_const.PENDING,
|
||||
models.OpenDaylightJournal.state == odl_const.PROCESSING),
|
||||
models.OpenDaylightJournal.object_type == object_type,
|
||||
models.OpenDaylightJournal.operation == odl_const.ODL_DELETE
|
||||
).all()
|
||||
|
||||
return (row for row in rows if parent_id in row.data)
|
||||
|
||||
|
||||
def get_all_db_rows(session):
|
||||
return session.query(models.OpenDaylightJournal).all()
|
||||
|
||||
|
||||
def get_all_db_rows_by_state(session, state):
|
||||
return session.query(models.OpenDaylightJournal).filter_by(
|
||||
state=state).all()
|
||||
|
||||
|
||||
# Retry deadlock exception for Galera DB.
|
||||
# If two (or more) different threads call this method at the same time, they
|
||||
# might both succeed in changing the same row to pending, but at least one
|
||||
# of them will get a deadlock from Galera and will have to retry the operation.
|
||||
@db_api.retry_db_errors
|
||||
def get_oldest_pending_db_row_with_lock(session):
|
||||
with session.begin():
|
||||
journal_dep = aliased(models.OpenDaylightJournal)
|
||||
dep_query = session.query(journal_dep).filter(
|
||||
models.OpenDaylightJournal.seqnum == journal_dep.seqnum
|
||||
).outerjoin(
|
||||
journal_dep.depending_on, aliased=True).filter(
|
||||
or_(models.OpenDaylightJournal.state == odl_const.PENDING,
|
||||
models.OpenDaylightJournal.state == odl_const.PROCESSING))
|
||||
row = session.query(models.OpenDaylightJournal).filter(
|
||||
models.OpenDaylightJournal.state == odl_const.PENDING,
|
||||
~ dep_query.exists()
|
||||
).order_by(
|
||||
asc(models.OpenDaylightJournal.last_retried)).first()
|
||||
if row:
|
||||
update_db_row_state(session, row, odl_const.PROCESSING)
|
||||
|
||||
return row
|
||||
|
||||
|
||||
def delete_dependency(session, entry):
|
||||
"""Delete dependency upon the given ID"""
|
||||
conn = session.connection()
|
||||
stmt = models.journal_dependencies.delete(
|
||||
models.journal_dependencies.c.depends_on == entry.seqnum)
|
||||
conn.execute(stmt)
|
||||
session.expire_all()
|
||||
|
||||
|
||||
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
|
||||
def update_db_row_state(session, row, state):
|
||||
row.state = state
|
||||
session.merge(row)
|
||||
session.flush()
|
||||
|
||||
|
||||
def update_pending_db_row_retry(session, row, retry_count):
|
||||
if row.retry_count >= retry_count:
|
||||
update_db_row_state(session, row, odl_const.FAILED)
|
||||
else:
|
||||
row.retry_count += 1
|
||||
update_db_row_state(session, row, odl_const.PENDING)
|
||||
|
||||
|
||||
# This function is currently not used.
|
||||
# Deleted resources are marked as 'deleted' in the database.
|
||||
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
|
||||
def delete_row(session, row=None, row_id=None):
|
||||
if row_id:
|
||||
row = session.query(models.OpenDaylightJournal).filter_by(
|
||||
id=row_id).one()
|
||||
if row:
|
||||
session.delete(row)
|
||||
session.flush()
|
||||
|
||||
|
||||
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
|
||||
def create_pending_row(session, object_type, object_uuid,
|
||||
operation, data, depending_on=None):
|
||||
if depending_on is None:
|
||||
depending_on = []
|
||||
row = models.OpenDaylightJournal(object_type=object_type,
|
||||
object_uuid=object_uuid,
|
||||
operation=operation, data=data,
|
||||
created_at=func.now(),
|
||||
state=odl_const.PENDING,
|
||||
depending_on=depending_on)
|
||||
session.add(row)
|
||||
# Keep session flush for unit tests. NOOP for L2/L3 events since calls are
|
||||
# made inside database session transaction with subtransactions=True.
|
||||
session.flush()
|
||||
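# Illustrative sketch (not part of the original module) of the journal row
# life cycle implemented by the helpers above: a row is queued as 'pending',
# picked up (and atomically flipped to 'processing') by the sync thread, then
# marked 'completed' or retried. The session and IDs below are hypothetical.
def _example_journal_row_lifecycle(session):
    create_pending_row(session, odl_const.ODL_NETWORK, 'net-uuid',
                       odl_const.ODL_CREATE, {'id': 'net-uuid'})
    row = get_oldest_pending_db_row_with_lock(session)  # state -> PROCESSING
    if row is not None:
        update_db_row_state(session, row, odl_const.COMPLETED)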
|
||||
|
||||
@db_api.retry_db_errors
|
||||
def delete_pending_rows(session, operations_to_delete):
|
||||
with session.begin():
|
||||
session.query(models.OpenDaylightJournal).filter(
|
||||
models.OpenDaylightJournal.operation.in_(operations_to_delete),
|
||||
models.OpenDaylightJournal.state == odl_const.PENDING).delete(
|
||||
synchronize_session=False)
|
||||
session.expire_all()
|
||||
|
||||
|
||||
@db_api.retry_db_errors
|
||||
def _update_periodic_task_state(session, expected_state, state, task):
|
||||
with session.begin():
|
||||
row = session.query(models.OpenDaylightPeriodicTask).filter_by(
|
||||
state=expected_state,
|
||||
task=task).with_for_update().one_or_none()
|
||||
|
||||
if row is None:
|
||||
return False
|
||||
|
||||
row.state = state
|
||||
return True
|
||||
|
||||
|
||||
def was_periodic_task_executed_recently(session, task, interval):
|
||||
now = session.execute(func.now()).scalar()
|
||||
delta = datetime.timedelta(seconds=interval)
|
||||
row = session.query(models.OpenDaylightPeriodicTask).filter(
|
||||
models.OpenDaylightPeriodicTask.task == task,
|
||||
(now - delta >= (models.OpenDaylightPeriodicTask.lock_updated))
|
||||
).one_or_none()
|
||||
|
||||
return bool(row is None)
|
||||
|
||||
|
||||
def lock_periodic_task(session, task):
|
||||
return _update_periodic_task_state(session, odl_const.PENDING,
|
||||
odl_const.PROCESSING, task)
|
||||
|
||||
|
||||
def unlock_periodic_task(session, task):
|
||||
return _update_periodic_task_state(session, odl_const.PROCESSING,
|
||||
odl_const.PENDING, task)
|
||||
|
||||
|
||||
def update_periodic_task(session, task, operation=None):
|
||||
"""Update the current periodic task details.
|
||||
|
||||
The function assumes the lock is held, so it mustn't be run outside of a
|
||||
locked context.
|
||||
"""
|
||||
op_text = None
|
||||
if operation:
|
||||
op_text = operation.__name__
|
||||
|
||||
with session.begin():
|
||||
row = session.query(models.OpenDaylightPeriodicTask).filter_by(
|
||||
task=task).one()
|
||||
row.processing_operation = op_text
|
||||
|
||||
|
||||
def delete_rows_by_state_and_time(session, state, time_delta):
|
||||
with session.begin():
|
||||
now = session.execute(func.now()).scalar()
|
||||
session.query(models.OpenDaylightJournal).filter(
|
||||
models.OpenDaylightJournal.state == state,
|
||||
models.OpenDaylightJournal.last_retried < now - time_delta).delete(
|
||||
synchronize_session=False)
|
||||
session.expire_all()
|
||||
|
||||
|
||||
def reset_processing_rows(session, max_timedelta):
|
||||
with session.begin():
|
||||
now = session.execute(func.now()).scalar()
|
||||
max_timedelta = datetime.timedelta(seconds=max_timedelta)
|
||||
rows = session.query(models.OpenDaylightJournal).filter(
|
||||
models.OpenDaylightJournal.last_retried < now - max_timedelta,
|
||||
models.OpenDaylightJournal.state == odl_const.PROCESSING,
|
||||
).update({'state': odl_const.PENDING})
|
||||
|
||||
return rows
|
@ -1,24 +0,0 @@
|
||||
# Copyright 2016 Intel Corporation.
|
||||
# Copyright 2016 Isaku Yamahata <isaku.yamahata at intel com>
|
||||
# <isaku.yamahata at gmail com>
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from networking_odl.db import models # noqa
|
||||
|
||||
from neutron.db.migration.models import head
|
||||
|
||||
|
||||
def get_metadata():
|
||||
return head.model_base.BASEV2.metadata
|
@ -1 +0,0 @@
|
||||
This directory contains the migration scripts for the networking_odl project.
|