diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..5b10a1115 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = neutron +omit = neutron/tests/*,neutron/plugins/cisco/test/*,neutron/openstack/* + +[report] +ignore-errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..19526c2b1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +AUTHORS +build/* +build-stamp +ChangeLog +cover/ +.coverage +covhtml/ +dist/ +doc/build +*.DS_Store +*.pyc +neutron.egg-info/ +neutron/vcsversion.py +neutron/versioninfo +pbr*.egg/ +quantum.egg-info/ +quantum/vcsversion.py +quantum/versioninfo +run_tests.err.log +run_tests.log +setuptools*.egg/ +subunit.log +.testrepository +.tox/ +.venv/ +*.mo +*.sw? +*~ diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000..f3e7e5e1a --- /dev/null +++ b/.mailmap @@ -0,0 +1,11 @@ +# Format is: +# +# +lawrancejing +Jiajun Liu +Zhongyue Luo +Kun Huang +Zhenguo Niu +Isaku Yamahata +Isaku Yamahata +Morgan Fainberg diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 000000000..87fbcd3b3 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,42 @@ +# The format of this file isn't really documented; just use --generate-rcfile +[MASTER] +# Add to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=test + +[Messages Control] +# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future +# C0111: Don't require docstrings on every method +# W0511: TODOs in code comments are fine. +# W0142: *args and **kwargs are fine. +# W0622: Redefining id is fine. +disable=C0111,W0511,W0142,W0622 + +[Basic] +# Variable names can be 1 to 31 characters long, with lowercase and underscores +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + +# Method names should be at least 3 characters long +# and be lowecased with underscores +method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ + +# Module names matching neutron-* are ok (files in bin/) +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ + +# Don't require docstrings on tests. +no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + +[Design] +max-public-methods=100 +min-public-methods=0 +max-args=6 + +[Variables] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. 
+# _ is used by our localization +additional-builtins=_ diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 000000000..c180b0319 --- /dev/null +++ b/.testr.conf @@ -0,0 +1,4 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 000000000..84f6f3e1c --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,28 @@ +Neutron Style Commandments +======================= + +- Step 1: Read the OpenStack Style Commandments + http://docs.openstack.org/developer/hacking/ +- Step 2: Read on + +Neutron Specific Commandments +-------------------------- + +- [N320] Validate that LOG messages, except debug ones, have translations + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. + +All unittest classes must ultimately inherit from testtools.TestCase. In the +Neutron test suite, this should be done by inheriting from +neutron.tests.base.BaseTestCase. + +All setUp and tearDown methods must upcall using the super() method. +tearDown methods should be avoided and addCleanup calls should be preferred. +Never manually create tempfiles. Always use the tempfile fixtures from +the fixture library to ensure that they are cleaned up. diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..4e527c7fa --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,14 @@ +include AUTHORS +include README.rst +include ChangeLog +include LICENSE +include neutron/db/migration/README +include neutron/db/migration/alembic.ini +include neutron/db/migration/alembic_migrations/script.py.mako +include neutron/db/migration/alembic_migrations/versions/README +recursive-include neutron/locale * + +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..57a5930b0 --- /dev/null +++ b/README.rst @@ -0,0 +1,25 @@ +# -- Welcome! + + You have come across a cloud computing network fabric controller. It has + identified itself as "Neutron." It aims to tame your (cloud) networking! + +# -- External Resources: + + The homepage for Neutron is: http://launchpad.net/neutron . Use this + site for asking for help, and filing bugs. Code is available on github at + . + + The latest and most in-depth documentation on how to use Neutron is + available at: . This includes: + + Neutron Administrator Guide + http://docs.openstack.org/trunk/openstack-network/admin/content/ + + Neutron API Reference: + http://docs.openstack.org/api/openstack-network/2.0/content/ + + The start of some developer documentation is available at: + http://wiki.openstack.org/NeutronDevelopment + + For help using or hacking on Neutron, you can send mail to + . diff --git a/TESTING.rst b/TESTING.rst new file mode 100644 index 000000000..0d6814e8e --- /dev/null +++ b/TESTING.rst @@ -0,0 +1,180 @@ +Testing Neutron +============================================================= + +Overview +-------- + +The unit tests are meant to cover as much code as possible and should +be executed without the service running. They are designed to test +the various pieces of the neutron tree to make sure any new changes +don't break existing functionality. + +The functional tests are intended to validate actual system +interaction. Mocks should be used sparingly, if at all. 
Care +should be taken to ensure that existing system resources are not +modified and that resources created in tests are properly cleaned +up. + +Development process +------------------- + +It is expected that any new changes that are proposed for merge +come with tests for that feature or code area. Ideally any bug +fixes that are submitted also have tests to prove that they stay +fixed! In addition, before proposing for merge, all of the +current tests should be passing. + +Virtual environments +~~~~~~~~~~~~~~~~~~~~ + +Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. + +Create a machine (such as a VM or Vagrant box) running a distribution supported +by DevStack and install DevStack there. For example, there is a Vagrant script +for DevStack at https://github.com/bcwaldon/vagrant_devstack. + + .. note:: + + If you prefer not to use DevStack, you can still check out source code on your local + machine and develop from there. + + +Running unit tests +------------------ + +There are three mechanisms for running tests: run_tests.sh, tox, +and nose. Before submitting a patch for review you should always +ensure all tests pass; a tox run is triggered by the Jenkins gate +executed on Gerrit for each patch pushed for review. + +With these mechanisms you can either run the tests in the standard +environment or create a virtual environment to run them in. + +By default after running all of the tests, any pep8 errors +found in the tree will be reported. + + +With `run_tests.sh` +~~~~~~~~~~~~~~~~~~~ + +You can use the `run_tests.sh` script in the root source directory to execute +tests in a virtualenv:: + + ./run_tests.sh -V + + +With `nose` +~~~~~~~~~~~ + +You can use `nose`_ to run individual tests, as well as to debug +portions of your code:: + + source .venv/bin/activate + pip install nose + nosetests + +There are disadvantages to running nose - the tests are run sequentially, so +race condition bugs will not be triggered, and the full test suite will +take significantly longer than tox & testr. The upside is that testr has +some rough edges when it comes to diagnosing errors and failures, and there is +no easy way to set a breakpoint in the Neutron code and enter an +interactive debugging session while using testr. + +.. _nose: https://nose.readthedocs.org/en/latest/index.html + +With `tox` +~~~~~~~~~~ + +Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual +environments for running test cases. It uses `Testr`_ for managing the running +of the test cases. + +Tox handles the creation of a series of `virtualenvs`_ that target specific +versions of Python (2.6, 2.7, 3.3, etc.). + +Testr handles the parallel execution of a series of test cases as well as +the tracking of long-running tests and other things. + +Running unit tests is as easy as executing this in the root directory of the +Neutron source code:: + + tox + +For more information on the standard Tox-based test infrastructure used by +OpenStack and how to do some common test/debugging procedures with Testr, +see this wiki page: + + https://wiki.openstack.org/wiki/Testr + +.. _Testr: https://wiki.openstack.org/wiki/Testr +.. _tox: http://tox.readthedocs.org/en/latest/ +.. _virtualenvs: https://pypi.python.org/pypi/virtualenv + + +Running individual tests +~~~~~~~~~~~~~~~~~~~~~~~~ + +For running individual test modules or cases, you just need to pass +the dot-separated path to the module you want as an argument to it.
+ +For executing a specific test case, specify the name of the test case +class separating it from the module path with a colon. + +For example, the following would run only the JSONV2TestCase tests from +neutron/tests/unit/test_api_v2.py:: + + $ ./run_tests.sh neutron.tests.unit.test_api_v2:JSONV2TestCase + +or:: + + $ ./tox neutron.tests.unit.test_api_v2:JSONV2TestCase + +Adding more tests +~~~~~~~~~~~~~~~~~ + +Neutron has a fast growing code base and there is plenty of areas that +need to be covered by unit and functional tests. + +To get a grasp of the areas where tests are needed, you can check +current coverage by running:: + + $ ./run_tests.sh -c + +Debugging +--------- + +By default, calls to pdb.set_trace() will be ignored when tests +are run. For pdb statements to work, invoke run_tests as follows:: + + $ ./run_tests.sh -d [test module path] + +It's possible to debug tests in a tox environment:: + + $ tox -e venv -- python -m testtools.run [test module path] + +Tox-created virtual environments (venv's) can also be activated +after a tox run and reused for debugging:: + + $ tox -e venv + $ . .tox/venv/bin/activate + $ python -m testtools.run [test module path] + +Tox packages and installs the neutron source tree in a given venv +on every invocation, but if modifications need to be made between +invocation (e.g. adding more pdb statements), it is recommended +that the source tree be installed in the venv in editable mode:: + + # run this only after activating the venv + $ pip install --editable . + +Editable mode ensures that changes made to the source tree are +automatically reflected in the venv, and that such changes are not +overwritten during the next tox run. + +Post-mortem debugging +~~~~~~~~~~~~~~~~~~~~~ + +Setting OS_POST_MORTEM_DEBUG=1 in the shell environment will ensure +that pdb.post_mortem() will be invoked on test failure:: + + $ OS_POST_MORTEM_DEBUG=1 ./run_tests.sh -d [test module path] diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000..15cd6cb76 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/neutron-rootwrap b/bin/neutron-rootwrap new file mode 100755 index 000000000..284037846 --- /dev/null +++ b/bin/neutron-rootwrap @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.rootwrap import cmd + +cmd.main() diff --git a/bin/neutron-rootwrap-xen-dom0 b/bin/neutron-rootwrap-xen-dom0 new file mode 100755 index 000000000..3f4251a4e --- /dev/null +++ b/bin/neutron-rootwrap-xen-dom0 @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Openstack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Neutron root wrapper for dom0. + +Executes networking commands in dom0. The XenAPI plugin is +responsible determining whether a command is safe to execute. + +""" +from __future__ import print_function + +import ConfigParser +import json +import os +import select +import sys +import traceback + +import XenAPI + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 +RC_BADCONFIG = 97 +RC_XENAPI_ERROR = 96 + + +def parse_args(): + # Split arguments, require at least a command + exec_name = sys.argv.pop(0) + # argv[0] required; path to conf file + if len(sys.argv) < 2: + print("%s: No command specified" % exec_name) + sys.exit(RC_NOCOMMAND) + + config_file = sys.argv.pop(0) + user_args = sys.argv[:] + + return exec_name, config_file, user_args + + +def _xenapi_section_name(config): + sections = [sect for sect in config.sections() if sect.lower() == "xenapi"] + if len(sections) == 1: + return sections[0] + + print("Multiple [xenapi] sections or no [xenapi] section found!") + sys.exit(RC_BADCONFIG) + + +def load_configuration(exec_name, config_file): + config = ConfigParser.RawConfigParser() + config.read(config_file) + try: + exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") + filters_path = config.get("DEFAULT", "filters_path").split(",") + section = _xenapi_section_name(config) + url = config.get(section, "xenapi_connection_url") + username = config.get(section, "xenapi_connection_username") + password = config.get(section, "xenapi_connection_password") + except ConfigParser.Error: + print("%s: Incorrect configuration file: %s" % (exec_name, config_file)) + sys.exit(RC_BADCONFIG) + if not url or not password: + msg = ("%s: Must specify xenapi_connection_url, " + "xenapi_connection_username (optionally), and " + "xenapi_connection_password in %s") % (exec_name, config_file) + print(msg) + sys.exit(RC_BADCONFIG) + return dict( + filters_path=filters_path, + url=url, + username=username, + password=password, + exec_dirs=exec_dirs, + ) + + +def filter_command(exec_name, filters_path, user_args, exec_dirs): + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(exec_name), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "neutron", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from oslo.rootwrap import wrapper + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters(filters_path) + filter_match = wrapper.match_filter( + filters, user_args, exec_dirs=exec_dirs) + if not filter_match: + print("Unauthorized command: %s" % ' '.join(user_args)) + sys.exit(RC_UNAUTHORIZED) + + +def run_command(url, username, password, user_args, cmd_input): + try: + session = XenAPI.Session(url) + session.login_with_password(username, password) + host = session.xenapi.session.get_this_host(session.handle) + result = session.xenapi.host.call_plugin( + host, 'netwrap', 'run_command', + {'cmd': json.dumps(user_args), 'cmd_input': json.dumps(cmd_input)}) + return json.loads(result) + except Exception as e: + traceback.print_exc() + sys.exit(RC_XENAPI_ERROR) + + +def main(): 
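+    # Overall flow: parse the command line, load the XenAPI connection
+    # details and rootwrap filter paths from the config file, check the
+    # requested command against those filters, then execute it in dom0 via
+    # the XenAPI 'netwrap' plugin, forwarding any data found on stdin.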
+ exec_name, config_file, user_args = parse_args() + config = load_configuration(exec_name, config_file) + filter_command(exec_name, config['filters_path'], user_args, config['exec_dirs']) + + # If data is available on the standard input, we need to pass it to the + # command executed in dom0 + cmd_input = None + if select.select([sys.stdin,],[],[],0.0)[0]: + cmd_input = "".join(sys.stdin) + + return run_command(config['url'], config['username'], config['password'], + user_args, cmd_input) + + +if __name__ == '__main__': + print(main()) diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 000000000..b63e30032 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,96 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXSOURCE = source +PAPER = +BUILDDIR = build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +.DEFAULT_GOAL = html + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + if [ -f .autogenerated ] ; then \ + cat .autogenerated | xargs rm ; \ + rm .autogenerated ; \ + fi + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." 
+ +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/pom.xml b/doc/pom.xml new file mode 100644 index 000000000..6fc579f80 --- /dev/null +++ b/doc/pom.xml @@ -0,0 +1,135 @@ + + + + 4.0.0 + + org.openstack.docs + openstack-guide + 1.0.0-SNAPSHOT + jar + OpenStack Guides + + + + + + Rackspace Research Repositories + + true + + + + rackspace-research + Rackspace Research Repository + http://maven.research.rackspacecloud.com/content/groups/public/ + + + + + rackspace-research + Rackspace Research Repository + http://maven.research.rackspacecloud.com/content/groups/public/ + + + + + + + + + target/docbkx/pdf + + **/*.fo + + + + + + + com.rackspace.cloud.api + clouddocs-maven-plugin + 1.0.5-SNAPSHOT + + + goal1 + + generate-pdf + + generate-sources + + false + + + + goal2 + + generate-webhelp + + generate-sources + + + 0 + openstackdocs + 1 + UA-17511903-6 + + appendix toc,title + article/appendix nop + article toc,title + book title,figure,table,example,equation + chapter toc,title + part toc,title + preface toc,title + qandadiv toc + qandaset toc + reference toc,title + set toc,title + + + 0 + 0 + + + + + + + + + + + + + + + + + + + + + + + + + + true + source/docbkx + + neutron-api-1.0/neutron-api-guide.xml + + reviewer + openstack + + + + + + diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 000000000..b2f619a46 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2010 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Keystone documentation build configuration file, created by +# sphinx-quickstart on Tue May 18 13:50:15 2010. +# +# This file is execfile()'d with the current directory set to it's containing +# dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +NEUTRON_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) +sys.path.insert(0, NEUTRON_DIR) + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
+extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.ifconfig', + 'sphinx.ext.intersphinx', + 'sphinx.ext.pngmath', + 'sphinx.ext.graphviz', + 'sphinx.ext.todo', + 'oslosphinx'] + +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = [] +if os.getenv('HUDSON_PUBLISH_DOCS'): + templates_path = ['_ga', '_templates'] +else: + templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Neutron' +copyright = u'2011-present, OpenStack Foundation.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# Version info +from neutron.version import version_info as neutron_version +release = neutron_version.release_string() +# The short X.Y version. +version = neutron_version.version_string() + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = [] + +# The reST default role (for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['neutron.'] + +# -- Options for man page output -------------------------------------------- + +# Grouping the document tree for man pages. +# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' + +man_pages = [ + ('man/neutron-server', 'neutron-server', u'Neutron Server', + [u'OpenStack'], 1) +] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = ['_theme'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. 
+#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' +git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" +html_last_updated_fmt = os.popen(git_cmd).read() + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'neutrondoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, +# documentclass [howto/manual]). +latex_documents = [ + ('index', 'Neutron.tex', u'Neutron Documentation', + u'Neutron development team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'python': ('http://docs.python.org/', None), + 'nova': ('http://nova.openstack.org', None), + 'swift': ('http://swift.openstack.org', None), + 'glance': ('http://glance.openstack.org', None), + 'horizon': ('http://horizon.openstack.org', None), + 'keystone': ('http://keystone.openstack.org', None), + } diff --git a/doc/source/devref/advanced_services.rst b/doc/source/devref/advanced_services.rst new file mode 100644 index 000000000..2e877128a --- /dev/null +++ b/doc/source/devref/advanced_services.rst @@ -0,0 +1,7 @@ +Advanced Services +================= + +.. 
toctree:: + fwaas + lbaas + vpnaas diff --git a/doc/source/devref/api_extensions.rst b/doc/source/devref/api_extensions.rst new file mode 100644 index 000000000..2c8b3f64e --- /dev/null +++ b/doc/source/devref/api_extensions.rst @@ -0,0 +1,18 @@ +============== +API Extensions +============== + +API extensions are the standard way of introducing new functionality +to the Neutron project; they allow plugins to +determine whether they wish to support the functionality or not. + +Examples +======== + +The easiest way to demonstrate how an API extension is written is +by studying an existing API extension and explaining the different layers. + +.. toctree:: + :maxdepth: 1 + + security_group_api diff --git a/doc/source/devref/api_layer.rst b/doc/source/devref/api_layer.rst new file mode 100644 index 000000000..a3e5e7f69 --- /dev/null +++ b/doc/source/devref/api_layer.rst @@ -0,0 +1,57 @@ +Neutron WSGI/HTTP API layer +=========================== + +This section will cover the internals of Neutron's HTTP API, and the classes +in Neutron that can be used to create extensions to the Neutron API. + +Python web applications interface with web servers through the Python Web +Server Gateway Interface (WSGI) - defined in `PEP 333 `_ + +Startup +------- + +Neutron's WSGI server is started from the `server module `_ +and the entry point `serve_wsgi` is called to build an instance of the +`NeutronApiService`_, which is then returned to the server module; the +server module spawns an `Eventlet`_ `GreenPool`_ that will run the WSGI +application and respond to requests from clients. + + +.. _NeutronApiService: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/service.py + +.. _Eventlet: http://eventlet.net/ + +.. _GreenPool: http://eventlet.net/doc/modules/greenpool.html + +WSGI Application +---------------- + +During the building of the NeutronApiService, the `_run_wsgi` function +creates a WSGI application using the `load_paste_app` function inside +`config.py`_ - which parses `api-paste.ini`_ - in order to create a WSGI app +using `Paste`_'s `deploy`_. + +The api-paste.ini file defines the WSGI applications and routes - using the +`Paste INI file format`_. + +The INI file directs Paste to instantiate the `APIRouter`_ class of +Neutron, which contains several methods that map Neutron resources (such as +Ports, Networks, Subnets) to URLs, and the controller for each resource. + + +.. _config.py: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/common/config.py + +.. _api-paste.ini: http://git.openstack.org/cgit/openstack/neutron/tree/etc/api-paste.ini + +.. _APIRouter: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/router.py + +.. _Paste: http://pythonpaste.org/ + +.. _Deploy: http://pythonpaste.org/deploy/ + +.. _Paste INI file format: http://pythonpaste.org/deploy/#applications + +Further reading +--------------- + +`Yong Sheng Gong: Deep Dive into Neutron `_ diff --git a/doc/source/devref/common.rst b/doc/source/devref/common.rst new file mode 100644 index 000000000..537d4c291 --- /dev/null +++ b/doc/source/devref/common.rst @@ -0,0 +1,25 @@ +.. + Copyright 2010-2011 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License.
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +OpenStack Common +================= + +A number of modules used are from the openstack-common project. +The imported files are listed in 'neutron/openstack-common.conf'. +More information can be found at `OpenStack Common`_. + +.. _`OpenStack Common`: https://launchpad.net/openstack-common diff --git a/doc/source/devref/db_layer.rst b/doc/source/devref/db_layer.rst new file mode 100644 index 000000000..54eff65b7 --- /dev/null +++ b/doc/source/devref/db_layer.rst @@ -0,0 +1,2 @@ +Neutron Database Layer +====================== diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst new file mode 100644 index 000000000..dc4be0838 --- /dev/null +++ b/doc/source/devref/development.environment.rst @@ -0,0 +1,49 @@ +.. + Copyright 2010-2013 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Setting Up a Development Environment +==================================== + +This page describes how to set up a working Python development +environment that can be used in developing Neutron on Ubuntu, Fedora or +Mac OS X. These instructions assume you're already familiar with +Git and with Gerrit, the code review toolset used by OpenStack; if you +aren't, please see `this Git tutorial`_ for an introduction +to using Git and `this wiki page`_ for a tutorial on using Gerrit and Git for +code contribution to OpenStack projects. + +.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started +.. _this wiki page: https://wiki.openstack.org/wiki/Gerrit_Workflow + +Following these instructions will allow you to run the Neutron unit +tests. If you want to be able to run Neutron in a full OpenStack environment, +you can use the excellent `DevStack`_ project to do so. There is a wiki page +that describes `setting up Neutron using DevStack`_. + +.. _DevStack: https://github.com/openstack-dev/devstack +.. _setting up Neutron using Devstack: https://wiki.openstack.org/wiki/NeutronDevstack + +Getting the code +---------------- + +Grab the code:: + + git clone git://git.openstack.org/openstack/neutron.git + cd neutron + + +.. include:: ../../../TESTING.rst diff --git a/doc/source/devref/fwaas.rst b/doc/source/devref/fwaas.rst new file mode 100644 index 000000000..7b7680c6f --- /dev/null +++ b/doc/source/devref/fwaas.rst @@ -0,0 +1,30 @@ +Firewall as a Service +===================== + +`Design Document`_ + ..
_Design Document: https://docs.google.com/document/d/1PJaKvsX2MzMRlLGfR0fBkrMraHYF0flvl0sqyZ704tA/edit#heading=h.aed6tiupj0qk + +Plugin +------ +.. automodule:: neutron.services.firewall.fwaas_plugin + +.. autoclass:: FirewallPlugin + :members: + +Database layer +-------------- + +.. automodule:: neutron.db.firewall.firewall_db + +.. autoclass:: Firewall_db_mixin + :members: + + +Driver layer +------------ + +.. automodule:: neutron.services.firewall.drivers.fwaas_base + +.. autoclass:: FwaasDriverBase + :members: diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst new file mode 100644 index 000000000..46d2edd0e --- /dev/null +++ b/doc/source/devref/index.rst @@ -0,0 +1,65 @@ +.. + Copyright 2010-2011 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Developer Guide +=============== + +In the Developer Guide, you will find information on Neutron's lower level +programming APIs. There are sections that cover the core pieces of Neutron, +including its database, message queue, and scheduler components. There are +also subsections that describe specific plugins inside Neutron. + + +Programming HowTos and Tutorials +-------------------------------- +.. toctree:: + :maxdepth: 3 + + development.environment + + +Neutron Internals +----------------- +.. toctree:: + :maxdepth: 3 + + api_layer + api_extensions + plugin-api + db_layer + rpc_api + layer3 + l2_agents + advanced_services + + +Module Reference +---------------- +.. toctree:: + :maxdepth: 3 + +.. todo:: + + Add in all the big modules as automodule indexes. + + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/source/devref/l2_agents.rst b/doc/source/devref/l2_agents.rst new file mode 100644 index 000000000..83786dabe --- /dev/null +++ b/doc/source/devref/l2_agents.rst @@ -0,0 +1,7 @@ +L2 Agent Networking +------------------- +.. toctree:: + :maxdepth: 3 + + openvswitch_agent + linuxbridge_agent diff --git a/doc/source/devref/layer3.rst b/doc/source/devref/layer3.rst new file mode 100644 index 000000000..571f2a09a --- /dev/null +++ b/doc/source/devref/layer3.rst @@ -0,0 +1,199 @@ +Layer 3 Networking in Neutron - via Layer 3 agent & OpenVSwitch +=============================================================== + +This page discusses the usage of Neutron with Layer 3 functionality enabled. 
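+
+The sections that follow inspect this topology with the ``neutron`` CLI from a
+DevStack VM. The same information can also be retrieved programmatically; the
+snippet below is only an illustrative sketch using python-neutronclient, and
+the credentials in it are placeholders rather than values taken from this page::
+
+    # Illustrative sketch: list L3-related resources through the Neutron API.
+    # The auth values below are placeholders; substitute your own credentials.
+    from neutronclient.v2_0 import client
+
+    neutron = client.Client(username='demo',
+                            password='secret',
+                            tenant_name='demo',
+                            auth_url='http://127.0.0.1:5000/v2.0')
+
+    for router in neutron.list_routers()['routers']:
+        print('%s %s %s' % (router['id'], router['name'],
+                            router['external_gateway_info']))
+    for subnet in neutron.list_subnets()['subnets']:
+        print('%s %s gateway=%s' % (subnet['id'], subnet['cidr'],
+                                    subnet['gateway_ip']))
+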
+ +Neutron logical network setup +----------------------------- +:: + + vagrant@precise64:~/devstack$ neutron net-list + +--------------------------------------+---------+--------------------------------------------------+ + | id | name | subnets | + +--------------------------------------+---------+--------------------------------------------------+ + | 84b6b0cc-503d-448a-962f-43def05e85be | public | 3a56da7c-2f6e-41af-890a-b324d7bc374d | + | a4b4518c-800d-4357-9193-57dbb42ac5ee | private | 1a2d26fb-b733-4ab3-992e-88554a87afa6 10.0.0.0/24 | + +--------------------------------------+---------+--------------------------------------------------+ + vagrant@precise64:~/devstack$ neutron subnet-list + +--------------------------------------+------+-------------+--------------------------------------------+ + | id | name | cidr | allocation_pools | + +--------------------------------------+------+-------------+--------------------------------------------+ + | 1a2d26fb-b733-4ab3-992e-88554a87afa6 | | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} | + +--------------------------------------+------+-------------+--------------------------------------------+ + vagrant@precise64:~/devstack$ neutron port-list + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | id | name | mac_address | fixed_ips | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | 0ba8700e-da06-4318-8fe9-00676dd994b8 | | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} | + | b2044570-ad52-4f31-a2c3-5d767dc9a8a7 | | fa:16:3e:5b:cf:4c | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.3"} | + | bb60d1bb-0cab-41cb-9678-30d2b2fdb169 | | fa:16:3e:af:a9:bd | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.2"} | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + + vagrant@precise64:~/devstack$ neutron subnet-show 1a2d26fb-b733-4ab3-992e-88554a87afa6 + +------------------+--------------------------------------------+ + | Field | Value | + +------------------+--------------------------------------------+ + | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} | + | cidr | 10.0.0.0/24 | + | dns_nameservers | | + | enable_dhcp | True | + | gateway_ip | 10.0.0.1 | + | host_routes | | + | id | 1a2d26fb-b733-4ab3-992e-88554a87afa6 | + | ip_version | 4 | + | name | | + | network_id | a4b4518c-800d-4357-9193-57dbb42ac5ee | + | tenant_id | 3368290ab10f417390acbb754160dbb2 | + +------------------+--------------------------------------------+ + + +Neutron logical router setup +---------------------------- + +* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html#under_the_hood_openvswitch_scenario1_network + + +:: + + vagrant@precise64:~/devstack$ neutron router-list + +--------------------------------------+---------+--------------------------------------------------------+ + | id | name | external_gateway_info | + +--------------------------------------+---------+--------------------------------------------------------+ + | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | router1 | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} | + 
+--------------------------------------+---------+--------------------------------------------------------+ + vagrant@precise64:~/devstack$ neutron router-show router1 + +-----------------------+--------------------------------------------------------+ + | Field | Value | + +-----------------------+--------------------------------------------------------+ + | admin_state_up | True | + | external_gateway_info | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} | + | id | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | + | name | router1 | + | routes | | + | status | ACTIVE | + | tenant_id | 3368290ab10f417390acbb754160dbb2 | + +-----------------------+--------------------------------------------------------+ + vagrant@precise64:~/devstack$ neutron router-port-list router1 + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | id | name | mac_address | fixed_ips | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | 0ba8700e-da06-4318-8fe9-00676dd994b8 | | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + +Neutron Routers are realized in OpenVSwitch +------------------------------------------- + +.. image:: http://docs.openstack.org/admin-guide-cloud/content/figures/10/a/common/figures/under-the-hood-scenario-1-ovs-network.png + + +"router1" in the Neutron logical network is realized through a port ("qr-0ba8700e-da") in OpenVSwitch - attached to "br-int":: + + vagrant@precise64:~/devstack$ sudo ovs-vsctl show + b9b27fc3-5057-47e7-ba64-0b6afe70a398 + Bridge br-int + Port "qr-0ba8700e-da" + tag: 1 + Interface "qr-0ba8700e-da" + type: internal + Port br-int + Interface br-int + type: internal + Port int-br-ex + Interface int-br-ex + Port "tapbb60d1bb-0c" + tag: 1 + Interface "tapbb60d1bb-0c" + type: internal + Port "qvob2044570-ad" + tag: 1 + Interface "qvob2044570-ad" + Port "int-br-eth1" + Interface "int-br-eth1" + Bridge "br-eth1" + Port "phy-br-eth1" + Interface "phy-br-eth1" + Port "br-eth1" + Interface "br-eth1" + type: internal + Bridge br-ex + Port phy-br-ex + Interface phy-br-ex + Port "qg-0143bce1-08" + Interface "qg-0143bce1-08" + type: internal + Port br-ex + Interface br-ex + type: internal + ovs_version: "1.4.0+build0" + + + vagrant@precise64:~/devstack$ brctl show + bridge name bridge id STP enabled interfaces + br-eth1 0000.e2e7fc5ccb4d no + br-ex 0000.82ee46beaf4d no phy-br-ex + qg-39efb3f9-f0 + qg-77e0666b-cd + br-int 0000.5e46cb509849 no int-br-ex + qr-54c9cd83-43 + qvo199abeb2-63 + qvo1abbbb60-b8 + tap74b45335-cc + qbr199abeb2-63 8000.ba06e5f8675c no qvb199abeb2-63 + tap199abeb2-63 + qbr1abbbb60-b8 8000.46a87ed4fb66 no qvb1abbbb60-b8 + tap1abbbb60-b8 + virbr0 8000.000000000000 yes + +Finding the router in ip/ipconfig +--------------------------------- + +* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html + + The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT. + In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent + defaults to using Linux network namespaces to provide isolated forwarding contexts. 
As a result, + the IP addresses of routers will not be visible simply by running "ip addr list" or "ifconfig" on + the node. Similarly, you will not be able to directly ping fixed IPs. + + To do either of these things, you must run the command within a particular router's network + namespace. The namespace will have the name "qrouter-. + +.. image:: http://docs.openstack.org/admin-guide-cloud/content/figures/10/a/common/figures/under-the-hood-scenario-1-ovs-netns.png + +For example:: + + vagrant@precise64:~$ neutron router-list + +--------------------------------------+---------+--------------------------------------------------------+ + | id | name | external_gateway_info | + +--------------------------------------+---------+--------------------------------------------------------+ + | ad948c6e-afb6-422a-9a7b-0fc44cbb3910 | router1 | {"network_id": "e6634fef-03fa-482a-9fa7-e0304ce5c995"} | + +--------------------------------------+---------+--------------------------------------------------------+ + vagrant@precise64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list + 18: lo: mtu 16436 qdisc noqueue state UNKNOWN + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 19: qr-54c9cd83-43: mtu 1500 qdisc noqueue state UNKNOWN + link/ether fa:16:3e:dd:c1:8f brd ff:ff:ff:ff:ff:ff + inet 10.0.0.1/24 brd 10.0.0.255 scope global qr-54c9cd83-43 + inet6 fe80::f816:3eff:fedd:c18f/64 scope link + valid_lft forever preferred_lft forever + 20: qg-77e0666b-cd: mtu 1500 qdisc noqueue state UNKNOWN + link/ether fa:16:3e:1f:d3:ec brd ff:ff:ff:ff:ff:ff + inet 192.168.27.130/28 brd 192.168.27.143 scope global qg-77e0666b-cd + inet6 fe80::f816:3eff:fe1f:d3ec/64 scope link + valid_lft forever preferred_lft forever + + +Provider Networking +------------------- + +Neutron can also be configured to create `provider networks `_ + +Further Reading +--------------- +* `Packet Pushers - Neutron Network Implementation on Linux `_ +* `OpenStack Cloud Administrator Guide `_ +* `Neutron - Layer 3 API extension usage guide `_ +* `Darragh O'Reilly - The Quantum L3 router and floating IPs `_ diff --git a/doc/source/devref/lbaas.rst b/doc/source/devref/lbaas.rst new file mode 100644 index 000000000..86f408718 --- /dev/null +++ b/doc/source/devref/lbaas.rst @@ -0,0 +1,32 @@ +Loadbalancer as a Service +========================= + + +https://wiki.openstack.org/wiki/Neutron/LBaaS/Architecture + +https://wiki.openstack.org/wiki/Neutron/LBaaS/API_1.0 + + +Plugin +------ +.. automodule:: neutron.services.loadbalancer.plugin + +.. autoclass:: LoadBalancerPlugin + :members: + +Database layer +-------------- + +.. automodule:: neutron.db.loadbalancer.loadbalancer_db + +.. autoclass:: LoadBalancerPluginDb + :members: + + +Driver layer +------------ + +.. automodule:: neutron.services.loadbalancer.drivers.abstract_driver + +.. 
autoclass:: LoadBalancerAbstractDriver + :members: diff --git a/doc/source/devref/linuxbridge_agent.rst b/doc/source/devref/linuxbridge_agent.rst new file mode 100644 index 000000000..2c7b81d4f --- /dev/null +++ b/doc/source/devref/linuxbridge_agent.rst @@ -0,0 +1,2 @@ +L2 Networking with Linux Bridge +------------------------------- diff --git a/doc/source/devref/openvswitch_agent.rst b/doc/source/devref/openvswitch_agent.rst new file mode 100644 index 000000000..1c441e381 --- /dev/null +++ b/doc/source/devref/openvswitch_agent.rst @@ -0,0 +1,21 @@ +==================== +OpenVSwitch L2 Agent +==================== + +This Agent uses the `OpenVSwitch`_ virtual switch to create L2 +connectivity for instances, along with bridges created in conjunction +with OpenStack Nova for filtering. + +ovs-neutron-agent can be configured to use two different networking technologies to create tenant isolation, either GRE tunnels or VLAN tags. + +VLAN Tags +--------- + +.. image:: http://docs.openstack.org/admin-guide-cloud/content/figures/10/a/common/figures/under-the-hood-scenario-1-ovs-compute.png + +.. _OpenVSwitch: http://openvswitch.org + +Further Reading +--------------- + +* `Darragh O'Reilly - The Open vSwitch plugin with VLANs `_ diff --git a/doc/source/devref/plugin-api.rst b/doc/source/devref/plugin-api.rst new file mode 100644 index 000000000..bec544b0e --- /dev/null +++ b/doc/source/devref/plugin-api.rst @@ -0,0 +1,12 @@ +Neutron Plugin Architecture +=========================== + +`Salvatore Orlando: How to write a Neutron Plugin (if you really need to) `_ + +Plugin API +---------- + +.. automodule:: neutron.neutron_plugin_base_v2 + +.. autoclass:: NeutronPluginBaseV2 + :members: diff --git a/doc/source/devref/rpc_api.rst b/doc/source/devref/rpc_api.rst new file mode 100644 index 000000000..77c851103 --- /dev/null +++ b/doc/source/devref/rpc_api.rst @@ -0,0 +1,2 @@ +Neutron RCP API Layer +===================== diff --git a/doc/source/devref/security_group_api.rst b/doc/source/devref/security_group_api.rst new file mode 100644 index 000000000..ad990d9e4 --- /dev/null +++ b/doc/source/devref/security_group_api.rst @@ -0,0 +1,50 @@ +Guided Tour: The Neutron Security Group API +=========================================== + +https://wiki.openstack.org/wiki/Neutron/SecurityGroups + + +API Extension +------------- + +The API extension is the 'front' end portion of the code, which handles defining a `REST-ful API`_, which is used by tenants. + + +.. _`REST-ful API`: https://github.com/openstack/neutron/blob/master/neutron/extensions/securitygroup.py + + +Database API +------------ + +The Security Group API extension adds a number of `methods to the database layer`_ of Neutron + +.. _`methods to the database layer`: https://github.com/openstack/neutron/blob/master/neutron/db/securitygroups_db.py + +Agent RPC +--------- + +This portion of the code handles processing requests from tenants, after they have been stored in the database. It involves messaging all the L2 agents +running on the compute nodes, and modifying the IPTables rules on each hypervisor. 
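+
+The fanout pattern above can be illustrated with a minimal, self-contained
+sketch. The class and method names below (``FakePluginNotifier``,
+``FakeAgent``) are hypothetical stand-ins, not Neutron's actual RPC mixins
+(those are listed next); the sketch only shows the shape of the interaction:
+the plugin broadcasts a "security group rules updated" event, and each L2
+agent rebuilds the filter rules for the ports it hosts::
+
+    # Illustrative only -- plain Python, no Neutron or oslo imports.
+
+    class FakePluginNotifier(object):
+        """Stands in for the plugin-side RPC API (fanout cast to agents)."""
+
+        def __init__(self):
+            self.agents = []
+
+        def register(self, agent):
+            self.agents.append(agent)
+
+        def security_groups_rule_updated(self, security_group_ids):
+            # A fanout cast: every registered L2 agent gets the notification.
+            for agent in self.agents:
+                agent.security_groups_rule_updated(security_group_ids)
+
+    class FakeAgent(object):
+        """Stands in for an L2 agent refreshing filters for affected ports."""
+
+        def __init__(self, host, ports):
+            self.host = host
+            # port id -> set of security group ids bound to that port
+            self.ports = ports
+
+        def security_groups_rule_updated(self, security_group_ids):
+            affected = [port for port, groups in self.ports.items()
+                        if groups & set(security_group_ids)]
+            for port in affected:
+                # The real agent would rebuild per-port iptables chains here.
+                print('%s: refreshing filters for port %s' % (self.host, port))
+
+    notifier = FakePluginNotifier()
+    notifier.register(FakeAgent('compute-1', {'port-a': set(['sg-web'])}))
+    notifier.register(FakeAgent('compute-2', {'port-b': set(['sg-db'])}))
+    notifier.security_groups_rule_updated(['sg-web'])
+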
+ + +* `Plugin RPC classes `_ + + * `SecurityGroupServerRpcCallbackMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes + * SecurityGroupServerRpcMixin - Defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API + +* `Agent RPC classes `_ + + * The SecurityGroupServerRpcApiMixin defines the API methods that can be called by agents, back to the plugin that runs on the Neutron controller + * The SecurityGroupAgentRpcCallbackMixin defines methods that a plugin uses to call back to an agent after performing an action called by an agent. + + +IPTables Driver +--------------- + +* ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port - including the ``security_group_rules`` + +* ``prepare_port_filter`` `appends the port to an internal dictionary `_, ``filtered_ports`` which is used to track the internal state. + +* Each security group has a `chain `_ in Iptables. + +* The ``IptablesFirewallDriver`` has a method to `convert security group rules into iptables statements `_ diff --git a/doc/source/devref/vpnaas.rst b/doc/source/devref/vpnaas.rst new file mode 100644 index 000000000..e2d2f1b6d --- /dev/null +++ b/doc/source/devref/vpnaas.rst @@ -0,0 +1,21 @@ +VPN as a Service +===================== + +`API Specification`_ + +.. _API Specification: http://docs.openstack.org/api/openstack-network/2.0/content/vpnaas_ext.html + +Plugin +------ +.. automodule:: neutron.services.vpn.plugin + +.. autoclass:: VPNPlugin + :members: + +Database layer +-------------- + +.. automodule:: neutron.db.vpn.vpn_db + +.. autoclass:: VPNPluginDb + :members: diff --git a/doc/source/docbkx/docbkx-example/README b/doc/source/docbkx/docbkx-example/README new file mode 100644 index 000000000..e1545671b --- /dev/null +++ b/doc/source/docbkx/docbkx-example/README @@ -0,0 +1,14 @@ +README + +This docbkx-example folder is provided for those who want to use the maven mojo supplied with the project to build their own documents to PDF and HTML (webhelp) format. It's intended to be a template and model. + +You can edit the src/docbkx/example.xml file using vi, emacs, or another DocBook editor. At Rackspace we use Oxygen. Both Oxygen and XML Mind offer free licenses to those working on open source project documentation. + +To build the output, install Apache Maven (https://maven.apache.org/) and then run: + +mvn clean generate-sources + +in the directory containing the pom.xml file. + +Feel free to ask questions of the openstack-docs team at https://launchpad.net/~openstack-doc. + diff --git a/doc/source/docbkx/docbkx-example/pom.xml b/doc/source/docbkx/docbkx-example/pom.xml new file mode 100644 index 000000000..f281971a5 --- /dev/null +++ b/doc/source/docbkx/docbkx-example/pom.xml @@ -0,0 +1,38 @@ + + 4.0.0 + my-groupid + my-guide + 1.0.0-SNAPSHOT + jar + OpenStack stand alone documentation examples + + + + + com.agilejava.docbkx + docbkx-maven-plugin + + + + generate-pdf + generate-webhelp + + generate-sources + + + + true + 100 + + + + + + + + + + + + + diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/example.xml b/doc/source/docbkx/docbkx-example/src/docbkx/example.xml new file mode 100644 index 000000000..96f1c64c1 --- /dev/null +++ b/doc/source/docbkx/docbkx-example/src/docbkx/example.xml @@ -0,0 +1,318 @@ + + Maven Example Documentation + + + + + + + + Badges! We don't need any stinking badges! + + + + 2011 + Timothy D. 
Witham + + Example v0.1 + Product Name Doesn't Exist - it's an example!™ + 2011-01-01 + + + Copyright details are filled in by the template. Change the value of the role + attribute on the legalnotice element to change the license. + + + + This document is intended for individuals who wish to produce documentation using Maven with + the same "feel" as the documentation that is produced by the mainline OpenStack projects. + + + + this is a placeholder for the front cover + + + this is a placeholder for the back cover + + + + Overview + Welcome to the getting started with Maven documentation. Congratulations, you have + successfully downloaded and built the example. + + For more details on the Product Name service, please refer to http://www.rackspacecloud.com/cloud_hosting_products/product name + + We welcome feedback, comments, and bug reports at support@rackspacecloud.com. +
+ Intended Audience + This guide is intended for individuals who want to develop standalone documentation + to use within an OpenStack deployment. Using this tool chain will give you the look and + feel of the mainline OpenStack documentation. +
+
+ Document Change History + This version of the Maven Getting Started Guide replaces and obsoletes all previous versions. The + most recent changes are described in the table below: + + + + Revision Date + Summary of Changes + + + + + July 14, 2011 + + + + Initial document creation. + + + + + + +
+
+ Additional Resources + + + + + Openstack - Cloud Software + + + + + + + Docbook Main Web Site + + + + + + + Docbook Quick Reference + + + + +
+
+ + Concepts + + Need to put something here. + + + + How do I? + +
+ Notes and including images + So I want a note and an image in this section ... + + This is an example of a note. + + + Here's a sample figure in svg and png formats: +
+ Sample Image + + + + + + + + +
+
+
+ Multiple Related Documents + + What you need to do in order to have multiple documents fit within the + build structure. + +
+
+ Using multiple files for a document + + What you need to do in order to have a single document that is made up of multiple + files. + +
+
+ Who, What, Where, When and Why of pom.xml + + You will have noticed the pom.xml file at the root directory. + This file is used to set the project parameters for the documentation, including + what type of documentation to produce and any post-processing that needs to happen. + If you want to know more about + + pom.xml - need a link + + then follow the link. + + For the pom.xml file that was included in this distribution we will + parse the individual lines and explain the meaning. + + + +
+ <project> + + What is all of this stuff and why is it important? + +
+
+ <modelVersion> + + What goes in here and why? + +
+
+ <groupId> + + What goes in here and why? + +
+
+ <artifactId> + + What goes in here and why? + +
+
+ <version> + + What goes in here and why? + +
+
+ <packaging> + + What goes in here and why? + +
+
+ <name> + + Name of your document. + +
+
+ <build> + + Make some documents. + +
+ <plugin(s)> + + What does this do and why? + +
+ <groupId> + + What goes in here and why? + +
+
+ <artifactId> + + What goes in here and why? + +
+
+ <execution(s)> + + What goes in here and why? + +
+ <goal(s)> + + Different types of goals and why you use them. + +
+
+ <phase> + + What does this section do? What phases can you specify. + +
+
+
+ <configuration> + + What does this section do? + +
+ <xincludeSupported> + + What does this do and why? + +
+
+ <chunkSectionDepth> + + What does this do and why? + +
+
+ <postprocess> + + What does this section do? What are possible pieces? + +
+ <copy> + + What does this section do? What are possible pieces? + +
+ <fileset> + + What does this section do? What are possible pieces? + +
+ <include> + + What does this section do? What are possible pieces? + +
+
+
+
+
+
+
+
+
+ Who, What, Where, When and Why of build.xml + + You will have noticed the build.xml file at the root directory. + This file is used to set the project parameters for the documentation, including + what type of documentation to produce and any post-processing that needs to happen. + If you want to know more about + + build.xml - need a link + + then follow the link. + +
+
+ + Troubleshooting + Sometimes things go wrong... + +
diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx new file mode 100644 index 000000000..3f2d86366 --- /dev/null +++ b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg new file mode 100644 index 000000000..58b98232d --- /dev/null +++ b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg @@ -0,0 +1,523 @@ + + + + +Creator: Quick Sequence Diagram Editor Producer: org.freehep.graphicsio.svg.SVGGraphics2D Revision: 12753 Source: Date: Monday, May 2, 2011 2:44:33 PM CDT + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/docbkx/quantum-api-1.0/common.ent b/doc/source/docbkx/quantum-api-1.0/common.ent new file mode 100644 index 000000000..19acc7e9f --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/common.ent @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + '> + + + + + + '> + + + + + + '> + + + + + + '> + + + + + + '> + + + + + + + + +
+ ]]> + + + + + + + + +

+ Full file path. +

+
+
+
+ +
+
+ + + + + + + + + + + + + + + + +

+ A collection of addresses. +

+
+
+
+ + +
+ + + +

+ An id of an address list. This is typically a name + used to identify a network. 

+
+
+
+ +
+ + + + + + + +

+ An IP address. +

+
+
+
+ + + +

+ The IP Address version can be 4 or 6. The version + attribute is optional; if it is left off, the type of + address will be determined from its address + format. If it is specified, it should + match the address format. 

+

+ The OpenStack compute API will always fill in the + version number as a convenience to the client. 

+
+
+
+ +
+ + + + + +

+ An extensible server status type allows all of the + strings defined in ServerStatus or an alias prefixed + status. +

+
+
+ +
+ + + + + + +

+ The server is ready to use. +

+
+
+
+ + + +

+ The server is in an inactive (suspended) state. +

+
+
+
+ + + +

+ The server has been deleted. +

+
+
+
+ + + +

+ The server is being resized. +

+
+
+
+ + + +

+ The server is waiting for the resize operation to be + confirmed so that the original server may be removed. +

+
+
+
+ + + +

+ The requested operation failed; the server is in an + error state. 

+
+
+
+ + + +

+ The server is being built. +

+
+
+
+ + + +

+ The server password is being changed. +

+
+
+
+ + + +

+ The server is being rebuilt. +

+
+
+
+ + + +

+ The server is going through a SOFT reboot. +

+
+
+
+ + + +

+ The server is going through a HARD reboot. +

+
+
+
+ + + +

+ The server is in an unknown state. +

+
+
+
+
+
+ + + + + + + + + + + + + + + + + + +

+ Denotes IPv4. +

+
+
+
+ + + +

+ Denotes IPv6. +

+
+
+
+
+
+ diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb new file mode 100644 index 000000000..239caec7e --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb @@ -0,0 +1,11 @@ + + + + + + + diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd new file mode 100644 index 000000000..b59836741 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd @@ -0,0 +1,83 @@ + + + + + + + + +

+ The element defines a request to share a public IP address. 

+
+ + + + + + + + + + +
+
+ + + + +

+ This type is used to represent a request to share an IP + address. +

+
+
+ + + + + + +

+ The + shared IP group + + used to + share the address. 

+
+
+
+ + + +

+ If true, the server is configured with the new address + though the address may not be enabled. +

+
+
+
+ +
+
\ No newline at end of file diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess b/doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess new file mode 100644 index 000000000..8aa2a2878 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess @@ -0,0 +1,4 @@ +DirectoryIndex api.xsd +AddType application/xml wadl +AddType application/xml xsd +AddType application/xml xslt diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/version.xsd b/doc/source/docbkx/quantum-api-1.0/xsd/version.xsd new file mode 100644 index 000000000..c89c2b2f5 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/version.xsd @@ -0,0 +1,355 @@ + + + + + + + + + + Version Types + + + +

+ This schema file defines all types related to versioning. +

+
+
+ + + + + + + + +

+ This element is returned when the version of the + resource cannot be determined. The element + provides a list of choices for the resource. +

+
+ + + + + + + + + + +
+
+ + + + + +

+ Provides a list of supported versions. +

+
+ + + + + + + + + + + + + +
+
+ + + +

+ This element provides detailed meta information + regarding the status of the current API version. + This is the XSD 1.0 compatible element definition. +

+
+
+
+ + + + +

+ This element provides detailed meta information + regarding the status of the current API + version. The description should include a pointer + to both a human readable and a machine processable + description of the API service. +

+
+ + + + + + + + + + + + + +
+
+ + + + + + + The VersionStatus type describes a service's operational status. + + + + + + + + + + This is a new service the API. Thi API + contract may be set, but the implementaiton + may not be 100% complient with it. Developers + are encouraged to begin testing aganst an + ALPHA version to provide feedback. + + + + + + + + + A status of BETA indicates that this + version is a candidate for the next major + release and may feature functionality not + available in the current + version. Developers are encouraged to test + and begin the migration processes to a + BETA version. Note that a BETA version is + undergoing testing, it has not been + officially released, and my not be stable. + + + + + + + + + The API version is stable and has been + tested. Developers are encouraged to + develop against this API version. The + current released version of the API will + always be marked as CURRENT. + + + + + + + + + A status of DEPRECATED indicates that a + newer version of the API is + available. Application developers are + discouraged from using this version and + should instead develop against the latest + current version of the API. + + + + + + + + + + + + A version choice list outlines a collection of + resources at various versions. + + + + + + + + + + + + + In version lists, every single version must + contain at least one self link. + + + + + + + + + + + + + + When used as a root element, a version choice + must contain at least one describedby link. + + + + + + + + + + + + + A version choice contains relevant information + about an available service that a user can then + use to target a specific version of the service. + + + + + + + + + + + + + + + The ID of a version choice represents the service version's unique + identifier. This ID is guaranteed to be unique only among the + service version choices outlined in the VersionChoiceList. + + + + + + + + + + A version choice's status describes the current operational state of + the given service version. The operational status is captured in a + simple type enumeration called VersionStatus. + + + + + + + + + + A version choice's updated attribute describes + the time when the version was updated. The + time should be updated anytime + anything in the + version has changed: documentation, + extensions, bug fixes. + + + + + + + + + + + + A MediaTypeList outlines a collection of valid media types for a given + service version. + + + + + + + + + + + + + + + + A MediaType describes what content types the service version understands. + + + + + + + + + + + The base of a given media type describes the + simple MIME type that then a more complicated + media type can be derived from. These types + are basic and provide no namespace or version + specific data are are only provided as a + convenience. + + + + + + + + + + The type attribute of a MediaType describes + the MIME specific identifier of the media type + in question. + + + + + + +
diff --git a/doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl b/doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl new file mode 100644 index 000000000..d8bc7fe16 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl @@ -0,0 +1,1342 @@ + + + + + + + + + + + + + + + + + .. + + + + + + XML Schema Documentation + application/xhtml+xml + http://www.w3.org/2001/XMLSchema + http://web4.w3.org/TR/2001/REC-xmlschema-2-20010502/# + + " + ' + + + + + + + + + + + + + + + + + + + + element_ + attrib_ + attgrp_ + grp_ + type_ + + + + http://yui.yahooapis.com/2.7.0/build/ + + + + + + + + + + + + + + + + + stylesheet + text/css + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <xslt:value-of select="xsd:annotation/xsd:appinfo/xsdxt:title"/> + + + <xslt:value-of select="$defaultTitle"/> + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
Loading...
+
+
+
+ + + +

+
+ +

+
+
+ + + + + + + + + + + +
+ + + + + + + + + +

Namespaces

+ + + +
+

+ Your browser does not seem to have support for + namespace nodes in XPath. If you're a Firefox + user, please consider voting to get this issue + resolved: + + https://bugzilla.mozilla.org/show_bug.cgi?id=94270 + +

+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + + + + trc.schema.controller.links[' + + ']=[ + + + + + + + + + + + + + + + + + + + + + + + + + + , + + + ]; + + + + + + trc.schema.controller.index = + + + + + + index + + + Index Schema Document + + + ; + + + + + + + trc.schema.controller.links[' + + ']=[ + + + + # + + + + + + + + See definition of + + + + + , + + + ]; + + + + + + + + { href : + + + + + + , name : + + + + + + , title : + + + + + + } + + + + + + + + +

Imports

+ + + + + + + + + +
+ + +
+
+ + + Visit + + +
+
+ +
+
+
+
+ + +

Includes

+ + + + + + + + +
+
+
+ + + Visit + + +
+
+ +
+
+
+
+ + +

Elements

+ + + + + + + + +
+ + + + + + +
+
+ + + trc.schema.sampleManager.showSample( + + + + ); + + + + + + + + + + + + + + + + + + + + +
+
+ + + + Sample +
+ +
+ +
+
+
+ + + +
+ + + + + + + Loading... + + + + + + +
+
+ + + + + + +

Complex Types

+ + + + + + +
+ + +

Simple Types

+ + + + + + +
+ + + + + + # + + + + + + + + + + + + + + + + + + +

+ +

+ + + + + +
+ extends: + + + + , + + +
+
+ +
+ restricts: + + + + , + + +
+
+
+
+ + + +
+ + + + + + + + + + SubAttributes + + + Attributes + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + SubDocumentation + + + Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Sequence + + +
+
+ + + +
+
+
+ +
+ + + +
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + <?> (Any Element) + + + + + + + + @? (Any Attribute) + + + + + +
+ restriction +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
enum values
+ + + + + + + + +
+ + + + + +
+
+ + + (id = + + ) + +
+ +
+ +
+
+
+ +
+ + + + + + + + (id = + + ) + + + (fixed) + + + + + + + + + + + + +
+ +
+ +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + < + + > + + + + + + @ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 000000000..51c63d277 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,60 @@ +.. + Copyright 2011-2013 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Welcome to Neutron's developer documentation! +============================================= + +Neutron is an OpenStack project to provide "network connectivity as a service" +between interface devices (e.g., vNICs) managed by other OpenStack services +(e.g., nova). It implements the `Neutron API`_. + +.. _`Neutron API`: http://docs.openstack.org/api/openstack-network/2.0/content/ + +This document describes Neutron for contributors of the project, and assumes +that you are already familiar with Neutron from an `end-user perspective`_. + +.. _`end-user perspective`: http://docs.openstack.org/trunk/openstack-network/admin/content/index.html + +This documentation is generated by the Sphinx toolkit and lives in the source +tree. Additional documentation on Neutron and other components of OpenStack +can be found on the `OpenStack wiki`_ and the `Neutron section of the wiki`. +The `Neutron Development wiki`_ is also a good resource for new contributors. + +.. _`OpenStack wiki`: http://wiki.openstack.org +.. _`Neutron section of the wiki`: http://wiki.openstack.org/Neutron +.. _`Neutron Development wiki`: http://wiki.openstack.org/NeutronDevelopment + +Enjoy! + +Developer Docs +============== + +.. toctree:: + :maxdepth: 1 + + devref/index + +API Extensions +============== + +Go to http://api.openstack.org for information about OpenStack Network API extensions. + +Man Pages +--------- + +.. toctree:: + + man/neutron-server diff --git a/doc/source/man/neutron-server.rst b/doc/source/man/neutron-server.rst new file mode 100644 index 000000000..ea6c4cbbb --- /dev/null +++ b/doc/source/man/neutron-server.rst @@ -0,0 +1,75 @@ +============== +neutron-server +============== + +-------------- +Neutron Server +-------------- + +:Author: openstack@lists.openstack.org +:Date: 2012-04-05 +:Copyright: OpenStack Foundation +:Version: 2012.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + neutron-server [options] + +DESCRIPTION +=========== + +neutron-server provides a webserver that exposes the Neutron API, and +passes all webservice calls to the Neutron plugin for processing. + +OPTIONS +======= + + --version show program's version number and exit + -h, --help show this help message and exit + -v, --verbose Print more verbose output + -d, --debug Print debugging output + --config-file=PATH Path to the config file to use, for example, + /etc/neutron/neutron.conf. When not specified + (the default), we generally look at the first argument + specified to be a config file, and if that is also + missing, we search standard directories for a config + file. 
(/etc/neutron/, + /usr/lib/pythonX/site-packages/neutron/) + + Logging Options: + The following configuration options are specific to logging + functionality for this program. + + --log-config=PATH If this option is specified, the logging configuration + file specified is used and overrides any other logging + options specified. Please see the Python logging + module documentation for details on logging + configuration files. + --log-date-format=FORMAT + Format string for %(asctime)s in log records. Default: + %Y-%m-%d %H:%M:%S + --use-syslog Output logs to syslog. + --log-file=PATH (Optional) Name of log file to output to. If not set, + logging will go to stdout. + --log-dir=LOG_DIR (Optional) The directory to keep log files in (will be + prepended to --logfile) + +FILES +======== + +plugins.ini file contains the plugin information +neutron.conf file contains configuration information in the form of python-gflags. + +SEE ALSO +======== + +* `OpenStack Neutron `__ + +BUGS +==== + +* Neutron is sourced in Launchpad so you can view current bugs at `OpenStack Bugs `__ + diff --git a/etc/api-paste.ini b/etc/api-paste.ini new file mode 100644 index 000000000..be8aae17f --- /dev/null +++ b/etc/api-paste.ini @@ -0,0 +1,30 @@ +[composite:neutron] +use = egg:Paste#urlmap +/: neutronversions +/v2.0: neutronapi_v2_0 + +[composite:neutronapi_v2_0] +use = call:neutron.auth:pipeline_factory +noauth = request_id catch_errors extensions neutronapiapp_v2_0 +keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 + +[filter:request_id] +paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:catch_errors] +paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory + +[filter:keystonecontext] +paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory + +[filter:extensions] +paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory + +[app:neutronversions] +paste.app_factory = neutron.api.versions:Versions.factory + +[app:neutronapiapp_v2_0] +paste.app_factory = neutron.api.v2.router:APIRouter.factory diff --git a/etc/dhcp_agent.ini b/etc/dhcp_agent.ini new file mode 100644 index 000000000..9836d3500 --- /dev/null +++ b/etc/dhcp_agent.ini @@ -0,0 +1,88 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. 
+# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +# use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +# enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +# enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +# dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +# dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/etc/fwaas_driver.ini b/etc/fwaas_driver.ini new file mode 100644 index 000000000..41f761abf --- /dev/null +++ b/etc/fwaas_driver.ini @@ -0,0 +1,3 @@ +[fwaas] +#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver +#enabled = True diff --git a/etc/init.d/neutron-server b/etc/init.d/neutron-server new file mode 100755 index 000000000..98e5da610 --- /dev/null +++ b/etc/init.d/neutron-server @@ -0,0 +1,68 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: neutron-server +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: neutron-server +# Description: Provides the Neutron networking service +### END INIT INFO + +set -e + +PIDFILE=/var/run/neutron/neutron-server.pid +LOGFILE=/var/log/neutron/neutron-server.log + +DAEMON=/usr/bin/neutron-server +DAEMON_ARGS="--log-file=$LOGFILE" +DAEMON_DIR=/var/run + +ENABLED=true + +if test -f /etc/default/neutron-server; then + . 
/etc/default/neutron-server +fi + +mkdir -p /var/run/neutron +mkdir -p /var/log/neutron + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" +export TMPDIR=/var/lib/neutron/tmp + +if [ ! -x ${DAEMON} ] ; then + exit 0 +fi + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting neutron server" "neutron-server" + start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS + log_end_msg $? + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping neutron server" "neutron-server" + start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} + log_end_msg $? + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + $0 stop + sleep 1 + $0 start + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $? + ;; + *) + log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/etc/l3_agent.ini b/etc/l3_agent.ini new file mode 100644 index 000000000..e6903988d --- /dev/null +++ b/etc/l3_agent.ini @@ -0,0 +1,79 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +# use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +# handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +# external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +# metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. 
+# send_arp_for_ha = 3 + +# seconds between re-sync routers' data if needed +# periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +# periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/etc/lbaas_agent.ini b/etc/lbaas_agent.ini new file mode 100644 index 000000000..68a2759e6 --- /dev/null +++ b/etc/lbaas_agent.ini @@ -0,0 +1,42 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output). +# debug = False + +# The LBaaS agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# periodic_interval = 10 + +# LBaas requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version. +# Multiple device drivers reflecting different service providers could be specified: +# device_driver = path.to.provider1.driver.Driver +# device_driver = path.to.provider2.driver.Driver +# Default is: +# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver + +[haproxy] +# Location to store config and state files +# loadbalancer_state_path = $state_path/lbaas + +# The user group +# user_group = nogroup + +# When delete and re-add the same vip, send this many gratuitous ARPs to flush +# the ARP cache in the Router. Set it below or equal to 0 to disable this feature. +# send_gratuitous_arp = 3 diff --git a/etc/metadata_agent.ini b/etc/metadata_agent.ini new file mode 100644 index 000000000..84442ea1a --- /dev/null +++ b/etc/metadata_agent.ini @@ -0,0 +1,59 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# The Neutron user information for accessing the Neutron API. 
+auth_url = http://localhost:5000/v2.0 +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +# nova_metadata_ip = 127.0.0.1 + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 + +# Which protocol to use for requests to Nova metadata server, http or https +# nova_metadata_protocol = http + +# Whether insecure SSL connection should be accepted for Nova metadata server +# requests +# nova_metadata_insecure = False + +# Client certificate for nova api, needed when nova api requires client +# certificates +# nova_client_cert = + +# Private key for nova client certificate +# nova_client_priv_key = + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +# metadata_proxy_shared_secret = + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server. Defaults to +# half the number of CPU cores +# metadata_workers = + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 4096 + +# URL to connect to the cache backend. +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. +# cache_url = memory://?default_ttl=5 diff --git a/etc/metering_agent.ini b/etc/metering_agent.ini new file mode 100644 index 000000000..e6ab52209 --- /dev/null +++ b/etc/metering_agent.ini @@ -0,0 +1,15 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver + +# Interval between two metering measures +# measure_interval = 30 + +# Interval between two metering reports +# report_interval = 300 + +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# use_namespaces = True diff --git a/etc/neutron.conf b/etc/neutron.conf new file mode 100644 index 000000000..31977a874 --- /dev/null +++ b/etc/neutron.conf @@ -0,0 +1,479 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +# verbose = False + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +# debug = False + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. 
+# state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server to +# bind_host = 0.0.0.0 + +# Port the bind the API server to +# bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +# core_plugin = +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering + +# Paste configuration file +# api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +# auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4h octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds). Use -1 to +# tell dnsmasq to use infinite lease times. +# dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +# allow_overlapping_ips = False +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +# Size of RPC thread pool +# rpc_thread_pool_size = 64 +# Size of RPC connection pool +# rpc_conn_pool_size = 30 +# Seconds to wait for a response from call or multicall +# rpc_response_timeout = 60 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. 
+# rpc_cast_timeout = 30 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# IP address of the RabbitMQ installation +# rabbit_host = localhost +# Password of the RabbitMQ server +# rabbit_password = guest +# Port where RabbitMQ server is running/listening +# rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +# rabbit_userid = guest +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. (boolean value) +# rabbit_ha_queues = false + +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +# default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. 
+# The actual topic names will be %s.%(default_notification_level)s +# notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. 
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (only one nova region is currently supported).
+# nova_url = http://127.0.0.1:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The UUID of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova URL
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resources allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of VIPs allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# in OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer in OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitor = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+# root_helper = sudo
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half or less of agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# setting it to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle SQL connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep HAProxy as the default lbaas driver, remove the 'default' attribute from the line below.
+# Otherwise comment out the HAProxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. +# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/etc/neutron/plugins/bigswitch/restproxy.ini b/etc/neutron/plugins/bigswitch/restproxy.ini new file mode 100644 index 000000000..256f7855b --- /dev/null +++ b/etc/neutron/plugins/bigswitch/restproxy.ini @@ -0,0 +1,114 @@ +# Config file for neutron-proxy-plugin. + +[restproxy] +# All configuration for this plugin is in section '[restproxy]' +# +# The following parameters are supported: +# servers : [,]* (Error if not set) +# server_auth : (default: no auth) +# server_ssl : True | False (default: True) +# ssl_cert_directory : (default: /etc/neutron/plugins/bigswitch/ssl) +# no_ssl_validation : True | False (default: False) +# ssl_sticky : True | False (default: True) +# sync_data : True | False (default: False) +# auto_sync_on_failure : True | False (default: True) +# consistency_interval : (default: 60 seconds) +# server_timeout : (default: 10 seconds) +# neutron_id : (default: neutron-) +# add_meta_server_route : True | False (default: True) +# thread_pool_size : (default: 4) + +# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover. +servers=localhost:8080 + +# The username and password for authenticating against the BigSwitch or Floodlight controller. +# server_auth=username:password + +# Use SSL when connecting to the BigSwitch or Floodlight controller. +# server_ssl=True + +# Directory which contains the ca_certs and host_certs to be used to validate +# controller certificates. +# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/ + +# If a certificate does not exist for a controller, trust and store the first +# certificate received for that controller and use it to validate future +# connections to that controller. +# ssl_sticky=True + +# Do not validate the controller certificates for SSL +# Warning: This will not provide protection against man-in-the-middle attacks +# no_ssl_validation=False + +# Sync data on connect +# sync_data=False + +# If neutron fails to create a resource because the backend controller +# doesn't know of a dependency, automatically trigger a full data +# synchronization to the controller. +# auto_sync_on_failure=True + +# Time between verifications that the backend controller +# database is consistent with Neutron. (0 to disable) +# consistency_interval = 60 + +# Maximum number of seconds to wait for proxy request to connect and complete. 
+# server_timeout=10 + +# User defined identifier for this Neutron deployment +# neutron_id = + +# Flag to decide if a route to the metadata server should be injected into the VM +# add_meta_server_route = True + +# Number of threads to use to handle large volumes of port creation requests +# thread_pool_size = 4 + +[nova] +# Specify the VIF_TYPE that will be controlled on the Nova compute instances +# options: ivs or ovs +# default: ovs +# vif_type = ovs + +# Overrides for vif types based on nova compute node host IDs +# Comma separated list of host IDs to fix to a specific VIF type +# The VIF type is taken from the end of the configuration item +# node_override_vif_ +# For example, the following would set the VIF type to IVS for +# host-id1 and host-id2 +# node_overrride_vif_ivs=host-id1,host-id2 + +[router] +# Specify the default router rules installed in newly created tenant routers +# Specify multiple times for multiple rules +# Format is ::: +# Optionally, a comma-separated list of nexthops may be included after +# Use an * to specify default for all tenants +# Default is any any allow for all tenants +# tenant_default_router_rule=*:any:any:permit + +# Maximum number of rules that a single router may have +# Default is 200 +# max_router_rules=200 + +[restproxyagent] + +# Specify the name of the bridge used on compute nodes +# for attachment. +# Default: br-int +# integration_bridge=br-int + +# Change the frequency of polling by the restproxy agent. +# Value is seconds +# Default: 5 +# polling_interval=5 + +# Virtual switch type on the compute node. +# Options: ovs or ivs +# Default: ovs +# virtual_switch_type = ovs + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/bigswitch/ssl/ca_certs/README b/etc/neutron/plugins/bigswitch/ssl/ca_certs/README new file mode 100644 index 000000000..e7e47a27c --- /dev/null +++ b/etc/neutron/plugins/bigswitch/ssl/ca_certs/README @@ -0,0 +1,3 @@ +Certificates in this folder will be used to +verify signatures for any controllers the plugin +connects to. diff --git a/etc/neutron/plugins/bigswitch/ssl/host_certs/README b/etc/neutron/plugins/bigswitch/ssl/host_certs/README new file mode 100644 index 000000000..8f5f5e77c --- /dev/null +++ b/etc/neutron/plugins/bigswitch/ssl/host_certs/README @@ -0,0 +1,6 @@ +Certificates in this folder must match the name +of the controller they should be used to authenticate +with a .pem extension. + +For example, the certificate for the controller +"192.168.0.1" should be named "192.168.0.1.pem". 
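For illustration only (these lines are not part of the files added above), a minimal [restproxy] section tying the options documented in restproxy.ini to the certificate layout described in the README might look like the following; the controller address, credentials, and certificate name are placeholder values:

    [restproxy]
    servers = 192.168.0.1:8080
    server_auth = admin:changeme
    server_ssl = True
    ssl_cert_directory = /etc/neutron/plugins/bigswitch/ssl
    # with ssl_sticky = True, the first certificate presented by the controller
    # would be stored and reused as
    # /etc/neutron/plugins/bigswitch/ssl/host_certs/192.168.0.1.pem
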
diff --git a/etc/neutron/plugins/brocade/brocade.ini b/etc/neutron/plugins/brocade/brocade.ini new file mode 100644 index 000000000..916e9e5d2 --- /dev/null +++ b/etc/neutron/plugins/brocade/brocade.ini @@ -0,0 +1,29 @@ +[switch] +# username = The SSH username to use +# password = The SSH password to use +# address = The address of the host to SSH to +# ostype = Should be NOS, but is unused otherwise +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS + +[physical_interface] +# physical_interface = The network interface to use when creating a port +# +# Example: +# physical_interface = physnet1 + +[vlans] +# network_vlan_ranges = :nnnn:mmmm +# +# Example: +# network_vlan_ranges = physnet1:1000:2999 + +[linux_bridge] +# physical_interface_mappings = : +# +# Example: +# physical_interface_mappings = physnet1:em1 diff --git a/etc/neutron/plugins/cisco/cisco_plugins.ini b/etc/neutron/plugins/cisco/cisco_plugins.ini new file mode 100644 index 000000000..13d81f182 --- /dev/null +++ b/etc/neutron/plugins/cisco/cisco_plugins.ini @@ -0,0 +1,138 @@ +[cisco_plugins] + +# (StrOpt) Period-separated module path to the plugin class to use for +# the Cisco Nexus switches. +# +# nexus_plugin = neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin + +# (StrOpt) Period-separated module path to the plugin class to use for +# the virtual switches on compute nodes. +# +# vswitch_plugin = neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2 + + +[cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# provider VLAN interface. For example, if an interface is being created +# for provider VLAN 3003 it will be named 'p-3003' using the default prefix. +# +# provider_vlan_name_prefix = p- +# Example: provider_vlan_name_prefix = PV- + +# (BoolOpt) A flag indicating whether Openstack networking should manage the +# creation and removal of VLAN interfaces for provider networks on the Nexus +# switches. If the flag is set to False then Openstack will not create or +# remove VLAN interfaces for provider networks, and the administrator needs +# to manage these interfaces manually or by external orchestration. +# +# provider_vlan_auto_create = True + +# (BoolOpt) A flag indicating whether Openstack networking should manage +# the adding and removing of provider VLANs from trunk ports on the Nexus +# switches. If the flag is set to False then Openstack will not add or +# remove provider VLANs from trunk ports, and the administrator needs to +# manage these operations manually or by external orchestration. +# +# provider_vlan_auto_trunk = True + +# (StrOpt) Period-separated module path to the model class to use for +# the Cisco neutron plugin. +# +# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2 + +# (StrOpt) Period-separated module path to the driver class to use for +# the Cisco Nexus switches. +# +# If no value is configured, a fake driver will be used. 
+# nexus_driver = neutron.plugins.cisco.test.nexus.fake_nexus_driver.CiscoNEXUSFakeDriver +# With real hardware, use the CiscoNEXUSDriver class: +# nexus_driver = neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver + +# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches. +# Note: This feature is not supported on all models/versions of Cisco +# Nexus switches. To use this feature, all of the Nexus switches in the +# deployment must support it. +# nexus_l3_enable = False + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. +# svi_round_robin = False + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# Cisco Nexus Switch Format. +# [NEXUS_SWITCH:] +# = (1) +# ssh_port= (2) +# username= (3) +# password= (4) +# +# (1) For each host connected to a port on the switch, specify the hostname +# and the Nexus physical port (interface) it is connected to. +# (2) The TCP port for connecting via SSH to manage the switch. This is +# port number 22 unless the switch has been configured otherwise. +# (3) The username for logging into the switch to manage it. +# (4) The password for logging into the switch to manage it. +# +# Example: +# [NEXUS_SWITCH:1.1.1.1] +# compute1=1/1 +# compute2=1/2 +# ssh_port=22 +# username=admin +# password=mySecretPassword + +# +# N1KV Format. +# [N1KV:] +# username= +# password= +# +# Example: +# [N1KV:2.2.2.2] +# username=admin +# password=mySecretPassword + +[cisco_n1k] + +# (StrOpt) Specify the name of the integration bridge to which the VIFs are +# attached. +# +# integration_bridge = br-int + +# (StrOpt) Name of the policy profile to be associated with a port when no +# policy profile is specified during port creates. +# +# default_policy_profile = +# Example: default_policy_profile = service_profile + +# (StrOpt) Name of the policy profile to be associated with a port owned by +# network node (dhcp, router). +# +# network_node_policy_profile = +# Example: network_node_policy_profile = dhcp_pp + +# (StrOpt) Name of the network profile to be associated with a network when no +# network profile is specified during network creates. Admin should pre-create +# a network profile with this name. +# +# default_network_profile = +# Example: default_network_profile = network_pool + +# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in +# policy profiles. +# +# poll_duration = +# Example: poll_duration = 180 + +# (IntOpt) Number of threads to use to make HTTP requests to the VSM. +# +# http_pool_size = 4 diff --git a/etc/neutron/plugins/cisco/cisco_vpn_agent.ini b/etc/neutron/plugins/cisco/cisco_vpn_agent.ini new file mode 100644 index 000000000..d15069b7c --- /dev/null +++ b/etc/neutron/plugins/cisco/cisco_vpn_agent.ini @@ -0,0 +1,22 @@ +[cisco_csr_ipsec] +# Status check interval in seconds, for VPNaaS IPSec connections used on CSR +# status_check_interval = 60 + +# Cisco CSR management port information for REST access used by VPNaaS +# TODO(pcm): Remove once CSR is integrated in as a Neutron router. 
+# +# Format is: +# [cisco_csr_rest:] +# rest_mgmt = +# tunnel_ip = +# username = +# password = +# timeout = +# +# where: +# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR) +# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel +# mgmt port IP -- IP address of CSR for REST API access (not console port) +# user ---------- Username for REST management port access to Cisco CSR +# password ------ Password for REST management port access to Cisco CSR +# timeout ------- REST request timeout to Cisco CSR (optional) diff --git a/etc/neutron/plugins/embrane/heleos_conf.ini b/etc/neutron/plugins/embrane/heleos_conf.ini new file mode 100644 index 000000000..0ca9b46f8 --- /dev/null +++ b/etc/neutron/plugins/embrane/heleos_conf.ini @@ -0,0 +1,41 @@ +[heleos] +#configure the ESM management address +#in the first version of this plugin, only one ESM can be specified +#Example: +#esm_mgmt= + +#configure admin username and password +#admin_username= +#admin_password= + +#router image id +#Example: +#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0 + +#mgmt shared security zone id +#defines the shared management security zone. Each tenant can have a private one configured through the ESM +#Example: +#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a + +#in-band shared security zone id +#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM +#Example: +#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc + +#oob-band shared security zone id +#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM +#Example: +#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871 + +#dummy security zone id +#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces +#Example: +#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08 + +#resource pool id +#define the shared resource pool. Each tenant can have a private one configured through the ESM +#Example +#resource_pool_id= + +#define if the requests have to be executed asynchronously by the plugin or not +#async_requests= diff --git a/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini new file mode 100644 index 000000000..5eeec5706 --- /dev/null +++ b/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini @@ -0,0 +1,63 @@ +[hyperv] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below or to 'flat'. +# Set to 'none' to disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre and local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (ListOpt) Comma separated list of : +# where the physical networks can be expressed with wildcards, +# e.g.: ."*:external". 
+# The referred external virtual switches need to be already present on +# the Hyper-V server. +# If a given physical network name will not match any value in the list +# the plugin will look for a virtual switch with the same name. +# +# physical_network_vswitch_mappings = *:external +# Example: physical_network_vswitch_mappings = net1:external1,net2:external2 + +# (StrOpt) Private virtual switch name used for local networking. +# +# local_network_vswitch = private +# Example: local_network_vswitch = custom_vswitch + +# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's +# metric APIs. Collected data can by retrieved by other apps and services, +# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above. +# +# enable_metrics_collection = False + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# Neutron server: +# +# [HYPERV] +# tenant_network_type = vlan +# network_vlan_ranges = default:2000:3999 +# +# Agent running on Hyper-V node: +# +# [AGENT] +# polling_interval = 2 +# physical_network_vswitch_mappings = *:external +# local_network_vswitch = private diff --git a/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini new file mode 100644 index 000000000..0fab50706 --- /dev/null +++ b/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini @@ -0,0 +1,50 @@ +[sdnve] +# (ListOpt) The IP address of one (or more) SDN-VE controllers +# Default value is: controller_ips = 127.0.0.1 +# Example: controller_ips = 127.0.0.1,127.0.0.2 +# (StrOpt) The integration bridge for OF based implementation +# The default value for integration_bridge is None +# Example: integration_bridge = br-int +# (ListOpt) The interface mapping connecting the integration +# bridge to external network as a list of physical network names and +# interfaces: : +# Example: interface_mappings = default:eth2 +# (BoolOpt) Used to reset the integration bridge, if exists +# The default value for reset_bridge is True +# Example: reset_bridge = False +# (BoolOpt) Used to set the OVS controller as out-of-band +# The default value for out_of_band is True +# Example: out_of_band = False +# +# (BoolOpt) The fake controller for testing purposes +# Default value is: use_fake_controller = False +# (StrOpt) The port number for use with controller +# The default value for the port is 8443 +# Example: port = 8443 +# (StrOpt) The userid for use with controller +# The default value for the userid is admin +# Example: userid = sdnve_user +# (StrOpt) The password for use with controller +# The default value for the password is admin +# Example: password = sdnve_password +# +# (StrOpt) The default type of tenants (and associated resources) +# Available choices are: OVERLAY or OF +# The default value for tenant type is OVERLAY +# Example: default_tenant_type = OVERLAY +# (StrOpt) The string in tenant description that indicates +# Default value for OF tenants: of_signature = SDNVE-OF +# (StrOpt) The string in tenant description that indicates +# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY + +[sdnve_agent] +# (IntOpt) Agent's polling interval in seconds +# polling_interval = 2 +# (StrOpt) What to use for root helper +# The default value: root_helper = 'sudo' +# (BoolOpt) Whether to use rpc or not +# The default value: rpc = True + +[securitygroup] +# The security group is not supported: +# firewall_driver = 
neutron.agent.firewall.NoopFirewallDriver diff --git a/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini new file mode 100644 index 000000000..94fe98036 --- /dev/null +++ b/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini @@ -0,0 +1,78 @@ +[vlans] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST change this to +# 'vlan' and configure network_vlan_ranges below in order for tenant +# networks to provide connectivity between hosts. Set to 'none' to +# disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +[linux_bridge] +# (ListOpt) Comma-separated list of +# : tuples mapping physical +# network names to the agent's node-specific physical network +# interfaces to be used for flat and VLAN networks. All physical +# networks listed in network_vlan_ranges on the server should have +# mappings to appropriate interfaces on each agent. +# +# physical_interface_mappings = +# Example: physical_interface_mappings = physnet1:eth1 + +[vxlan] +# (BoolOpt) enable VXLAN on the agent +# VXLAN support can be enabled when agent is managed by ml2 plugin using +# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin. +# enable_vxlan = False +# +# (IntOpt) use specific TTL for vxlan interface protocol packets +# ttl = +# +# (IntOpt) use specific TOS for vxlan interface protocol packets +# tos = +# +# (StrOpt) multicast group to use for broadcast emulation. +# This group must be the same on all the agents. +# vxlan_group = 224.0.0.1 +# +# (StrOpt) Local IP address to use for VXLAN endpoints (required) +# local_ip = +# +# (BoolOpt) Flag to enable l2population extension. This option should be used +# in conjunction with ml2 plugin l2population mechanism driver (in that case, +# both linuxbridge and l2population mechanism drivers should be loaded). +# It enables plugin to populate VXLAN forwarding table, in order to limit +# the use of broadcast emulation (multicast will be turned off if kernel and +# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10) +# l2_population = False + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (BoolOpt) Enable server RPC compatibility with old (pre-havana) +# agents. +# +# rpc_support_old_agents = False +# Example: rpc_support_old_agents = True + +[securitygroup] +# Firewall driver for realizing neutron security group function +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. 
+# enable_security_group = True diff --git a/etc/neutron/plugins/metaplugin/metaplugin.ini b/etc/neutron/plugins/metaplugin/metaplugin.ini new file mode 100644 index 000000000..2b9bfa5ea --- /dev/null +++ b/etc/neutron/plugins/metaplugin/metaplugin.ini @@ -0,0 +1,31 @@ +# Config file for Metaplugin + +[meta] +# Comma separated list of flavor:neutron_plugin for plugins to load. +# Extension method is searched in the list order and the first one is used. +plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2' + +# Comma separated list of flavor:neutron_plugin for L3 service plugins +# to load. +# This is intended for specifying L2 plugins which support L3 functions. +# If you use a router service plugin, set this blank. +l3_plugin_list = + +# Default flavor to use, when flavor:network is not specified at network +# creation. +default_flavor = 'nvp' + +# Default L3 flavor to use, when flavor:router is not specified at router +# creation. +# Ignored if 'l3_plugin_list' is blank. +default_l3_flavor = + +# Comma separated list of supported extension aliases. +supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler' + +# Comma separated list of method:flavor to select specific plugin for a method. +# This has priority over method search order based on 'plugin_list'. +extension_map = 'get_port_stats:nvp' + +# Specifies flavor for plugin to handle 'q-plugin' RPC requests. +rpc_flavor = 'ml2' diff --git a/etc/neutron/plugins/midonet/midonet.ini b/etc/neutron/plugins/midonet/midonet.ini new file mode 100644 index 000000000..f2e940529 --- /dev/null +++ b/etc/neutron/plugins/midonet/midonet.ini @@ -0,0 +1,19 @@ + +[midonet] +# MidoNet API server URI +# midonet_uri = http://localhost:8080/midonet-api + +# MidoNet admin username +# username = admin + +# MidoNet admin password +# password = passw0rd + +# ID of the project that MidoNet admin user belongs to +# project_id = 77777777-7777-7777-7777-777777777777 + +# Virtual provider router ID +# provider_router_id = 00112233-0011-0011-0011-001122334455 + +# Path to midonet host uuid file +# midonet_host_uuid_path = /etc/midolman/host_uuid.properties diff --git a/etc/neutron/plugins/ml2/ml2_conf.ini b/etc/neutron/plugins/ml2/ml2_conf.ini new file mode 100644 index 000000000..54722df91 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf.ini @@ -0,0 +1,62 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. 
+# +# flat_networks = +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * + +[ml2_type_vlan] +# (ListOpt) List of [::] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +# tunnel_id_ranges = + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of : tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +# vxlan_group = +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/ml2/ml2_conf_arista.ini b/etc/neutron/plugins/ml2/ml2_conf_arista.ini new file mode 100644 index 000000000..a4cfee0cd --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_arista.ini @@ -0,0 +1,45 @@ +# Defines configuration options specific for Arista ML2 Mechanism driver + +[ml2_arista] +# (StrOpt) EOS IP address. This is required field. If not set, all +# communications to Arista EOS will fail +# +# eapi_host = +# Example: eapi_host = 192.168.0.1 +# +# (StrOpt) EOS command API username. This is required field. +# if not set, all communications to Arista EOS will fail. +# +# eapi_username = +# Example: arista_eapi_username = admin +# +# (StrOpt) EOS command API password. This is required field. +# if not set, all communications to Arista EOS will fail. +# +# eapi_password = +# Example: eapi_password = my_password +# +# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs +# ("node1.domain.com") or as short names ("node1"). This is +# optional. If not set, a value of "True" is assumed. +# +# use_fqdn = +# Example: use_fqdn = True +# +# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. +# This field defines how often the synchronization is performed. +# This is an optional field. If not set, a value of 180 seconds +# is assumed. +# +# sync_interval = +# Example: sync_interval = 60 +# +# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller. +# This is useful when multiple OpenStack/Neutron controllers are +# managing the same Arista HW clusters. Note that this name must +# match with the region name registered (or known) to keystone +# service. Authentication with Keysotne is performed by EOS. +# This is optional. If not set, a value of "RegionOne" is assumed. 
+# +# region_name = +# Example: region_name = RegionOne diff --git a/etc/neutron/plugins/ml2/ml2_conf_brocade.ini b/etc/neutron/plugins/ml2/ml2_conf_brocade.ini new file mode 100644 index 000000000..66987e991 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_brocade.ini @@ -0,0 +1,13 @@ +[ml2_brocade] +# username = +# password = +# address = +# ostype = NOS +# physical_networks = physnet1,physnet2 +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS +# physical_networks = physnet1,physnet2 diff --git a/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini new file mode 100644 index 000000000..95f963f83 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini @@ -0,0 +1,94 @@ +[ml2_cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. +# svi_round_robin = False + +# +# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch. +# This string value must be present in the ml2_conf.ini network_vlan_ranges +# variable. +# +# managed_physical_network = +# Example: managed_physical_network = physnet1 + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# Cisco Nexus Switch Format. +# [ml2_mech_cisco_nexus:] +# = (1) +# ssh_port= (2) +# username= (3) +# password= (4) +# +# (1) For each host connected to a port on the switch, specify the hostname +# and the Nexus physical port (interface) it is connected to. +# Valid intf_type's are 'ethernet' and 'port-channel'. +# The default setting for is 'ethernet' and need not be +# added to this setting. +# (2) The TCP port for connecting via SSH to manage the switch. This is +# port number 22 unless the switch has been configured otherwise. +# (3) The username for logging into the switch to manage it. +# (4) The password for logging into the switch to manage it. +# +# Example: +# [ml2_mech_cisco_nexus:1.1.1.1] +# compute1=1/1 +# compute2=ethernet:1/2 +# compute3=port-channel:1 +# ssh_port=22 +# username=admin +# password=mySecretPassword + +[ml2_cisco_apic] + +# Hostname for the APIC controller +# apic_host=1.1.1.1 + +# Username for the APIC controller +# apic_username=user + +# Password for the APIC controller +# apic_password=password + +# Port for the APIC Controller +# apic_port=80 + +# Names for APIC objects used by Neutron +# Note: When deploying multiple clouds against one APIC, +# these names must be unique between the clouds. +# apic_vmm_domain=openstack +# apic_vlan_ns_name=openstack_ns +# apic_node_profile=openstack_profile +# apic_entity_profile=openstack_entity +# apic_function_profile=openstack_function + +# The following flag will cause all the node profiles on the APIC to +# be cleared when neutron-server starts. This is typically used only +# for test environments that require clean-slate startup conditions. +# apic_clear_node_profiles=False + +# Specify your network topology. +# This section indicates how your compute nodes are connected to the fabric's +# switches and ports. The format is as follows: +# +# [switch:] +# ,= +# +# You can have multiple sections, one for each switch in your fabric that is +# participating in Openstack. e.g. 
+# +# [switch:17] +# ubuntu,ubuntu1=1/10 +# ubuntu2,ubuntu3=1/11 +# +# [switch:18] +# ubuntu5,ubuntu6=1/1 +# ubuntu7,ubuntu8=1/2 diff --git a/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini new file mode 100644 index 000000000..6ee4a4e00 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini @@ -0,0 +1,52 @@ +# Defines Configuration options for FSL SDN OS Mechanism Driver +# Cloud Resource Discovery (CRD) authorization credentials +[ml2_fslsdn] +#(StrOpt) User name for authentication to CRD. +# e.g.: user12 +# +# crd_user_name = + +#(StrOpt) Password for authentication to CRD. +# e.g.: secret +# +# crd_password = + +#(StrOpt) Tenant name for CRD service. +# e.g.: service +# +# crd_tenant_name = + +#(StrOpt) CRD auth URL. +# e.g.: http://127.0.0.1:5000/v2.0/ +# +# crd_auth_url = + +#(StrOpt) URL for connecting to CRD Service. +# e.g.: http://127.0.0.1:9797 +# +# crd_url= + +#(IntOpt) Timeout value for connecting to CRD service +# in seconds, e.g.: 30 +# +# crd_url_timeout= + +#(StrOpt) Region name for connecting to CRD in +# admin context, e.g.: RegionOne +# +# crd_region_name= + +#(BoolOpt)If set, ignore any SSL validation issues (boolean value) +# e.g.: False +# +# crd_api_insecure= + +#(StrOpt)Authorization strategy for connecting to CRD in admin +# context, e.g.: keystone +# +# crd_auth_strategy= + +#(StrOpt)Location of CA certificates file to use for CRD client +# requests. +# +# crd_ca_certificates_file= diff --git a/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini b/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini new file mode 100644 index 000000000..01b0797cf --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini @@ -0,0 +1,6 @@ +[eswitch] +# (StrOpt) Type of Network Interface to allocate for VM: +# mlnx_direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct +# (BoolOpt) Enable server compatibility with old nova +# apply_profile_patch = False diff --git a/etc/neutron/plugins/ml2/ml2_conf_ncs.ini b/etc/neutron/plugins/ml2/ml2_conf_ncs.ini new file mode 100644 index 000000000..dbbfcbd28 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_ncs.ini @@ -0,0 +1,28 @@ +# Defines configuration options specific to the Tail-f NCS Mechanism Driver + +[ml2_ncs] +# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack +# subtree. +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://ncs/api/running/services/openstack + +# (StrOpt) Username for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = +# Example: timeout = 15 + diff --git a/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/etc/neutron/plugins/ml2/ml2_conf_odl.ini new file mode 100644 index 000000000..9e88c1bbf --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_odl.ini @@ -0,0 +1,30 @@ +# Configuration for the OpenDaylight MechanismDriver + +[ml2_odl] +# (StrOpt) OpenDaylight REST URL +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron + +# (StrOpt) Username for HTTP basic authentication to ODL. 
+# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to ODL. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = 10 +# Example: timeout = 15 + +# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. +# This is an optional parameter, default value is 30 minutes. +# +# session_timeout = 30 +# Example: session_timeout = 60 diff --git a/etc/neutron/plugins/ml2/ml2_conf_ofa.ini b/etc/neutron/plugins/ml2/ml2_conf_ofa.ini new file mode 100644 index 000000000..4a94b9870 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_ofa.ini @@ -0,0 +1,13 @@ +# Defines configuration options specific to the OpenFlow Agent Mechanism Driver + +[ovs] +# Please refer to configuration options to the OpenvSwitch + +[agent] +# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath. +# This is an optional parameter, default value is 60 seconds. +# +# get_datapath_retry_times = +# Example: get_datapath_retry_times = 30 + +# Please refer to configuration options to the OpenvSwitch else the above. diff --git a/etc/neutron/plugins/mlnx/mlnx_conf.ini b/etc/neutron/plugins/mlnx/mlnx_conf.ini new file mode 100644 index 000000000..b12251116 --- /dev/null +++ b/etc/neutron/plugins/mlnx/mlnx_conf.ini @@ -0,0 +1,79 @@ +[mlnx] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value is 'vlan' You MUST configure network_vlan_ranges below +# in order for tenant networks to provide connectivity between hosts. +# Set to 'none' to disable creation of tenant networks. +# +# tenant_network_type = vlan +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = default:1:100 + +# (ListOpt) Comma-separated list of +# : tuples mapping physical +# network names to physical network types. All physical +# networks listed in network_vlan_ranges should have +# mappings to appropriate physical network type. +# Type of the physical network can be either eth (Ethernet) or +# ib (InfiniBand). If empty, physical network eth type is assumed. +# +# physical_network_type_mappings = +# Example: physical_network_type_mappings = default:eth + +# (StrOpt) Type of the physical network, can be either 'eth' or 'ib' +# The default value is 'eth' +# physical_network_type = eth + +[eswitch] +# (ListOpt) Comma-separated list of +# : tuples mapping physical +# network names to the agent's node-specific physical network +# interfaces to be used for flat and VLAN networks. All physical +# networks listed in network_vlan_ranges on the server should have +# mappings to appropriate interfaces on each agent. 
+# +# physical_interface_mappings = +# Example: physical_interface_mappings = default:eth2 + +# (StrOpt) Type of Network Interface to allocate for VM: +# direct or hosdev according to libvirt terminology +# vnic_type = mlnx_direct + +# (StrOpt) Eswitch daemon end point connection url +# daemon_endpoint = 'tcp://127.0.0.1:60001' + +# The number of milliseconds the agent will wait for +# response on request to daemon +# request_timeout = 3000 + +# The number of retries the agent will send request +# to daemon before giving up +# retries = 3 + +# The backoff rate multiplier for waiting period between retries +# on request to daemon, i.e. value of 2 will double +# the request timeout each retry +# backoff_rate = 2 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (BoolOpt) Enable server RPC compatibility with old (pre-havana) +# agents. +# +# rpc_support_old_agents = False + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/nec/nec.ini b/etc/neutron/plugins/nec/nec.ini new file mode 100644 index 000000000..aa4171da7 --- /dev/null +++ b/etc/neutron/plugins/nec/nec.ini @@ -0,0 +1,60 @@ +# Sample Configurations + +[ovs] +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch port". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# integration_bridge = br-int + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +[securitygroup] +# Firewall driver for realizing neutron security group function +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +[ofc] +# Specify OpenFlow Controller Host, Port and Driver to connect. +# host = 127.0.0.1 +# port = 8888 + +# Base URL of OpenFlow Controller REST API. +# It is prepended to a path of each API request. +# path_prefix = + +# Drivers are in neutron/plugins/nec/drivers/ . +# driver = trema + +# PacketFilter is available when it's enabled in this configuration +# and supported by the driver. +# enable_packet_filter = true + +# Use SSL to connect +# use_ssl = false + +# Key file +# key_file = + +# Certificate file +# cert_file = + +# Disable SSL certificate verification +# insecure_ssl = false + +# Maximum attempts per OFC API request. NEC plugin retries +# API request to OFC when OFC returns ServiceUnavailable (503). +# The value must be greater than 0. +# api_max_attempts = 3 + +[provider] +# Default router provider to use. +# default_router_provider = l3-agent +# List of enabled router providers. 
+# router_providers = l3-agent,openflow diff --git a/etc/neutron/plugins/nuage/nuage_plugin.ini b/etc/neutron/plugins/nuage/nuage_plugin.ini new file mode 100644 index 000000000..994d1206c --- /dev/null +++ b/etc/neutron/plugins/nuage/nuage_plugin.ini @@ -0,0 +1,10 @@ +# Please fill in the correct data for all the keys below and uncomment key-value pairs +[restproxy] +#default_net_partition_name = +#auth_resource = /auth +#server = ip:port +#organization = org +#serverauth = uname:pass +#serverssl = True +#base_uri = /base + diff --git a/etc/neutron/plugins/oneconvergence/nvsdplugin.ini b/etc/neutron/plugins/oneconvergence/nvsdplugin.ini new file mode 100644 index 000000000..a1c05d971 --- /dev/null +++ b/etc/neutron/plugins/oneconvergence/nvsdplugin.ini @@ -0,0 +1,35 @@ +[nvsd] +# Configure the NVSD controller. The plugin proxies the api calls using +# to NVSD controller which implements the required functionality. + +# IP address of NVSD controller api server +# nvsd_ip = + +# Port number of NVSD controller api server +# nvsd_port = 8082 + +# Authentication credentials to access the api server +# nvsd_user = +# nvsd_passwd = + +# API request timeout in seconds +# request_timeout = + +# Maximum number of retry attempts to login to the NVSD controller +# Specify 0 to retry until success (default) +# nvsd_retries = 0 + +[securitygroup] +# Specify firewall_driver option, if neutron security groups are disabled, +# then NoopFirewallDriver otherwise OVSHybridIptablesFirewallDriver. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +[agent] +# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf + +[database] +# connection = mysql://root:@127.0.0.1/?charset=utf8 diff --git a/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini new file mode 100644 index 000000000..4beee58fa --- /dev/null +++ b/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini @@ -0,0 +1,179 @@ +[ovs] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below or change this to +# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for +# tenant networks to provide connectivity between hosts. Set to 'none' +# to disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = gre +# Example: tenant_network_type = vxlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre, vxlan and local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +# (BoolOpt) Set to True in the server and the agents to enable support +# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and +# GRE or VXLAN tunneling. +# +# WARNING: This option will be deprecated in the Icehouse release, at which +# point setting tunnel_type below will be required to enable +# tunneling. 
+# +# enable_tunneling = False + +# (StrOpt) The type of tunnel network, if any, supported by the plugin. If +# this is set, it will cause tunneling to be enabled. If this is not set and +# the option enable_tunneling is set, this will default to 'gre'. +# +# tunnel_type = +# Example: tunnel_type = gre +# Example: tunnel_type = vxlan + +# (ListOpt) Comma-separated list of : tuples +# enumerating ranges of GRE or VXLAN tunnel IDs that are available for +# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'. +# +# tunnel_id_ranges = +# Example: tunnel_id_ranges = 1:1000 + +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch bay". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# +# integration_bridge = br-int + +# Only used for the agent if tunnel_id_ranges (above) is not empty for +# the server. In most cases, the default value should be fine. +# +# tunnel_bridge = br-tun + +# Peer patch port in integration bridge for tunnel bridge +# int_peer_patch_port = patch-tun + +# Peer patch port in tunnel bridge for integration bridge +# tun_peer_patch_port = patch-int + +# Uncomment this line for the agent if tunnel_id_ranges (above) is not +# empty for the server. Set local-ip to be the local IP address of +# this hypervisor. +# +# local_ip = + +# (ListOpt) Comma-separated list of : tuples +# mapping physical network names to the agent's node-specific OVS +# bridge names to be used for flat and VLAN networks. The length of +# bridge names should be no more than 11. Each bridge must +# exist, and should have a physical network interface configured as a +# port. All physical networks listed in network_vlan_ranges on the +# server should have mappings to appropriate bridges on each agent. +# +# bridge_mappings = +# Example: bridge_mappings = physnet1:br-eth1 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# Minimize polling by monitoring ovsdb for interface changes +# minimize_polling = True + +# When minimize_polling = True, the number of seconds to wait before +# respawning the ovsdb monitor after losing communication with it +# ovsdb_monitor_respawn_interval = 30 + +# (ListOpt) The types of tenant network tunnels supported by the agent. +# Setting this will enable tunneling support in the agent. This can be set to +# either 'gre' or 'vxlan'. If this is unset, it will default to [] and +# disable tunneling support in the agent. When running the agent with the OVS +# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section. +# When running the agent with ML2, you can specify as many values here as +# your compute hosts supports. +# +# tunnel_types = +# Example: tunnel_types = gre +# Example: tunnel_types = vxlan +# Example: tunnel_types = vxlan, gre + +# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By +# default, this will make use of the Open vSwitch default value of '4789' if +# not specified. +# +# vxlan_udp_port = +# Example: vxlan_udp_port = 8472 + +# (IntOpt) This is the MTU size of veth interfaces. +# Do not change unless you have a good reason to. +# The default MTU size of veth interfaces is 1500. +# veth_mtu = +# Example: veth_mtu = 1504 + +# (BoolOpt) Flag to enable l2-population extension. This option should only be +# used in conjunction with ml2 plugin and l2population mechanism driver. 
It'll +# enable plugin to populate remote ports macs and IPs (using fdb_add/remove +# RPC calbbacks instead of tunnel_sync/update) on OVS agents in order to +# optimize tunnel management. +# +# l2_population = False + +# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2 +# population ML2 MechanismDriver. +# +# arp_responder = False + +# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet +# carrying GRE/VXLAN tunnel. The default value is True. +# +# dont_fragment = True + +[securitygroup] +# Firewall driver for realizing neutron security group function. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# 1. With VLANs on eth1. +# [ovs] +# network_vlan_ranges = default:2000:3999 +# tunnel_id_ranges = +# integration_bridge = br-int +# bridge_mappings = default:br-eth1 +# +# 2. With GRE tunneling. +# [ovs] +# network_vlan_ranges = +# tunnel_id_ranges = 1:1000 +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 +# +# 3. With VXLAN tunneling. +# [ovs] +# network_vlan_ranges = +# tenant_network_type = vxlan +# tunnel_type = vxlan +# tunnel_id_ranges = 1:1000 +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 +# [agent] +# tunnel_types = vxlan diff --git a/etc/neutron/plugins/plumgrid/plumgrid.ini b/etc/neutron/plugins/plumgrid/plumgrid.ini new file mode 100644 index 000000000..bfe8062ae --- /dev/null +++ b/etc/neutron/plugins/plumgrid/plumgrid.ini @@ -0,0 +1,14 @@ +# Config file for Neutron PLUMgrid Plugin + +[plumgriddirector] +# This line should be pointing to the PLUMgrid Director, +# for the PLUMgrid platform. +# director_server= +# director_server_port= +# Authentification parameters for the Director. +# These are the admin credentials to manage and control +# the PLUMgrid Director server. +# username= +# password= +# servertimeout=5 +# driver= diff --git a/etc/neutron/plugins/ryu/ryu.ini b/etc/neutron/plugins/ryu/ryu.ini new file mode 100644 index 000000000..9d9cfa258 --- /dev/null +++ b/etc/neutron/plugins/ryu/ryu.ini @@ -0,0 +1,44 @@ +[ovs] +# integration_bridge = br-int + +# openflow_rest_api = : +# openflow_rest_api = 127.0.0.1:8080 + +# tunnel key range: 0 < tunnel_key_min < tunnel_key_max +# VLAN: 12bits, GRE, VXLAN: 24bits +# tunnel_key_min = 1 +# tunnel_key_max = 0xffffff + +# tunnel_ip = +# tunnel_interface = interface for tunneling +# when tunnel_ip is NOT specified, ip address is read +# from this interface +# tunnel_ip = +# tunnel_interface = +tunnel_interface = eth0 + +# ovsdb_port = port number on which ovsdb is listening +# ryu-agent uses this parameter to setup ovsdb. +# ovs-vsctl set-manager ptcp: +# See set-manager section of man ovs-vsctl for details. +# currently ptcp is only supported. 
+# ovsdb_ip =
+# ovsdb_interface = interface for ovsdb
+#                   when ovsdb_ip is NOT specified, the IP address is read
+#                   from this interface
+# ovsdb_port = 6634
+# ovsdb_ip =
+# ovsdb_interface =
+ovsdb_interface = eth0
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini
new file mode 100644
index 000000000..6ce36c04b
--- /dev/null
+++ b/etc/neutron/plugins/vmware/nsx.ini
@@ -0,0 +1,202 @@
+[DEFAULT]
+# User name for NSX controller
+# nsx_user = admin
+
+# Password for NSX controller
+# nsx_password = admin
+
+# Total time limit for a cluster request
+# (including retries across different controllers)
+# req_timeout = 30
+
+# Time before aborting a request on an unresponsive controller
+# http_timeout = 30
+
+# Maximum number of times a particular request should be retried
+# retries = 2
+
+# Maximum number of times a redirect response should be followed
+# redirects = 2
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
+# is omitted, 443 is assumed. This option MUST be specified, e.g.:
+# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80
+
+# UUID of the pre-existing default NSX Transport zone to be used for creating
+# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
+# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
+
+# (Optional) UUID for the default l3 gateway service to use with this cluster.
+# To be specified if planning to use logical routers with external gateways.
+# default_l3_gw_service_uuid =
+
+# (Optional) UUID for the default l2 gateway service to use with this cluster.
+# To be specified to provide tenants with a predefined gateway for connecting
+# their networks.
+# default_l2_gw_service_uuid =
+
+# (Optional) UUID for the default service cluster. A service cluster is
+# introduced to represent a group of gateways and it is needed in order to use
+# Logical Services like dhcp and metadata in the logical space. NOTE: If
+# agent_mode is set to 'agentless' this config parameter *MUST BE* set to a
+# valid pre-existing service cluster uuid.
+# default_service_cluster_uuid =
+
+# Name of the default interface to be used on network gateways.
+# This value will be used for any device associated with a network gateway
+# for which an interface name was not specified.
+# default_interface_name = breth0
+
+[quotas]
+# Number of network gateways allowed per tenant, -1 means unlimited
+# quota_network_gateway = 5
+
+[vcns]
+# URL for VCNS manager
+# manager_uri = https://management_ip
+
+# User name for VCNS manager
+# user = admin
+
+# Password for VCNS manager
+# password = default
+
+# (Optional) Datacenter ID for Edge deployment
+# datacenter_moid =
+
+# (Optional) Deployment Container ID for NSX Edge deployment
+# If not specified, either a default global container will be used, or
+# the resource pool and datastore specified below will be used
+# deployment_container_id =
+
+# (Optional) Resource pool ID for NSX Edge deployment
+# resource_pool_id =
+
+# (Optional) Datastore ID for NSX Edge deployment
+# datastore_id =
+
+# (Required) UUID of the logical switch for physical network connectivity
+# external_network =
+
+# (Optional) Asynchronous task status check interval
+# default is 2000 (milliseconds)
+# task_status_check_interval = 2000
+
+[nsx]
+# Maximum number of ports for each bridged logical switch
+# The recommended value for this parameter varies with NSX version
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000
+# max_lp_per_bridged_ls = 5000
+
+# Maximum number of ports for each overlay (stt, gre) logical switch
+# max_lp_per_overlay_ls = 256
+
+# Number of connections to each controller node.
+# default is 10
+# concurrent_connections = 10
+
+# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
+# nsx_gen_timeout = -1
+
+# Acceptable values for 'metadata_mode' are:
+#   - 'access_network': this enables a dedicated connection to the metadata
+#     proxy for metadata server access via Neutron router.
+#   - 'dhcp_host_route': this enables host route injection via the dhcp agent.
+#     This option is only useful if running on a host that does not support
+#     namespaces; otherwise, access_network should be used.
+# metadata_mode = access_network
+
+# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
+# default_transport_type = stt
+
+# Specifies in which mode the plugin needs to operate in order to provide DHCP and
+# metadata proxy services to tenant instances. If 'agent' is chosen (default),
+# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
+# provide such services. In this mode, the plugin supports API extensions 'agent'
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
+# the plugin will use NSX logical services for DHCP and metadata proxy. This
+# simplifies the deployment model for Neutron, in that the plugin no longer requires
+# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
+# becomes ineffective. The 'agentless' mode is supported on NSX 4.2 and above.
+# Furthermore, a 'combined' mode is also provided and is used to support existing
+# deployments that want to adopt the agentless mode going forward. With this mode,
+# existing networks keep being served by the existing infrastructure (thus preserving
+# backward compatibility), whereas new networks will be served by the new infrastructure.
+# Migration tools are provided to 'move' one network from one model to another;
+# with agent_mode set to 'combined', the option 'network_auto_schedule' in
+# neutron.conf is ignored, as new networks will no longer be scheduled to
+# existing dhcp agents.
+# agent_mode = agent
+
+# Specifies which mode packet replication should be done in. If set to service,
+# a service node is required in order to perform packet replication. This can
+# also be set to source if one wants replication to be performed locally (NOTE:
+# usually only useful for testing if one does not want to deploy a service node).
+# replication_mode = service
+
+[nsx_sync]
+# Interval in seconds between runs of the status synchronization task.
+# The plugin will aim at resynchronizing operational status for all
+# resources in this interval, and it should therefore be large enough
+# to ensure the task is feasible. Otherwise the plugin will be
+# constantly synchronizing resource status, i.e. a new task is started
+# as soon as the previous one is completed.
+# If this value is set to 0, the state synchronization thread for this
+# Neutron instance will be disabled.
+# state_sync_interval = 10
+
+# Random additional delay between two runs of the state synchronization task.
+# An additional wait time between 0 and max_random_sync_delay seconds
+# will be added on top of state_sync_interval.
+# max_random_sync_delay = 0
+
+# Minimum delay, in seconds, between two status synchronization requests for NSX.
+# Depending on chunk size, controller load, and other factors, state
+# synchronization requests might be pretty heavy. This means the
+# controller might take time to respond, and its load might be quite
+# increased by them. This parameter allows a minimum interval between
+# two subsequent requests to be specified.
+# The value for this parameter must never exceed state_sync_interval.
+# If it does, an error will be raised at startup.
+# min_sync_req_delay = 1
+
+# Minimum number of resources to be retrieved from NSX in a single status
+# synchronization request.
+# The actual size of the chunk will increase if the number of resources is such
+# that using the minimum chunk size will cause the interval between two
+# requests to be less than min_sync_req_delay.
+# min_chunk_size = 500
+
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the operational
+# status of the resource from the NSX backend, and this might have
+# a considerable impact on overall performance.
+# always_read_status = False
+
+[nsx_lsn]
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery.
+# sync_on_missing_data = False
+
+[nsx_dhcp]
+# (Optional) Comma-separated list of additional DNS servers. Default is an empty list.
+# extra_domain_name_servers =
+
+# Domain to use for building the hostnames
+# domain_name = openstacklocal
+
+# Default DHCP lease time
+# default_lease_time = 43200
+
+[nsx_metadata]
+# IP address used by the Metadata server
+# metadata_server_address = 127.0.0.1
+
+# TCP port used by the Metadata server
+# metadata_server_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing.
You may select any string for a secret, +# but it MUST match with the configuration used by the Metadata server +# metadata_shared_secret = diff --git a/etc/neutron/rootwrap.d/debug.filters b/etc/neutron/rootwrap.d/debug.filters new file mode 100644 index 000000000..b61d96017 --- /dev/null +++ b/etc/neutron/rootwrap.d/debug.filters @@ -0,0 +1,14 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# This is needed because we should ping +# from inside a namespace which requires root +ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ +ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ diff --git a/etc/neutron/rootwrap.d/dhcp.filters b/etc/neutron/rootwrap.d/dhcp.filters new file mode 100644 index 000000000..88d61e8e3 --- /dev/null +++ b/etc/neutron/rootwrap.d/dhcp.filters @@ -0,0 +1,38 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# dhcp-agent +dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID= +# dhcp-agent uses kill as well, that's handled by the generic KillFilter +# it looks like these are the only signals needed, per +# neutron/agent/linux/dhcp.py +kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP +kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +ivs-ctl: CommandFilter, ivs-ctl, root +mm-ctl: CommandFilter, mm-ctl, root +dhcp_release: CommandFilter, dhcp_release, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +metadata_proxy_quantum: CommandFilter, quantum-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. +metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +metadata_proxy_local_quantum: CommandFilter, /usr/local/bin/quantum-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, /usr/bin/python, -9 +kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9 +kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/iptables-firewall.filters b/etc/neutron/rootwrap.d/iptables-firewall.filters new file mode 100644 index 000000000..b8a6ab5b3 --- /dev/null +++ b/etc/neutron/rootwrap.d/iptables-firewall.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# neutron/agent/linux/iptables_manager.py +# "iptables-save", ... +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# neutron/agent/linux/iptables_manager.py +# "iptables", "-A", ... 
+iptables: CommandFilter, iptables, root +ip6tables: CommandFilter, ip6tables, root diff --git a/etc/neutron/rootwrap.d/l3.filters b/etc/neutron/rootwrap.d/l3.filters new file mode 100644 index 000000000..2031d779e --- /dev/null +++ b/etc/neutron/rootwrap.d/l3.filters @@ -0,0 +1,41 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# arping +arping: CommandFilter, arping, root + +# l3_agent +sysctl: CommandFilter, sysctl, root +route: CommandFilter, route, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +metadata_proxy_quantum: CommandFilter, quantum-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. +metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +metadata_proxy_local_quantum: CommandFilter, /usr/local/bin/quantum-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, /usr/bin/python, -9 +kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9 +kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root + +# ovs_lib (if OVSInterfaceDriver is used) +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# iptables_manager +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root diff --git a/etc/neutron/rootwrap.d/lbaas-haproxy.filters b/etc/neutron/rootwrap.d/lbaas-haproxy.filters new file mode 100644 index 000000000..b4e1ecba2 --- /dev/null +++ b/etc/neutron/rootwrap.d/lbaas-haproxy.filters @@ -0,0 +1,26 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# haproxy +haproxy: CommandFilter, haproxy, root + +# lbaas-agent uses kill as well, that's handled by the generic KillFilter +kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +mm-ctl: CommandFilter, mm-ctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +route: CommandFilter, route, root + +# arping +arping: CommandFilter, arping, root diff --git a/etc/neutron/rootwrap.d/linuxbridge-plugin.filters b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters new file mode 100644 index 000000000..03df39592 --- /dev/null +++ b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters @@ -0,0 +1,19 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# linuxbridge-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +brctl: CommandFilter, brctl, root +bridge: CommandFilter, bridge, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/nec-plugin.filters 
b/etc/neutron/rootwrap.d/nec-plugin.filters new file mode 100644 index 000000000..89c4cfe35 --- /dev/null +++ b/etc/neutron/rootwrap.d/nec-plugin.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# nec_neutron_agent +ovs-vsctl: CommandFilter, ovs-vsctl, root diff --git a/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/etc/neutron/rootwrap.d/openvswitch-plugin.filters new file mode 100644 index 000000000..b63a83b94 --- /dev/null +++ b/etc/neutron/rootwrap.d/openvswitch-plugin.filters @@ -0,0 +1,22 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# openvswitch-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +ovs-vsctl: CommandFilter, ovs-vsctl, root +ovs-ofctl: CommandFilter, ovs-ofctl, root +kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 +ovsdb-client: CommandFilter, ovsdb-client, root +xe: CommandFilter, xe, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/ryu-plugin.filters b/etc/neutron/rootwrap.d/ryu-plugin.filters new file mode 100644 index 000000000..0a70b8bc9 --- /dev/null +++ b/etc/neutron/rootwrap.d/ryu-plugin.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# ryu-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism + +# neutron/plugins/ryu/agent/ryu_neutron_agent.py: +# "ovs-vsctl", "--timeout=2", ... +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# neutron/plugins/ryu/agent/ryu_neutron_agent.py: +# "xe", "vif-param-get", ... 
+xe: CommandFilter, xe, root diff --git a/etc/neutron/rootwrap.d/vpnaas.filters b/etc/neutron/rootwrap.d/vpnaas.filters new file mode 100644 index 000000000..7848136b9 --- /dev/null +++ b/etc/neutron/rootwrap.d/vpnaas.filters @@ -0,0 +1,13 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +openswan: CommandFilter, ipsec, root diff --git a/etc/policy.json b/etc/policy.json new file mode 100644 index 000000000..369e0a80d --- /dev/null +++ b/etc/policy.json @@ -0,0 +1,136 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + "shared": "field:networks:shared=True", + "shared_firewalls": "field:firewalls:shared=True", + "external": "field:networks:router:external=True", + "default": "rule:admin_or_owner", + + "subnets:private:read": "rule:admin_or_owner", + "subnets:private:write": "rule:admin_or_owner", + "subnets:shared:read": "rule:regular_user", + "subnets:shared:write": "rule:admin_only", + + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + + "create_network": "", + "get_network": "rule:admin_or_owner or rule:shared or rule:external", + "get_network:router:external": "rule:regular_user", + "get_network:segments": "rule:admin_only", + "get_network:provider:network_type": "rule:admin_only", + "get_network:provider:physical_network": "rule:admin_only", + "get_network:provider:segmentation_id": "rule:admin_only", + "get_network:queue_id": "rule:admin_only", + "create_network:shared": "rule:admin_only", + "create_network:router:external": "rule:admin_only", + "create_network:segments": "rule:admin_only", + "create_network:provider:network_type": "rule:admin_only", + "create_network:provider:physical_network": "rule:admin_only", + "create_network:provider:segmentation_id": "rule:admin_only", + "update_network": "rule:admin_or_owner", + "update_network:segments": "rule:admin_only", + "update_network:shared": "rule:admin_only", + "update_network:provider:network_type": "rule:admin_only", + "update_network:provider:physical_network": "rule:admin_only", + "update_network:provider:segmentation_id": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "rule:admin_or_network_owner", + "create_port:fixed_ips": "rule:admin_or_network_owner", + "create_port:port_security_enabled": "rule:admin_or_network_owner", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner", + "get_port": "rule:admin_or_owner", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "update_port": "rule:admin_or_owner", + "update_port:fixed_ips": "rule:admin_or_network_owner", + "update_port:port_security_enabled": 
"rule:admin_or_network_owner", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner", + "delete_port": "rule:admin_or_owner", + + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:external_gateway_info:enable_snat": "rule:admin_only", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + "create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "update_firewall:shared": "rule:admin_only", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": "rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_router": "rule:regular_user", + "get_router": "rule:admin_or_owner", + "update_router:add_router_interface": "rule:admin_or_owner", + "update_router:remove_router_interface": "rule:admin_or_owner", + "delete_router": "rule:admin_or_owner", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + "update_policy_profiles": "rule:admin_only", + "get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf new file mode 100644 index 000000000..dee1dd94b --- /dev/null +++ b/etc/rootwrap.conf @@ -0,0 +1,34 @@ +# Configuration for neutron-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! 
+filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR + +[xenapi] +# XenAPI configuration is only required by the L2 agent if it is to +# target a XenServer/XCP compute host's dom0. +xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/etc/services.conf b/etc/services.conf new file mode 100644 index 000000000..f8a609005 --- /dev/null +++ b/etc/services.conf @@ -0,0 +1,40 @@ +[radware] +#vdirect_address = 0.0.0.0 +#ha_secondary_address= +#vdirect_user = vDirect +#vdirect_password = radware +#service_ha_pair = False +#service_throughput = 1000 +#service_ssl_throughput = 200 +#service_compression_throughput = 100 +#service_cache = 20 +#service_adc_type = VA +#service_adc_version= +#service_session_mirroring_enabled = False +#service_isl_vlan = -1 +#service_resource_pool_ids = [] +#actions_to_skip = 'setup_l2_l3' +#l4_action_name = 'BaseCreate' +#l2_l3_workflow_name = openstack_l2_l3 +#l4_workflow_name = openstack_l4 +#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True +#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2 + +[netscaler_driver] +#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api +#netscaler_ncc_username = admin +#netscaler_ncc_password = secret + +[heleoslb] +#esm_mgmt = +#admin_username = +#admin_password = +#lb_image = +#inband_id = +#oob_id = +#mgmt_id = +#dummy_utif_id = +#resource_pool_id = +#async_requests = +#lb_flavor = small +#sync_interval = 60 diff --git a/etc/vpn_agent.ini b/etc/vpn_agent.ini new file mode 100644 index 000000000..c3089df95 --- /dev/null +++ b/etc/vpn_agent.ini @@ -0,0 +1,14 @@ +[DEFAULT] +# VPN-Agent configuration file +# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also + +[vpnagent] +# vpn device drivers which vpn agent will use +# If we want to use multiple drivers, we need to define this option multiple times. +# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver +# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver +# vpn_device_driver=another_driver + +[ipsec] +# Status check interval +# ipsec_status_check_interval=60 diff --git a/neutron/__init__.py b/neutron/__init__.py new file mode 100644 index 000000000..b2c81bde7 --- /dev/null +++ b/neutron/__init__.py @@ -0,0 +1,21 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gettext + + +gettext.install('neutron', unicode=1) diff --git a/neutron/agent/__init__.py b/neutron/agent/__init__.py new file mode 100644 index 000000000..0b3d2db5e --- /dev/null +++ b/neutron/agent/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/agent/common/__init__.py b/neutron/agent/common/__init__.py new file mode 100644 index 000000000..0b3d2db5e --- /dev/null +++ b/neutron/agent/common/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/agent/common/config.py b/neutron/agent/common/config.py new file mode 100644 index 000000000..5f83517d5 --- /dev/null +++ b/neutron/agent/common/config.py @@ -0,0 +1,123 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
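+# This module collects the option definitions and registration helpers shared
+# by the agents (root helper, agent state reporting, interface driver and
+# namespace usage). A minimal usage sketch, mirroring how
+# neutron/agent/dhcp_agent.py consumes it (not a complete agent):
+#
+#     from oslo.config import cfg
+#     from neutron.agent.common import config
+#
+#     config.register_agent_state_opts_helper(cfg.CONF)
+#     config.register_root_helper(cfg.CONF)
+#     root_helper = config.get_root_helper(cfg.CONF)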
+ +import os + +from oslo.config import cfg + +from neutron.common import config +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +ROOT_HELPER_OPTS = [ + cfg.StrOpt('root_helper', default='sudo', + help=_('Root helper application.')), +] + +AGENT_STATE_OPTS = [ + cfg.FloatOpt('report_interval', default=30, + help=_('Seconds between nodes reporting state to server; ' + 'should be less than agent_down_time, best if it ' + 'is half or less than agent_down_time.')), +] + +INTERFACE_DRIVER_OPTS = [ + cfg.StrOpt('interface_driver', + help=_("The driver used to manage the virtual interface.")), +] + +USE_NAMESPACES_OPTS = [ + cfg.BoolOpt('use_namespaces', default=True, + help=_("Allow overlapping IP.")), +] + + +def get_log_args(conf, log_file_name): + cmd_args = [] + if conf.debug: + cmd_args.append('--debug') + if conf.verbose: + cmd_args.append('--verbose') + if (conf.log_dir or conf.log_file): + cmd_args.append('--log-file=%s' % log_file_name) + log_dir = None + if conf.log_dir and conf.log_file: + log_dir = os.path.dirname( + os.path.join(conf.log_dir, conf.log_file)) + elif conf.log_dir: + log_dir = conf.log_dir + elif conf.log_file: + log_dir = os.path.dirname(conf.log_file) + if log_dir: + cmd_args.append('--log-dir=%s' % log_dir) + else: + if conf.use_syslog: + cmd_args.append('--use-syslog') + if conf.syslog_log_facility: + cmd_args.append( + '--syslog-log-facility=%s' % conf.syslog_log_facility) + return cmd_args + + +def register_root_helper(conf): + # The first call is to ensure backward compatibility + conf.register_opts(ROOT_HELPER_OPTS) + conf.register_opts(ROOT_HELPER_OPTS, 'AGENT') + + +def register_agent_state_opts_helper(conf): + conf.register_opts(AGENT_STATE_OPTS, 'AGENT') + + +def register_interface_driver_opts_helper(conf): + conf.register_opts(INTERFACE_DRIVER_OPTS) + + +def register_use_namespaces_opts_helper(conf): + conf.register_opts(USE_NAMESPACES_OPTS) + + +def get_root_helper(conf): + root_helper = conf.AGENT.root_helper + if root_helper != 'sudo': + return root_helper + + root_helper = conf.root_helper + if root_helper != 'sudo': + LOG.deprecated(_('DEFAULT.root_helper is deprecated! Please move ' + 'root_helper configuration to [AGENT] section.')) + return root_helper + + return 'sudo' + + +def setup_conf(): + bind_opts = [ + cfg.StrOpt('state_path', + default='/var/lib/neutron', + help=_('Top-level directory for maintaining dhcp state')), + ] + + conf = cfg.ConfigOpts() + conf.register_opts(bind_opts) + return conf + +# add a logging setup method here for convenience +setup_logging = config.setup_logging diff --git a/neutron/agent/dhcp_agent.py b/neutron/agent/dhcp_agent.py new file mode 100644 index 000000000..5cdb30d0e --- /dev/null +++ b/neutron/agent/dhcp_agent.py @@ -0,0 +1,622 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
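+# Rough map of this module: DhcpAgent drives a pluggable DHCP driver
+# (neutron.agent.linux.dhcp.Dnsmasq by default) for each network, keeps a
+# local NetworkCache in sync with the server through DhcpPluginApi RPC calls
+# and the *_end notification handlers, and DhcpAgentWithStateReport adds
+# periodic agent state reporting on top of it.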
+ +import os +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ovs_lib # noqa +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron import context +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service +from neutron import service as neutron_service + +LOG = logging.getLogger(__name__) + + +class DhcpAgent(manager.Manager): + OPTS = [ + cfg.IntOpt('resync_interval', default=5, + help=_("Interval to resync.")), + cfg.StrOpt('dhcp_driver', + default='neutron.agent.linux.dhcp.Dnsmasq', + help=_("The driver used to manage the DHCP server.")), + cfg.BoolOpt('enable_isolated_metadata', default=False, + help=_("Support Metadata requests on isolated networks.")), + cfg.BoolOpt('enable_metadata_network', default=False, + help=_("Allows for serving metadata requests from a " + "dedicated network. Requires " + "enable_isolated_metadata = True")), + cfg.IntOpt('num_sync_threads', default=4, + help=_('Number of threads to use during sync process.')), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + ] + + def __init__(self, host=None): + super(DhcpAgent, self).__init__(host=host) + self.needs_resync_reasons = [] + self.conf = cfg.CONF + self.cache = NetworkCache() + self.root_helper = config.get_root_helper(self.conf) + self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) + ctx = context.get_admin_context_without_session() + self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, + ctx, self.conf.use_namespaces) + # create dhcp dir to store dhcp info + dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path) + if not os.path.isdir(dhcp_dir): + os.makedirs(dhcp_dir, 0o755) + self.dhcp_version = self.dhcp_driver_cls.check_version() + self._populate_networks_cache() + + def _populate_networks_cache(self): + """Populate the networks cache when the DHCP-agent starts.""" + try: + existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( + self.conf, + self.root_helper + ) + for net_id in existing_networks: + net = dhcp.NetModel(self.conf.use_namespaces, + {"id": net_id, + "subnets": [], + "ports": []}) + self.cache.put(net) + except NotImplementedError: + # just go ahead with an empty networks cache + LOG.debug( + _("The '%s' DHCP-driver does not support retrieving of a " + "list of existing networks"), + self.conf.dhcp_driver + ) + + def after_start(self): + self.run() + LOG.info(_("DHCP agent started")) + + def run(self): + """Activate the DHCP agent.""" + self.sync_state() + self.periodic_resync() + + def call_driver(self, action, network, **action_kwargs): + """Invoke an action on a DHCP driver instance.""" + LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'), + {'net': network.id, 'action': action}) + try: + # the Driver expects something that is duck typed similar to + # the base models. 
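+            # (in this agent these are the dhcp.NetModel / dhcp.DictModel
+            # wrappers kept in the NetworkCache)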
+ driver = self.dhcp_driver_cls(self.conf, + network, + self.root_helper, + self.dhcp_version, + self.plugin_rpc) + + getattr(driver, action)(**action_kwargs) + return True + except exceptions.Conflict: + # No need to resync here, the agent will receive the event related + # to a status update for the network + LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is ' + 'a conflict with its current state; please check ' + 'that the network and/or its subnet(s) still exist.') + % {'net_id': network.id, 'action': action}) + except Exception as e: + self.schedule_resync(e) + if (isinstance(e, rpc_compat.RemoteError) + and e.exc_type == 'NetworkNotFound' + or isinstance(e, exceptions.NetworkNotFound)): + LOG.warning(_("Network %s has been deleted."), network.id) + else: + LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.') + % {'net_id': network.id, 'action': action}) + + def schedule_resync(self, reason): + """Schedule a resync for a given reason.""" + self.needs_resync_reasons.append(reason) + + @utils.synchronized('dhcp-agent') + def sync_state(self): + """Sync the local DHCP state with Neutron.""" + LOG.info(_('Synchronizing state')) + pool = eventlet.GreenPool(cfg.CONF.num_sync_threads) + known_network_ids = set(self.cache.get_network_ids()) + + try: + active_networks = self.plugin_rpc.get_active_networks_info() + active_network_ids = set(network.id for network in active_networks) + for deleted_id in known_network_ids - active_network_ids: + try: + self.disable_dhcp_helper(deleted_id) + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Unable to sync network state on deleted ' + 'network %s'), deleted_id) + + for network in active_networks: + pool.spawn(self.safe_configure_dhcp_for_network, network) + pool.waitall() + LOG.info(_('Synchronizing state complete')) + + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Unable to sync network state.')) + + def _periodic_resync_helper(self): + """Resync the dhcp state at the configured interval.""" + while True: + eventlet.sleep(self.conf.resync_interval) + if self.needs_resync_reasons: + # be careful to avoid a race with additions to list + # from other threads + reasons = self.needs_resync_reasons + self.needs_resync_reasons = [] + for r in reasons: + LOG.debug(_("resync: %(reason)s"), + {"reason": r}) + self.sync_state() + + def periodic_resync(self): + """Spawn a thread to periodically resync the dhcp state.""" + eventlet.spawn(self._periodic_resync_helper) + + def safe_get_network_info(self, network_id): + try: + network = self.plugin_rpc.get_network_info(network_id) + if not network: + LOG.warn(_('Network %s has been deleted.'), network_id) + return network + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Network %s info call failed.'), network_id) + + def enable_dhcp_helper(self, network_id): + """Enable DHCP for a network that meets enabling criteria.""" + network = self.safe_get_network_info(network_id) + if network: + self.configure_dhcp_for_network(network) + + def safe_configure_dhcp_for_network(self, network): + try: + self.configure_dhcp_for_network(network) + except (exceptions.NetworkNotFound, RuntimeError): + LOG.warn(_('Network %s may have been deleted and its resources ' + 'may have already been disposed.'), network.id) + + def configure_dhcp_for_network(self, network): + if not network.admin_state_up: + return + + for subnet in network.subnets: + if subnet.enable_dhcp: + if self.call_driver('enable', network): + if (self.conf.use_namespaces and + 
self.conf.enable_isolated_metadata): + self.enable_isolated_metadata_proxy(network) + self.cache.put(network) + break + + def disable_dhcp_helper(self, network_id): + """Disable DHCP for a network known to the agent.""" + network = self.cache.get_network_by_id(network_id) + if network: + if (self.conf.use_namespaces and + self.conf.enable_isolated_metadata): + self.disable_isolated_metadata_proxy(network) + if self.call_driver('disable', network): + self.cache.remove(network) + + def refresh_dhcp_helper(self, network_id): + """Refresh or disable DHCP for a network depending on the current state + of the network. + """ + old_network = self.cache.get_network_by_id(network_id) + if not old_network: + # DHCP current not running for network. + return self.enable_dhcp_helper(network_id) + + network = self.safe_get_network_info(network_id) + if not network: + return + + old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp) + new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp) + + if new_cidrs and old_cidrs == new_cidrs: + self.call_driver('reload_allocations', network) + self.cache.put(network) + elif new_cidrs: + if self.call_driver('restart', network): + self.cache.put(network) + else: + self.disable_dhcp_helper(network.id) + + @utils.synchronized('dhcp-agent') + def network_create_end(self, context, payload): + """Handle the network.create.end notification event.""" + network_id = payload['network']['id'] + self.enable_dhcp_helper(network_id) + + @utils.synchronized('dhcp-agent') + def network_update_end(self, context, payload): + """Handle the network.update.end notification event.""" + network_id = payload['network']['id'] + if payload['network']['admin_state_up']: + self.enable_dhcp_helper(network_id) + else: + self.disable_dhcp_helper(network_id) + + @utils.synchronized('dhcp-agent') + def network_delete_end(self, context, payload): + """Handle the network.delete.end notification event.""" + self.disable_dhcp_helper(payload['network_id']) + + @utils.synchronized('dhcp-agent') + def subnet_update_end(self, context, payload): + """Handle the subnet.update.end notification event.""" + network_id = payload['subnet']['network_id'] + self.refresh_dhcp_helper(network_id) + + # Use the update handler for the subnet create event. + subnet_create_end = subnet_update_end + + @utils.synchronized('dhcp-agent') + def subnet_delete_end(self, context, payload): + """Handle the subnet.delete.end notification event.""" + subnet_id = payload['subnet_id'] + network = self.cache.get_network_by_subnet_id(subnet_id) + if network: + self.refresh_dhcp_helper(network.id) + + @utils.synchronized('dhcp-agent') + def port_update_end(self, context, payload): + """Handle the port.update.end notification event.""" + updated_port = dhcp.DictModel(payload['port']) + network = self.cache.get_network_by_id(updated_port.network_id) + if network: + self.cache.put_port(updated_port) + self.call_driver('reload_allocations', network) + + # Use the update handler for the port create event. 
+ port_create_end = port_update_end + + @utils.synchronized('dhcp-agent') + def port_delete_end(self, context, payload): + """Handle the port.delete.end notification event.""" + port = self.cache.get_port_by_id(payload['port_id']) + if port: + network = self.cache.get_network_by_id(port.network_id) + self.cache.remove_port(port) + self.call_driver('reload_allocations', network) + + def enable_isolated_metadata_proxy(self, network): + + # The proxy might work for either a single network + # or all the networks connected via a router + # to the one passed as a parameter + neutron_lookup_param = '--network_id=%s' % network.id + meta_cidr = netaddr.IPNetwork(dhcp.METADATA_DEFAULT_CIDR) + has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr + for s in network.subnets) + if (self.conf.enable_metadata_network and has_metadata_subnet): + router_ports = [port for port in network.ports + if (port.device_owner == + constants.DEVICE_OWNER_ROUTER_INTF)] + if router_ports: + # Multiple router ports should not be allowed + if len(router_ports) > 1: + LOG.warning(_("%(port_num)d router ports found on the " + "metadata access network. Only the port " + "%(port_id)s, for router %(router_id)s " + "will be considered"), + {'port_num': len(router_ports), + 'port_id': router_ports[0].id, + 'router_id': router_ports[0].device_id}) + neutron_lookup_param = ('--router_id=%s' % + router_ports[0].device_id) + + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + neutron_lookup_param, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%d' % dhcp.METADATA_PORT] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + network.id, + self.root_helper, + network.namespace) + pm.enable(callback) + + def disable_isolated_metadata_proxy(self, network): + pm = external_process.ProcessManager( + self.conf, + network.id, + self.root_helper, + network.namespace) + pm.disable() + + +class DhcpPluginApi(rpc_compat.RpcProxy): + """Agent side of the dhcp rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. 
+ + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic, context, use_namespaces): + super(DhcpPluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.context = context + self.host = cfg.CONF.host + self.use_namespaces = use_namespaces + + def get_active_networks_info(self): + """Make a remote process call to retrieve all network info.""" + networks = self.call(self.context, + self.make_msg('get_active_networks_info', + host=self.host), + topic=self.topic) + return [dhcp.NetModel(self.use_namespaces, n) for n in networks] + + def get_network_info(self, network_id): + """Make a remote process call to retrieve network info.""" + network = self.call(self.context, + self.make_msg('get_network_info', + network_id=network_id, + host=self.host), + topic=self.topic) + if network: + return dhcp.NetModel(self.use_namespaces, network) + + def get_dhcp_port(self, network_id, device_id): + """Make a remote process call to get the dhcp port.""" + port = self.call(self.context, + self.make_msg('get_dhcp_port', + network_id=network_id, + device_id=device_id, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def create_dhcp_port(self, port): + """Make a remote process call to create the dhcp port.""" + port = self.call(self.context, + self.make_msg('create_dhcp_port', + port=port, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def update_dhcp_port(self, port_id, port): + """Make a remote process call to update the dhcp port.""" + port = self.call(self.context, + self.make_msg('update_dhcp_port', + port_id=port_id, + port=port, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def release_dhcp_port(self, network_id, device_id): + """Make a remote process call to release the dhcp port.""" + return self.call(self.context, + self.make_msg('release_dhcp_port', + network_id=network_id, + device_id=device_id, + host=self.host), + topic=self.topic) + + def release_port_fixed_ip(self, network_id, device_id, subnet_id): + """Make a remote process call to release a fixed_ip on the port.""" + return self.call(self.context, + self.make_msg('release_port_fixed_ip', + network_id=network_id, + subnet_id=subnet_id, + device_id=device_id, + host=self.host), + topic=self.topic) + + +class NetworkCache(object): + """Agent cache of the current network state.""" + def __init__(self): + self.cache = {} + self.subnet_lookup = {} + self.port_lookup = {} + + def get_network_ids(self): + return self.cache.keys() + + def get_network_by_id(self, network_id): + return self.cache.get(network_id) + + def get_network_by_subnet_id(self, subnet_id): + return self.cache.get(self.subnet_lookup.get(subnet_id)) + + def get_network_by_port_id(self, port_id): + return self.cache.get(self.port_lookup.get(port_id)) + + def put(self, network): + if network.id in self.cache: + self.remove(self.cache[network.id]) + + self.cache[network.id] = network + + for subnet in network.subnets: + self.subnet_lookup[subnet.id] = network.id + + for port in network.ports: + self.port_lookup[port.id] = network.id + + def remove(self, network): + del self.cache[network.id] + + for subnet in network.subnets: + del self.subnet_lookup[subnet.id] + + for port in network.ports: + del self.port_lookup[port.id] + + def put_port(self, port): + network = self.get_network_by_id(port.network_id) + for index in range(len(network.ports)): + if network.ports[index].id == port.id: + network.ports[index] = port + break + else: + 
network.ports.append(port) + + self.port_lookup[port.id] = network.id + + def remove_port(self, port): + network = self.get_network_by_port_id(port.id) + + for index in range(len(network.ports)): + if network.ports[index] == port: + del network.ports[index] + del self.port_lookup[port.id] + break + + def get_port_by_id(self, port_id): + network = self.get_network_by_port_id(port_id) + if network: + for port in network.ports: + if port.id == port_id: + return port + + def get_state(self): + net_ids = self.get_network_ids() + num_nets = len(net_ids) + num_subnets = 0 + num_ports = 0 + for net_id in net_ids: + network = self.get_network_by_id(net_id) + num_subnets += len(network.subnets) + num_ports += len(network.ports) + return {'networks': num_nets, + 'subnets': num_subnets, + 'ports': num_ports} + + +class DhcpAgentWithStateReport(DhcpAgent): + def __init__(self, host=None): + super(DhcpAgentWithStateReport, self).__init__(host=host) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-dhcp-agent', + 'host': host, + 'topic': topics.DHCP_AGENT, + 'configurations': { + 'dhcp_driver': cfg.CONF.dhcp_driver, + 'use_namespaces': cfg.CONF.use_namespaces, + 'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration}, + 'start_flag': True, + 'agent_type': constants.AGENT_TYPE_DHCP} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + self.agent_state.get('configurations').update( + self.cache.get_state()) + ctx = context.get_admin_context_without_session() + self.state_rpc.report_state(ctx, self.agent_state, self.use_call) + self.use_call = False + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." + " State report for this agent will be disabled.")) + self.heartbeat.stop() + self.run() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + return + if self.agent_state.pop('start_flag', None): + self.run() + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.schedule_resync(_("Agent updated: %(payload)s") % + {"payload": payload}) + LOG.info(_("agent_updated by server side %s!"), payload) + + def after_start(self): + LOG.info(_("DHCP agent started")) + + +def register_options(): + cfg.CONF.register_opts(DhcpAgent.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + config.register_agent_state_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp.OPTS) + cfg.CONF.register_opts(interface.OPTS) + + +def main(): + register_options() + common_config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + server = neutron_service.Service.create( + binary='neutron-dhcp-agent', + topic=topics.DHCP_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport') + service.launch(server).wait() diff --git a/neutron/agent/firewall.py b/neutron/agent/firewall.py new file mode 100644 index 000000000..6c9cd2502 --- /dev/null +++ b/neutron/agent/firewall.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import contextlib
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class FirewallDriver(object):
+    """Firewall Driver base class.
+
+    Defines methods that any driver providing security groups
+    and provider firewall functionality should implement.
+    Note: the port attribute should carry information about security group
+    ids and security group rules.
+
+    the port dict should have
+      device : interface name
+      fixed_ips: ips of the device
+      mac_address: mac_address of the device
+      security_groups: [sgid, sgid]
+      security_group_rules : [ rule, rule ]
+      the rule must contain ethertype and direction
+      the rule may contain security_group_id,
+          protocol, port_min, port_max,
+          source_ip_prefix, source_port_min,
+          source_port_max, dest_ip_prefix, and
+          remote_group_id
+    Note: source_group_ip in REST API should be converted by this rule
+      if direction is ingress:
+        remote_group_ip will be a source_ip_prefix
+      if direction is egress:
+        remote_group_ip will be a dest_ip_prefix
+    Note: remote_group_id in REST API should be converted by this rule
+      if direction is ingress:
+        remote_group_id will be a list of source_ip_prefix
+      if direction is egress:
+        remote_group_id will be a list of dest_ip_prefix
+      remote_group_id is also used to manage membership updates
+    """
+
+    def prepare_port_filter(self, port):
+        """Prepare filters for the port.
+
+        This method should be called before the port is created.
+        """
+        raise NotImplementedError()
+
+    def apply_port_filter(self, port):
+        """Apply port filter.
+
+        Once this method returns, the port should be firewalled
+        appropriately. This method should as far as possible be a
+        no-op. It's vastly preferred to get everything set up in
+        prepare_port_filter.
+        """
+        raise NotImplementedError()
+
+    def update_port_filter(self, port):
+        """Refresh security group rules from the data store.
+
+        Gets called when a port gets added to or removed from
+        the security group the port is a member of, or if the
+        group gains or loses a rule.
+        """
+        raise NotImplementedError()
+
+    def remove_port_filter(self, port):
+        """Stop filtering port."""
+        raise NotImplementedError()
+
+    def filter_defer_apply_on(self):
+        """Defer application of filtering rules."""
+        pass
+
+    def filter_defer_apply_off(self):
+        """Turn off deferral of rules and apply the rules now."""
+        pass
+
+    @property
+    def ports(self):
+        """Returns filtered ports."""
+        pass
+
+    @contextlib.contextmanager
+    def defer_apply(self):
+        """Defer apply context."""
+        self.filter_defer_apply_on()
+        try:
+            yield
+        finally:
+            self.filter_defer_apply_off()
+
+
+class NoopFirewallDriver(FirewallDriver):
+    """Noop Firewall Driver.
+
+    Firewall driver which does nothing.
+    This driver is for disabling the firewall functionality.
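+    The sample [securitygroup] sections in the plugin configuration files
+    above list it as the commented-out default for firewall_driver, which is
+    typically what deployments relying on nova security groups keep.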
+ """ + + def prepare_port_filter(self, port): + pass + + def apply_port_filter(self, port): + pass + + def update_port_filter(self, port): + pass + + def remove_port_filter(self, port): + pass + + def filter_defer_apply_on(self): + pass + + def filter_defer_apply_off(self): + pass + + @property + def ports(self): + return {} diff --git a/neutron/agent/l2population_rpc.py b/neutron/agent/l2population_rpc.py new file mode 100644 index 000000000..80c5a97de --- /dev/null +++ b/neutron/agent/l2population_rpc.py @@ -0,0 +1,56 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +import abc + +from oslo.config import cfg +import six + +from neutron.common import log + + +@six.add_metaclass(abc.ABCMeta) +class L2populationRpcCallBackMixin(object): + + @log.log + def add_fdb_entries(self, context, fdb_entries, host=None): + if not host or host == cfg.CONF.host: + self.fdb_add(context, fdb_entries) + + @log.log + def remove_fdb_entries(self, context, fdb_entries, host=None): + if not host or host == cfg.CONF.host: + self.fdb_remove(context, fdb_entries) + + @log.log + def update_fdb_entries(self, context, fdb_entries, host=None): + if not host or host == cfg.CONF.host: + self.fdb_update(context, fdb_entries) + + @abc.abstractmethod + def fdb_add(self, context, fdb_entries): + pass + + @abc.abstractmethod + def fdb_remove(self, context, fdb_entries): + pass + + @abc.abstractmethod + def fdb_update(self, context, fdb_entries): + pass diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py new file mode 100644 index 000000000..7dcb81e3b --- /dev/null +++ b/neutron/agent/l3_agent.py @@ -0,0 +1,990 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
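Illustrative aside (not part of the patch): the L2populationRpcCallBackMixin above only dispatches an fdb_* callback when the notification is addressed to this host, or to no host at all. A minimal stand-alone mirror of that dispatch, without the oslo.config and neutron logging dependencies (MY_HOST and the print call are placeholders):

MY_HOST = 'agent-host-1'   # stand-in for cfg.CONF.host


class FdbCallback(object):
    """Mirrors the mixin's host filtering, minus the neutron imports."""

    def add_fdb_entries(self, context, fdb_entries, host=None):
        # host=None means the notification targets every agent; otherwise
        # only the named host reacts.
        if not host or host == MY_HOST:
            self.fdb_add(context, fdb_entries)

    def fdb_add(self, context, fdb_entries):
        print('adding fdb entries: %s' % (fdb_entries,))


cb = FdbCallback()
cb.add_fdb_entries(None, {'net-1': {'ports': {}}})                 # handled
cb.add_fdb_entries(None, {'net-1': {'ports': {}}}, host='other')   # ignored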
+# + +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_manager +from neutron.agent.linux import ovs_lib # noqa +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as common_utils +from neutron import context +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import processutils +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qrouter-' +INTERNAL_DEV_PREFIX = 'qr-' +EXTERNAL_DEV_PREFIX = 'qg-' +RPC_LOOP_INTERVAL = 1 +FLOATING_IP_CIDR_SUFFIX = '/32' + + +class L3PluginApi(rpc_compat.RpcProxy): + """Agent side of the l3 agent RPC API. + + API version history: + 1.0 - Initial version. + 1.1 - Floating IP operational status updates + + """ + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(L3PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.host = host + + def get_routers(self, context, router_ids=None): + """Make a remote process call to retrieve the sync data for routers.""" + return self.call(context, + self.make_msg('sync_routers', host=self.host, + router_ids=router_ids), + topic=self.topic) + + def get_external_network_id(self, context): + """Make a remote process call to retrieve the external network id. 
+ + @raise rpc_compat.RemoteError: with TooManyExternalNetworks + as exc_type if there are + more than one external network + """ + return self.call(context, + self.make_msg('get_external_network_id', + host=self.host), + topic=self.topic) + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Call the plugin update floating IPs's operational status.""" + return self.call(context, + self.make_msg('update_floatingip_statuses', + router_id=router_id, + fip_statuses=fip_statuses), + topic=self.topic, + version='1.1') + + +class RouterInfo(object): + + def __init__(self, router_id, root_helper, use_namespaces, router): + self.router_id = router_id + self.ex_gw_port = None + self._snat_enabled = None + self._snat_action = None + self.internal_ports = [] + self.floating_ips = set() + self.root_helper = root_helper + self.use_namespaces = use_namespaces + # Invoke the setter for establishing initial SNAT action + self.router = router + self.ns_name = NS_PREFIX + router_id if use_namespaces else None + self.iptables_manager = iptables_manager.IptablesManager( + root_helper=root_helper, + #FIXME(danwent): use_ipv6=True, + namespace=self.ns_name) + self.routes = [] + + @property + def router(self): + return self._router + + @router.setter + def router(self, value): + self._router = value + if not self._router: + return + # enable_snat by default if it wasn't specified by plugin + self._snat_enabled = self._router.get('enable_snat', True) + # Set a SNAT action for the router + if self._router.get('gw_port'): + self._snat_action = ('add_rules' if self._snat_enabled + else 'remove_rules') + elif self.ex_gw_port: + # Gateway port was removed, remove rules + self._snat_action = 'remove_rules' + + def perform_snat_action(self, snat_callback, *args): + # Process SNAT rules for attached subnets + if self._snat_action: + snat_callback(self, self._router.get('gw_port'), + *args, action=self._snat_action) + self._snat_action = None + + +class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager): + """Manager for L3NatAgent + + API version history: + 1.0 initial Version + 1.1 changed the type of the routers parameter + to the routers_updated method. + It was previously a list of routers in dict format. + It is now a list of router IDs only. + Per rpc versioning rules, it is backwards compatible. 
+ """ + RPC_API_VERSION = '1.1' + + OPTS = [ + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port used by Neutron metadata namespace " + "proxy.")), + cfg.IntOpt('send_arp_for_ha', + default=3, + help=_("Send this many gratuitous ARPs for HA setup, if " + "less than or equal to 0, the feature is disabled")), + cfg.StrOpt('router_id', default='', + help=_("If namespaces is disabled, the l3 agent can only" + " configure a router that has the matching router " + "ID.")), + cfg.BoolOpt('handle_internal_only_routers', + default=True, + help=_("Agent should implement routers with no gateway")), + cfg.StrOpt('gateway_external_network_id', default='', + help=_("UUID of external network for routers implemented " + "by the agents.")), + cfg.BoolOpt('enable_metadata_proxy', default=True, + help=_("Allow running metadata proxy.")), + cfg.BoolOpt('router_delete_namespaces', default=False, + help=_("Delete namespace after removing a router.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + ] + + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + self.conf = cfg.CONF + self.root_helper = config.get_root_helper(self.conf) + self.router_info = {} + + self._check_config_params() + + try: + self.driver = importutils.import_object( + self.conf.interface_driver, + self.conf + ) + except Exception: + msg = _("Error importing interface driver " + "'%s'") % self.conf.interface_driver + LOG.error(msg) + raise SystemExit(1) + + self.context = context.get_admin_context_without_session() + self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) + self.fullsync = True + self.updated_routers = set() + self.removed_routers = set() + self.sync_progress = False + + self._clean_stale_namespaces = self.conf.use_namespaces + + self.rpc_loop = loopingcall.FixedIntervalLoopingCall( + self._rpc_loop) + self.rpc_loop.start(interval=RPC_LOOP_INTERVAL) + super(L3NATAgent, self).__init__(conf=self.conf) + + self.target_ex_net_id = None + + def _check_config_params(self): + """Check items in configuration files. + + Check for required and invalid configuration items. + The actual values are not verified for correctness. + """ + if not self.conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + + if not self.conf.use_namespaces and not self.conf.router_id: + msg = _('Router id is required if not using namespaces.') + LOG.error(msg) + raise SystemExit(1) + + def _cleanup_namespaces(self, routers): + """Destroy stale router namespaces on host when L3 agent restarts + + This routine is called when self._clean_stale_namespaces is True. + + The argument routers is the list of routers that are recorded in + the database as being hosted on this node. 
+ """ + try: + root_ip = ip_lib.IPWrapper(self.root_helper) + + host_namespaces = root_ip.get_namespaces(self.root_helper) + router_namespaces = set(ns for ns in host_namespaces + if ns.startswith(NS_PREFIX)) + ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers) + ns_to_destroy = router_namespaces - ns_to_ignore + except RuntimeError: + LOG.exception(_('RuntimeError in obtaining router list ' + 'for namespace cleanup.')) + else: + self._destroy_stale_router_namespaces(ns_to_destroy) + + def _destroy_stale_router_namespaces(self, router_namespaces): + """Destroys the stale router namespaces + + The argumenet router_namespaces is a list of stale router namespaces + + As some stale router namespaces may not be able to be deleted, only + one attempt will be made to delete them. + """ + for ns in router_namespaces: + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) + + try: + self._destroy_router_namespace(ns) + except RuntimeError: + LOG.exception(_('Failed to destroy stale router namespace ' + '%s'), ns) + self._clean_stale_namespaces = False + + def _destroy_router_namespace(self, namespace): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(INTERNAL_DEV_PREFIX): + # device is on default bridge + self.driver.unplug(d.name, namespace=namespace, + prefix=INTERNAL_DEV_PREFIX) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=namespace, + prefix=EXTERNAL_DEV_PREFIX) + + if self.conf.router_delete_namespaces: + try: + ns_ip.netns.delete(namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg % namespace) + + def _create_router_namespace(self, ri): + ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) + ip_wrapper = ip_wrapper_root.ensure_namespace(ri.ns_name) + ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) + + def _fetch_external_net_id(self, force=False): + """Find UUID of single external network for this agent.""" + if self.conf.gateway_external_network_id: + return self.conf.gateway_external_network_id + + # L3 agent doesn't use external_network_bridge to handle external + # networks, so bridge_mappings with provider networks will be used + # and the L3 agent is able to handle any external networks. 
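Illustrative aside (not part of the patch): _cleanup_namespaces above decides which qrouter- namespaces are stale with plain set arithmetic, and deliberately leaves non-router namespaces (for example qdhcp-*) alone. The same computation on hypothetical data:

NS_PREFIX = 'qrouter-'

# What ip_lib.IPWrapper.get_namespaces() might report on the host.
host_namespaces = ['qrouter-aaa', 'qrouter-bbb', 'qdhcp-ccc']

# Routers the server says this agent should currently host.
routers = [{'id': 'aaa'}]

router_namespaces = set(ns for ns in host_namespaces
                        if ns.startswith(NS_PREFIX))
ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers)
ns_to_destroy = router_namespaces - ns_to_ignore

print(ns_to_destroy)   # only 'qrouter-bbb' is stale; qdhcp-ccc is never touched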
+ if not self.conf.external_network_bridge: + return + + if not force and self.target_ex_net_id: + return self.target_ex_net_id + + try: + self.target_ex_net_id = self.plugin_rpc.get_external_network_id( + self.context) + return self.target_ex_net_id + except rpc_compat.RemoteError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.exc_type == 'TooManyExternalNetworks': + ctx.reraise = False + msg = _( + "The 'gateway_external_network_id' option must be " + "configured for this agent as Neutron has more than " + "one external network.") + raise Exception(msg) + + def _router_added(self, router_id, router): + ri = RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri + if self.conf.use_namespaces: + self._create_router_namespace(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].add_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].add_rule(c, r) + ri.iptables_manager.apply() + super(L3NATAgent, self).process_router_add(ri) + if self.conf.enable_metadata_proxy: + self._spawn_metadata_proxy(ri.router_id, ri.ns_name) + + def _router_removed(self, router_id): + ri = self.router_info.get(router_id) + if ri is None: + LOG.warn(_("Info for router %s were not found. " + "Skipping router removal"), router_id) + return + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].remove_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].remove_rule(c, r) + ri.iptables_manager.apply() + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ri.router_id, ri.ns_name) + del self.router_info[router_id] + self._destroy_router_namespace(ri.ns_name) + + def _spawn_metadata_proxy(self, router_id, ns_name): + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + '--router_id=%s' % router_id, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%s' % self.conf.metadata_port] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % + router_id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.enable(callback) + + def _destroy_metadata_proxy(self, router_id, ns_name): + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.disable() + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + if len(ips) > 1: + LOG.error(_("Ignoring multiple IPs on router port %s"), + port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _get_existing_devices(self, ri): + ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, + namespace=ri.ns_name) + ip_devs = ip_wrapper.get_devices(exclude_loopback=True) + return [ip_dev.name for ip_dev in ip_devs] + + def process_router(self, ri): + ri.iptables_manager.defer_apply_on() + ex_gw_port = self._get_ex_gw_port(ri) + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + existing_port_ids = set([p['id'] for p in 
ri.internal_ports]) + current_port_ids = set([p['id'] for p in internal_ports + if p['admin_state_up']]) + new_ports = [p for p in internal_ports if + p['id'] in current_port_ids and + p['id'] not in existing_port_ids] + old_ports = [p for p in ri.internal_ports if + p['id'] not in current_port_ids] + for p in new_ports: + self._set_subnet_info(p) + self.internal_network_added(ri, p['network_id'], p['id'], + p['ip_cidr'], p['mac_address']) + ri.internal_ports.append(p) + + for p in old_ports: + self.internal_network_removed(ri, p['id'], p['ip_cidr']) + ri.internal_ports.remove(p) + + existing_devices = self._get_existing_devices(ri) + current_internal_devs = set([n for n in existing_devices + if n.startswith(INTERNAL_DEV_PREFIX)]) + current_port_devs = set([self.get_internal_device_name(id) for + id in current_port_ids]) + stale_devs = current_internal_devs - current_port_devs + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale internal router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + # Get IPv4 only internal CIDRs + internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports + if netaddr.IPNetwork(p['ip_cidr']).version == 4] + # TODO(salv-orlando): RouterInfo would be a better place for + # this logic too + ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or + ri.ex_gw_port and ri.ex_gw_port['id']) + + interface_name = None + if ex_gw_port_id: + interface_name = self.get_external_device_name(ex_gw_port_id) + if ex_gw_port and ex_gw_port != ri.ex_gw_port: + self._set_subnet_info(ex_gw_port) + self.external_gateway_added(ri, ex_gw_port, + interface_name, internal_cidrs) + elif not ex_gw_port and ri.ex_gw_port: + self.external_gateway_removed(ri, ri.ex_gw_port, + interface_name, internal_cidrs) + + stale_devs = [dev for dev in existing_devices + if dev.startswith(EXTERNAL_DEV_PREFIX) + and dev != interface_name] + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale external router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + # Process static routes for router + self.routes_updated(ri) + # Process SNAT rules for external gateway + ri.perform_snat_action(self._handle_router_snat_rules, + internal_cidrs, interface_name) + + # Process SNAT/DNAT rules for floating IPs + fip_statuses = {} + try: + if ex_gw_port: + existing_floating_ips = ri.floating_ips + self.process_router_floating_ip_nat_rules(ri) + ri.iptables_manager.defer_apply_off() + # Once NAT rules for floating IPs are safely in place + # configure their addresses on the external gateway port + fip_statuses = self.process_router_floating_ip_addresses( + ri, ex_gw_port) + except Exception: + # TODO(salv-orlando): Less broad catching + # All floating IPs must be put in error state + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR + + if ex_gw_port: + # Identify floating IPs which were disabled + ri.floating_ips = set(fip_statuses.keys()) + for fip_id in existing_floating_ips - ri.floating_ips: + fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN + # Update floating IP status on the neutron server + self.plugin_rpc.update_floatingip_statuses( + self.context, ri.router_id, fip_statuses) + + # Update ex_gw_port and enable_snat on the router info cache + ri.ex_gw_port = ex_gw_port + ri.enable_snat = ri.router.get('enable_snat') + + def 
_handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + # Remove all the rules + # This is safe because if use_namespaces is set as False + # then the agent can only configure one router, otherwise + # each router's SNAT rules will be in their own namespace + ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + ri.iptables_manager.ipv4['nat'].empty_chain('snat') + + # Add back the jump to float-snat + ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') + + # And add them back if the action if add_rules + if action == 'add_rules' and ex_gw_port: + # ex_gw_port should not be None in this case + # NAT rules are added only if ex_gw_port has an IPv4 address + for ip_addr in ex_gw_port['fixed_ips']: + ex_gw_ip = ip_addr['ip_address'] + if netaddr.IPAddress(ex_gw_ip).version == 4: + rules = self.external_gateway_nat_rules(ex_gw_ip, + internal_cidrs, + interface_name) + for rule in rules: + ri.iptables_manager.ipv4['nat'].add_rule(*rule) + break + ri.iptables_manager.apply() + + def process_router_floating_ip_nat_rules(self, ri): + """Configure NAT rules for the router's floating IPs. + + Configures iptables rules for the floating ips of the given router + """ + # Clear out all iptables rules for floating ips + ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') + + # Loop once to ensure that floating ips are configured. + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + # Rebuild iptables rules for the floating ip. + fixed = fip['fixed_ip_address'] + fip_ip = fip['floating_ip_address'] + for chain, rule in self.floating_forward_rules(fip_ip, fixed): + ri.iptables_manager.ipv4['nat'].add_rule(chain, rule, + tag='floating_ip') + + ri.iptables_manager.apply() + + def process_router_floating_ip_addresses(self, ri, ex_gw_port): + """Configure IP addresses on router's external gateway interface. + + Ensures addresses for existing floating IPs and cleans up + those that should not longer be configured. + """ + fip_statuses = {} + interface_name = self.get_external_device_name(ex_gw_port['id']) + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) + new_cidrs = set() + + # Loop once to ensure that floating ips are configured. + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + fip_ip = fip['floating_ip_address'] + ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX + + new_cidrs.add(ip_cidr) + + if ip_cidr not in existing_cidrs: + net = netaddr.IPNetwork(ip_cidr) + try: + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError): + # any exception occurred here should cause the floating IP + # to be set in error state + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ERROR) + LOG.warn(_("Unable to configure IP address for " + "floating IP: %s"), fip['id']) + continue + # As GARP is processed in a distinct thread the call below + # won't raise an exception to be handled. + self._send_gratuitous_arp_packet( + ri, interface_name, fip_ip) + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ACTIVE) + + # Clean up addresses that no longer belong on the gateway interface. 
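Illustrative aside (not part of the patch): process_router_floating_ip_addresses above reconciles the /32 addresses configured on the qg- device against the floating IPs the router should serve by comparing two sets, and only ever removes /32 entries so the gateway port's own subnet address is preserved. The same reconciliation on hypothetical addresses:

FLOATING_IP_CIDR_SUFFIX = '/32'

# What device.addr.list() might report on the gateway interface.
existing_cidrs = set(['203.0.113.10/32', '203.0.113.11/32', '203.0.113.1/24'])

# Floating IPs the router should currently serve.
floating_ips = ['203.0.113.10', '203.0.113.12']
new_cidrs = set(ip + FLOATING_IP_CIDR_SUFFIX for ip in floating_ips)

to_add = new_cidrs - existing_cidrs
# Only /32 entries are removal candidates, so the gateway port's own
# address (the /24 here) is left in place.
to_remove = [cidr for cidr in existing_cidrs - new_cidrs
             if cidr.endswith(FLOATING_IP_CIDR_SUFFIX)]

print(sorted(to_add))      # ['203.0.113.12/32']
print(sorted(to_remove))   # ['203.0.113.11/32']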
+ for ip_cidr in existing_cidrs - new_cidrs: + if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX): + net = netaddr.IPNetwork(ip_cidr) + device.addr.delete(net.version, ip_cidr) + return fip_statuses + + def _get_ex_gw_port(self, ri): + return ri.router.get('gw_port') + + def _arping(self, ri, interface_name, ip_address): + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + ip_address] + try: + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) + except Exception as e: + LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) + + def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address): + if self.conf.send_arp_for_ha > 0: + eventlet.spawn_n(self._arping, ri, interface_name, ip_address) + + def get_internal_device_name(self, port_id): + return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_external_device_name(self, port_id): + return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + # Compute a list of addresses this router is supposed to have. + # This avoids unnecessarily removing those addresses and + # causing a momentarily network outage. + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX + for ip in floating_ips] + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ri.ns_name, + gateway=ex_gw_port['subnet'].get('gateway_ip'), + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ri, interface_name, ip_address) + + def external_gateway_removed(self, ri, ex_gw_port, + interface_name, internal_cidrs): + + self.driver.unplug(interface_name, + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + def metadata_filter_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport %s ' + '-j ACCEPT' % self.conf.metadata_port)) + return rules + + def metadata_nat_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT ' + '--to-port %s' % self.conf.metadata_port)) + return rules + + def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, + interface_name): + rules = [('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! 
' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name})] + for cidr in internal_cidrs: + rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) + return rules + + def internal_network_added(self, ri, network_id, port_id, + internal_cidr, mac_address): + interface_name = self.get_internal_device_name(port_id) + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.plug(network_id, port_id, interface_name, mac_address, + namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + self.driver.init_l3(interface_name, [internal_cidr], + namespace=ri.ns_name) + ip_address = internal_cidr.split('/')[0] + self._send_gratuitous_arp_packet(ri, interface_name, ip_address) + + def internal_network_removed(self, ri, port_id, internal_cidr): + interface_name = self.get_internal_device_name(port_id) + if ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.unplug(interface_name, namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): + rules = [('snat', '-s %s -j SNAT --to-source %s' % + (internal_cidr, ex_gw_ip))] + return rules + + def floating_forward_rules(self, floating_ip, fixed_ip): + return [('PREROUTING', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('float-snat', '-s %s -j SNAT --to %s' % + (fixed_ip, floating_ip))] + + def router_deleted(self, context, router_id): + """Deal with router deletion RPC message.""" + LOG.debug(_('Got router deleted notification for %s'), router_id) + self.removed_routers.add(router_id) + + def routers_updated(self, context, routers): + """Deal with routers modification and creation RPC message.""" + LOG.debug(_('Got routers updated notification :%s'), routers) + if routers: + # This is needed for backward compatibility + if isinstance(routers[0], dict): + routers = [router['id'] for router in routers] + self.updated_routers.update(routers) + + def router_removed_from_agent(self, context, payload): + LOG.debug(_('Got router removed from agent :%r'), payload) + self.removed_routers.add(payload['router_id']) + + def router_added_to_agent(self, context, payload): + LOG.debug(_('Got router added to agent :%r'), payload) + self.routers_updated(context, payload) + + def _process_routers(self, routers, all_routers=False): + pool = eventlet.GreenPool() + if (self.conf.external_network_bridge and + not ip_lib.device_exists(self.conf.external_network_bridge)): + LOG.error(_("The external network bridge '%s' does not exist"), + self.conf.external_network_bridge) + return + + target_ex_net_id = self._fetch_external_net_id() + # if routers are all the routers we have (They are from router sync on + # starting or when error occurs during running), we seek the + # routers which should be removed. + # If routers are from server side notification, we seek them + # from subset of incoming routers and ones we have now. + if all_routers: + prev_router_ids = set(self.router_info) + else: + prev_router_ids = set(self.router_info) & set( + [router['id'] for router in routers]) + cur_router_ids = set() + for r in routers: + # If namespaces are disabled, only process the router associated + # with the configured agent id. 
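Illustrative aside (not part of the patch): for each floating IP, floating_forward_rules above emits a DNAT rule in PREROUTING and OUTPUT plus a SNAT rule in the float-snat chain. Rendering those three rules for a hypothetical address pair shows what ends up in iptables:

def floating_forward_rules(floating_ip, fixed_ip):
    # Copy of the method above, detached from the agent class for clarity.
    return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
            ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
            ('float-snat', '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]

for chain, rule in floating_forward_rules('203.0.113.5', '10.0.0.4'):
    print('%-10s %s' % (chain, rule))
# PREROUTING -d 203.0.113.5 -j DNAT --to 10.0.0.4
# OUTPUT     -d 203.0.113.5 -j DNAT --to 10.0.0.4
# float-snat -s 10.0.0.4 -j SNAT --to 203.0.113.5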
+ if (not self.conf.use_namespaces and + r['id'] != self.conf.router_id): + continue + ex_net_id = (r['external_gateway_info'] or {}).get('network_id') + if not ex_net_id and not self.conf.handle_internal_only_routers: + continue + if (target_ex_net_id and ex_net_id and + ex_net_id != target_ex_net_id): + # Double check that our single external_net_id has not changed + # by forcing a check by RPC. + if (ex_net_id != self._fetch_external_net_id(force=True)): + continue + cur_router_ids.add(r['id']) + if r['id'] not in self.router_info: + self._router_added(r['id'], r) + ri = self.router_info[r['id']] + ri.router = r + pool.spawn_n(self.process_router, ri) + # identify and remove routers that no longer exist + for router_id in prev_router_ids - cur_router_ids: + pool.spawn_n(self._router_removed, router_id) + pool.waitall() + + @lockutils.synchronized('l3-agent', 'neutron-') + def _rpc_loop(self): + # _rpc_loop and _sync_routers_task will not be + # executed in the same time because of lock. + # so we can clear the value of updated_routers + # and removed_routers, but they can be updated by + # updated_routers and removed_routers rpc call + try: + LOG.debug(_("Starting RPC loop for %d updated routers"), + len(self.updated_routers)) + if self.updated_routers: + # We're capturing and clearing the list, and will + # process the "captured" updates in this loop, + # and any updates that happen due to a context switch + # will be picked up on the next pass. + updated_routers = set(self.updated_routers) + self.updated_routers.clear() + router_ids = list(updated_routers) + routers = self.plugin_rpc.get_routers( + self.context, router_ids) + # routers with admin_state_up=false will not be in the fetched + fetched = set([r['id'] for r in routers]) + self.removed_routers.update(updated_routers - fetched) + + self._process_routers(routers) + self._process_router_delete() + LOG.debug(_("RPC loop successfully completed")) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + def _process_router_delete(self): + current_removed_routers = list(self.removed_routers) + for router_id in current_removed_routers: + self._router_removed(router_id) + self.removed_routers.remove(router_id) + + def _router_ids(self): + if not self.conf.use_namespaces: + return [self.conf.router_id] + + @periodic_task.periodic_task + @lockutils.synchronized('l3-agent', 'neutron-') + def _sync_routers_task(self, context): + if self.services_sync: + super(L3NATAgent, self).process_services_sync(context) + LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), + self.fullsync) + if not self.fullsync: + return + try: + router_ids = self._router_ids() + self.updated_routers.clear() + self.removed_routers.clear() + routers = self.plugin_rpc.get_routers( + context, router_ids) + + LOG.debug(_('Processing :%r'), routers) + self._process_routers(routers, all_routers=True) + self.fullsync = False + LOG.debug(_("_sync_routers_task successfully completed")) + except rpc_compat.RPCException: + LOG.exception(_("Failed synchronizing routers due to RPC error")) + self.fullsync = True + return + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + # Resync is not necessary for the cleanup of stale + # namespaces. 
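Illustrative aside (not part of the patch): _rpc_loop above copies and clears self.updated_routers before making any RPC call, so notifications that arrive while routers are being processed are kept for the next pass, and any router the server no longer returns (for example admin_state_up=False) is queued for removal. The same bookkeeping on plain sets:

updated_routers = set(['r1', 'r2', 'r3'])   # ids collected from notifications
removed_routers = set()

# Capture and clear: ids added after this point wait for the next loop.
captured = set(updated_routers)
updated_routers.clear()

# Suppose the server only returned two of the three routers.
fetched_routers = [{'id': 'r1'}, {'id': 'r3'}]
fetched_ids = set(r['id'] for r in fetched_routers)

# Routers we were notified about but that the server no longer reports
# are treated as removed.
removed_routers.update(captured - fetched_ids)
print(sorted(removed_routers))   # ['r2']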
+ if self._clean_stale_namespaces: + self._cleanup_namespaces(routers) + + def after_start(self): + LOG.info(_("L3 agent started")) + + def _update_routing_table(self, ri, operation, route): + cmd = ['ip', 'route', operation, 'to', route['destination'], + 'via', route['nexthop']] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def routes_updated(self, ri): + new_routes = ri.router['routes'] + old_routes = ri.routes + adds, removes = common_utils.diff_list_of_dict(old_routes, + new_routes) + for route in adds: + LOG.debug(_("Added route entry is '%s'"), route) + # remove replaced route from deleted route + for del_route in removes: + if route['destination'] == del_route['destination']: + removes.remove(del_route) + #replace success even if there is no existing route + self._update_routing_table(ri, 'replace', route) + for route in removes: + LOG.debug(_("Removed route entry is '%s'"), route) + self._update_routing_table(ri, 'delete', route) + ri.routes = new_routes + + +class L3NATAgentWithStateReport(L3NATAgent): + + def __init__(self, host, conf=None): + super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': { + 'use_namespaces': self.conf.use_namespaces, + 'router_id': self.conf.router_id, + 'handle_internal_only_routers': + self.conf.handle_internal_only_routers, + 'external_network_bridge': self.conf.external_network_bridge, + 'gateway_external_network_id': + self.conf.gateway_external_network_id, + 'interface_driver': self.conf.interface_driver}, + 'start_flag': True, + 'agent_type': l3_constants.AGENT_TYPE_L3} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + LOG.debug(_("Report state task started")) + num_ex_gw_ports = 0 + num_interfaces = 0 + num_floating_ips = 0 + router_infos = self.router_info.values() + num_routers = len(router_infos) + for ri in router_infos: + ex_gw_port = self._get_ex_gw_port(ri) + if ex_gw_port: + num_ex_gw_ports += 1 + num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, + [])) + num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, + [])) + configurations = self.agent_state['configurations'] + configurations['routers'] = num_routers + configurations['ex_gw_ports'] = num_ex_gw_ports + configurations['interfaces'] = num_interfaces + configurations['floating_ips'] = num_floating_ips + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + LOG.debug(_("Report state task successfully completed")) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." 
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.fullsync = True + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'): + conf = cfg.CONF + conf.register_opts(L3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager=manager) + service.launch(server).wait() diff --git a/neutron/agent/linux/__init__.py b/neutron/agent/linux/__init__.py new file mode 100644 index 000000000..0b3d2db5e --- /dev/null +++ b/neutron/agent/linux/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/agent/linux/async_process.py b/neutron/agent/linux/async_process.py new file mode 100644 index 000000000..d0fc3214a --- /dev/null +++ b/neutron/agent/linux/async_process.py @@ -0,0 +1,223 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +import eventlet.event +import eventlet.queue + +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class AsyncProcessException(Exception): + pass + + +class AsyncProcess(object): + """Manages an asynchronous process. + + This class spawns a new process via subprocess and uses + greenthreads to read stderr and stdout asynchronously into queues + that can be read via repeatedly calling iter_stdout() and + iter_stderr(). + + If respawn_interval is non-zero, any error in communicating with + the managed process will result in the process and greenthreads + being cleaned up and the process restarted after the specified + interval. 
+ + Example usage: + + >>> import time + >>> proc = AsyncProcess(['ping']) + >>> proc.start() + >>> time.sleep(5) + >>> proc.stop() + >>> for line in proc.iter_stdout(): + ... print line + """ + + def __init__(self, cmd, root_helper=None, respawn_interval=None): + """Constructor. + + :param cmd: The list of command arguments to invoke. + :param root_helper: Optional, utility to use when running shell cmds. + :param respawn_interval: Optional, the interval in seconds to wait + to respawn after unexpected process death. Respawn will + only be attempted if a value of 0 or greater is provided. + """ + self.cmd = cmd + self.root_helper = root_helper + if respawn_interval is not None and respawn_interval < 0: + raise ValueError(_('respawn_interval must be >= 0 if provided.')) + self.respawn_interval = respawn_interval + self._process = None + self._kill_event = None + self._reset_queues() + self._watchers = [] + + def _reset_queues(self): + self._stdout_lines = eventlet.queue.LightQueue() + self._stderr_lines = eventlet.queue.LightQueue() + + def start(self): + """Launch a process and monitor it asynchronously.""" + if self._kill_event: + raise AsyncProcessException(_('Process is already started')) + else: + LOG.debug(_('Launching async process [%s].'), self.cmd) + self._spawn() + + def stop(self): + """Halt the process and watcher threads.""" + if self._kill_event: + LOG.debug(_('Halting async process [%s].'), self.cmd) + self._kill() + else: + raise AsyncProcessException(_('Process is not running.')) + + def _spawn(self): + """Spawn a process and its watchers.""" + self._kill_event = eventlet.event.Event() + self._process, cmd = utils.create_process(self.cmd, + root_helper=self.root_helper) + self._watchers = [] + for reader in (self._read_stdout, self._read_stderr): + # Pass the stop event directly to the greenthread to + # ensure that assignment of a new event to the instance + # attribute does not prevent the greenthread from using + # the original event. + watcher = eventlet.spawn(self._watch_process, + reader, + self._kill_event) + self._watchers.append(watcher) + + def _kill(self, respawning=False): + """Kill the process and the associated watcher greenthreads. + + :param respawning: Optional, whether respawn will be subsequently + attempted. + """ + # Halt the greenthreads + self._kill_event.send() + + pid = self._get_pid_to_kill() + if pid: + self._kill_process(pid) + + if not respawning: + # Clear the kill event to ensure the process can be + # explicitly started again. + self._kill_event = None + + def _get_pid_to_kill(self): + pid = self._process.pid + # If root helper was used, two or more processes will be created: + # + # - a root helper process (e.g. sudo myscript) + # - possibly a rootwrap script (e.g. neutron-rootwrap) + # - a child process (e.g. myscript) + # + # Killing the root helper process will leave the child process + # running, re-parented to init, so the only way to ensure that both + # die is to target the child process directly. + if self.root_helper: + try: + pid = utils.find_child_pids(pid)[0] + except IndexError: + # Process is already dead + return None + while True: + try: + # We shouldn't have more than one child per process + # so keep getting the children of the first one + pid = utils.find_child_pids(pid)[0] + except IndexError: + # Last process in the tree, return it + break + return pid + + def _kill_process(self, pid): + try: + # A process started by a root helper will be running as + # root and need to be killed via the same helper. 
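Illustrative aside (not part of the patch): _get_pid_to_kill above walks down the process tree because killing only the sudo/rootwrap parent would leave the real command running, re-parented to init. A stand-alone sketch of that walk; find_children is a placeholder for utils.find_child_pids:

def pid_to_kill(pid, find_children):
    """Follow first-child links down to the leaf process."""
    try:
        pid = find_children(pid)[0]
    except IndexError:
        return None          # the parent has already exited
    while True:
        try:
            pid = find_children(pid)[0]
        except IndexError:
            return pid       # no more children: this is the real process


# Hypothetical tree: 100 (sudo) -> 101 (rootwrap) -> 102 (actual command).
tree = {100: [101], 101: [102], 102: []}
print(pid_to_kill(100, lambda p: tree.get(p, [])))   # 102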
+ utils.execute(['kill', '-9', pid], root_helper=self.root_helper) + except Exception as ex: + stale_pid = (isinstance(ex, RuntimeError) and + 'No such process' in str(ex)) + if not stale_pid: + LOG.exception(_('An error occurred while killing [%s].'), + self.cmd) + return False + return True + + def _handle_process_error(self): + """Kill the async process and respawn if necessary.""" + LOG.debug(_('Halting async process [%s] in response to an error.'), + self.cmd) + respawning = self.respawn_interval >= 0 + self._kill(respawning=respawning) + if respawning: + eventlet.sleep(self.respawn_interval) + LOG.debug(_('Respawning async process [%s].'), self.cmd) + self._spawn() + + def _watch_process(self, callback, kill_event): + while not kill_event.ready(): + try: + if not callback(): + break + except Exception: + LOG.exception(_('An error occurred while communicating ' + 'with async process [%s].'), self.cmd) + break + # Ensure that watching a process with lots of output does + # not block execution of other greenthreads. + eventlet.sleep() + # The kill event not being ready indicates that the loop was + # broken out of due to an error in the watched process rather + # than the loop condition being satisfied. + if not kill_event.ready(): + self._handle_process_error() + + def _read(self, stream, queue): + data = stream.readline() + if data: + data = data.strip() + queue.put(data) + return data + + def _read_stdout(self): + return self._read(self._process.stdout, self._stdout_lines) + + def _read_stderr(self): + return self._read(self._process.stderr, self._stderr_lines) + + def _iter_queue(self, queue): + while True: + try: + yield queue.get_nowait() + except eventlet.queue.Empty: + break + + def iter_stdout(self): + return self._iter_queue(self._stdout_lines) + + def iter_stderr(self): + return self._iter_queue(self._stderr_lines) diff --git a/neutron/agent/linux/daemon.py b/neutron/agent/linux/daemon.py new file mode 100644 index 000000000..59bcd8eb0 --- /dev/null +++ b/neutron/agent/linux/daemon.py @@ -0,0 +1,151 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
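Illustrative aside (not part of the patch): iter_stdout and iter_stderr above drain their queues with get_nowait, so a caller receives whatever output is buffered right now and never blocks. The same drain pattern with the standard-library queue module (eventlet.queue.LightQueue behaves the same way for this purpose):

try:
    import queue             # Python 3
except ImportError:
    import Queue as queue    # Python 2


def iter_queue(q):
    # Yield what is buffered at this moment and stop; never block.
    while True:
        try:
            yield q.get_nowait()
        except queue.Empty:
            break


q = queue.Queue()
q.put('line 1')
q.put('line 2')
print(list(iter_queue(q)))   # ['line 1', 'line 2']
print(list(iter_queue(q)))   # [] -- nothing new has been buffered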
+# +# @author: Mark McClain, DreamHost + +import atexit +import fcntl +import os +import signal +import sys + +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class Pidfile(object): + def __init__(self, pidfile, procname, uuid=None): + self.pidfile = pidfile + self.procname = procname + self.uuid = uuid + try: + self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) + fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + LOG.exception(_("Error while handling pidfile: %s"), pidfile) + sys.exit(1) + + def __str__(self): + return self.pidfile + + def unlock(self): + if not not fcntl.flock(self.fd, fcntl.LOCK_UN): + raise IOError(_('Unable to unlock pid file')) + + def write(self, pid): + os.ftruncate(self.fd, 0) + os.write(self.fd, "%d" % pid) + os.fsync(self.fd) + + def read(self): + try: + pid = int(os.read(self.fd, 128)) + os.lseek(self.fd, 0, os.SEEK_SET) + return pid + except ValueError: + return + + def is_running(self): + pid = self.read() + if not pid: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + exec_out = f.readline() + return self.procname in exec_out and (not self.uuid or + self.uuid in exec_out) + except IOError: + return False + + +class Daemon(object): + """A generic daemon class. + + Usage: subclass the Daemon class and override the run() method + """ + def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', + stderr='/dev/null', procname='python', uuid=None): + self.stdin = stdin + self.stdout = stdout + self.stderr = stderr + self.procname = procname + self.pidfile = Pidfile(pidfile, procname, uuid) + + def _fork(self): + try: + pid = os.fork() + if pid > 0: + sys.exit(0) + except OSError: + LOG.exception(_('Fork failed')) + sys.exit(1) + + def daemonize(self): + """Daemonize process by doing Stevens double fork.""" + # fork first time + self._fork() + + # decouple from parent environment + os.chdir("/") + os.setsid() + os.umask(0) + + # fork second time + self._fork() + + # redirect standard file descriptors + sys.stdout.flush() + sys.stderr.flush() + stdin = open(self.stdin, 'r') + stdout = open(self.stdout, 'a+') + stderr = open(self.stderr, 'a+', 0) + os.dup2(stdin.fileno(), sys.stdin.fileno()) + os.dup2(stdout.fileno(), sys.stdout.fileno()) + os.dup2(stderr.fileno(), sys.stderr.fileno()) + + # write pidfile + atexit.register(self.delete_pid) + signal.signal(signal.SIGTERM, self.handle_sigterm) + self.pidfile.write(os.getpid()) + + def delete_pid(self): + os.remove(str(self.pidfile)) + + def handle_sigterm(self, signum, frame): + sys.exit(0) + + def start(self): + """Start the daemon.""" + + if self.pidfile.is_running(): + self.pidfile.unlock() + message = _('Pidfile %s already exist. Daemon already running?') + LOG.error(message, self.pidfile) + sys.exit(1) + + # Start the daemon + self.daemonize() + self.run() + + def run(self): + """Override this method when subclassing Daemon. + + start() will call this method after the process has daemonized. + """ + pass diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py new file mode 100644 index 000000000..84de415be --- /dev/null +++ b/neutron/agent/linux/dhcp.py @@ -0,0 +1,908 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import collections +import os +import re +import shutil +import socket +import sys + +import netaddr +from oslo.config import cfg +import six + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import utils as commonutils +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('dhcp_confs', + default='$state_path/dhcp', + help=_('Location to store DHCP server config files')), + cfg.StrOpt('dhcp_domain', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), + cfg.StrOpt('dnsmasq_config_file', + default='', + help=_('Override the default dnsmasq settings with this file')), + cfg.ListOpt('dnsmasq_dns_servers', + help=_('Comma-separated list of the DNS servers which will be ' + 'used as forwarders.'), + deprecated_name='dnsmasq_dns_server'), + cfg.BoolOpt('dhcp_delete_namespaces', default=False, + help=_("Delete namespace after removing a dhcp server.")), + cfg.IntOpt( + 'dnsmasq_lease_max', + default=(2 ** 24), + help=_('Limit number of leases to prevent a denial-of-service.')), +] + +IPV4 = 4 +IPV6 = 6 +UDP = 'udp' +TCP = 'tcp' +DNS_PORT = 53 +DHCPV4_PORT = 67 +DHCPV6_PORT = 547 +METADATA_DEFAULT_PREFIX = 16 +METADATA_DEFAULT_IP = '169.254.169.254' +METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP, + METADATA_DEFAULT_PREFIX) +METADATA_PORT = 80 +WIN2k3_STATIC_DNS = 249 +NS_PREFIX = 'qdhcp-' + + +class DictModel(dict): + """Convert dict into an object that provides attribute access to values.""" + + def __init__(self, *args, **kwargs): + """Convert dict values to DictModel values.""" + super(DictModel, self).__init__(*args, **kwargs) + + def needs_upgrade(item): + """Check if `item` is a dict and needs to be changed to DictModel. 
+ """ + return isinstance(item, dict) and not isinstance(item, DictModel) + + def upgrade(item): + """Upgrade item if it needs to be upgraded.""" + if needs_upgrade(item): + return DictModel(item) + else: + return item + + for key, value in self.iteritems(): + if isinstance(value, (list, tuple)): + # Keep the same type but convert dicts to DictModels + self[key] = type(value)( + (upgrade(item) for item in value) + ) + elif needs_upgrade(value): + # Change dict instance values to DictModel instance values + self[key] = DictModel(value) + + def __getattr__(self, name): + try: + return self[name] + except KeyError as e: + raise AttributeError(e) + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + del self[name] + + +class NetModel(DictModel): + + def __init__(self, use_namespaces, d): + super(NetModel, self).__init__(d) + + self._ns_name = (use_namespaces and + "%s%s" % (NS_PREFIX, self.id) or None) + + @property + def namespace(self): + return self._ns_name + + +@six.add_metaclass(abc.ABCMeta) +class DhcpBase(object): + + def __init__(self, conf, network, root_helper='sudo', + version=None, plugin=None): + self.conf = conf + self.network = network + self.root_helper = root_helper + self.device_manager = DeviceManager(self.conf, + self.root_helper, plugin) + self.version = version + + @abc.abstractmethod + def enable(self): + """Enables DHCP for this network.""" + + @abc.abstractmethod + def disable(self, retain_port=False): + """Disable dhcp for this network.""" + + def restart(self): + """Restart the dhcp service for the network.""" + self.disable(retain_port=True) + self.enable() + + @abc.abstractproperty + def active(self): + """Boolean representing the running state of the DHCP server.""" + + @abc.abstractmethod + def reload_allocations(self): + """Force the DHCP server to reload the assignment database.""" + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + + raise NotImplementedError + + @classmethod + def check_version(cls): + """Execute version checks on DHCP server.""" + + raise NotImplementedError + + +class DhcpLocalProcess(DhcpBase): + PORTS = [] + + def _enable_dhcp(self): + """check if there is a subnet within the network with dhcp enabled.""" + for subnet in self.network.subnets: + if subnet.enable_dhcp: + return True + return False + + def enable(self): + """Enables DHCP for this network by spawning a local process.""" + interface_name = self.device_manager.setup(self.network) + if self.active: + self.restart() + elif self._enable_dhcp(): + self.interface_name = interface_name + self.spawn_process() + + def disable(self, retain_port=False): + """Disable DHCP for this network by killing the local process.""" + pid = self.pid + + if pid: + if self.active: + cmd = ['kill', '-9', pid] + utils.execute(cmd, self.root_helper) + else: + LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d ' + 'does not exist, performing cleanup'), + {'net_id': self.network.id, 'pid': pid}) + if not retain_port: + self.device_manager.destroy(self.network, + self.interface_name) + else: + LOG.debug(_('No DHCP started for %s'), self.network.id) + + self._remove_config_files() + + if not retain_port: + if self.conf.dhcp_delete_namespaces and self.network.namespace: + ns_ip = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + try: + ns_ip.netns.delete(self.network.namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + 
LOG.exception(msg, self.network.namespace) + + def _remove_config_files(self): + confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) + conf_dir = os.path.join(confs_dir, self.network.id) + shutil.rmtree(conf_dir, ignore_errors=True) + + def get_conf_file_name(self, kind, ensure_conf_dir=False): + """Returns the file name for a given kind of config file.""" + confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) + conf_dir = os.path.join(confs_dir, self.network.id) + if ensure_conf_dir: + if not os.path.isdir(conf_dir): + os.makedirs(conf_dir, 0o755) + + return os.path.join(conf_dir, kind) + + def _get_value_from_conf_file(self, kind, converter=None): + """A helper function to read a value from one of the state files.""" + file_name = self.get_conf_file_name(kind) + msg = _('Error while reading %s') + + try: + with open(file_name, 'r') as f: + try: + return converter and converter(f.read()) or f.read() + except ValueError: + msg = _('Unable to convert value in %s') + except IOError: + msg = _('Unable to access %s') + + LOG.debug(msg % file_name) + return None + + @property + def pid(self): + """Last known pid for the DHCP process spawned for this network.""" + return self._get_value_from_conf_file('pid', int) + + @property + def active(self): + pid = self.pid + if pid is None: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + return self.network.id in f.readline() + except IOError: + return False + + @property + def interface_name(self): + return self._get_value_from_conf_file('interface') + + @interface_name.setter + def interface_name(self, value): + interface_file_path = self.get_conf_file_name('interface', + ensure_conf_dir=True) + utils.replace_file(interface_file_path, value) + + @abc.abstractmethod + def spawn_process(self): + pass + + +class Dnsmasq(DhcpLocalProcess): + # The ports that need to be opened when security policies are active + # on the Neutron port used for DHCP. These are provided as a convenience + # for users of this class. + PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)], + IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)], + } + + _TAG_PREFIX = 'tag%d' + + NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID' + NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH' + MINIMUM_VERSION = 2.59 + + @classmethod + def check_version(cls): + ver = 0 + try: + cmd = ['dnsmasq', '--version'] + out = utils.execute(cmd) + ver = re.findall("\d+.\d+", out)[0] + is_valid_version = float(ver) >= cls.MINIMUM_VERSION + if not is_valid_version: + LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. ' + 'DHCP AGENT MAY NOT RUN CORRECTLY! ' + 'Please ensure that its version is %s ' + 'or above!'), cls.MINIMUM_VERSION) + except (OSError, RuntimeError, IndexError, ValueError): + LOG.warning(_('Unable to determine dnsmasq version. 
' + 'Please ensure that its version is %s ' + 'or above!'), cls.MINIMUM_VERSION) + return float(ver) + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + + confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs)) + + return [ + c for c in os.listdir(confs_dir) + if uuidutils.is_uuid_like(c) + ] + + def spawn_process(self): + """Spawns a Dnsmasq process for the network.""" + env = { + self.NEUTRON_NETWORK_ID_KEY: self.network.id, + } + + cmd = [ + 'dnsmasq', + '--no-hosts', + '--no-resolv', + '--strict-order', + '--bind-interfaces', + '--interface=%s' % self.interface_name, + '--except-interface=lo', + '--pid-file=%s' % self.get_conf_file_name( + 'pid', ensure_conf_dir=True), + '--dhcp-hostsfile=%s' % self._output_hosts_file(), + '--addn-hosts=%s' % self._output_addn_hosts_file(), + '--dhcp-optsfile=%s' % self._output_opts_file(), + '--leasefile-ro', + ] + + possible_leases = 0 + for i, subnet in enumerate(self.network.subnets): + # if a subnet is specified to have dhcp disabled + if not subnet.enable_dhcp: + continue + if subnet.ip_version == 4: + mode = 'static' + else: + # TODO(mark): how do we indicate other options + # ra-only, slaac, ra-nameservers, and ra-stateless. + mode = 'static' + if self.version >= self.MINIMUM_VERSION: + set_tag = 'set:' + else: + set_tag = '' + + cidr = netaddr.IPNetwork(subnet.cidr) + + if self.conf.dhcp_lease_duration == -1: + lease = 'infinite' + else: + lease = '%ss' % self.conf.dhcp_lease_duration + + cmd.append('--dhcp-range=%s%s,%s,%s,%s' % + (set_tag, self._TAG_PREFIX % i, + cidr.network, mode, lease)) + + possible_leases += cidr.size + + # Cap the limit because creating lots of subnets can inflate + # this possible lease cap. + cmd.append('--dhcp-lease-max=%d' % + min(possible_leases, self.conf.dnsmasq_lease_max)) + + cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file) + if self.conf.dnsmasq_dns_servers: + cmd.extend( + '--server=%s' % server + for server in self.conf.dnsmasq_dns_servers) + + if self.conf.dhcp_domain: + cmd.append('--domain=%s' % self.conf.dhcp_domain) + + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + ip_wrapper.netns.execute(cmd, addl_env=env) + + def _release_lease(self, mac_address, ip): + """Release a DHCP lease.""" + cmd = ['dhcp_release', self.interface_name, ip, mac_address] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + ip_wrapper.netns.execute(cmd) + + def reload_allocations(self): + """Rebuild the dnsmasq config and signal the dnsmasq to reload.""" + + # If all subnets turn off dhcp, kill the process. + if not self._enable_dhcp(): + self.disable() + LOG.debug(_('Killing dhcpmasq for network since all subnets have ' + 'turned off DHCP: %s'), self.network.id) + return + + self._release_unused_leases() + self._output_hosts_file() + self._output_addn_hosts_file() + self._output_opts_file() + if self.active: + cmd = ['kill', '-HUP', self.pid] + utils.execute(cmd, self.root_helper) + else: + LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid) + LOG.debug(_('Reloading allocations for network: %s'), self.network.id) + self.device_manager.update(self.network, self.interface_name) + + def _iter_hosts(self): + """Iterate over hosts. + + For each host on the network we yield a tuple containing: + ( + port, # a DictModel instance representing the port. + alloc, # a DictModel instance of the allocated ip and subnet. + host_name, # Host name. 
+ name, # Host name and domain name in the format 'hostname.domain'. + ) + """ + for port in self.network.ports: + for alloc in port.fixed_ips: + hostname = 'host-%s' % alloc.ip_address.replace( + '.', '-').replace(':', '-') + fqdn = '%s.%s' % (hostname, self.conf.dhcp_domain) + yield (port, alloc, hostname, fqdn) + + def _output_hosts_file(self): + """Writes a dnsmasq compatible dhcp hosts file. + + The generated file is sent to the --dhcp-hostsfile option of dnsmasq, + and lists the hosts on the network which should receive a dhcp lease. + Each line in this file is in the form:: + + 'mac_address,FQDN,ip_address' + + IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in + this file if it did not give a lease to a host listed in it (e.g.: + multiple dnsmasq instances on the same network if this network is on + multiple network nodes). This file is only defining hosts which + should receive a dhcp lease, the hosts resolution in itself is + defined by the `_output_addn_hosts_file` method. + """ + buf = six.StringIO() + filename = self.get_conf_file_name('host') + + LOG.debug(_('Building host file: %s'), filename) + for (port, alloc, hostname, name) in self._iter_hosts(): + set_tag = '' + # (dzyu) Check if it is legal ipv6 address, if so, need wrap + # it with '[]' to let dnsmasq to distinguish MAC address from + # IPv6 address. + ip_address = alloc.ip_address + if netaddr.valid_ipv6(ip_address): + ip_address = '[%s]' % ip_address + + LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'), + {"mac": port.mac_address, "name": name, + "ip": ip_address}) + + if getattr(port, 'extra_dhcp_opts', False): + if self.version >= self.MINIMUM_VERSION: + set_tag = 'set:' + + buf.write('%s,%s,%s,%s%s\n' % + (port.mac_address, name, ip_address, + set_tag, port.id)) + else: + buf.write('%s,%s,%s\n' % + (port.mac_address, name, ip_address)) + + utils.replace_file(filename, buf.getvalue()) + LOG.debug(_('Done building host file %s'), filename) + return filename + + def _read_hosts_file_leases(self, filename): + leases = set() + if os.path.exists(filename): + with open(filename) as f: + for l in f.readlines(): + host = l.strip().split(',') + leases.add((host[2], host[0])) + return leases + + def _release_unused_leases(self): + filename = self.get_conf_file_name('host') + old_leases = self._read_hosts_file_leases(filename) + + new_leases = set() + for port in self.network.ports: + for alloc in port.fixed_ips: + new_leases.add((alloc.ip_address, port.mac_address)) + + for ip, mac in old_leases - new_leases: + self._release_lease(mac, ip) + + def _output_addn_hosts_file(self): + """Writes a dnsmasq compatible additional hosts file. + + The generated file is sent to the --addn-hosts option of dnsmasq, + and lists the hosts on the network which should be resolved even if + the dnsmaq instance did not give a lease to the host (see the + `_output_hosts_file` method). + Each line in this file is in the same form as a standard /etc/hosts + file. + """ + buf = six.StringIO() + for (port, alloc, hostname, fqdn) in self._iter_hosts(): + # It is compulsory to write the `fqdn` before the `hostname` in + # order to obtain it in PTR responses. 
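+ # For illustration only, assuming dhcp_domain is set to 'openstacklocal',
+ # a generated addn_hosts line would look like:
+ #   '192.168.0.2\thost-192-168-0-2.openstacklocal host-192-168-0-2'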
+ buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname)) + addn_hosts = self.get_conf_file_name('addn_hosts') + utils.replace_file(addn_hosts, buf.getvalue()) + return addn_hosts + + def _output_opts_file(self): + """Write a dnsmasq compatible options file.""" + + if self.conf.enable_isolated_metadata: + subnet_to_interface_ip = self._make_subnet_interface_ip_map() + + options = [] + + dhcp_ips = collections.defaultdict(list) + subnet_idx_map = {} + for i, subnet in enumerate(self.network.subnets): + if not subnet.enable_dhcp: + continue + if subnet.dns_nameservers: + options.append( + self._format_option(i, 'dns-server', + ','.join(subnet.dns_nameservers))) + else: + # use the dnsmasq ip as nameservers only if there is no + # dns-server submitted by the server + subnet_idx_map[subnet.id] = i + + gateway = subnet.gateway_ip + host_routes = [] + for hr in subnet.host_routes: + if hr.destination == "0.0.0.0/0": + if not gateway: + gateway = hr.nexthop + else: + host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) + + # Add host routes for isolated network segments + + if self._enable_metadata(subnet): + subnet_dhcp_ip = subnet_to_interface_ip[subnet.id] + host_routes.append( + '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) + ) + + if host_routes: + if gateway and subnet.ip_version == 4: + host_routes.append("%s,%s" % ("0.0.0.0/0", gateway)) + options.append( + self._format_option(i, 'classless-static-route', + ','.join(host_routes))) + options.append( + self._format_option(i, WIN2k3_STATIC_DNS, + ','.join(host_routes))) + + if subnet.ip_version == 4: + if gateway: + options.append(self._format_option(i, 'router', gateway)) + else: + options.append(self._format_option(i, 'router')) + + for port in self.network.ports: + if getattr(port, 'extra_dhcp_opts', False): + options.extend( + self._format_option(port.id, opt.opt_name, opt.opt_value) + for opt in port.extra_dhcp_opts) + + # provides all dnsmasq ip as dns-server if there is more than + # one dnsmasq for a subnet and there is no dns-server submitted + # by the server + if port.device_owner == constants.DEVICE_OWNER_DHCP: + for ip in port.fixed_ips: + i = subnet_idx_map.get(ip.subnet_id) + if i is None: + continue + dhcp_ips[i].append(ip.ip_address) + + for i, ips in dhcp_ips.items(): + if len(ips) > 1: + options.append(self._format_option(i, + 'dns-server', + ','.join(ips))) + + name = self.get_conf_file_name('opts') + utils.replace_file(name, '\n'.join(options)) + return name + + def _make_subnet_interface_ip_map(self): + ip_dev = ip_lib.IPDevice( + self.interface_name, + self.root_helper, + self.network.namespace + ) + + subnet_lookup = dict( + (netaddr.IPNetwork(subnet.cidr), subnet.id) + for subnet in self.network.subnets + ) + + retval = {} + + for addr in ip_dev.addr.list(): + ip_net = netaddr.IPNetwork(addr['cidr']) + + if ip_net in subnet_lookup: + retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0] + + return retval + + def _format_option(self, tag, option, *args): + """Format DHCP option by option name or code.""" + if self.version >= self.MINIMUM_VERSION: + set_tag = 'tag:' + else: + set_tag = '' + + option = str(option) + + if isinstance(tag, int): + tag = self._TAG_PREFIX % tag + + if not option.isdigit(): + option = 'option:%s' % option + + return ','.join((set_tag + tag, '%s' % option) + args) + + def _enable_metadata(self, subnet): + '''Determine if the metadata route will be pushed to hosts on subnet. 
+ + If subnet has a Neutron router attached, we want the hosts to get + metadata from the router's proxy via their default route instead. + ''' + if self.conf.enable_isolated_metadata and subnet.ip_version == 4: + if subnet.gateway_ip is None: + return True + else: + for port in self.network.ports: + if port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF: + for alloc in port.fixed_ips: + if alloc.subnet_id == subnet.id: + return False + return True + else: + return False + + @classmethod + def lease_update(cls): + network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY) + dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY) + + action = sys.argv[1] + if action not in ('add', 'del', 'old'): + sys.exit() + + mac_address = sys.argv[2] + ip_address = sys.argv[3] + + if action == 'del': + lease_remaining = 0 + else: + lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0)) + + data = dict(network_id=network_id, mac_address=mac_address, + ip_address=ip_address, lease_remaining=lease_remaining) + + if os.path.exists(dhcp_relay_socket): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(dhcp_relay_socket) + sock.send(jsonutils.dumps(data)) + sock.close() + + +class DeviceManager(object): + + def __init__(self, conf, root_helper, plugin): + self.conf = conf + self.root_helper = root_helper + self.plugin = plugin + if not conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + try: + self.driver = importutils.import_object( + conf.interface_driver, conf) + except Exception as e: + msg = (_("Error importing interface driver '%(driver)s': " + "%(inner)s") % {'driver': conf.interface_driver, + 'inner': e}) + LOG.error(msg) + raise SystemExit(1) + + def get_interface_name(self, network, port): + """Return interface(device) name for use by the DHCP process.""" + return self.driver.get_device_name(port) + + def get_device_id(self, network): + """Return a unique DHCP device ID for this host on the network.""" + # There could be more than one dhcp server per network, so create + # a device id that combines host and network ids + return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host) + + def _set_default_route(self, network, device_name): + """Sets the default gateway for this dhcp namespace. + + This method is idempotent and will only adjust the route if adjusting + it would change it from what it already is. This makes it safe to call + and avoids unnecessary perturbation of the system. + """ + device = ip_lib.IPDevice(device_name, + self.root_helper, + network.namespace) + gateway = device.route.get_gateway() + if gateway: + gateway = gateway['gateway'] + + for subnet in network.subnets: + skip_subnet = ( + subnet.ip_version != 4 + or not subnet.enable_dhcp + or subnet.gateway_ip is None) + + if skip_subnet: + continue + + if gateway != subnet.gateway_ip: + m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s') + LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip}) + + device.route.add_gateway(subnet.gateway_ip) + + return + + # No subnets on the network have a valid gateway. Clean it up to avoid + # confusion from seeing an invalid gateway here. 
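+ # Reaching this point means the loop above found no IPv4, DHCP-enabled
+ # subnet with a gateway_ip set, so any gateway still on the device is stale.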
+ if gateway is not None: + msg = _('Removing gateway for dhcp netns on net %s') + LOG.debug(msg, network.id) + + device.route.delete_gateway(gateway) + + def setup_dhcp_port(self, network): + """Create/update DHCP port for the host if needed and return port.""" + + device_id = self.get_device_id(network) + subnets = {} + dhcp_enabled_subnet_ids = [] + for subnet in network.subnets: + if subnet.enable_dhcp: + dhcp_enabled_subnet_ids.append(subnet.id) + subnets[subnet.id] = subnet + + dhcp_port = None + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == device_id: + port_fixed_ips = [] + for fixed_ip in port.fixed_ips: + port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id, + 'ip_address': fixed_ip.ip_address}) + if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: + dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id) + + # If there are dhcp_enabled_subnet_ids here that means that + # we need to add those to the port and call update. + if dhcp_enabled_subnet_ids: + port_fixed_ips.extend( + [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dhcp_port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'fixed_ips': port_fixed_ips}}) + if not dhcp_port: + raise exceptions.Conflict() + else: + dhcp_port = port + # break since we found port that matches device_id + break + + # check for a reserved DHCP port + if dhcp_port is None: + LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist. Checking for a reserved port.'), + {'device_id': device_id, 'network_id': network.id}) + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: + dhcp_port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'device_id': device_id}}) + if dhcp_port: + break + + # DHCP port has not yet been created. 
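+ # Neither a port already owned by this host (matched on device_id) nor a
+ # reserved DHCP port was found above, so fall through and create a new one.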
+ if dhcp_port is None: + LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist.'), {'device_id': device_id, + 'network_id': network.id}) + port_dict = dict( + name='', + admin_state_up=True, + device_id=device_id, + network_id=network.id, + tenant_id=network.tenant_id, + fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dhcp_port = self.plugin.create_dhcp_port({'port': port_dict}) + + if not dhcp_port: + raise exceptions.Conflict() + + # Convert subnet_id to subnet dict + fixed_ips = [dict(subnet_id=fixed_ip.subnet_id, + ip_address=fixed_ip.ip_address, + subnet=subnets[fixed_ip.subnet_id]) + for fixed_ip in dhcp_port.fixed_ips] + + ips = [DictModel(item) if isinstance(item, dict) else item + for item in fixed_ips] + dhcp_port.fixed_ips = ips + + return dhcp_port + + def setup(self, network): + """Create and initialize a device for network's DHCP on this host.""" + port = self.setup_dhcp_port(network) + interface_name = self.get_interface_name(network, port) + + if ip_lib.ensure_device_is_ready(interface_name, + self.root_helper, + network.namespace): + LOG.debug(_('Reusing existing device: %s.'), interface_name) + else: + self.driver.plug(network.id, + port.id, + interface_name, + port.mac_address, + namespace=network.namespace) + ip_cidrs = [] + for fixed_ip in port.fixed_ips: + subnet = fixed_ip.subnet + net = netaddr.IPNetwork(subnet.cidr) + ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) + ip_cidrs.append(ip_cidr) + + if (self.conf.enable_isolated_metadata and + self.conf.use_namespaces): + ip_cidrs.append(METADATA_DEFAULT_CIDR) + + self.driver.init_l3(interface_name, ip_cidrs, + namespace=network.namespace) + + # ensure that the dhcp interface is first in the list + if network.namespace is None: + device = ip_lib.IPDevice(interface_name, + self.root_helper) + device.route.pullup_route(interface_name) + + if self.conf.use_namespaces: + self._set_default_route(network, interface_name) + + return interface_name + + def update(self, network, device_name): + """Update device settings for the network's DHCP on this host.""" + if self.conf.use_namespaces: + self._set_default_route(network, device_name) + + def destroy(self, network, device_name): + """Destroy the device used for the network's DHCP on this host.""" + self.driver.unplug(device_name, namespace=network.namespace) + + self.plugin.release_dhcp_port(network.id, + self.get_device_id(network)) diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py new file mode 100644 index 000000000..d0e990880 --- /dev/null +++ b/neutron/agent/linux/external_process.py @@ -0,0 +1,104 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import os + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('external_pids', + default='$state_path/external/pids', + help=_('Location to store child pid files')), +] + +cfg.CONF.register_opts(OPTS) + + +class ProcessManager(object): + """An external process manager for Neutron spawned processes. + + Note: The manager expects uuid to be in cmdline. + """ + def __init__(self, conf, uuid, root_helper='sudo', namespace=None): + self.conf = conf + self.uuid = uuid + self.root_helper = root_helper + self.namespace = namespace + + def enable(self, cmd_callback): + if not self.active: + cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True)) + + ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace) + ip_wrapper.netns.execute(cmd) + + def disable(self): + pid = self.pid + + if self.active: + cmd = ['kill', '-9', pid] + utils.execute(cmd, self.root_helper) + elif pid: + LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring ' + 'command'), {'uuid': self.uuid, 'pid': pid}) + else: + LOG.debug(_('No process started for %s'), self.uuid) + + def get_pid_file_name(self, ensure_pids_dir=False): + """Returns the file name for a given kind of config file.""" + pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids)) + if ensure_pids_dir and not os.path.isdir(pids_dir): + os.makedirs(pids_dir, 0o755) + + return os.path.join(pids_dir, self.uuid + '.pid') + + @property + def pid(self): + """Last known pid for this external process spawned for this uuid.""" + file_name = self.get_pid_file_name() + msg = _('Error while reading %s') + + try: + with open(file_name, 'r') as f: + return int(f.read()) + except IOError: + msg = _('Unable to access %s') + except ValueError: + msg = _('Unable to convert value in %s') + + LOG.debug(msg, file_name) + return None + + @property + def active(self): + pid = self.pid + if pid is None: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + return self.uuid in f.readline() + except IOError: + return False diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py new file mode 100644 index 000000000..a31250ee7 --- /dev/null +++ b/neutron/agent/linux/interface.py @@ -0,0 +1,450 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +import netaddr +from oslo.config import cfg +import six + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.extensions import flavor +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('ovs_integration_bridge', + default='br-int', + help=_('Name of Open vSwitch bridge to use')), + cfg.BoolOpt('ovs_use_veth', + default=False, + help=_('Uses veth for an interface or not')), + cfg.IntOpt('network_device_mtu', + help=_('MTU setting for device.')), + cfg.StrOpt('meta_flavor_driver_mappings', + help=_('Mapping between flavor and LinuxInterfaceDriver')), + cfg.StrOpt('admin_user', + help=_("Admin username")), + cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), +] + + +@six.add_metaclass(abc.ABCMeta) +class LinuxInterfaceDriver(object): + + # from linux IF_NAMESIZE + DEV_NAME_LEN = 14 + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + + def init_l3(self, device_name, ip_cidrs, namespace=None, + preserve_ips=[], gateway=None, extra_subnets=[]): + """Set the L3 settings for the interface using data from the port. + + ip_cidrs: list of 'X.X.X.X/YY' strings + preserve_ips: list of ip cidrs that should not be removed from device + """ + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace=namespace) + + previous = {} + for address in device.addr.list(scope='global', filters=['permanent']): + previous[address['cidr']] = address['ip_version'] + + # add new addresses + for ip_cidr in ip_cidrs: + + net = netaddr.IPNetwork(ip_cidr) + # Convert to compact IPv6 address because the return values of + # "ip addr list" are compact. 
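+ # e.g. netaddr renders '2001:0db8:0000:0000:0000:0000:0000:0001/64'
+ # as '2001:db8::1/64', matching what the device itself reports.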
+ if net.version == 6: + ip_cidr = str(net) + if ip_cidr in previous: + del previous[ip_cidr] + continue + + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + # clean up any old addresses + for ip_cidr, ip_version in previous.items(): + if ip_cidr not in preserve_ips: + device.addr.delete(ip_version, ip_cidr) + + if gateway: + device.route.add_gateway(gateway) + + new_onlink_routes = set(s['cidr'] for s in extra_subnets) + existing_onlink_routes = set(device.route.list_onlink_routes()) + for route in new_onlink_routes - existing_onlink_routes: + device.route.add_onlink_route(route) + for route in existing_onlink_routes - new_onlink_routes: + device.route.delete_onlink_route(route) + + def check_bridge_exists(self, bridge): + if not ip_lib.device_exists(bridge): + raise exceptions.BridgeDoesNotExist(bridge=bridge) + + def get_device_name(self, port): + return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] + + @abc.abstractmethod + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + + @abc.abstractmethod + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + + +class NullDriver(LinuxInterfaceDriver): + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + pass + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + pass + + +class OVSInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating an internal interface on an OVS bridge.""" + + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + super(OVSInterfaceDriver, self).__init__(conf) + if self.conf.ovs_use_veth: + self.DEV_NAME_PREFIX = 'ns-' + + def _get_tap_name(self, dev_name, prefix=None): + if self.conf.ovs_use_veth: + dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap') + return dev_name + + def _ovs_add_port(self, bridge, device_name, port_id, mac_address, + internal=True): + cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', device_name, '--', + 'add-port', bridge, device_name] + if internal: + cmd += ['--', 'set', 'Interface', device_name, 'type=internal'] + cmd += ['--', 'set', 'Interface', device_name, + 'external-ids:iface-id=%s' % port_id, + '--', 'set', 'Interface', device_name, + 'external-ids:iface-status=active', + '--', 'set', 'Interface', device_name, + 'external-ids:attached-mac=%s' % mac_address] + utils.execute(cmd, self.root_helper) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + if not bridge: + bridge = self.conf.ovs_integration_bridge + + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + + self.check_bridge_exists(bridge) + + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = self._get_tap_name(device_name, prefix) + + if self.conf.ovs_use_veth: + # Create ns_dev in a namespace if one is configured. + root_dev, ns_dev = ip.add_veth(tap_name, + device_name, + namespace2=namespace) + else: + ns_dev = ip.device(device_name) + + internal = not self.conf.ovs_use_veth + self._ovs_add_port(bridge, tap_name, port_id, mac_address, + internal=internal) + + ns_dev.link.set_address(mac_address) + + if self.conf.network_device_mtu: + ns_dev.link.set_mtu(self.conf.network_device_mtu) + if self.conf.ovs_use_veth: + root_dev.link.set_mtu(self.conf.network_device_mtu) + + # Add an interface created by ovs to the namespace. 
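+ # The veth pair case was already placed into the namespace by add_veth()
+ # above, so only the OVS internal port needs to be moved here.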
+ if not self.conf.ovs_use_veth and namespace: + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + if self.conf.ovs_use_veth: + root_dev.link.set_up() + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + if not bridge: + bridge = self.conf.ovs_integration_bridge + + tap_name = self._get_tap_name(device_name, prefix) + self.check_bridge_exists(bridge) + ovs = ovs_lib.OVSBridge(bridge, self.root_helper) + + try: + ovs.delete_port(tap_name) + if self.conf.ovs_use_veth: + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class MidonetInterfaceDriver(LinuxInterfaceDriver): + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """This method is called by the Dhcp agent or by the L3 agent + when a new network is created + """ + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = device_name.replace(prefix or 'tap', 'tap') + + # Create ns_dev in a namespace if one is configured. + root_dev, ns_dev = ip.add_veth(tap_name, device_name, + namespace2=namespace) + + ns_dev.link.set_address(mac_address) + +