Retire Packaging Deb project repos

This commit is part of a series to retire the Packaging Deb
project. Step 2 is to remove all content from the project
repos, replacing it with a README that notes where ongoing
work can be found and how to recover the repo if needed at some
future point (as in
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: I3d95fb5a3df20788b4f21e44df5cf50988c3ce42
Tony Breeds 2017-09-12 15:43:24 -06:00
parent 00491eb31d
commit d1467ddd25
213 changed files with 14 additions and 31351 deletions

.coveragerc

@@ -1,7 +0,0 @@
[run]
branch = True
source = networking_ovn
omit = networking_ovn/tests/*
[report]
ignore_errors = True
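# Illustrative usage (a sketch, not part of this repo): with the coverage
# package installed, the filters above would apply to e.g.
#   coverage run -m unittest discover networking_ovn.tests.unit
#   coverage report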

.gitignore

@@ -1,59 +0,0 @@
*.py[cod]
# C extensions
*.so
# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
nosetests.xml
# Translations
*.mo
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
*.sw?
# Hidden directories
/.*
!/.coveragerc
!/.gitignore
!/.gitreview
!/.mailmap
!/.pylintrc
!/.testr.conf
!/devstack/lib
etc
# Vagrant directory
vagrant/.vagrant
# Files created by releasenotes build
releasenotes/build

.gitreview

@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/networking-ovn.git
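# Illustrative usage (assuming the git-review tool is installed):
#   git review -s   # set up the gerrit remote from this file
#   git review      # push the current branch for review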

.mailmap

@@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

.pylintrc

@@ -1,101 +0,0 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
ignore=.git,tests
[MESSAGES CONTROL]
# TODO: This list is copied from neutron, the options which do not need to be
# suppressed have been already removed, some of the remaining options will be
# removed by code adjustment.
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "I" Informational noise
# "E" Error for important programming issues (likely bugs)
no-member,
# "W" Warnings for stylistic problems or minor programming issues
abstract-method,
arguments-differ,
attribute-defined-outside-init,
broad-except,
dangerous-default-value,
fixme,
global-statement,
no-init,
protected-access,
redefined-builtin,
redefined-outer-name,
signature-differs,
unused-argument,
unused-import,
unused-variable,
useless-super-delegation,
# "C" Coding convention violations
bad-continuation,
invalid-name,
len-as-condition,
misplaced-comparison-constant,
missing-docstring,
superfluous-parens,
ungrouped-imports,
wrong-import-order,
# "R" Refactor recommendations
duplicate-code,
no-else-return,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
# Module names matching
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use oslo_serialization.jsonutils
json
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems
[REPORTS]
# Tells whether to display a full report or only the messages
reports=no
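# Illustrative invocation (assuming pylint is installed):
#   pylint --rcfile=.pylintrc networking_ovn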

.testr.conf

@@ -1,8 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
OS_LOG_CAPTURE=1 \
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./networking_ovn/tests/unit} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
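# Illustrative usage (assuming the testrepository package is installed):
#   testr init   # create the .testrepository store once
#   testr run    # runs the discovery command defined above
# OS_TEST_PATH may be overridden to target a different test directory.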

CONTRIBUTING.rst

@@ -1,13 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps on this page:
http://docs.openstack.org/infra/manual/developers.html
Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/networking-ovn

HACKING.rst

@@ -1,4 +0,0 @@
networking-ovn Style Commandments
===============================================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/

LICENSE

@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

README

@@ -0,0 +1,14 @@
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
For ongoing work on maintaining OpenStack packages in the Debian
distribution, please see the Debian OpenStack packaging team at
https://wiki.debian.org/OpenStack/.
For any further questions, please email
openstack-dev@lists.openstack.org or join #openstack-dev on
Freenode.

README.rst

@@ -1,21 +0,0 @@
=========================================================
networking-ovn - OpenStack Neutron integration with OVN
=========================================================
OVN provides virtual networking for Open vSwitch and is a component of the Open
vSwitch project. This project provides integration between OpenStack Neutron
and OVN.
* Free software: Apache license
* Source: http://git.openstack.org/cgit/openstack/networking-ovn
* Bugs: http://bugs.launchpad.net/networking-ovn
* Mailing list:
http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
* IRC: #openstack-neutron-ovn on Freenode.
* Docs: https://docs.openstack.org/networking-ovn/latest
Team and repository tags
------------------------
.. image:: http://governance.openstack.org/badges/networking-ovn.svg
:target: http://governance.openstack.org/reference/tags/index.html

babel.cfg

@@ -1,2 +0,0 @@
[python: **.py]
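# Illustrative extraction run (assuming Babel is installed; the output path
# is hypothetical):
#   pybabel extract -F babel.cfg -o networking_ovn/locale/networking-ovn.pot .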

devstack/README.rst

@@ -1,27 +0,0 @@
======================
Enabling in Devstack
======================
1. Download devstack and networking-ovn::
git clone https://git.openstack.org/openstack-dev/devstack.git
git clone https://git.openstack.org/openstack/networking-ovn.git
2. Add networking-ovn to devstack. The minimal set of critical local.conf
additions is the following::
cd devstack
cat << EOF >> local.conf
> enable_plugin networking-ovn https://git.openstack.org/openstack/networking-ovn
> enable_service ovn
> EOF
You can also use the provided example local.conf, or look at its contents to
add to your own::
cd devstack
cp ../networking-ovn/devstack/local.conf.sample local.conf
3. Run devstack::
./stack.sh

devstack/computenode-local.conf.sample

@@ -1,71 +0,0 @@
#
# Sample DevStack local.conf.
#
# This sample file is intended to be used when adding an additional compute node
# to your test environment. It runs a very minimal set of services.
#
# For this configuration to work, you *must* set the SERVICE_HOST option to the
# IP address of the main DevStack host. You must also set HOST_IP to the IP
# address of this host.
#
[[local|localrc]]
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch:
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
enable_plugin networking-ovn https://git.openstack.org/openstack/networking-ovn
disable_all_services
enable_service n-cpu
enable_service placement-api
enable_service ovn-controller
# Set this to the address of the main DevStack host running the rest of the
# OpenStack services.
SERVICE_HOST=<IP address of host running everything else>
RABBIT_HOST=$SERVICE_HOST
Q_HOST=$SERVICE_HOST
# How to connect to ovsdb-server hosting the OVN SB database
OVN_SB_REMOTE=tcp:$SERVICE_HOST:6642
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=
# Whether or not to build custom openvswitch kernel modules from the ovs git
# tree. This is enabled by default. This is required unless your distro kernel
# includes ovs+conntrack support. This support was first released in Linux 4.3,
# and will likely be backported by some distros.
#OVN_BUILD_MODULES=False
HOST_IP=<IP address of current host>
NOVA_VNC_ENABLED=True
NOVNCPROXY_URL=http://$SERVICE_HOST:6080/vnc_auto.html
VNCSERVER_LISTEN=$HOST_IP
VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
# Skydive
#enable_plugin skydive https://github.com/redhat-cip/skydive.git
#enable_service skydive-agent
# Provider Network
# If you want to enable a provider network instead of the default private
# network after your DevStack environment installation, you *must* set
# Q_USE_PROVIDER_NETWORKING to True and give values to both
# PHYSICAL_NETWORK and OVS_PHYSICAL_BRIDGE.
#Q_USE_PROVIDER_NETWORKING=True
#PHYSICAL_NETWORK=providernet
#OVS_PHYSICAL_BRIDGE=br-provider
#PUBLIC_INTERFACE=<public interface>

devstack/db-local.conf.sample

@@ -1,39 +0,0 @@
#
# Sample DevStack local.conf.
#
# This sample file is intended to be used for running ovn-northd and the
# OVN DBs on a separate node.
#
# For this configuration to work, you *must* set the SERVICE_HOST option to the
# IP address of the main DevStack host.
#
[[local|localrc]]
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch:
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
enable_plugin networking-ovn https://git.openstack.org/openstack/networking-ovn
disable_all_services
enable_service ovn-northd
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=
# Whether or not to build custom openvswitch kernel modules from the ovs git
# tree. This is enabled by default. This is required unless your distro kernel
# includes ovs+conntrack support. This support was first released in Linux 4.3,
# and will likely be backported by some distros.
#OVN_BUILD_MODULES=False

@@ -1,25 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This script is executed in the install-dsvm-networking-ovn-kuryr
# OpenStack CI job that runs DevStack + kuryr. You can find the
# CI job configuration here:
#
# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/networking-ovn.yaml
#
export OVERRIDE_ENABLED_SERVICES=kuryr,etcd-server,docker-engine,key,n-api,n-cpu,n-cond,n-sch,n-crt,n-cauth,n-obj,placement-api,g-api,g-reg,c-sch,c-api,c-vol,rabbit,tempest,mysql,dstat,ovn-northd,ovn-controller,q-svc
export PROJECTS="openstack/networking-ovn openstack/kuryr $PROJECTS"
export DEVSTACK_LOCAL_CONFIG="enable_plugin networking-ovn git://git.openstack.org/openstack/networking-ovn"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin kuryr http://git.openstack.org/openstack/kuryr"

devstack/devstackgaterc

@@ -1,124 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This script is executed in the tempest-dsvm-networking-ovn
# OpenStack CI job that runs DevStack + tempest. It is also used by the
# rally job. You can find the CI job configuration here:
#
# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/networking-ovn.yaml
#
OVS_BRANCH=$1
OVERRIDE_ENABLED_SERVICES=key,n-api,n-cpu,n-cond,n-sch,n-crt,n-cauth,n-obj,n-api-meta,placement-api,g-api,g-reg,c-sch,c-api,c-vol,rabbit,mysql,dstat,ovn-northd,ovn-controller,q-svc
# FIXME(dalvarez): Remove this once OVS 2.8 is released. Metadata support depends on it.
if [[ "${OVS_BRANCH}" != "latest-release" ]] ; then
OVERRIDE_ENABLED_SERVICES=${OVERRIDE_ENABLED_SERVICES},networking-ovn-metadata-agent
fi
export OVERRIDE_ENABLED_SERVICES
if [ -z "${RALLY_SCENARIO}" ] ; then
# Only include tempest if this is not a rally job.
export OVERRIDE_ENABLED_SERVICES=${OVERRIDE_ENABLED_SERVICES},tempest
# FIXME(dalvarez): Remove this once OVS 2.8 is released. Metadata support depends on it.
if [[ "${OVS_BRANCH}" == "latest-release" ]] ; then
export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_RUN_VALIDATION=False"
fi
fi
export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_USE_PROVIDERNET_FOR_PUBLIC=True"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"PHYSICAL_NETWORK=public"
if [[ "${OVS_BRANCH}" == "latest-release" ]] ; then
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVN_BRANCH=branch-2.7"
elif [[ "${OVS_BRANCH}" == "master" ]] ; then
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVN_BRANCH=master"
elif [[ -z "${OVS_BRANCH}" ]] ; then
: # Use the default specified in the devstack plugin
else
echo "Unexpected value to ovs branch argument to devstackgaterc: \"${OVS_BRANCH}\""
exit 1
fi
if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] ; then
# NOTE(rtheis): Multinode does not require creating an OVN L3 public network.
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVN_L3_CREATE_PUBLIC_NETWORK=False"
# NOTE(rtheis): Configure the enabled services on the compute node.
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ENABLED_SERVICES=n-cpu,dstat,c-vol,c-bak,ovn-controller"
# NOTE(rtheis): Configure OVN on the compute node.
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"OVN_SB_REMOTE=tcp:\$SERVICE_HOST:6642"
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"OVN_NB_REMOTE=tcp:\$SERVICE_HOST:6641"
# NOTE(rtheis): Since we are overriding the enabled services, we must
# also configure the database and rabbit services on the compute node.
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"DATABASE_HOST=\$SERVICE_HOST"
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"DATABASE_TYPE=mysql"
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"RABBIT_HOST=\$SERVICE_HOST"
else
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVN_L3_CREATE_PUBLIC_NETWORK=True"
fi
# Begin list of exclusions.
r="^(?!.*"
# exclude the slow tag (part of the default for 'full')
r="$r(?:.*\[.*\bslow\b.*\])"
# TODO(chandrav): Opened bug #1629073 to track this
r="$r|(?:tempest\.scenario\.test_network_v6.*)"
# exclude things that just aren't enabled with OVN
r="$r|(?:tempest\.api\.network\.admin\.test_l3_agent_scheduler.*)"
r="$r|(?:tempest\.api\.network\.admin\.test_quotas\.QuotasTest\.test_lbaas_quotas.*)"
r="$r|(?:tempest\.api\.network\.test_load_balancer.*)"
r="$r|(?:tempest\.scenario\.test_load_balancer.*)"
r="$r|(?:tempest\.api\.network\.admin\.test_load_balancer.*)"
r="$r|(?:tempest\.api\.network\.admin\.test_lbaas.*)"
r="$r|(?:tempest\.api\.network\.test_fwaas_extensions.*)"
r="$r|(?:tempest\.api\.network\.test_metering_extensions.*)"
r="$r|(?:tempest\.thirdparty\.boto\.test_s3.*)"
# exclude dhcp agent tests
r="$r|(?:tempest\.api\.network\.admin\.test_dhcp_agent_scheduler.*)"
# exclude this test case because this expects dhcp interface to be present
r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_port_security_macspoofing_port)"
# exclude this test because there are no agents used by OVN
r="$r|(?:tempest\.api\.network\.admin\.test_agent_management.*)"
# exclude some unrelated stuff to make networking-ovn targeted runs go faster
r="$r|(?:tempest\.api\.identity*)"
r="$r|(?:tempest\.api\.image*)"
r="$r|(?:tempest\.api\.volume*)"
r="$r|(?:tempest\.api\.compute\.images*)"
r="$r|(?:tempest\.api\.compute\.keypairs*)"
r="$r|(?:tempest\.api\.compute\.certificates*)"
r="$r|(?:tempest\.api\.compute\.flavors*)"
r="$r|(?:tempest\.api\.compute\.test_quotas*)"
r="$r|(?:tempest\.api\.compute\.test_versions*)"
r="$r|(?:tempest\.api\.compute\.volumes*)"
# End list of exclusions.
r="$r)"
# only run tempest.api/scenario/thirdparty tests (part of the default for 'full')
r="$r(tempest\.(api|scenario|thirdparty)).*$"
if [ -z "$DEVSTACK_GATE_GRENADE" ]; then
# Do not run the tempest test cases on grenade jobs. By not setting this,
# we still do run the tempest smoke tests. The pre-upgraded stack too runs
# just the smoke tests. This is how openstack/neutron runs its post-upgrade
# tempest tests.
export DEVSTACK_GATE_TEMPEST_REGEX="$r"
fi

@@ -1,2 +0,0 @@
This file can be removed after this change merges:
https://review.openstack.org/#/c/435665

@@ -1,2 +0,0 @@
This file can be removed after this change merges:
https://review.openstack.org/#/c/435665

devstack/lib/networking-ovn

@@ -1,546 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# devstack/lib/networking-ovn
# Functions to control the configuration and operation of the OVN service
# Dependencies:
#
# ``functions`` file
# ``DEST`` must be defined
# ``STACK_USER`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
# - install_ovn
# - configure_ovn
# - configure_ovn_plugin
# - init_ovn
# - start_ovn
# - stop_ovn
# - cleanup_ovn
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Libraries that could be installed from source
GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git}
GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-master}
GITDIR["ovsdbapp"]=$DEST/ovsdbapp
# Defaults
# --------
# The git repo to use
OVN_REPO=${OVN_REPO:-https://github.com/openvswitch/ovs.git}
OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
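# e.g. the default repo URL above yields OVN_REPO_NAME="ovs"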
# The project directory
NETWORKING_OVN_DIR=$DEST/networking-ovn
# The branch to use from $OVN_REPO
OVN_BRANCH=${OVN_BRANCH:-master}
# How to connect to ovsdb-server hosting the OVN SB database.
OVN_SB_REMOTE=${OVN_SB_REMOTE:-tcp:$HOST_IP:6642}
# How to connect to ovsdb-server hosting the OVN NB database
OVN_NB_REMOTE=${OVN_NB_REMOTE:-tcp:$HOST_IP:6641}
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated. A randomly generated UUID will be saved in a file
# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack.
OVN_UUID=${OVN_UUID:-}
# Whether or not to build the openvswitch kernel module from ovs. This is required
# unless the distro kernel includes ovs+conntrack support.
OVN_BUILD_MODULES=$(trueorfalse True OVN_BUILD_MODULES)
# Whether or not to install the ovs python module from ovs source. This can be
# used to test and validate new ovs python features. This should only be used
# for development purposes since the ovs python version is controlled by OpenStack
# requirements.
OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined
# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
# configure the MTU DHCP option.
OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
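# Worked example (illustrative): with a 1500-byte physical MTU and the
# default IPv4 overlay, the advertised tenant MTU is 1500 - (38 + 20) = 1442;
# with an IPv6 overlay it is 1500 - (38 + 40) = 1422.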
# This sets whether to create a public network and bridge.
# If set to True, a public network and subnet(s) will be created, and a router
# will be created to route the default private network to the public one.
OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse False OVN_L3_CREATE_PUBLIC_NETWORK)
# ml2/config for neutron_sync_mode
OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}
# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the
# hypervisor/chassis where a router's gateway should be hosted in OVN. The
# default OVN L3 scheduler is leastloaded
OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}
# Neutron directory
NEUTRON_DIR=$DEST/neutron
OVN_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini
# Set variables for building OVS from source
OVS_REPO=$OVN_REPO
OVS_REPO_NAME=$OVN_REPO_NAME
OVS_BRANCH=$OVN_BRANCH
NETWORKING_OVN_BIN_DIR=$(get_python_exec_prefix)
NETWORKING_OVN_METADATA_BINARY="networking-ovn-metadata-agent"
# Utility Functions
# -----------------
# There are some ovs functions OVN depends on that must be sourced from
# the ovs neutron plugins. After doing this, the OVN overrides must be
# re-sourced.
source $TOP_DIR/lib/neutron_plugins/ovs_base
source $TOP_DIR/lib/neutron_plugins/openvswitch_agent
source $NETWORKING_OVN_DIR/devstack/override-defaults
source $NETWORKING_OVN_DIR/devstack/network_utils.sh
function is_ovn_service_enabled {
ovn_service=$1
is_service_enabled ovn && return 0
is_service_enabled $ovn_service && return 0
return 1
}
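# Illustrative call: "is_ovn_service_enabled ovn-northd" succeeds when either
# the umbrella "ovn" service or ovn-northd itself is enabled.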
# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge
# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup
# removed. The call is not relevant for OVN, as it is specific to the use
# of Neutron's OVS agent and hangs when running stack.sh because
# neutron-ovs-cleanup uses the OVSDB native interface.
function ovn_base_setup_bridge {
local bridge=$1
local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge"
if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
fi
$addbr_cmd
sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
}
# Entry Points
# ------------
# cleanup_ovn() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ovn {
local _pwd=$(pwd)
cd $DEST/$OVN_REPO_NAME
sudo make uninstall
sudo make distclean
cd $_pwd
}
# configure_ovn() - Set config files, create data dirs, etc
function configure_ovn {
echo "Configuring OVN"
if [ -z "$OVN_UUID" ] ; then
if [ -f ./ovn-uuid ] ; then
OVN_UUID=$(cat ovn-uuid)
else
OVN_UUID=$(uuidgen)
echo $OVN_UUID > ovn-uuid
fi
fi
# Metadata
if is_service_enabled networking-ovn-metadata-agent; then
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
(cd $NETWORKING_OVN_DIR && exec ./tools/generate_config_file_samples.sh)
cp $NETWORKING_OVN_DIR/etc/metadata_agent.ini.sample $OVN_META_CONF
iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $OVN_META_CONF DEFAULT nova_metadata_ip $HOST_IP
iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
iniset $OVN_META_CONF ovs ovsdb_connection unix:/usr/local/var/run/openvswitch/db.sock
fi
}
function configure_ovn_plugin {
echo "Configuring Neutron for OVN"
if is_service_enabled q-svc ; then
# NOTE(arosen) needed for tempest
export NETWORK_API_EXTENSIONS=$(python -c \
'from networking_ovn.common import extensions ;\
print ",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS)')
export NETWORK_API_EXTENSIONS=$NETWORK_API_EXTENSIONS,$(python -c \
'from networking_ovn.common import extensions ;\
print ",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3)')
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE"
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE"
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE"
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER"
populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP"
inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver
if is_service_enabled networking-ovn-metadata-agent; then
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
else
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
fi
fi
if is_service_enabled q-dhcp ; then
die $LINENO "The q-dhcp service must be disabled with OVN."
fi
if is_service_enabled q-l3 ; then
die $LINENO "The q-l3 service must be disabled with OVN."
fi
# NOTE(rtheis): Without the networking-ovn metadata agent, OVN lacks
# metadata support, so enabling config drive is required to provide
# metadata to instances.
if is_service_enabled n-cpu ; then
if is_service_enabled networking-ovn-metadata-agent ; then
iniset $NOVA_CONF neutron service_metadata_proxy True
else
iniset $NOVA_CONF DEFAULT force_config_drive True
fi
fi
}
# init_ovn() - Initialize databases, etc.
function init_ovn {
# clean up from previous (possibly aborted) runs
# create required data files
# Assumption: this is a dedicated test system and there is nothing important
# in the ovn, ovn-nb, or ovs databases. We're going to trash them and
# create new ones on each devstack run.
base_dir=$DATA_DIR/ovs
mkdir -p $base_dir
for db in conf.db ovnsb.db ovnnb.db vtep.db ; do
if [ -f $base_dir/$db ] ; then
rm -f $base_dir/$db
fi
done
rm -f $base_dir/.*.db.~lock~
echo "Creating OVS, OVN-Southbound and OVN-Northbound Databases"
ovsdb-tool create $base_dir/conf.db $DEST/$OVN_REPO_NAME/vswitchd/vswitch.ovsschema
if is_ovn_service_enabled ovn-northd ; then
ovsdb-tool create $base_dir/ovnsb.db $DEST/$OVN_REPO_NAME/ovn/ovn-sb.ovsschema
ovsdb-tool create $base_dir/ovnnb.db $DEST/$OVN_REPO_NAME/ovn/ovn-nb.ovsschema
fi
if is_ovn_service_enabled ovn-controller-vtep ; then
ovsdb-tool create $base_dir/vtep.db $DEST/$OVN_REPO_NAME/vtep/vtep.ovsschema
fi
}
# install_ovn() - Collect source and prepare
function install_ovn {
echo "Installing OVN and dependent packages"
# If OVS is already installed, remove it, because we're about to re-install
# it from source.
for package in openvswitch openvswitch-switch openvswitch-common; do
if is_package_installed $package ; then
uninstall_package $package
fi
done
if ! is_neutron_enabled ; then
# NOTE(rtheis): networking-ovn depends on neutron, so ensure it at
# least gets installed and its configuration directory exists (which
# is needed by the multinode job).
install_neutron
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
fi
# Install tox, used to generate the config (see devstack/override-defaults)
pip_install tox
source $NEUTRON_DIR/devstack/lib/ovs
compile_ovs $OVN_BUILD_MODULES
sudo mkdir -p /usr/local/var/run/openvswitch
sudo mkdir -p /usr/local/var/log/openvswitch
sudo chown $(whoami) /usr/local/var/run/openvswitch
sudo chown $(whoami) /usr/local/var/log/openvswitch
# Install ovsdbapp from source if requested
if use_library_from_git "ovsdbapp"; then
git_clone_by_name "ovsdbapp"
setup_dev_lib "ovsdbapp"
fi
setup_develop $DEST/networking-ovn
# Install ovs python module from ovs source.
if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then
sudo pip uninstall -y ovs
sudo pip install -e $DEST/$OVS_REPO_NAME/python
fi
}
function start_ovs {
echo "Starting OVS"
local _pwd=$(pwd)
local ovsdb_logfile="ovsdb-server.log.${CURRENT_LOG_TIME}"
bash -c "cd '$LOGDIR' && touch '$ovsdb_logfile' && ln -sf '$ovsdb_logfile' ovsdb-server.log"
local ovsdb_nb_logfile="ovsdb-server-nb.log.${CURRENT_LOG_TIME}"
bash -c "cd '$LOGDIR' && touch '$ovsdb_nb_logfile' && ln -sf '$ovsdb_nb_logfile' ovsdb-server-nb.log"
local ovsdb_sb_logfile="ovsdb-server-sb.log.${CURRENT_LOG_TIME}"
bash -c "cd '$LOGDIR' && touch '$ovsdb_sb_logfile' && ln -sf '$ovsdb_sb_logfile' ovsdb-server-sb.log"
cd $DATA_DIR/ovs
EXTRA_DBS=""
OVSDB_SB_REMOTE=""
if is_ovn_service_enabled ovn-northd ; then
# TODO (regXboi): change ovn-ctl so that we can use something
# other than --db-nb-port for port and ip address
DB_NB_PORT="6641"
DB_NB_INSECURE_REMOTE="yes"
DB_NB_FILE="$DATA_DIR/ovs/ovnnb.db"
OVN_NB_LOGFILE="$LOGDIR/ovsdb-server-nb.log"
# TODO (regXboi): change ovn-ctl so that we can use something
# other than --db-sb-port for port and ip address
DB_SB_PORT="6642"
DB_SB_INSECURE_REMOTE="yes"
DB_SB_FILE="$DATA_DIR/ovs/ovnsb.db"
OVN_SB_LOGFILE="$LOGDIR/ovsdb-server-sb.log"
/usr/local/share/openvswitch/scripts/ovn-ctl start_ovsdb \
--db-nb-create-insecure-remote=$DB_NB_INSECURE_REMOTE \
--db-sb-create-insecure-remote=$DB_SB_INSECURE_REMOTE \
--db-nb-port=$DB_NB_PORT --db-sb-port=$DB_SB_PORT \
--db-nb-file=$DB_NB_FILE --ovn-nb-logfile=$OVN_NB_LOGFILE \
--db-sb-file=$DB_SB_FILE --ovn-sb-logfile=$OVN_SB_LOGFILE
echo "Waiting for ovn ovsdb servers to start ... "
DB_NB_SOCK="/usr/local/var/run/openvswitch/ovnnb_db.sock"
DB_SB_SOCK="/usr/local/var/run/openvswitch/ovnsb_db.sock"
local testcmd="test -e $DB_NB_SOCK -a -e $DB_SB_SOCK"
test_with_retry "$testcmd" "nb ovsdb-server did not start" $SERVICE_TIMEOUT 1
echo "done."
fi
# TODO (regXboi): it would be nice to run the following with run_process
# and have it end up under the control of screen. However, at the point
# this is called, screen isn't running, so we'd have to overload
# USE_SCREEN to get the process to start, but testing shows that the
# resulting process doesn't want to create br-int, which leaves things
# rather broken. So, stay with this for now and somebody more tenacious
# than I can figure out how to make it work...
if is_ovn_service_enabled ovn-controller || is_ovn_service_enabled ovn-controller-vtep ; then
local _OVSREMOTE="--remote=db:Open_vSwitch,Open_vSwitch,manager_options"
local _VTEPREMOTE=""
local _OVSDB=conf.db
local _VTEPDB=""
if is_ovn_service_enabled ovn-controller-vtep ; then
_VTEPREMOTE="--remote=db:hardware_vtep,Global,managers"
_VTEPDB=vtep.db
fi
ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock \
$_OVSREMOTE $_VTEPREMOTE \
--pidfile --detach -vconsole:off \
--log-file=$LOGDIR/ovsdb-server.log \
$_OVSDB $_VTEPDB
echo -n "Waiting for ovsdb-server to start ... "
local testcmd="test -e /usr/local/var/run/openvswitch/db.sock"
test_with_retry "$testcmd" "ovsdb-server did not start" $SERVICE_TIMEOUT 1
echo "done."
ovs-vsctl --no-wait init
ovs-vsctl --no-wait set open_vswitch . system-type="devstack"
ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID"
fi
if is_ovn_service_enabled ovn-controller || is_ovn_service_enabled ovn-controller-vtep ; then
ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve,vxlan"
ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
ovn_base_setup_bridge br-int
ovs-vsctl --no-wait set bridge br-int fail-mode=secure other-config:disable-in-band=true
local ovswd_logfile="ovs-switchd.log.${CURRENT_LOG_TIME}"
bash -c "cd '$LOGDIR' && touch '$ovswd_logfile' && ln -sf '$ovswd_logfile' ovs-vswitchd.log"
# Bump up the max number of open files ovs-vswitchd can have
sudo sh -c "ulimit -n 32000 && exec ovs-vswitchd --pidfile --detach -vconsole:off --log-file=$LOGDIR/ovs-vswitchd.log"
if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then
ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE
ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE}
fi
fi
if is_ovn_service_enabled ovn-controller-vtep ; then
ovn_base_setup_bridge br-vtep
vtep-ctl add-ps br-vtep
vtep-ctl set Physical_Switch br-vtep tunnel_ips=$HOST_IP
sudo /usr/local/share/openvswitch/scripts/ovs-vtep --log-file=$LOGDIR/ovs-vtep.log --pidfile --detach br-vtep
vtep-ctl set-manager tcp:$HOST_IP:6640
fi
cd $_pwd
}
# start_ovn() - Start running processes, including screen
function start_ovn {
echo "Starting OVN"
if is_ovn_service_enabled ovn-controller ; then
# (regXboi) pulling out --log-file to avoid double logging
# appears to break devstack, so let's not do that
run_process ovn-controller "/usr/local/bin/ovn-controller --pidfile --log-file unix:/usr/local/var/run/openvswitch/db.sock" root root
# This makes sure that the console logs have time stamps to
# the millisecond, but we need to make sure ovs-appctl has
# a pid file to work with, so ...
echo -n "Waiting for ovn-controller to start ... "
local testcmd="test -e /usr/local/var/run/openvswitch/ovn-controller.pid"
test_with_retry "$testcmd" "ovn-controller did not start" $SERVICE_TIMEOUT 1
echo "done."
sudo ovs-appctl -t ovn-controller vlog/set "PATTERN:CONSOLE:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m"
fi
if is_ovn_service_enabled ovn-controller-vtep ; then
# (regXboi) pulling out --log-file to avoid double logging
# appears to break devstack, so let's not do that
run_process ovn-controller-vtep "/usr/local/bin/ovn-controller-vtep --pidfile --log-file --vtep-db=unix:/usr/local/var/run/openvswitch/db.sock --ovnsb-db=$OVN_SB_REMOTE" root root
# This makes sure that the console logs have time stamps to
# the millisecond, but we need to make sure ovs-appctl has
# a pid file to work with, so ...
echo -n "Waiting for ovn-controller-vtep to start ... "
local testcmd="test -e /usr/local/var/run/openvswitch/ovn-controller-vtep.pid"
test_with_retry "$testcmd" "ovn-controller-vtep did not start" $SERVICE_TIMEOUT 1
echo "done."
sudo ovs-appctl -t ovn-controller-vtep vlog/set "PATTERN:CONSOLE:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m"
fi
if is_ovn_service_enabled ovn-northd ; then
run_process ovn-northd "/usr/local/bin/ovn-northd --log-file=$LOGDIR/ovn-northd.log --pidfile"
# This makes sure that the console logs have time stamps to
# the millisecond, but we need to make sure ovs-appctl has
# a pid file to work with, so ...
echo -n "Waiting for ovn-northd to start ... "
OVN_NORTHD_PID="/usr/local/var/run/openvswitch/ovn-northd.pid"
local testcmd="test -e $OVN_NORTHD_PID"
test_with_retry "$testcmd" "ovn-northd did not start" $SERVICE_TIMEOUT 1
echo "done."
sudo ovs-appctl -t ovn-northd vlog/set "PATTERN:CONSOLE:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m"
fi
if is_service_enabled networking-ovn-metadata-agent; then
run_process networking-ovn-metadata-agent "$NETWORKING_OVN_BIN_DIR/$NETWORKING_OVN_METADATA_BINARY --config-file $NEUTRON_CONF --config-file $Q_PLUGIN_CONF_FILE --config-file $OVN_META_CONF"
fi
}
# stop_ovn() - Stop running processes (non-screen)
function stop_ovn {
if is_ovn_service_enabled ovn-controller ; then
stop_process ovn-controller
sudo killall ovs-vswitchd
fi
if is_ovn_service_enabled ovn-controller-vtep ; then
stop_process ovn-controller-vtep
sudo killall ovs-vtep
sudo killall ovs-vswitchd
fi
if is_ovn_service_enabled ovn-northd ; then
/usr/local/share/openvswitch/scripts/ovn-ctl stop_northd
fi
sudo killall ovsdb-server
if is_service_enabled networking-ovn-metadata-agent; then
sudo pkill -9 -f haproxy || :
stop_process networking-ovn-metadata-agent
fi
}
# stop_ovs_dp() - Stop OVS datapath
function stop_ovs_dp {
sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp
sudo rmmod vport_geneve
sudo rmmod openvswitch
}
function disable_libvirt_apparmor {
if ! sudo aa-status --enabled ; then
return 0
fi
# NOTE(arosen): This is used as a workaround to allow newer versions
# of libvirt to work with ovs-configured ports. See LP#1466631.
# This requires the apparmor-utils package.
install_package apparmor-utils
# disables apparmor for libvirtd
sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd
}
function create_public_bridge {
# Create the public bridge that OVN will use
# This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6
local ext_gw_ifc
ext_gw_ifc=$(get_ext_gw_interface)
sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13
sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc
if [ -n "$FLOATING_RANGE" ]; then
local cidr_len=${FLOATING_RANGE#*/}
sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc
fi
# Ensure IPv6 RAs are accepted on the interface with the default route.
# This is needed for neutron-based devstack clouds to work in
# IPv6-only clouds in the gate. Please do not remove this without
# talking to folks in Infra. This fix is based on a devstack fix for
# neutron L3 agent: https://review.openstack.org/#/c/359490/.
default_route_dev=$(ip route | grep ^default | awk '{print $5}')
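# e.g. "default via 10.0.0.1 dev eth0 ..." -> eth0 (address is illustrative)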
sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2
sudo sysctl -w net.ipv6.conf.all.forwarding=1
if [ -n "$IPV6_PUBLIC_RANGE" ]; then
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc
# NOTE(numans): Commenting out the code below for now as it is breaking
# the CI after the xenial upgrade.
# https://bugs.launchpad.net/networking-ovn/+bug/1648670
# sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_PUBLIC_NETWORK_GATEWAY dev $ext_gw_ifc
fi
sudo ip link set $ext_gw_ifc up
}

devstack/local.conf.sample

@@ -1,129 +0,0 @@
#
# Sample DevStack local.conf.
#
# This sample file is intended to be used for your typical DevStack environment
# that's running all of OpenStack on a single host. This can also be used as
# the first host of a multi-host test environment.
#
# No changes to this sample configuration are required for this to work.
#
[[local|localrc]]
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch:
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
enable_plugin networking-ovn https://git.openstack.org/openstack/networking-ovn
enable_service ovn-northd
enable_service ovn-controller
enable_service networking-ovn-metadata-agent
# Use Neutron instead of nova-network
disable_service n-net
enable_service q-svc
# Disable Neutron agents not used with OVN.
disable_service q-agt
disable_service q-l3
disable_service q-dhcp
disable_service q-meta
# Horizon (the web UI) is enabled by default. You may want to disable
# it here to speed up DevStack a bit.
enable_service horizon
#disable_service horizon
# Cinder (OpenStack Block Storage) is disabled by default to speed up
# DevStack a bit. You may enable it here if you would like to use it.
disable_service cinder c-sch c-api c-vol
#enable_service cinder c-sch c-api c-vol
# How to connect to ovsdb-server hosting the OVN NB database.
#OVN_NB_REMOTE=tcp:$SERVICE_HOST:6641
# How to connect to ovsdb-server hosting the OVN SB database.
#OVN_SB_REMOTE=tcp:$SERVICE_HOST:6642
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=
# If using the OVN native layer-3 service, choose a router scheduler to
# manage the distribution of router gateways on hypervisors/chassis.
# Default value is leastloaded.
#OVN_L3_SCHEDULER=leastloaded
# Whether or not to build custom openvswitch kernel modules from the ovs git
# tree. This is enabled by default. This is required unless your distro kernel
# includes ovs+conntrack support. This support was first released in Linux 4.3,
# and will likely be backported by some distros.
#OVN_BUILD_MODULES=False
# Enable services that depend on the neutron plugin.
#enable_plugin neutron https://git.openstack.org/openstack/neutron
#enable_service q-qos
#enable_service q-trunk
# Skydive
#enable_plugin skydive https://github.com/redhat-cip/skydive.git
#enable_service skydive-analyzer
#enable_service skydive-agent
# If you want to enable a provider network instead of the default private
# network after your DevStack environment installation, you *must* set
# Q_USE_PROVIDER_NETWORKING to True, and also set FIXED_RANGE,
# NETWORK_GATEWAY and ALLOCATION_POOL to values that fit your
# environment. Specifying Q_AGENT is needed to allow devstack
# to run various "ip link set" and "ovs-vsctl" commands for the provider
# network setup.
#Q_AGENT=openvswitch
#Q_USE_PROVIDER_NETWORKING=True
#PHYSICAL_NETWORK=providernet
#PROVIDER_NETWORK_TYPE=flat
#PUBLIC_INTERFACE=<public interface>
#OVS_PHYSICAL_BRIDGE=br-provider
#PROVIDER_SUBNET_NAME=provider-subnet
# use the following for IPv4
#IP_VERSION=4
#FIXED_RANGE=<CIDR for the Provider Network>
#NETWORK_GATEWAY=<Provider Network Gateway>
#ALLOCATION_POOL=<Provider Network Allocation Pool>
# use the following for IPv4+IPv6
#IP_VERSION=4+6
#FIXED_RANGE=<CIDR for the Provider Network>
#NETWORK_GATEWAY=<Provider Network Gateway>
#ALLOCATION_POOL=<Provider Network Allocation Pool>
# IPV6_PROVIDER_FIXED_RANGE=<v6 CIDR for the Provider Network>
# IPV6_PROVIDER_NETWORK_GATEWAY=<v6 Gateway for the Provider Network>
# If you wish to use the provider network for public access to the cloud,
# set the following
#Q_USE_PROVIDERNET_FOR_PUBLIC=True
#PUBLIC_NETWORK_NAME=<Provider network name>
#PUBLIC_NETWORK_GATEWAY=<Provider network gateway>
#PUBLIC_PHYSICAL_NETWORK=<Provider network name>
#IP_VERSION=4
#PUBLIC_SUBNET_NAME=<provider subnet name>
#Q_FLOATING_ALLOCATION_POOL=<Provider Network Allocation Pool>
#FLOATING_RANGE=<CIDR for the Provider Network>
# NOTE: DO NOT MOVE THESE SECTIONS FROM THE END OF THIS FILE
# IF YOU DO, THEY WON'T WORK!!!!!
#
# Enable Nova automatic host discovery for cells every 2 seconds.
# Only needed for multinode devstack, as otherwise there will be issues
# when the 2nd compute node comes online.
[[post-config|$NOVA_CONF]]
[scheduler]
discover_hosts_in_cells_interval = 2

devstack/network_utils.sh

@@ -1,17 +0,0 @@
# Network utility functions that were copied mostly from
# devstack's neutron-legacy script so they could be used
# by the networking-ovn devstack plugin
function get_ext_gw_interface {
# Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH
# This function is copied directly from the devstack neutron-legacy script
if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then
echo $Q_PUBLIC_VETH_EX
else
# Disable in-band as we are going to use local port
# to communicate with VMs
sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \
other_config:disable-in-band=true
echo $PUBLIC_BRIDGE
fi
}
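# Illustrative call: ext_gw_ifc=$(get_ext_gw_interface) yields the veth pair
# end when Q_USE_PUBLIC_VETH=True, and the public bridge name otherwise.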

devstack/override-defaults

@@ -1,14 +0,0 @@
Q_PLUGIN=${Q_PLUGIN:-"ml2"}
Q_AGENT=${Q_AGENT:-""}
Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger}
Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve}
Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"}
Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"}
ML2_L3_PLUGIN="networking_ovn.l3.l3_ovn.OVNL3RouterPlugin"
# This function is invoked by DevStack's Neutron plugin setup
# code and is being overridden here since the OVN devstack
# plugin will handle the install.
function neutron_plugin_install_agent_packages {
:
}

devstack/plugin.sh

@@ -1,71 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# devstack/plugin.sh
# networking-ovn actions for devstack plugin framework
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
source $DEST/networking-ovn/devstack/lib/networking-ovn
source $TOP_DIR/lib/neutron-legacy
# main loop
if is_service_enabled q-svc || is_ovn_service_enabled ovn-northd || is_ovn_service_enabled ovn-controller || is_ovn_service_enabled ovn-controller-vtep ; then
if [[ "$1" == "stack" && "$2" == "install" ]]; then
install_ovn
configure_ovn
init_ovn
# We have to start at install time, because Neutron's post-config
# phase runs ovs-vsctl.
start_ovs
disable_libvirt_apparmor
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
configure_ovn_plugin
if is_service_enabled nova; then
create_nova_conf_neutron
fi
start_ovn
# If not previously set by another process, set the OVN_*_DB
# variables to enable OVN commands from any node.
grep -lq 'OVN' ~/.bash_profile || echo -e "\n# Enable OVN commands from any node.\nexport OVN_NB_DB=$OVN_NB_REMOTE\nexport OVN_SB_DB=$OVN_SB_REMOTE" >> ~/.bash_profile
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored because"
echo "NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
else
create_public_bridge
fi
fi
fi
if [[ "$1" == "unstack" ]]; then
stop_ovn
stop_ovs_dp
cleanup_ovn
fi
fi
# Restore xtrace
$XTRACE
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:

devstack/upgrade/resources.sh

@@ -1,61 +0,0 @@
#!/bin/bash
set -o errexit
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
source $TOP_DIR/openrc admin admin
OVN_TEST_NETWORK=ovn-test-net
function early_create {
:
}
function create {
local net_id
net_id=$(openstack network create $OVN_TEST_NETWORK -f value -c id)
resource_save ovn net_id $net_id
}
function verify_noapi {
:
}
function verify {
local net_id
net_id=$(resource_get ovn net_id)
# verify will be called in the base stage as well, but ovn-nbctl will be
# installed only during the target stage.
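# OVN names each logical switch "neutron-<network UUID>", hence the lookup
# below.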
[ -z $(which ovn-nbctl || true) ] || ovn-nbctl list Logical_Switch neutron-$net_id
}
function destroy {
local net_id
net_id=$(resource_get ovn net_id)
openstack network delete $net_id
}
case $1 in
"early_create")
early_create
;;
"create")
create
;;
"verify_noapi")
verify_noapi
;;
"verify")
verify
;;
"destroy")
destroy
;;
"force_destroy")
set +o errexit
destroy
;;
esac

devstack/upgrade/settings

@@ -1,8 +0,0 @@
register_project_for_upgrade networking-ovn
devstack_localrc base disable_service ovn-northd ovn-controller
devstack_localrc base enable_service q-agt q-meta q-metering s-account s-container s-object s-proxy
devstack_localrc target enable_plugin networking-ovn http://git.openstack.org/openstack/networking-ovn
devstack_localrc target PUBLIC_BRIDGE=br-ex
devstack_localrc target enable_service s-account s-container s-object s-proxy
devstack_localrc target disable_service q-agt

View File

@ -1,115 +0,0 @@
echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"
# Clean up any resources that may be in use
cleanup() {
set +o errexit
echo "*********************************************************************"
echo "ERROR: Abort $0"
echo "*********************************************************************"
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)
set -o xtrace
# Set for DevStack compatibility
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
source $TARGET_DEVSTACK_DIR/stackrc
set -o errexit
TOP_DIR=$TARGET_DEVSTACK_DIR
# Get functions from current DevStack
source $TARGET_DEVSTACK_DIR/lib/apache
source $TARGET_DEVSTACK_DIR/lib/tls
source $TARGET_DEVSTACK_DIR/lib/keystone
[[ -r $TARGET_DEVSTACK_DIR/lib/neutron ]] && source $TARGET_DEVSTACK_DIR/lib/neutron
source $TARGET_DEVSTACK_DIR/lib/neutron-legacy
source $TARGET_DEVSTACK_DIR/lib/neutron_plugins/services/l3
source $TARGET_DEVSTACK_DIR/lib/database
source $TARGET_DEVSTACK_DIR/lib/nova
NW_OVN_DEVSTACK_DIR=$(dirname "$0")/..
source $NW_OVN_DEVSTACK_DIR/lib/networking-ovn
export OVN_NEUTRON_SYNC_MODE=repair
set -x
# Restart rabbitmq. Without this, the tempest test cases on the upgraded stack
# fail randomly due to rabbitmq connection problems.
sudo service rabbitmq-server restart
# We are no longer starting the OVS agent, so delete the dead agents from neutron
dead_agents=$(neutron --os-cloud devstack-admin agent-list --alive False -f value -c id || /bin/true)
for agent in $dead_agents; do
neutron --os-cloud devstack-admin agent-delete $agent || /bin/true
done
# stop neutron and its agents as the neutron configuration file is going to
# be modified now
stop_neutron || /bin/true
# Reuse the existing vswitch db
ovs_db_file=$(/usr/share/openvswitch/scripts/ovs-ctl --help | grep DBDIR | awk '{gsub(/\:/, ""); printf $2"/"$1"\n"}')
mkdir -p $DATA_DIR/ovs
cp $ovs_db_file $DATA_DIR/ovs/conf.db
install_ovn
# Upgrade the db to the latest ovsschema
OVS_SHARE_ROOT=/usr/local/share/openvswitch/
/bin/bash -c ". $OVS_SHARE_ROOT/scripts/ovs-lib; upgrade_db $DATA_DIR/ovs/conf.db $OVS_SHARE_ROOT/vswitch.ovsschema"
configure_ovn
start_ovs
# We need to reconfigure br-ex because install_ovn removes the ovs kernel
# module, thereby removing the br-ex interface. start_ovs then recreates
# the br-ex interface.
sudo ip addr add $PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} dev br-ex
sudo ip link set br-ex up
# Reset the openflow protocol in the vswitchd Bridge tables
for br in br-int br-ex br-tun; do
ovs-vsctl set Bridge $br protocols=[] || /bin/true
done
disable_libvirt_apparmor
upgrade_project ovn $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
neutron_plugin_configure_common
Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger
Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve,vxlan
Q_ML2_TENANT_NETWORK_TYPE="geneve"
neutron_plugin_configure_service
configure_ovn_plugin
if is_service_enabled nova; then
create_nova_conf_neutron
fi
start_ovn
ensure_services_started ovn-controller ovn-northd
start_neutron_service_and_check
start_neutron_agents
set +x
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"

View File

@ -1,39 +0,0 @@
#
# Sample DevStack local.conf.
#
# This sample file is intended for running the HW VTEP emulator on a
# separate node.
#
# For this configuration to work, you *must* set the SERVICE_HOST option to the
# IP address of the main DevStack host.
#
[[local|localrc]]
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch:
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
enable_plugin networking-ovn https://git.openstack.org/openstack/networking-ovn
disable_all_services
enable_service ovn-controller-vtep
# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=
# Whether or not to build custom openvswitch kernel modules from the ovs git
# tree. This is enabled by default. This is required unless your distro kernel
# includes ovs+conntrack support. This support was first released in Linux 4.3,
# and will likely be backported by some distros.
#OVN_BUILD_MODULES=False

View File

@ -1,100 +0,0 @@
Container Integration with OVN
=================================
OVN supports virtual networking for both VMs and containers. There are two
modes OVN can operate in with respect to containers. The first mode looks just
like it does with VMs. If you're running a bunch of containers in a cluster of
VMs, OVN can be used to provide a virtual networking overlay for those
containers to use.
The second mode is very interesting in the context of OpenStack. OVN makes
special accommodation for running containers inside of VMs when the networking
for those VMs is already being managed by OVN. You can create a special type
of port in OVN for these containers and have them directly connected to virtual
networks managed by OVN. There are two major benefits of this:
* It allows containers to use virtual networks without creating another layer
of overlay networks. This reduces networking complexity and increases
performance.
* It allows arbitrary connections between any VMs and any containers running
inside VMs.
Creating a Container Port
------------------------------
A container port has two additional attributes that do not exist with a normal
Neutron port. First, you must specify the parent port that the VM is using.
Second, you must specify a tag. This tag is a VLAN ID today, though that may
change in the future. Traffic from the container must be tagged with this VLAN
ID by Open vSwitch running inside the VM. Traffic destined for the container
will arrive on the parent VM port with this VLAN ID. Open vSwitch inside the
VM will forward this traffic to the container.
These two attributes are not currently supported in the Neutron API. As a
result, we are initially allowing these attributes to be set in the
'binding:profile' extension for ports. If this approach gains traction and
more general support, we will revisit making this a real extension to the
Neutron API.
Note that the default /etc/neutron/policy.json does not allow a regular user
to set a 'binding:profile'. If you want to allow this, you must update
policy.json. To do so, change::
"create_port:binding:profile": "rule:admin_only",
to::
"create_port:binding:profile": "",
Here is an example of creating a port for a VM, and then creating a port for a
container that runs inside of that VM::
$ neutron port-create private
Created a new port:
+-----------------------+---------------------------------------------------------------------------------+
| Field | Value |
+-----------------------+---------------------------------------------------------------------------------+
| admin_state_up | True |
| allowed_address_pairs | |
| binding:vnic_type | normal |
| device_id | |
| device_owner | |
| fixed_ips | {"subnet_id": "ce5e0d61-10a1-44be-b917-f628616d686a", "ip_address": "10.0.0.3"} |
| id | 74e43404-f3c2-4f13-aeec-934db4e2de35 |
| mac_address | fa:16:3e:c5:a9:74 |
| name | |
| network_id | f654265f-baa6-4351-9d76-b5693521c521 |
| security_groups | fe25592f-3610-48b9-a114-4ec834c52349 |
| status | DOWN |
| tenant_id | db75dd6671ef4858a7fed450f1f8e995 |
+-----------------------+---------------------------------------------------------------------------------+
$ neutron port-create --binding-profile '{"parent_name":"74e43404-f3c2-4f13-aeec-934db4e2de35","tag":42}' private
Created a new port:
+-----------------------+---------------------------------------------------------------------------------+
| Field | Value |
+-----------------------+---------------------------------------------------------------------------------+
| admin_state_up | True |
| allowed_address_pairs | |
| binding:vnic_type | normal |
| device_id | |
| device_owner | |
| fixed_ips | {"subnet_id": "ce5e0d61-10a1-44be-b917-f628616d686a", "ip_address": "10.0.0.4"} |
| id | be155d07-ecd9-4ad7-91e5-5be60684572a |
| mac_address | fa:16:3e:74:ef:82 |
| name | |
| network_id | f654265f-baa6-4351-9d76-b5693521c521 |
| security_groups | fe25592f-3610-48b9-a114-4ec834c52349 |
| status | DOWN |
| tenant_id | db75dd6671ef4858a7fed450f1f8e995 |
+-----------------------+---------------------------------------------------------------------------------+
Now we can look at the corresponding logical switch ports in OVN to see that
the parent and tag were set as expected::
$ ovn-nbctl lsp-get-parent be155d07-ecd9-4ad7-91e5-5be60684572a
74e43404-f3c2-4f13-aeec-934db4e2de35
$ ovn-nbctl lsp-get-tag be155d07-ecd9-4ad7-91e5-5be60684572a
42
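For completeness, here is a sketch (not part of the original workflow above) of
the corresponding Open vSwitch configuration inside the VM: the container's
interface must be attached to the in-VM bridge as a VLAN access port using the
tag from the binding profile. The bridge and interface names (``br-int``,
``veth0``) are hypothetical::
$ sudo ovs-vsctl add-port br-int veth0 tag=42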

View File

@ -1,26 +0,0 @@
DPDK Support in OVN
===================
Configuration Settings
----------------------
The following configuration parameter needs to be set in the Neutron ML2
plugin configuration file under the 'ovn' section to enable DPDK support.
**vhost_sock_dir**
This is the directory path in which the vswitch daemon on each compute
node creates the virtio sockets. Follow the instructions in
INSTALL.DPDK.md in the openvswitch source tree to learn how to configure
DPDK support in vswitch daemons.
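For illustration, a minimal sketch of the corresponding stanza in the ML2
plugin configuration file; the directory shown is only an assumption and must
match the directory your vswitchd actually uses:
.. code-block:: ini
[ovn]
vhost_sock_dir = /var/run/openvswitch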
Configuration Settings in compute hosts
---------------------------------------
Compute nodes configured with OVS DPDK should set the datapath_type to
"netdev" for the integration bridge (managed by OVN) and for any other
bridges connected to the integration bridge via patch ports. The command
below can be used to set the datapath_type.
.. code-block:: console
$ sudo ovs-vsctl set Bridge br-int datapath_type=netdev
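The same setting applies to any other bridge patched into the integration
bridge. For example, assuming a hypothetical provider bridge named
``br-provider``:
.. code-block:: console
$ sudo ovs-vsctl set Bridge br-provider datapath_type=netdev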

View File

@ -1,53 +0,0 @@
.. _faq:
===
FAQ
===
**Q: Does OVN support DVR or distributed L3 routing?**
DVR (Distributed Virtual Router) is typically used to refer to a specific
implementation of distributed routers provided by the Neutron L3 agent. The
Neutron L3 agent in DVR mode has never been tested with OVN. Support for the
Neutron L3 agent is only temporary and will be removed once OVN's native L3
support includes enough functionality.
When using OVN's native L3 support, L3 routing is always distributed.
**Q: Does OVN support integration with physical switches?**
OVN currently integrates with physical switches by optionally using them as
VTEP gateways from logical to physical networks, and via integrations
provided by the Neutron ML2 framework, such as hierarchical port binding.
**Q: What's the status of HA for networking-ovn and OVN?**
Typically, multiple copies of neutron-server are run across multiple servers
behind a load balancer. The neutron ML2 mechanism driver provided by
networking-ovn supports this deployment model. In addition, multiple copies of
neutron-dhcp-agent and neutron-metadata-agent can be run with the option of
configuring neutron-dhcp-agent availability zones.
The network controller portion of OVN is distributed - an instance of the
ovn-controller service runs on every hypervisor. OVN also includes some
central components for control purposes.
ovn-northd is a centralized service that does some translation between the
northbound and southbound databases in OVN. Currently, only one instance of
this service is run. You can manage it in an active/passive HA mode using
something like Pacemaker. The OVN project plans to make this service
horizontally scalable, for both scale and HA reasons, which will allow it to
be run in an active/active HA mode.
OVN also makes use of ovsdb-server for the OVN northbound and southbound
databases. ovsdb-server supports active/passive HA using replication.
For more information, see:
https://github.com/openvswitch/ovs/blob/master/Documentation/OVSDB-replication.md
A typical deployment would use something like Pacemaker to manage the
active/passive HA process. Clients would be pointed at a virtual IP
address. When the HA manager detects a failure of the master, the
virtual IP would be moved and the passive replica would become the
new master.
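As an illustrative sketch only (the virtual IP below is a placeholder; 6641
and 6642 are the conventional northbound and southbound ports), clients would
be configured to reach the databases through the virtual IP:
.. code-block:: console
$ export OVN_NB_DB=tcp:192.0.2.50:6641
$ export OVN_SB_DB=tcp:192.0.2.50:6642
$ ovn-nbctl show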
See :doc:`ovn` for links to more details on OVN's architecture.

View File

@ -1,91 +0,0 @@
.. _features:
Features
========
Open Virtual Network (OVN) offers the following virtual network
services:
* Layer-2 (switching)
Native implementation. Replaces the conventional Open vSwitch (OVS)
agent.
* Layer-3 (routing)
Native implementation that supports distributed routing. Replaces the
conventional Neutron L3 agent.
* DHCP
Native distributed implementation. Replaces the conventional Neutron DHCP
agent. Note that the native implementation does not yet support DNS or
Metadata features.
* DPDK
OVN and networking-ovn may be used with OVS using either the Linux kernel
datapath or the DPDK datapath.
* Trunk driver
Uses OVN's parent port and port tagging functionality to support the trunk
service plugin. You must enable the 'trunk' service plugin in the Neutron
configuration files to use this feature, as sketched below.
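For illustration only, a minimal sketch of the relevant ``neutron.conf``
stanza; real deployments typically list additional service plugins alongside
it:
.. code-block:: ini
[DEFAULT]
service_plugins = trunk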
The following Neutron API extensions are supported with OVN:
+----------------------------------+---------------------------+
| Extension Name | Extension Alias |
+==================================+===========================+
| agent | agent |
+----------------------------------+---------------------------+
| Allowed Address Pairs | allowed-address-pairs |
+----------------------------------+---------------------------+
| Auto Allocated Topology Services | auto-allocated-topology |
+----------------------------------+---------------------------+
| Availability Zone | availability_zone |
+----------------------------------+---------------------------+
| Default Subnetpools | default-subnetpools |
+----------------------------------+---------------------------+
| Multi Provider Network | multi-provider |
+----------------------------------+---------------------------+
| Network IP Availability | network-ip-availability |
+----------------------------------+---------------------------+
| Neutron external network | external-net |
+----------------------------------+---------------------------+
| Neutron Extra DHCP opts | extra_dhcp_opt |
+----------------------------------+---------------------------+
| Neutron Extra Route | extraroute |
+----------------------------------+---------------------------+
| Neutron L3 external gateway | ext-gw-mode |
+----------------------------------+---------------------------+
| Neutron L3 Router | router |
+----------------------------------+---------------------------+
| Network MTU | net-mtu |
+----------------------------------+---------------------------+
| Port Binding | binding |
+----------------------------------+---------------------------+
| Port Security | port-security |
+----------------------------------+---------------------------+
| Provider Network | provider |
+----------------------------------+---------------------------+
| Quality of Service | qos |
+----------------------------------+---------------------------+
| Quota management support | quotas |
+----------------------------------+---------------------------+
| RBAC Policies | rbac-policies |
+----------------------------------+---------------------------+
| Resource revision numbers | revisions |
+----------------------------------+---------------------------+
| security-group | security-group |
+----------------------------------+---------------------------+
| standard-attr-description | standard-attr-description |
+----------------------------------+---------------------------+
| Subnet Allocation | subnet_allocation |
+----------------------------------+---------------------------+
| Tag support | tag |
+----------------------------------+---------------------------+
| Time Stamp Fields | timestamp_core |
+----------------------------------+---------------------------+

View File

@ -1,14 +0,0 @@
====================
Administration Guide
====================
.. toctree::
:maxdepth: 1
ovn
features
faq
refarch/refarch
dpdk
containers
troubleshooting

View File

@ -1,71 +0,0 @@
===============
OVN information
===============
The original OVN project announcement can be found here:
* http://networkheresy.com/2015/01/13/ovn-bringing-native-virtual-networking-to-ovs/
The OVN architecture is described here:
* http://openvswitch.org/support/dist-docs/ovn-architecture.7.html
Here are two tutorials that help with learning different aspects of OVN:
* http://blog.spinhirne.com/p/blog-series.html#introToOVN
* http://docs.openvswitch.org/en/latest/tutorials/ovn-sandbox/
There is also an in-depth tutorial on using OVN with OpenStack:
* http://docs.openvswitch.org/en/latest/tutorials/ovn-openstack/
OVN DB schemas and other man pages:
* http://openvswitch.org/support/dist-docs/ovn-nb.5.html
* http://openvswitch.org/support/dist-docs/ovn-sb.5.html
* http://openvswitch.org/support/dist-docs/ovn-nbctl.8.html
* http://openvswitch.org/support/dist-docs/ovn-sbctl.8.html
* http://openvswitch.org/support/dist-docs/ovn-northd.8.html
* http://openvswitch.org/support/dist-docs/ovn-controller.8.html
* http://openvswitch.org/support/dist-docs/ovn-controller-vtep.8.html
or find a full list of OVS and OVN man pages here:
* http://docs.openvswitch.org/en/latest/ref/
The openvswitch web page includes a list of presentations, some of which are
about OVN:
* http://openvswitch.org/support/
Here are some direct links to past OVN presentations:
* OVN talk at OpenStack Summit in Boston, Spring 2017
* https://www.youtube.com/watch?v=sgc7myiX6ts
* OVN talk at OpenStack Summit in Barcelona, Fall 2016
* https://www.youtube.com/watch?v=q3cJ6ezPnCU
* OVN talk at OpenStack Summit in Austin, Spring 2016
* https://www.youtube.com/watch?v=okralc7LrZo
* OVN Project Update at the OpenStack Summit in Tokyo, Fall 2015
* http://openvswitch.org/support/slides/OVN_Tokyo.pdf
* https://www.youtube.com/watch?v=3IrG2xghJjs
* OVN at OpenStack Summit in Vancouver, Spring 2015
* http://openvswitch.org/support/slides/OVN-Vancouver.pdf
* https://www.youtube.com/watch?v=kEzXTq2fPDg
* OVS Conference 2015
* https://www.youtube.com/watch?v=JLGZOYi_Cqc
These blog resources may also help with testing and understanding OVN:
* http://networkop.co.uk/blog/2016/11/27/ovn-part1/
* http://networkop.co.uk/blog/2016/12/10/ovn-part2/
* https://blog.russellbryant.net/2016/12/19/comparing-openstack-neutron-ml2ovs-and-ovn-control-plane/
* https://blog.russellbryant.net/2016/11/11/ovn-logical-flows-and-ovn-trace/
* https://blog.russellbryant.net/2016/09/29/ovs-2-6-and-the-first-release-of-ovn/
* http://galsagie.github.io/2015/11/23/ovn-l3-deepdive/
* http://blog.russellbryant.net/2015/10/22/openstack-security-groups-using-ovn-acls/
* http://galsagie.github.io/sdn/openstack/ovs/2015/05/30/ovn-deep-dive/
* http://blog.russellbryant.net/2015/05/14/an-ez-bake-ovn-for-openstack/
* http://galsagie.github.io/sdn/openstack/ovs/2015/04/26/ovn-containers/
* http://blog.russellbryant.net/2015/04/21/ovn-and-openstack-status-2015-04-21/
* http://blog.russellbryant.net/2015/04/08/ovn-and-openstack-integration-development-update/

(Eight binary/SVG image files not shown; diffs suppressed. Sizes: 142 KiB,
36 KiB, 72 KiB, 26 KiB, 64 KiB, 23 KiB, 68 KiB, 24 KiB.)

View File

@ -1,774 +0,0 @@
.. _refarch-launch-instance-provider-network:
Launch an instance on a provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. On the controller node, source the credentials for a regular
(non-privileged) project. The following example uses the ``demo``
project.
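For example, assuming the credentials file is named ``demo-openrc`` (the
exact file name depends on your deployment):
.. code-block:: console
$ . demo-openrc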
#. On the controller node, launch an instance using the UUID of the
provider network.
.. code-block:: console
$ openstack server create --flavor m1.tiny --image cirros \
--nic net-id=0243277b-4aa8-46d8-9e10-5c9ad5e01521 \
--security-group default --key-name mykey provider-instance
+--------------------------------------+-----------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | hdF4LMQqC5PB |
| config_drive | |
| created | 2015-09-17T21:58:18Z |
| flavor | m1.tiny (1) |
| hostId | |
| id | 181c52ba-aebc-4c32-a97d-2e8e82e4eaaf |
| image | cirros (38047887-61a7-41ea-9b49-27987d5e8bb9) |
| key_name | mykey |
| metadata | {} |
| name | provider-instance |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tenant_id | f5b2ccaa75ac413591f12fcaa096aa5c |
| updated | 2015-09-17T21:58:18Z |
| user_id | 684286a9079845359882afc3aa5011fb |
+--------------------------------------+-----------------------------------------------+
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations when
launching an instance.
#. The OVN mechanism driver creates a logical port for the instance.
.. code-block:: console
_uuid : cc891503-1259-47a1-9349-1c0293876664
addresses : ["fa:16:3e:1c:ca:6a 203.0.113.103"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "cafd4862-c69c-46e4-b3d2-6141ce06b205"
options : {}
parent_name : []
port_security : ["fa:16:3e:1c:ca:6a 203.0.113.103"]
tag : []
type : ""
up : true
#. The OVN mechanism driver updates the appropriate Address Set
entry with the address of this instance:
.. code-block:: console
_uuid : d0becdea-e1ed-48c4-9afc-e278cdef4629
addresses : ["203.0.113.103"]
external_ids : {"neutron:security_group_name"=default}
name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
#. The OVN mechanism driver creates ACL entries for this port and
any other ports in the project.
.. code-block:: console
_uuid : f8d27bfc-4d74-4e73-8fac-c84585443efd
action : drop
direction : from-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip"
priority : 1001
_uuid : a61d0068-b1aa-4900-9882-e0671d1fc131
action : allow
direction : to-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4 && ip4.src == 203.0.113.0/24 && udp && udp.src == 67 && udp.dst == 68"
priority : 1002
_uuid : a5a787b8-7040-4b63-a20a-551bd73eb3d1
action : allow-related
direction : from-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip6"
priority : 1002
_uuid : 7b3f63b8-e69a-476c-ad3d-37de043232b2
action : allow-related
direction : to-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4 && ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
priority : 1002
_uuid : 36dbb1b1-cd30-4454-a0bf-923646eb7c3f
action : allow
direction : from-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4 && (ip4.dst == 255.255.255.255 || ip4.dst == 203.0.113.0/24) && udp && udp.src == 68 && udp.dst == 67"
priority : 1002
_uuid : 05a92f66-be48-461e-a7f1-b07bfbd3e667
action : allow-related
direction : from-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4"
priority : 1002
_uuid : 37f18377-d6c3-4c44-9e4d-2170710e50ff
action : drop
direction : to-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip"
priority : 1001
_uuid : 6d4db3cf-c1f1-4006-ad66-ae582a6acd21
action : allow-related
direction : to-lport
external_ids : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
log : false
match : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip6 && ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"
priority : 1002
#. The OVN mechanism driver updates the logical switch information with
the UUIDs of these objects.
.. code-block:: console
_uuid : 924500c4-8580-4d5f-a7ad-8769f6e58ff5
acls : [05a92f66-be48-461e-a7f1-b07bfbd3e667,
36dbb1b1-cd30-4454-a0bf-923646eb7c3f,
37f18377-d6c3-4c44-9e4d-2170710e50ff,
7b3f63b8-e69a-476c-ad3d-37de043232b2,
a5a787b8-7040-4b63-a20a-551bd73eb3d1,
a61d0068-b1aa-4900-9882-e0671d1fc131,
f8d27bfc-4d74-4e73-8fac-c84585443efd]
external_ids : {"neutron:network_name"=provider}
name : "neutron-670efade-7cd0-4d87-8a04-27f366eb8941"
ports : [38cf8b52-47c4-4e93-be8d-06bf71f6a7c9,
5e144ab9-3e08-4910-b936-869bbbf254c8,
a576b812-9c3e-4cfb-9752-5d8500b3adf9,
cc891503-1259-47a1-9349-1c0293876664]
#. The OVN northbound service creates port bindings for the logical
ports and adds them to the appropriate multicast group.
* Port bindings
.. code-block:: console
_uuid : e73e3fcd-316a-4418-bbd5-a8a42032b1c3
chassis : fc5ab9e7-bc28-40e8-ad52-2949358cc088
datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6
logical_port : "cafd4862-c69c-46e4-b3d2-6141ce06b205"
mac : ["fa:16:3e:1c:ca:6a 203.0.113.103"]
options : {}
parent_port : []
tag : []
tunnel_key : 4
type : ""
* Multicast groups
.. code-block:: console
_uuid : 39b32ccd-fa49-4046-9527-13318842461e
datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6
name : _MC_flood
ports : [030024f4-61c3-4807-859b-07727447c427,
904c3108-234d-41c0-b93c-116b7e352a75,
cc5bcd19-bcae-4e29-8cee-3ec8a8a75d46,
e73e3fcd-316a-4418-bbd5-a8a42032b1c3]
tunnel_key : 65535
#. The OVN northbound service translates the Address Set change into
the new Address Set in the OVN southbound database.
.. code-block:: console
_uuid : 2addbee3-7084-4fff-8f7b-15b1efebdaff
addresses : ["203.0.113.103"]
name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
#. The OVN northbound service translates the ACL and logical port objects
into logical flows in the OVN southbound database.
.. code-block:: console
Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.src == {fa:16:3e:1c:ca:6a}),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 90,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.src == fa:16:3e:1c:ca:6a && ip4.src == {203.0.113.103}),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 90,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.src == fa:16:3e:1c:ca:6a && ip4.src == 0.0.0.0 &&
ip4.dst == 255.255.255.255 && udp.src == 68 && udp.dst == 67),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 80,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.src == fa:16:3e:1c:ca:6a && ip),
action=(drop;)
table= 2( ls_in_port_sec_nd), priority= 90,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.src == fa:16:3e:1c:ca:6a &&
arp.sha == fa:16:3e:1c:ca:6a && (arp.spa == 203.0.113.103 )),
action=(next;)
table= 2( ls_in_port_sec_nd), priority= 80,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
(arp || nd)),
action=(drop;)
table= 3( ls_in_pre_acl), priority= 110,
match=(nd),
action=(next;)
table= 3( ls_in_pre_acl), priority= 100,
match=(ip),
action=(reg0[0] = 1; next;)
table= 6( ls_in_acl), priority=65535,
match=(ct.inv),
action=(drop;)
table= 6( ls_in_acl), priority=65535,
match=(nd),
action=(next;)
table= 6( ls_in_acl), priority=65535,
match=(ct.est && !ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 6( ls_in_acl), priority=65535,
match=(!ct.est && ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 6( ls_in_acl), priority= 2002,
match=(ct.new && (inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205"
&& ip6)),
action=(reg0[1] = 1; next;)
table= 6( ls_in_acl), priority= 2002,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4 &&
(ip4.dst == 255.255.255.255 || ip4.dst == 203.0.113.0/24) &&
udp && udp.src == 68 && udp.dst == 67),
action=(reg0[1] = 1; next;)
table= 6( ls_in_acl), priority= 2002,
match=(ct.new && (inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
ip4)),
action=(reg0[1] = 1; next;)
table= 6( ls_in_acl), priority= 2001,
match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip),
action=(drop;)
table= 6( ls_in_acl), priority= 1,
match=(ip),
action=(reg0[1] = 1; next;)
table= 9( ls_in_arp_rsp), priority= 50,
match=(arp.tpa == 203.0.113.103 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:1c:ca:6a;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:1c:ca:6a; arp.tpa = arp.spa;
arp.spa = 203.0.113.103; outport = inport;
inport = ""; /* Allow sending out inport. */ output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:1c:ca:6a),
action=(outport = "cafd4862-c69c-46e4-b3d2-6141ce06b205"; output;)
Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: egress
table= 1( ls_out_pre_acl), priority= 110,
match=(nd),
action=(next;)
table= 1( ls_out_pre_acl), priority= 100,
match=(ip),
action=(reg0[0] = 1; next;)
table= 4( ls_out_acl), priority=65535,
match=(!ct.est && ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 4( ls_out_acl), priority=65535,
match=(ct.est && !ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 4( ls_out_acl), priority=65535,
match=(ct.inv),
action=(drop;)
table= 4( ls_out_acl), priority=65535,
match=(nd),
action=(next;)
table= 4( ls_out_acl), priority= 2002,
match=(ct.new &&
(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip6 &&
ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc)),
action=(reg0[1] = 1; next;)
table= 4( ls_out_acl), priority= 2002,
match=(ct.new &&
(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4 &&
ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc)),
action=(reg0[1] = 1; next;)
table= 4( ls_out_acl), priority= 2002,
match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4 &&
ip4.src == 203.0.113.0/24 && udp && udp.src == 67 &&
udp.dst == 68),
action=(reg0[1] = 1; next;)
table= 4( ls_out_acl), priority= 2001,
match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip),
action=(drop;)
table= 4( ls_out_acl), priority= 1,
match=(ip),
action=(reg0[1] = 1; next;)
table= 6( ls_out_port_sec_ip), priority= 90,
match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.dst == fa:16:3e:1c:ca:6a &&
ip4.dst == {255.255.255.255, 224.0.0.0/4, 203.0.113.103}),
action=(next;)
table= 6( ls_out_port_sec_ip), priority= 80,
match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.dst == fa:16:3e:1c:ca:6a && ip),
action=(drop;)
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" &&
eth.dst == {fa:16:3e:1c:ca:6a}),
action=(output;)
#. The OVN controller service on each compute node translates these objects
into flows on the integration bridge ``br-int``. Exact flows depend on
whether the compute node containing the instance also contains a DHCP agent
on the subnet.
* On the compute node containing the instance, the Compute service creates
a port that connects the instance to the integration bridge and OVN
creates the following flows:
.. code-block:: console
# ovs-ofctl show br-int
OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
9(tapcafd4862-c6): addr:fe:16:3e:1c:ca:6a
config: 0
state: 0
current: 10MB-FD COPPER
speed: 10 Mbps now, 0 Mbps max
.. code-block:: console
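# ovs-ofctl dump-flows br-int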
cookie=0x0, duration=184.992s, table=0, n_packets=175, n_bytes=15270,
idle_age=15, priority=100,in_port=9
actions=load:0x3->NXM_NX_REG5[],load:0x4->OXM_OF_METADATA[],
load:0x4->NXM_NX_REG6[],resubmit(,16)
cookie=0x0, duration=191.687s, table=16, n_packets=175, n_bytes=15270,
idle_age=15, priority=50,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a
actions=resubmit(,17)
cookie=0x0, duration=191.687s, table=17, n_packets=2, n_bytes=684,
idle_age=112, priority=90,udp,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a,nw_src=0.0.0.0,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=resubmit(,18)
cookie=0x0, duration=191.687s, table=17, n_packets=146, n_bytes=12780,
idle_age=20, priority=90,ip,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a,nw_src=203.0.113.103
actions=resubmit(,18)
cookie=0x0, duration=191.687s, table=17, n_packets=17, n_bytes=1386,
idle_age=92, priority=80,ipv6,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=191.687s, table=17, n_packets=0, n_bytes=0,
idle_age=191, priority=80,ip,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=191.687s, table=18, n_packets=10, n_bytes=420,
idle_age=15, priority=90,arp,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a,arp_spa=203.0.113.103,
arp_sha=fa:16:3e:1c:ca:6a
actions=resubmit(,19)
cookie=0x0, duration=191.687s, table=18, n_packets=0, n_bytes=0,
idle_age=191, priority=80,icmp6,reg6=0x4,metadata=0x4,
icmp_type=136,icmp_code=0
actions=drop
cookie=0x0, duration=191.687s, table=18, n_packets=0, n_bytes=0,
idle_age=191, priority=80,icmp6,reg6=0x4,metadata=0x4,
icmp_type=135,icmp_code=0
actions=drop
cookie=0x0, duration=191.687s, table=18, n_packets=0, n_bytes=0,
idle_age=191, priority=80,arp,reg6=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=75.033s, table=19, n_packets=0, n_bytes=0,
idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=75.032s, table=19, n_packets=0, n_bytes=0,
idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=75.032s, table=19, n_packets=34, n_bytes=5170,
idle_age=49, priority=100,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=75.032s, table=19, n_packets=0, n_bytes=0,
idle_age=75, priority=100,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=13, n_bytes=1118,
idle_age=49, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x4
actions=resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x4
actions=resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,ct_state=+inv+trk,metadata=0x4
actions=drop
cookie=0x0, duration=75.033s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=2002,ct_state=+new+trk,ipv6,reg6=0x4,
metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=15, n_bytes=1816,
idle_age=49, priority=2002,ct_state=+new+trk,ip,reg6=0x4,
metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=2002,udp,reg6=0x4,metadata=0x4,
nw_dst=203.0.113.0/24,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=2002,udp,reg6=0x4,metadata=0x4,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=75.033s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=2001,ip,reg6=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=2001,ipv6,reg6=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=75.032s, table=22, n_packets=6, n_bytes=2236,
idle_age=54, priority=1,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0,
idle_age=75, priority=1,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=67.064s, table=25, n_packets=0, n_bytes=0,
idle_age=67, priority=50,arp,metadata=0x4,arp_tpa=203.0.113.103,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:1c:ca:6a,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163ed63dca->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a81268->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=75.033s, table=26, n_packets=19, n_bytes=2776,
idle_age=44, priority=50,metadata=0x4,dl_dst=fa:16:3e:1c:ca:6a
actions=load:0x4->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=221031.310s, table=33, n_packets=72, n_bytes=6292,
idle_age=20, hard_age=65534, priority=100,reg7=0x3,metadata=0x4
actions=load:0x1->NXM_NX_REG7[],resubmit(,33)
cookie=0x0, duration=184.992s, table=34, n_packets=2, n_bytes=684,
idle_age=112, priority=100,reg6=0x4,reg7=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=75.034s, table=49, n_packets=0, n_bytes=0,
idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=75.033s, table=49, n_packets=0, n_bytes=0,
idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=75.033s, table=49, n_packets=38, n_bytes=6566,
idle_age=49, priority=100,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=75.033s, table=49, n_packets=0, n_bytes=0,
idle_age=75, priority=100,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x4
actions=resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=13, n_bytes=1118,
idle_age=49, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x4
actions=resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=65535,ct_state=+inv+trk,metadata=0x4
actions=drop
cookie=0x0, duration=75.034s, table=52, n_packets=4, n_bytes=1538,
idle_age=54, priority=2002,udp,reg7=0x4,metadata=0x4,
nw_src=203.0.113.0/24,tp_src=67,tp_dst=68
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=2002,ct_state=+new+trk,ip,reg7=0x4,
metadata=0x4,nw_src=203.0.113.103
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=2.041s, table=52, n_packets=0, n_bytes=0,
idle_age=2, priority=2002,ct_state=+new+trk,ipv6,reg7=0x4,
metadata=0x4,ipv6_src=::2/::2
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=2, n_bytes=698,
idle_age=54, priority=2001,ip,reg7=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=2001,ipv6,reg7=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=75.034s, table=52, n_packets=0, n_bytes=0,
idle_age=75, priority=1,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=75.033s, table=52, n_packets=19, n_bytes=3212,
idle_age=49, priority=1,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=75.034s, table=54, n_packets=17, n_bytes=2656,
idle_age=49, priority=90,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a,nw_dst=203.0.113.103
actions=resubmit(,55)
cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0,
idle_age=75, priority=90,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a,nw_dst=255.255.255.255
actions=resubmit(,55)
cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0,
idle_age=75, priority=90,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a,nw_dst=224.0.0.0/4
actions=resubmit(,55)
cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0,
idle_age=75, priority=80,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0,
idle_age=75, priority=80,ipv6,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=75.033s, table=55, n_packets=21, n_bytes=2860,
idle_age=44, priority=50,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a
actions=resubmit(,64)
cookie=0x0, duration=184.992s, table=64, n_packets=166, n_bytes=15088,
idle_age=15, priority=100,reg7=0x4,metadata=0x4
actions=output:9
* For each compute node that only contains a DHCP agent on the subnet, OVN
creates the following flows:
.. code-block:: console
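# ovs-ofctl dump-flows br-int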
cookie=0x0, duration=189.649s, table=16, n_packets=0, n_bytes=0,
idle_age=189, priority=50,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a
actions=resubmit(,17)
cookie=0x0, duration=189.650s, table=17, n_packets=0, n_bytes=0,
idle_age=189, priority=90,udp,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a,nw_src=0.0.0.0,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=resubmit(,18)
cookie=0x0, duration=189.649s, table=17, n_packets=0, n_bytes=0,
idle_age=189, priority=90,ip,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a,nw_src=203.0.113.103
actions=resubmit(,18)
cookie=0x0, duration=189.650s, table=17, n_packets=0, n_bytes=0,
idle_age=189, priority=80,ipv6,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=189.650s, table=17, n_packets=0, n_bytes=0,
idle_age=189, priority=80,ip,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=189.650s, table=18, n_packets=0, n_bytes=0,
idle_age=189, priority=90,arp,reg6=0x4,metadata=0x4,
dl_src=fa:16:3e:1c:ca:6a,arp_spa=203.0.113.103,
arp_sha=fa:16:3e:1c:ca:6a
actions=resubmit(,19)
cookie=0x0, duration=189.650s, table=18, n_packets=0, n_bytes=0,
idle_age=189, priority=80,icmp6,reg6=0x4,metadata=0x4,
icmp_type=136,icmp_code=0
actions=drop
cookie=0x0, duration=189.650s, table=18, n_packets=0, n_bytes=0,
idle_age=189, priority=80,icmp6,reg6=0x4,metadata=0x4,
icmp_type=135,icmp_code=0
actions=drop
cookie=0x0, duration=189.649s, table=18, n_packets=0, n_bytes=0,
idle_age=189, priority=80,arp,reg6=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=79.452s, table=19, n_packets=0, n_bytes=0,
idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=79.450s, table=19, n_packets=0, n_bytes=0,
idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=79.452s, table=19, n_packets=0, n_bytes=0,
idle_age=79, priority=100,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=79.450s, table=19, n_packets=18, n_bytes=3164,
idle_age=57, priority=100,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=79.450s, table=22, n_packets=6, n_bytes=510,
idle_age=57, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x4
actions=resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x4
actions=resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,ct_state=+inv+trk,metadata=0x4
actions=drop
cookie=0x0, duration=79.453s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=2002,ct_state=+new+trk,ipv6,reg6=0x4,
metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=2002,ct_state=+new+trk,ip,reg6=0x4,
metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=2002,udp,reg6=0x4,metadata=0x4,
nw_dst=203.0.113.0/24,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=2002,udp,reg6=0x4,metadata=0x4,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=79.452s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=2001,ip,reg6=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=2001,ipv6,reg6=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0,
idle_age=79, priority=1,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=79.450s, table=22, n_packets=12, n_bytes=2654,
idle_age=57, priority=1,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=71.483s, table=25, n_packets=0, n_bytes=0,
idle_age=71, priority=50,arp,metadata=0x4,arp_tpa=203.0.113.103,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:1c:ca:6a,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163ed63dca->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a81268->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=79.450s, table=26, n_packets=8, n_bytes=1258,
idle_age=57, priority=50,metadata=0x4,dl_dst=fa:16:3e:1c:ca:6a
actions=load:0x4->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=182.952s, table=33, n_packets=74, n_bytes=7040,
idle_age=18, priority=100,reg7=0x4,metadata=0x4
actions=load:0x1->NXM_NX_REG7[],resubmit(,33)
cookie=0x0, duration=79.451s, table=49, n_packets=0, n_bytes=0,
idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=79.450s, table=49, n_packets=0, n_bytes=0,
idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=79.450s, table=49, n_packets=18, n_bytes=3164,
idle_age=57, priority=100,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=79.450s, table=49, n_packets=0, n_bytes=0,
idle_age=79, priority=100,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x4
actions=resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=6, n_bytes=510,
idle_age=57, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x4
actions=resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=135,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=136,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=65535,ct_state=+inv+trk,metadata=0x4
actions=drop
cookie=0x0, duration=79.452s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=2002,udp,reg7=0x4,metadata=0x4,
nw_src=203.0.113.0/24,tp_src=67,tp_dst=68
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=2002,ct_state=+new+trk,ip,reg7=0x4,
metadata=0x4,nw_src=203.0.113.103
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=71.483s, table=52, n_packets=0, n_bytes=0,
idle_age=71, priority=2002,ct_state=+new+trk,ipv6,reg7=0x4,
metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=2001,ipv6,reg7=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=2001,ip,reg7=0x4,metadata=0x4
actions=drop
cookie=0x0, duration=79.453s, table=52, n_packets=0, n_bytes=0,
idle_age=79, priority=1,ipv6,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=12, n_bytes=2654,
idle_age=57, priority=1,ip,metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.452s, table=54, n_packets=0, n_bytes=0,
idle_age=79, priority=90,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a,nw_dst=255.255.255.255
actions=resubmit(,55)
cookie=0x0, duration=79.452s, table=54, n_packets=0, n_bytes=0,
idle_age=79, priority=90,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a,nw_dst=203.0.113.103
actions=resubmit(,55)
cookie=0x0, duration=79.452s, table=54, n_packets=0, n_bytes=0,
idle_age=79, priority=90,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a,nw_dst=224.0.0.0/4
actions=resubmit(,55)
cookie=0x0, duration=79.450s, table=54, n_packets=0, n_bytes=0,
idle_age=79, priority=80,ip,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=79.450s, table=54, n_packets=0, n_bytes=0,
idle_age=79, priority=80,ipv6,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a
actions=drop
cookie=0x0, duration=79.450s, table=55, n_packets=0, n_bytes=0,
idle_age=79, priority=50,reg7=0x4,metadata=0x4,
dl_dst=fa:16:3e:1c:ca:6a
actions=resubmit(,64)

View File

@ -1,757 +0,0 @@
.. _refarch-launch-instance-selfservice-network:
Launch an instance on a self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To launch an instance on a self-service network, follow the same steps as
:ref:`launching an instance on the provider network
<refarch-launch-instance-provider-network>`, but using the UUID of the
self-service network.
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations when
launching an instance.
#. The OVN mechanism driver creates a logical port for the instance.
.. code-block:: console
_uuid : c754d1d2-a7fb-4dd0-b14c-c076962b06b9
addresses : ["fa:16:3e:15:7d:13 192.168.1.5"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"
options : {}
parent_name : []
port_security : ["fa:16:3e:15:7d:13 192.168.1.5"]
tag : []
type : ""
up : true
#. The OVN mechanism driver updates the appropriate Address Set object(s)
with the address of the new instance:
.. code-block:: console
_uuid : d0becdea-e1ed-48c4-9afc-e278cdef4629
addresses : ["192.168.1.5", "203.0.113.103"]
external_ids : {"neutron:security_group_name"=default}
name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
#. The OVN mechanism driver creates ACL entries for this port and
any other ports in the project.
.. code-block:: console
_uuid : 00ecbe8f-c82a-4e18-b688-af2a1941cff7
action : allow
direction : from-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && (ip4.dst == 255.255.255.255 || ip4.dst == 192.168.1.0/24) && udp && udp.src == 68 && udp.dst == 67"
priority : 1002
_uuid : 2bf5b7ed-008e-4676-bba5-71fe58897886
action : allow-related
direction : from-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4"
priority : 1002
_uuid : 330b4e27-074f-446a-849b-9ab0018b65c5
action : allow
direction : to-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && ip4.src == 192.168.1.0/24 && udp && udp.src == 67 && udp.dst == 68"
priority : 1002
_uuid : 683f52f2-4be6-4bd7-a195-6c782daa7840
action : allow-related
direction : from-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip6"
priority : 1002
_uuid : 8160f0b4-b344-43d5-bbd4-ca63a71aa4fc
action : drop
direction : to-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip"
priority : 1001
_uuid : 97c6b8ca-14ea-4812-8571-95d640a88f4f
action : allow-related
direction : to-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip6"
priority : 1002
_uuid : 9cfd8eb5-5daa-422e-8fe8-bd22fd7fa826
action : allow-related
direction : to-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && ip4.src == 0.0.0.0/0 && icmp4"
priority : 1002
_uuid : f72c2431-7a64-4cea-b84a-118bdc761be2
action : drop
direction : from-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip"
priority : 1001
_uuid : f94133fa-ed27-4d5e-a806-0d528e539cb3
action : allow-related
direction : to-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
priority : 1002
_uuid : 7f7a92ff-b7e9-49b0-8be0-0dc388035df3
action : allow-related
direction : to-lport
external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"}
log : false
match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip6 && ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"
priority : 1002
#. The OVN mechanism driver updates the logical switch information with
the UUIDs of these objects.
.. code-block:: console
_uuid : 15e2c80b-1461-4003-9869-80416cd97de5
acls : [00ecbe8f-c82a-4e18-b688-af2a1941cff7,
2bf5b7ed-008e-4676-bba5-71fe58897886,
330b4e27-074f-446a-849b-9ab0018b65c5,
683f52f2-4be6-4bd7-a195-6c782daa7840,
7f7a92ff-b7e9-49b0-8be0-0dc388035df3,
8160f0b4-b344-43d5-bbd4-ca63a71aa4fc,
97c6b8ca-14ea-4812-8571-95d640a88f4f,
9cfd8eb5-5daa-422e-8fe8-bd22fd7fa826,
f72c2431-7a64-4cea-b84a-118bdc761be2,
f94133fa-ed27-4d5e-a806-0d528e539cb3]
external_ids : {"neutron:network_name"="selfservice"}
name : "neutron-6cc81cae-8c5f-4c09-aaf2-35d0aa95c084"
ports : [2df457a5-f71c-4a2f-b9ab-d9e488653872,
67c2737c-b380-492b-883b-438048b48e56,
c754d1d2-a7fb-4dd0-b14c-c076962b06b9]
#. With address sets, it is no longer necessary for the OVN mechanism
driver to create separate ACLs for other instances in the project;
that is handled automagically by the address set updates shown above.
#. The OVN northbound service translates the updated Address Set object(s)
into updated Address Set objects in the OVN southbound database:
.. code-block:: console
_uuid : 2addbee3-7084-4fff-8f7b-15b1efebdaff
addresses : ["192.168.1.5", "203.0.113.103"]
name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
#. The OVN northbound service adds a Port Binding for the new Logical
Switch Port object:
.. code-block:: console
_uuid : 7a558e7b-ed7a-424f-a0cf-ab67d2d832d7
chassis : b67d6da9-0222-4ab1-a852-ab2607610bf8
datapath : 3f6e16b5-a03a-48e5-9b60-7b7a0396c425
logical_port : "e9cb7857-4cb1-4e91-aae5-165a7ab5b387"
mac : ["fa:16:3e:b6:91:70 192.168.1.5"]
options : {}
parent_port : []
tag : []
tunnel_key : 3
type : ""
#. The OVN northbound service updates the flooding multicast group
for the logical datapath with the new port binding:
.. code-block:: console
_uuid : c08d0102-c414-4a47-98d9-dd3fa9f9901c
datapath : 0b214af6-8910-489c-926a-fd0ed16a8251
name : _MC_flood
ports : [3e463ca0-951c-46fd-b6cf-05392fa3aa1f,
794a6f03-7941-41ed-b1c6-0e00c1e18da0,
fa7b294d-2a62-45ae-8de3-a41c002de6de]
tunnel_key : 65535
#. The OVN northbound service adds Logical Flows based on the updated
Address Set, ACL, and Logical_Switch_Port objects:
.. code-block:: console
Datapath: 3f6e16b5-a03a-48e5-9b60-7b7a0396c425 Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.src == {fa:16:3e:b6:a3:54}),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 90,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.src == fa:16:3e:b6:a3:54 && ip4.src == 0.0.0.0 &&
ip4.dst == 255.255.255.255 && udp.src == 68 && udp.dst == 67),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 90,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.src == fa:16:3e:b6:a3:54 && ip4.src == {192.168.1.5}),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 80,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.src == fa:16:3e:b6:a3:54 && ip),
action=(drop;)
table= 2( ls_in_port_sec_nd), priority= 90,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.src == fa:16:3e:b6:a3:54 && arp.sha == fa:16:3e:b6:a3:54 &&
(arp.spa == 192.168.1.5 )),
action=(next;)
table= 2( ls_in_port_sec_nd), priority= 80,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
(arp || nd)),
action=(drop;)
table= 3( ls_in_pre_acl), priority= 110, match=(nd),
action=(next;)
table= 3( ls_in_pre_acl), priority= 100, match=(ip),
action=(reg0[0] = 1; next;)
table= 6( ls_in_acl), priority=65535,
match=(!ct.est && ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 6( ls_in_acl), priority=65535,
match=(ct.est && !ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 6( ls_in_acl), priority=65535, match=(ct.inv),
action=(drop;)
table= 6( ls_in_acl), priority=65535, match=(nd),
action=(next;)
table= 6( ls_in_acl), priority= 2002,
match=(ct.new && (inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
ip6)),
action=(reg0[1] = 1; next;)
table= 6( ls_in_acl), priority= 2002,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4 &&
(ip4.dst == 255.255.255.255 || ip4.dst == 192.168.1.0/24) &&
udp && udp.src == 68 && udp.dst == 67),
action=(reg0[1] = 1; next;)
table= 6( ls_in_acl), priority= 2002,
match=(ct.new && (inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
ip4)),
action=(reg0[1] = 1; next;)
table= 6( ls_in_acl), priority= 2001,
match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip),
action=(drop;)
table= 6( ls_in_acl), priority= 1, match=(ip),
action=(reg0[1] = 1; next;)
table= 9( ls_in_arp_nd_rsp), priority= 50,
match=(arp.tpa == 192.168.1.5 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:b6:a3:54; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:b6:a3:54; arp.tpa = arp.spa; arp.spa = 192.168.1.5; outport = inport; inport = ""; /* Allow sending out inport. */ output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:b6:a3:54),
action=(outport = "e9cb7857-4cb1-4e91-aae5-165a7ab5b387"; output;)
Datapath: 3f6e16b5-a03a-48e5-9b60-7b7a0396c425 Pipeline: egress
table= 1( ls_out_pre_acl), priority= 110, match=(nd),
action=(next;)
table= 1( ls_out_pre_acl), priority= 100, match=(ip),
action=(reg0[0] = 1; next;)
table= 4( ls_out_acl), priority=65535, match=(nd),
action=(next;)
table= 4( ls_out_acl), priority=65535,
match=(!ct.est && ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 4( ls_out_acl), priority=65535,
match=(ct.est && !ct.rel && !ct.new && !ct.inv),
action=(next;)
table= 4( ls_out_acl), priority=65535, match=(ct.inv),
action=(drop;)
table= 4( ls_out_acl), priority= 2002,
match=(ct.new &&
(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip6 &&
ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc)),
action=(reg0[1] = 1; next;)
table= 4( ls_out_acl), priority= 2002,
match=(ct.new &&
(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4 &&
ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc)),
action=(reg0[1] = 1; next;)
table= 4( ls_out_acl), priority= 2002,
match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4 &&
ip4.src == 192.168.1.0/24 && udp && udp.src == 67 && udp.dst == 68),
action=(reg0[1] = 1; next;)
table= 4( ls_out_acl), priority= 2001,
match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip),
action=(drop;)
table= 4( ls_out_acl), priority= 1, match=(ip),
action=(reg0[1] = 1; next;)
table= 6( ls_out_port_sec_ip), priority= 90,
match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.dst == fa:16:3e:b6:a3:54 &&
ip4.dst == {255.255.255.255, 224.0.0.0/4, 192.168.1.5}),
action=(next;)
table= 6( ls_out_port_sec_ip), priority= 80,
match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.dst == fa:16:3e:b6:a3:54 && ip),
action=(drop;)
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" &&
eth.dst == {fa:16:3e:b6:a3:54}),
action=(output;)
#. The OVN controller service on each compute node translates these objects
into flows on the integration bridge ``br-int``. The exact flows depend on
whether the compute node containing the instance also hosts a DHCP agent
on the subnet.
* On the compute node containing the instance, the Compute service creates
a port that connects the instance to the integration bridge, and OVN
creates the following flows:
.. code-block:: console
# ovs-ofctl show br-int
OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
12(tapeaf36f62-56): addr:fe:16:3e:15:7d:13
config: 0
state: 0
current: 10MB-FD COPPER
.. code-block:: console
cookie=0x0, duration=179.460s, table=0, n_packets=122, n_bytes=10556,
idle_age=1, priority=100,in_port=12
actions=load:0x4->NXM_NX_REG5[],load:0x5->OXM_OF_METADATA[],
load:0x3->NXM_NX_REG6[],resubmit(,16)
cookie=0x0, duration=187.408s, table=16, n_packets=122, n_bytes=10556,
idle_age=1, priority=50,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13
actions=resubmit(,17)
cookie=0x0, duration=187.408s, table=17, n_packets=2, n_bytes=684,
idle_age=84, priority=90,udp,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13,nw_src=0.0.0.0,nw_dst=255.255.255.255,
tp_src=68,tp_dst=67
actions=resubmit(,18)
cookie=0x0, duration=187.408s, table=17, n_packets=98, n_bytes=8276,
idle_age=1, priority=90,ip,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13,nw_src=192.168.1.5
actions=resubmit(,18)
cookie=0x0, duration=187.408s, table=17, n_packets=17, n_bytes=1386,
idle_age=55, priority=80,ipv6,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=187.408s, table=17, n_packets=0, n_bytes=0,
idle_age=187, priority=80,ip,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=187.408s, table=18, n_packets=5, n_bytes=210,
idle_age=10, priority=90,arp,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13,arp_spa=192.168.1.5,
arp_sha=fa:16:3e:15:7d:13
actions=resubmit(,19)
cookie=0x0, duration=187.408s, table=18, n_packets=0, n_bytes=0,
idle_age=187, priority=80,icmp6,reg6=0x3,metadata=0x5,
icmp_type=135,icmp_code=0
actions=drop
cookie=0x0, duration=187.408s, table=18, n_packets=0, n_bytes=0,
idle_age=187, priority=80,icmp6,reg6=0x3,metadata=0x5,
icmp_type=136,icmp_code=0
actions=drop
cookie=0x0, duration=187.408s, table=18, n_packets=0, n_bytes=0,
idle_age=187, priority=80,arp,reg6=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=47.068s, table=19, n_packets=33, n_bytes=4081,
idle_age=0, priority=100,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0,
idle_age=47, priority=100,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=47.068s, table=22, n_packets=15, n_bytes=1392,
idle_age=0, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x5
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x5
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,ct_state=+inv+trk,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,ct_state=+new+trk,ipv6,reg6=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=16, n_bytes=1922,
idle_age=2, priority=2002,ct_state=+new+trk,ip,reg6=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5,
nw_dst=192.168.1.0/24,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.069s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2001,ipv6,reg6=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2001,ip,reg6=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=22, n_packets=2, n_bytes=767,
idle_age=27, priority=1,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=1,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=179.457s, table=25, n_packets=2, n_bytes=84,
idle_age=33, priority=50,arp,metadata=0x5,arp_tpa=192.168.1.5,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:15:7d:13,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163e157d13->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80105->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],
load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=187.408s, table=26, n_packets=50, n_bytes=4806,
idle_age=1, priority=50,metadata=0x5,dl_dst=fa:16:3e:15:7d:13
actions=load:0x3->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=469.575s, table=33, n_packets=74, n_bytes=7040,
idle_age=305, priority=100,reg7=0x4,metadata=0x4
actions=load:0x1->NXM_NX_REG7[],resubmit(,33)
cookie=0x0, duration=179.460s, table=34, n_packets=2, n_bytes=684,
idle_age=84, priority=100,reg6=0x3,reg7=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.069s, table=49, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=47.068s, table=49, n_packets=34, n_bytes=4455,
idle_age=0, priority=100,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0,
idle_age=47, priority=100,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=47.069s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,ct_state=+inv+trk,metadata=0x5
actions=drop
cookie=0x0, duration=47.069s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=22, n_bytes=2000,
idle_age=0, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x5
actions=resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x5
actions=resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,ct_state=+new+trk,ip,reg7=0x3,
metadata=0x5,nw_src=192.168.1.5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,ct_state=+new+trk,ip,reg7=0x3,
metadata=0x5,nw_src=203.0.113.103
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=3, n_bytes=1141,
idle_age=27, priority=2002,udp,reg7=0x3,metadata=0x5,
nw_src=192.168.1.0/24,tp_src=67,tp_dst=68
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=39.497s, table=52, n_packets=0, n_bytes=0,
idle_age=39, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=2001,ip,reg7=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=2001,ipv6,reg7=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=52, n_packets=9, n_bytes=1314,
idle_age=2, priority=1,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0,
idle_age=47, priority=1,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=47.068s, table=54, n_packets=23, n_bytes=2945,
idle_age=0, priority=90,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13,nw_dst=192.168.1.11
actions=resubmit(,55)
cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0,
idle_age=47, priority=90,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13,nw_dst=255.255.255.255
actions=resubmit(,55)
cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0,
idle_age=47, priority=90,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13,nw_dst=224.0.0.0/4
actions=resubmit(,55)
cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0,
idle_age=47, priority=80,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0,
idle_age=47, priority=80,ipv6,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=47.068s, table=55, n_packets=25, n_bytes=3029,
idle_age=0, priority=50,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:15:7d:13
actions=resubmit(,64)
cookie=0x0, duration=179.460s, table=64, n_packets=116, n_bytes=10623,
idle_age=1, priority=100,reg7=0x3,metadata=0x5
actions=output:12
* For each compute node that only contains a DHCP agent on the subnet,
OVN creates the following flows:
.. code-block:: console
cookie=0x0, duration=192.587s, table=16, n_packets=0, n_bytes=0,
idle_age=192, priority=50,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13
actions=resubmit(,17)
cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0,
idle_age=192, priority=90,ip,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13,nw_src=192.168.1.5
actions=resubmit(,18)
cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0,
idle_age=192, priority=90,udp,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13,nw_src=0.0.0.0,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=resubmit(,18)
cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0,
idle_age=192, priority=80,ipv6,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0,
idle_age=192, priority=80,ip,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0,
idle_age=192, priority=90,arp,reg6=0x3,metadata=0x5,
dl_src=fa:16:3e:15:7d:13,arp_spa=192.168.1.5,
arp_sha=fa:16:3e:15:7d:13
actions=resubmit(,19)
cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0,
idle_age=192, priority=80,arp,reg6=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0,
idle_age=192, priority=80,icmp6,reg6=0x3,metadata=0x5,
icmp_type=135,icmp_code=0
actions=drop
cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0,
idle_age=192, priority=80,icmp6,reg6=0x3,metadata=0x5,
icmp_type=136,icmp_code=0
actions=drop
cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=47.068s, table=19, n_packets=33, n_bytes=4081,
idle_age=0, priority=100,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0,
idle_age=47, priority=100,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=47.068s, table=22, n_packets=15, n_bytes=1392,
idle_age=0, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x5
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x5
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=65535,ct_state=+inv+trk,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,ct_state=+new+trk,ipv6,reg6=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=16, n_bytes=1922,
idle_age=2, priority=2002,ct_state=+new+trk,ip,reg6=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5,
nw_dst=192.168.1.0/24,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.069s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2001,ipv6,reg6=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=2001,ip,reg6=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=47.068s, table=22, n_packets=2, n_bytes=767,
idle_age=27, priority=1,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0,
idle_age=47, priority=1,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=179.457s, table=25, n_packets=2, n_bytes=84,
idle_age=33, priority=50,arp,metadata=0x5,arp_tpa=192.168.1.5,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:15:7d:13,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163e157d13->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80105->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],
load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=192.587s, table=26, n_packets=61, n_bytes=5607,
idle_age=6, priority=50,metadata=0x5,dl_dst=fa:16:3e:15:7d:13
actions=load:0x3->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=184.640s, table=32, n_packets=61, n_bytes=5607,
idle_age=6, priority=100,reg7=0x3,metadata=0x5
actions=load:0x5->NXM_NX_TUN_ID[0..23],
set_field:0x3/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:4
cookie=0x0, duration=47.069s, table=49, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0,
idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136,
icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=47.068s, table=49, n_packets=34, n_bytes=4455,
idle_age=0, priority=100,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0,
idle_age=47, priority=100,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=65535,ct_state=+inv+trk,
metadata=0x5
actions=drop
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=65535,ct_state=-new-est+rel-inv+trk,
metadata=0x5
actions=resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=27, n_bytes=2316,
idle_age=6, priority=65535,ct_state=-new+est-rel-inv+trk,
metadata=0x5
actions=resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=2002,ct_state=+new+trk,icmp,reg7=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3,
metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=2002,udp,reg7=0x3,metadata=0x5,
nw_src=192.168.1.0/24,tp_src=67,tp_dst=68
actions=load:0x1->NXM_NX_REG0[1],resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=2002,ct_state=+new+trk,ip,reg7=0x3,
metadata=0x5,nw_src=203.0.113.103
actions=load:0x1->NXM_NX_REG0[1],resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=2001,ip,reg7=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=2001,ipv6,reg7=0x3,metadata=0x5
actions=drop
cookie=0x0, duration=192.587s, table=52, n_packets=25, n_bytes=2604,
idle_age=6, priority=1,ip,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0,
idle_age=192, priority=1,ipv6,metadata=0x5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0,
idle_age=192, priority=90,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13,nw_dst=224.0.0.0/4
actions=resubmit(,55)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0,
idle_age=192, priority=90,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13,nw_dst=255.255.255.255
actions=resubmit(,55)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0,
idle_age=192, priority=90,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13,nw_dst=192.168.1.5
actions=resubmit(,55)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0,
idle_age=192, priority=80,ipv6,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0,
idle_age=192, priority=80,ip,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13
actions=drop
cookie=0x0, duration=192.587s, table=55, n_packets=0, n_bytes=0,
idle_age=192, priority=50,reg7=0x3,metadata=0x5,
dl_dst=fa:16:3e:15:7d:13
actions=resubmit(,64)
* For each compute node that contains neither the instance nor a DHCP
agent on the subnet, OVN creates the following flows:
.. code-block:: console
cookie=0x0, duration=189.763s, table=52, n_packets=0, n_bytes=0,
idle_age=189, priority=2002,ct_state=+new+trk,ipv6,reg7=0x4,
metadata=0x4
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=189.763s, table=52, n_packets=0, n_bytes=0,
idle_age=189, priority=2002,ct_state=+new+trk,ip,reg7=0x4,
metadata=0x4,nw_src=192.168.1.5
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
@ -1,645 +0,0 @@
.. _refarch-provider-networks:
Provider networks
-----------------
A provider (external) network bridges instances to physical network
infrastructure that provides layer-3 services. In most cases, provider networks
implement layer-2 segmentation using VLAN IDs. A provider network maps to a
provider bridge on each compute node that supports launching instances on the
provider network. You can create more than one provider bridge, each one
requiring a unique name and underlying physical network interface to prevent
switching loops. Provider networks and bridges can use arbitrary names,
but each mapping must reference valid provider network and bridge names.
Each provider bridge can contain one ``flat`` (untagged) network and up to
the maximum number of ``vlan`` (tagged) networks that the physical network
infrastructure supports, typically around 4000.
Creating a provider network involves several commands at the host, OVS,
and Networking service levels that yield a series of operations at the
OVN level to create the virtual network components. The following example
creates a ``flat`` provider network ``provider`` using the provider bridge
``br-provider`` and binds a subnet to it.
Create a provider network
~~~~~~~~~~~~~~~~~~~~~~~~~
#. On each compute node, create the provider bridge, map the provider
network to it, and add the underlying physical or logical (typically
a bond) network interface to it.
.. code-block:: console
# ovs-vsctl --may-exist add-br br-provider -- set bridge br-provider \
protocols=OpenFlow13
# ovs-vsctl set open . external-ids:ovn-bridge-mappings=provider:br-provider
# ovs-vsctl --may-exist add-port br-provider INTERFACE_NAME
Replace ``INTERFACE_NAME`` with the name of the underlying network
interface.
.. note::
These commands provide no output if successful.
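To confirm the result, you can read the configuration back from OVS
(a quick check; the output reflects whatever names you chose):
.. code-block:: console
# ovs-vsctl list-br
br-int
br-provider
# ovs-vsctl get open . external-ids:ovn-bridge-mappings
"provider:br-provider"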
#. On the controller node, source the administrative project credentials.
#. On the controller node, create the provider network in the Networking
service. In this case, instances and routers in other projects can use
the network.
.. code-block:: console
$ openstack network create --external --share \
--provider-physical-network provider --provider-network-type flat \
provider
+---------------------------+--------------------------------------+
| Field | Value |
+---------------------------+--------------------------------------+
| admin_state_up | UP |
| availability_zone_hints | |
| availability_zones | nova |
| created_at | 2016-06-15 15:50:37+00:00 |
| description | |
| id | 0243277b-4aa8-46d8-9e10-5c9ad5e01521 |
| ipv4_address_scope | None |
| ipv6_address_scope | None |
| is_default | False |
| mtu | 1500 |
| name | provider |
| project_id | b1ebf33664df402693f729090cfab861 |
| provider:network_type | flat |
| provider:physical_network | provider |
| provider:segmentation_id | None |
| qos_policy_id | None |
| router:external | External |
| shared | True |
| status | ACTIVE |
| subnets | 32a61337-c5a3-448a-a1e7-c11d6f062c21 |
| tags | [] |
| updated_at | 2016-06-15 15:50:37+00:00 |
+---------------------------+--------------------------------------+
.. note::
The value of ``--provider-physical-network`` must refer to the
provider network name in the mapping.
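The same provider bridge can also carry tagged networks. For example, a
``vlan`` network would be created similarly (a hypothetical sketch;
``101`` is an arbitrary segment ID that your physical network must
support, and ``--provider-segment`` assumes a reasonably current client):
.. code-block:: console
$ openstack network create --external --share \
  --provider-physical-network provider --provider-network-type vlan \
  --provider-segment 101 provider-vlan101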
OVN operations
^^^^^^^^^^^^^^
.. todo: I don't like going this deep with headers, so a future patch
will probably break this content into multiple files.
The OVN mechanism driver and OVN perform the following operations during
creation of a provider network.
#. The mechanism driver translates the network into a logical switch
in the OVN northbound database.
.. code-block:: console
_uuid : 98edf19f-2dbc-4182-af9b-79cafa4794b6
acls : []
external_ids : {"neutron:network_name"=provider}
load_balancer : []
name : "neutron-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"
ports : [92ee7c2f-cd22-4cac-a9d9-68a374dc7b17]
.. note::
The ``neutron:network_name`` field in ``external_ids`` contains
the network name and ``name`` contains the network UUID.
#. In addition, because the provider network is handled by a separate
bridge, the following logical port is created in the OVN northbound
database.
.. code-block:: console
_uuid : 92ee7c2f-cd22-4cac-a9d9-68a374dc7b17
addresses : [unknown]
enabled : []
external_ids : {}
name : "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"
options : {network_name=provider}
parent_name : []
port_security : []
tag : []
type : localnet
up : false
#. The OVN northbound service translates these objects into datapath bindings,
port bindings, and the appropriate multicast groups in the OVN southbound
database.
* Datapath bindings
.. code-block:: console
_uuid : f1f0981f-a206-4fac-b3a1-dc2030c9909f
external_ids : {logical-switch="98edf19f-2dbc-4182-af9b-79cafa4794b6"}
tunnel_key : 109
* Port bindings
.. code-block:: console
_uuid : 8427506e-46b5-41e5-a71b-a94a6859e773
chassis : []
datapath : f1f0981f-a206-4fac-b3a1-dc2030c9909f
logical_port : "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"
mac : [unknown]
options : {network_name=provider}
parent_port : []
tag : []
tunnel_key : 1
type : localnet
* Logical flows
.. code-block:: console
Datapath: f1f0981f-a206-4fac-b3a1-dc2030c9909f Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 100, match=(eth.src[40]),
action=(drop;)
table= 0( ls_in_port_sec_l2), priority= 100, match=(vlan.present),
action=(drop;)
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"),
action=(next;)
table= 1( ls_in_port_sec_ip), priority= 0, match=(1),
action=(next;)
table= 2( ls_in_port_sec_nd), priority= 0, match=(1),
action=(next;)
table= 3( ls_in_pre_acl), priority= 0, match=(1),
action=(next;)
table= 4( ls_in_pre_lb), priority= 0, match=(1),
action=(next;)
table= 5( ls_in_pre_stateful), priority= 100, match=(reg0[0] == 1),
action=(ct_next;)
table= 5( ls_in_pre_stateful), priority= 0, match=(1),
action=(next;)
table= 6( ls_in_acl), priority= 0, match=(1),
action=(next;)
table= 7( ls_in_lb), priority= 0, match=(1),
action=(next;)
table= 8( ls_in_stateful), priority= 100, match=(reg0[1] == 1),
action=(ct_commit; next;)
table= 8( ls_in_stateful), priority= 100, match=(reg0[2] == 1),
action=(ct_lb;)
table= 8( ls_in_stateful), priority= 0, match=(1),
action=(next;)
table= 9( ls_in_arp_rsp), priority= 100,
match=(inport == "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"),
action=(next;)
table= 9( ls_in_arp_rsp), priority= 0, match=(1),
action=(next;)
table=10( ls_in_l2_lkup), priority= 100, match=(eth.mcast),
action=(outport = "_MC_flood"; output;)
table=10( ls_in_l2_lkup), priority= 0, match=(1),
action=(outport = "_MC_unknown"; output;)
Datapath: f1f0981f-a206-4fac-b3a1-dc2030c9909f Pipeline: egress
table= 0( ls_out_pre_lb), priority= 0, match=(1),
action=(next;)
table= 1( ls_out_pre_acl), priority= 0, match=(1),
action=(next;)
table= 2(ls_out_pre_stateful), priority= 100, match=(reg0[0] == 1),
action=(ct_next;)
table= 2(ls_out_pre_stateful), priority= 0, match=(1),
action=(next;)
table= 3( ls_out_lb), priority= 0, match=(1),
action=(next;)
table= 4( ls_out_acl), priority= 0, match=(1),
action=(next;)
table= 5( ls_out_stateful), priority= 100, match=(reg0[1] == 1),
action=(ct_commit; next;)
table= 5( ls_out_stateful), priority= 100, match=(reg0[2] == 1),
action=(ct_lb;)
table= 5( ls_out_stateful), priority= 0, match=(1),
action=(next;)
table= 6( ls_out_port_sec_ip), priority= 0, match=(1),
action=(next;)
table= 7( ls_out_port_sec_l2), priority= 100, match=(eth.mcast),
action=(output;)
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"),
action=(output;)
* Multicast groups
.. code-block:: console
_uuid : 0102f08d-c658-4d0a-a18a-ec8adcaddf4f
datapath : f1f0981f-a206-4fac-b3a1-dc2030c9909f
name : _MC_unknown
ports : [8427506e-46b5-41e5-a71b-a94a6859e773]
tunnel_key : 65534
_uuid : fbc38e51-ac71-4c57-a405-e6066e4c101e
datapath : f1f0981f-a206-4fac-b3a1-dc2030c9909f
name : _MC_flood
ports : [8427506e-46b5-41e5-a71b-a94a6859e773]
tunnel_key : 65535
Create a subnet on the provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The provider network requires at least one subnet that defines the IP
address allocation available to instances, the default gateway IP address,
and metadata such as name resolution.
#. On the controller node, create a subnet bound to the provider network
``provider``.
.. code-block:: console
$ openstack subnet create --network provider --subnet-range \
203.0.113.0/24 --allocation-pool start=203.0.113.101,end=203.0.113.250 \
--dns-nameserver 8.8.8.8,8.8.4.4 --gateway 203.0.113.1 provider-v4
+-------------------+--------------------------------------+
| Field | Value |
+-------------------+--------------------------------------+
| allocation_pools | 203.0.113.101-203.0.113.250 |
| cidr | 203.0.113.0/24 |
| created_at | 2016-06-15 15:50:45+00:00 |
| description | |
| dns_nameservers | 8.8.8.8, 8.8.4.4 |
| enable_dhcp | True |
| gateway_ip | 203.0.113.1 |
| host_routes | |
| id | 32a61337-c5a3-448a-a1e7-c11d6f062c21 |
| ip_version | 4 |
| ipv6_address_mode | None |
| ipv6_ra_mode | None |
| name | provider-v4 |
| network_id | 0243277b-4aa8-46d8-9e10-5c9ad5e01521 |
| project_id | b1ebf33664df402693f729090cfab861 |
| subnetpool_id | None |
| updated_at | 2016-06-15 15:50:45+00:00 |
+-------------------+--------------------------------------+
If using DHCP to manage instance IP addresses, adding a subnet causes a series
of operations in the Networking service and OVN.
* The Networking service schedules the network on an appropriate number of
DHCP agents. The example environment contains three DHCP agents.
* Each DHCP agent spawns a network namespace with a ``dnsmasq`` process using
an IP address from the subnet allocation; see the check after this list.
* The OVN mechanism driver creates a logical switch port object in the OVN
northbound database for each ``dnsmasq`` process.
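You can observe these namespaces on the nodes running the DHCP agents
(a quick check; ``NETWORK_ID`` stands for the UUID of the network,
following the agent's ``qdhcp-`` naming convention):
.. code-block:: console
# ip netns list
qdhcp-NETWORK_ID
# ip netns exec qdhcp-NETWORK_ID ip addr show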
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations
during creation of a subnet on the provider network.
#. If the subnet uses DHCP for IP address management, create a logical
port for each DHCP agent serving the subnet and bind it to the logical
switch. In this example, the subnet contains two DHCP agents.
.. code-block:: console
_uuid : 5e144ab9-3e08-4910-b936-869bbbf254c8
addresses : ["fa:16:3e:57:f9:ca 203.0.113.101"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "6ab052c2-7b75-4463-b34f-fd3426f61787"
options : {}
parent_name : []
port_security : []
tag : []
type : ""
up : true
_uuid : 38cf8b52-47c4-4e93-be8d-06bf71f6a7c9
addresses : ["fa:16:3e:e0:eb:6d 203.0.113.102"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "94aee636-2394-48bc-b407-8224ab6bb1ab"
options : {}
parent_name : []
port_security : []
tag : []
type : ""
up : true
_uuid : 924500c4-8580-4d5f-a7ad-8769f6e58ff5
acls : []
external_ids : {"neutron:network_name"=provider}
load_balancer : []
name : "neutron-670efade-7cd0-4d87-8a04-27f366eb8941"
ports : [38cf8b52-47c4-4e93-be8d-06bf71f6a7c9,
5e144ab9-3e08-4910-b936-869bbbf254c8,
a576b812-9c3e-4cfb-9752-5d8500b3adf9]
#. The OVN northbound service creates port bindings for these logical
ports and adds them to the appropriate multicast group.
* Port bindings
.. code-block:: console
_uuid : 030024f4-61c3-4807-859b-07727447c427
chassis : fc5ab9e7-bc28-40e8-ad52-2949358cc088
datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6
logical_port : "6ab052c2-7b75-4463-b34f-fd3426f61787"
mac : ["fa:16:3e:57:f9:ca 203.0.113.101"]
options : {}
parent_port : []
tag : []
tunnel_key : 2
type : ""
_uuid : cc5bcd19-bcae-4e29-8cee-3ec8a8a75d46
chassis : 6a9d0619-8818-41e6-abef-2f3d9a597c03
datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6
logical_port : "94aee636-2394-48bc-b407-8224ab6bb1ab"
mac : ["fa:16:3e:e0:eb:6d 203.0.113.102"]
options : {}
parent_port : []
tag : []
tunnel_key : 3
type : ""
* Multicast groups
.. code-block:: console
_uuid : 39b32ccd-fa49-4046-9527-13318842461e
datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6
name : _MC_flood
ports : [030024f4-61c3-4807-859b-07727447c427,
904c3108-234d-41c0-b93c-116b7e352a75,
cc5bcd19-bcae-4e29-8cee-3ec8a8a75d46]
tunnel_key : 65535
#. The OVN northbound service translates the logical ports into
additional logical flows in the OVN southbound database.
.. code-block:: console
Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "94aee636-2394-48bc-b407-8224ab6bb1ab"),
action=(next;)
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "6ab052c2-7b75-4463-b34f-fd3426f61787"),
action=(next;)
table= 9( ls_in_arp_rsp), priority= 50,
match=(arp.tpa == 203.0.113.101 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:57:f9:ca;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:57:f9:ca; arp.tpa = arp.spa;
arp.spa = 203.0.113.101; outport = inport; inport = "";
/* Allow sending out inport. */ output;)
table= 9( ls_in_arp_rsp), priority= 50,
match=(arp.tpa == 203.0.113.102 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:e0:eb:6d;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:e0:eb:6d; arp.tpa = arp.spa;
arp.spa = 203.0.113.102; outport = inport;
inport = ""; /* Allow sending out inport. */ output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:57:f9:ca),
action=(outport = "6ab052c2-7b75-4463-b34f-fd3426f61787"; output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:e0:eb:6d),
action=(outport = "94aee636-2394-48bc-b407-8224ab6bb1ab"; output;)
Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: egress
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "6ab052c2-7b75-4463-b34f-fd3426f61787"),
action=(output;)
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "94aee636-2394-48bc-b407-8224ab6bb1ab"),
action=(output;)
#. For each compute node without a DHCP agent on the subnet:
* The OVN controller service translates the logical flows into flows on the
integration bridge ``br-int``.
.. code-block:: console
cookie=0x0, duration=22.303s, table=32, n_packets=0, n_bytes=0,
idle_age=22, priority=100,reg7=0xffff,metadata=0x4
actions=load:0x4->NXM_NX_TUN_ID[0..23],
set_field:0xffff/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],
output:5,output:4,resubmit(,33)
#. For each compute node with a DHCP agent on the subnet:
* Creation of a DHCP network namespace adds two virtual switch ports.
The first port connects the DHCP agent's ``dnsmasq`` process to the
integration bridge, and the second port patches the integration bridge
to the provider bridge ``br-provider``.
.. code-block:: console
# ovs-ofctl show br-int
OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
7(tap6ab052c2-7b): addr:00:00:00:00:10:7f
config: PORT_DOWN
state: LINK_DOWN
speed: 0 Mbps now, 0 Mbps max
8(patch-br-int-to): addr:6a:8c:30:3f:d7:dd
config: 0
state: 0
speed: 0 Mbps now, 0 Mbps max
# ovs-ofctl -O OpenFlow13 show br-provider
OFPT_FEATURES_REPLY (OF1.3) (xid=0x2): dpid:0000080027137c4a
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS GROUP_STATS QUEUE_STATS
OFPST_PORT_DESC reply (OF1.3) (xid=0x3):
1(patch-provnet-0): addr:fa:42:c5:3f:d7:6f
config: 0
state: 0
speed: 0 Mbps now, 0 Mbps max
* The OVN controller service translates these logical flows into flows on
the integration bridge.
.. code-block:: console
cookie=0x0, duration=17.731s, table=0, n_packets=3, n_bytes=258,
idle_age=16, priority=100,in_port=7
actions=load:0x2->NXM_NX_REG5[],load:0x4->OXM_OF_METADATA[],
load:0x2->NXM_NX_REG6[],resubmit(,16)
cookie=0x0, duration=17.730s, table=0, n_packets=15, n_bytes=954,
idle_age=2, priority=100,in_port=8,vlan_tci=0x0000/0x1000
actions=load:0x1->NXM_NX_REG5[],load:0x4->OXM_OF_METADATA[],
load:0x1->NXM_NX_REG6[],resubmit(,16)
cookie=0x0, duration=17.730s, table=0, n_packets=0, n_bytes=0,
idle_age=17, priority=100,in_port=8,dl_vlan=0
actions=strip_vlan,load:0x1->NXM_NX_REG5[],
load:0x4->OXM_OF_METADATA[],load:0x1->NXM_NX_REG6[],
resubmit(,16)
cookie=0x0, duration=17.732s, table=16, n_packets=0, n_bytes=0,
idle_age=17, priority=100,metadata=0x4,
dl_src=01:00:00:00:00:00/01:00:00:00:00:00
actions=drop
cookie=0x0, duration=17.732s, table=16, n_packets=0, n_bytes=0,
idle_age=17, priority=100,metadata=0x4,vlan_tci=0x1000/0x1000
actions=drop
cookie=0x0, duration=17.732s, table=16, n_packets=3, n_bytes=258,
idle_age=16, priority=50,reg6=0x2,metadata=0x4 actions=resubmit(,17)
cookie=0x0, duration=17.732s, table=16, n_packets=0, n_bytes=0,
idle_age=17, priority=50,reg6=0x3,metadata=0x4 actions=resubmit(,17)
cookie=0x0, duration=17.732s, table=16, n_packets=15, n_bytes=954,
idle_age=2, priority=50,reg6=0x1,metadata=0x4 actions=resubmit(,17)
cookie=0x0, duration=21.714s, table=17, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,18)
cookie=0x0, duration=21.714s, table=18, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,19)
cookie=0x0, duration=21.714s, table=19, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,20)
cookie=0x0, duration=21.714s, table=20, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,21)
cookie=0x0, duration=21.714s, table=21, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ip,reg0=0x1/0x1,metadata=0x4
actions=ct(table=22,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=21.714s, table=21, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ipv6,reg0=0x1/0x1,metadata=0x4
actions=ct(table=22,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=21.714s, table=21, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,22)
cookie=0x0, duration=21.714s, table=22, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,23)
cookie=0x0, duration=21.714s, table=23, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,24)
cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ipv6,reg0=0x4/0x4,metadata=0x4
actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ip,reg0=0x4/0x4,metadata=0x4
actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ip,reg0=0x2/0x2,metadata=0x4
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25)
cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ipv6,reg0=0x2/0x2,metadata=0x4
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25)
cookie=0x0, duration=21.714s, table=24, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,25)
cookie=0x0, duration=21.714s, table=25, n_packets=15, n_bytes=954,
idle_age=6, priority=100,reg6=0x1,metadata=0x4 actions=resubmit(,26)
cookie=0x0, duration=21.714s, table=25, n_packets=0, n_bytes=0,
idle_age=21, priority=50,arp,metadata=0x4,
arp_tpa=203.0.113.101,arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:f9:5d:f3,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163ef95df3->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a81264->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],
load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=21.714s, table=25, n_packets=0, n_bytes=0,
idle_age=21, priority=50,arp,metadata=0x4,
arp_tpa=203.0.113.102,arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:f0:a5:9f,
load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163ef0a59f->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a81265->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],
load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=21.714s, table=25, n_packets=3, n_bytes=258,
idle_age=20, priority=0,metadata=0x4 actions=resubmit(,26)
cookie=0x0, duration=21.714s, table=26, n_packets=18, n_bytes=1212,
idle_age=6, priority=100,metadata=0x4,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=load:0xffff->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=21.714s, table=26, n_packets=0, n_bytes=0,
idle_age=21, priority=50,metadata=0x4,dl_dst=fa:16:3e:f0:a5:9f
actions=load:0x3->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=21.714s, table=26, n_packets=0, n_bytes=0,
idle_age=21, priority=50,metadata=0x4,dl_dst=fa:16:3e:f9:5d:f3
actions=load:0x2->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=21.714s, table=26, n_packets=0, n_bytes=0,
idle_age=21, priority=0,metadata=0x4
actions=load:0xfffe->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=17.731s, table=33, n_packets=0, n_bytes=0,
idle_age=17, priority=100,reg7=0x2,metadata=0x4
actions=load:0x2->NXM_NX_REG5[],resubmit(,34)
cookie=0x0, duration=118.126s, table=33, n_packets=0, n_bytes=0,
idle_age=118, hard_age=17, priority=100,reg7=0xfffe,metadata=0x4
actions=load:0x1->NXM_NX_REG5[],load:0x1->NXM_NX_REG7[],
resubmit(,34),load:0xfffe->NXM_NX_REG7[]
cookie=0x0, duration=118.126s, table=33, n_packets=18, n_bytes=1212,
idle_age=2, hard_age=17, priority=100,reg7=0xffff,metadata=0x4
actions=load:0x2->NXM_NX_REG5[],load:0x2->NXM_NX_REG7[],
resubmit(,34),load:0x1->NXM_NX_REG5[],load:0x1->NXM_NX_REG7[],
resubmit(,34),load:0xffff->NXM_NX_REG7[]
cookie=0x0, duration=17.730s, table=33, n_packets=0, n_bytes=0,
idle_age=17, priority=100,reg7=0x1,metadata=0x4
actions=load:0x1->NXM_NX_REG5[],resubmit(,34)
cookie=0x0, duration=17.697s, table=33, n_packets=0, n_bytes=0,
idle_age=17, priority=100,reg7=0x3,metadata=0x4
actions=load:0x1->NXM_NX_REG7[],resubmit(,33)
cookie=0x0, duration=17.731s, table=34, n_packets=3, n_bytes=258,
idle_age=16, priority=100,reg6=0x2,reg7=0x2,metadata=0x4
actions=drop
cookie=0x0, duration=17.730s, table=34, n_packets=15, n_bytes=954,
idle_age=2, priority=100,reg6=0x1,reg7=0x1,metadata=0x4
actions=drop
cookie=0x0, duration=21.714s, table=48, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,49)
cookie=0x0, duration=21.714s, table=49, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,50)
cookie=0x0, duration=21.714s, table=50, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ip,reg0=0x1/0x1,metadata=0x4
actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=21.714s, table=50, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ipv6,reg0=0x1/0x1,metadata=0x4
actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=21.714s, table=50, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,51)
cookie=0x0, duration=21.714s, table=51, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,52)
cookie=0x0, duration=21.714s, table=52, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,53)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ip,reg0=0x4/0x4,metadata=0x4
actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ipv6,reg0=0x4/0x4,metadata=0x4
actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ipv6,reg0=0x2/0x2,metadata=0x4
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
idle_age=21, priority=100,ip,reg0=0x2/0x2,metadata=0x4
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=21.714s, table=53, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,54)
cookie=0x0, duration=21.714s, table=54, n_packets=18, n_bytes=1212,
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,55)
cookie=0x0, duration=21.714s, table=55, n_packets=18, n_bytes=1212,
idle_age=6, priority=100,metadata=0x4,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=resubmit(,64)
cookie=0x0, duration=21.714s, table=55, n_packets=0, n_bytes=0,
idle_age=21, priority=50,reg7=0x3,metadata=0x4
actions=resubmit(,64)
cookie=0x0, duration=21.714s, table=55, n_packets=0, n_bytes=0,
idle_age=21, priority=50,reg7=0x2,metadata=0x4
actions=resubmit(,64)
cookie=0x0, duration=21.714s, table=55, n_packets=0, n_bytes=0,
idle_age=21, priority=50,reg7=0x1,metadata=0x4
actions=resubmit(,64)
cookie=0x0, duration=21.712s, table=64, n_packets=15, n_bytes=954,
idle_age=6, priority=100,reg7=0x3,metadata=0x4 actions=output:7
cookie=0x0, duration=21.711s, table=64, n_packets=3, n_bytes=258,
idle_age=20, priority=100,reg7=0x1,metadata=0x4 actions=output:8
@ -1,249 +0,0 @@
======================
Reference architecture
======================
The reference architecture defines the minimum environment necessary
to deploy OpenStack with Open Virtual Network (OVN) integration for
the Networking service in production with sufficient expectations
of scale and performance. For evaluation purposes, you can deploy this
environment using the :ref:`Installation Guide <installation>` or
`Vagrant <https://github.com/openstack/networking-ovn/tree/master/vagrant>`_.
Any scaling or performance evaluations should use bare metal instead of
virtual machines.
Layout
------
The reference architecture includes a minimum of four nodes.
The controller node contains the following components that provide enough
functionality to launch basic instances:
* One network interface for management
* Identity service
* Image service
* Networking management with ML2 mechanism driver for OVN (control plane)
* Compute management (control plane)
The database node contains the following components:
* One network interface for management
* OVN northbound service (``ovn-northd``)
* Open vSwitch (OVS) database service (``ovsdb-server``) for the OVN
northbound database (``ovnnb.db``)
* Open vSwitch (OVS) database service (``ovsdb-server``) for the OVN
southbound database (``ovnsb.db``)
.. note::
For functional evaluation only, you can combine the controller and
database nodes.
The two compute nodes contain the following components:
* Three network interfaces for management, overlay networks, and provider
networks
* Compute management (hypervisor)
* Hypervisor (KVM)
* OVN controller service (``ovn-controller``)
* OVS data plane service (``ovs-vswitchd``)
* OVS database service (``ovsdb-server``) with OVS local configuration
(``conf.db``) database
* Networking DHCP agent
* Networking metadata agent
.. note::
By default, deploying DHCP and metadata agents on two compute nodes
provides basic redundancy for these services. For larger environments,
consider deploying the agents on a fraction of the compute nodes to
minimize control plane traffic.
.. image:: figures/ovn-hw.png
:alt: Hardware layout
:align: center
.. image:: figures/ovn-services.png
:alt: Service layout
:align: center
Networking service with OVN integration
---------------------------------------
The reference architecture deploys the Networking service with OVN
integration as follows:
.. image:: figures/ovn-architecture1.png
:alt: Architecture for Networking service with OVN integration
:align: center
Each compute node contains the following network components:
.. image:: figures/ovn-compute1.png
:alt: Compute node network components
:align: center
.. note::
The Networking service creates a unique network namespace for each
virtual subnet that enables the DHCP service.
.. _refarch_database-access:
Accessing OVN database content
------------------------------
OVN stores configuration data in a collection of OVS database tables.
The following commands show the contents of the most common database
tables in the northbound and southbound databases. The example database
output in this section uses these commands with various output filters.
.. code-block:: console
$ ovn-nbctl list Logical_Switch
$ ovn-nbctl list Logical_Switch_Port
$ ovn-nbctl list ACL
$ ovn-nbctl list Address_Set
$ ovn-nbctl list Logical_Router
$ ovn-nbctl list Logical_Router_Port
$ ovn-sbctl list Chassis
$ ovn-sbctl list Encap
$ ovn-sbctl list Address_Set
$ ovn-sbctl lflow-list
$ ovn-sbctl list Multicast_Group
$ ovn-sbctl list Datapath_Binding
$ ovn-sbctl list Port_Binding
$ ovn-sbctl list MAC_Binding
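These commands also accept filters to trim the output, which the example
output in this section relies on. A couple of sketches (assuming the
tools support the usual OVSDB ``--columns`` option and ``find`` command):
.. code-block:: console
$ ovn-nbctl --columns=name,ports list Logical_Switch
$ ovn-sbctl --columns=logical_port,chassis find Port_Binding type=localnet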
.. note::
By default, you must run these commands from the node containing
the OVN databases.
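To run them from another node, point the tools at the database servers
explicitly. This assumes the databases listen on the conventional TCP
ports, 6641 for the northbound and 6642 for the southbound database;
replace ``DB_IP`` with the address of the database node:
.. code-block:: console
$ ovn-nbctl --db=tcp:DB_IP:6641 list Logical_Switch
$ ovn-sbctl --db=tcp:DB_IP:6642 list Chassis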
.. _refarch-adding-compute-node:
Adding a compute node
---------------------
When you add a compute node to the environment, the OVN controller
service on it connects to the OVN southbound database and registers
the node as a chassis.
.. code-block:: console
_uuid : 9be8639d-1d0b-4e3d-9070-03a655073871
encaps : [2fcefdf4-a5e7-43ed-b7b2-62039cc7e32e]
external_ids : {ovn-bridge-mappings=""}
hostname : "compute1"
name : "410ee302-850b-4277-8610-fa675d620cb7"
vtep_logical_switches: []
The ``encaps`` field value refers to tunnel endpoint information
for the compute node.
.. code-block:: console
_uuid : 2fcefdf4-a5e7-43ed-b7b2-62039cc7e32e
ip : "10.0.0.32"
options : {}
type : geneve
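These values originate in the local OVS configuration of each compute
node, typically seeded during provisioning along these lines (a sketch
using this node's address):
.. code-block:: console
# ovs-vsctl set open . external-ids:ovn-encap-type=geneve \
  external-ids:ovn-encap-ip=10.0.0.32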
Security Groups/Rules
---------------------
Each security group maps to two Address_Sets in the OVN northbound and
southbound databases, one for IPv4 and another for IPv6. The address sets
hold the IP addresses of the ports that belong to the security group, so
that rules using ``remote_group_id`` can be applied efficiently.
.. todo: add block with openstack security group rule example
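For example, a rule that permits SSH between members of the same security
group relies on ``remote_group_id`` and could be created as follows
(a hypothetical example using the ``default`` group):
.. code-block:: console
$ openstack security group rule create --protocol tcp --dst-port 22 \
  --remote-group default default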
OVN operations
^^^^^^^^^^^^^^
#. Creating a security group causes the OVN mechanism driver to create
two new entries in the Address Set table of the northbound database:
.. code-block:: console
_uuid : 9a9d01bd-4afc-4d12-853a-cd21b547911d
addresses : []
external_ids : {"neutron:security_group_name"=default}
name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
_uuid : 27a91327-636e-4125-99f0-6f2937a3b6d8
addresses : []
external_ids : {"neutron:security_group_name"=default}
name : "as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"
In the entries above, each address set name includes the protocol (IPv4
or IPv6, written as ``ip4`` or ``ip6``) and the UUID of the OpenStack
security group, with dashes translated to underscores.
#. In turn, the OVN northbound service (``ovn-northd``) translates these
new entries into entries in the southbound database:
.. code-block:: console
_uuid : 886d7b3a-e460-470f-8af2-7c7d88ce45d2
addresses : []
name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
_uuid : 355ddcba-941d-4f1c-b823-dc811cec59ca
addresses : []
name : "as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"
Networks
--------
.. toctree::
:maxdepth: 1
provider-networks
selfservice-networks
Routers
-------
.. toctree::
:maxdepth: 1
routers
.. note::
Currently, OVN lacks support for routing between self-service (private)
and provider networks. However, it supports routing between
self-service networks.
Instances
---------
Launching an instance causes the same series of operations regardless
of the network. The following example uses the ``provider`` provider
network, ``cirros`` image, ``m1.tiny`` flavor, ``default`` security
group, and ``mykey`` key.
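A corresponding launch command might look like this (a sketch;
``instance1`` is a placeholder name and the flags assume a reasonably
current client):
.. code-block:: console
$ openstack server create --image cirros --flavor m1.tiny \
  --security-group default --key-name mykey \
  --network provider instance1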
.. toctree::
:maxdepth: 1
launch-instance-provider-network
launch-instance-selfservice-network
.. todo: Add north-south when OVN gains support for it.
Traffic flows
-------------
East-west for instances on the same provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
East-west for instances on different provider networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
East-west for instances on the same self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
East-west for instances on different self-service networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1,861 +0,0 @@
.. _refarch-routers:
Routers
-------
Routers pass traffic between layer-3 networks.
.. note::
Currently, OVN lacks support for routing between self-service (private)
and provider networks. However, it supports routing between
self-service networks.
Create a router
~~~~~~~~~~~~~~~
#. On the controller node, source the credentials for a regular
(non-privileged) project. The following example uses the ``demo``
project.
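   For example, assuming a ``demo-openrc`` credentials file exists:

   .. code-block:: console

      $ . demo-openrc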
#. On the controller node, create a router in the Networking service.
.. code-block:: console
$ openstack router create router
+-----------------------+--------------------------------------+
| Field | Value |
+-----------------------+--------------------------------------+
| admin_state_up | UP |
| description | |
| external_gateway_info | null |
| headers | |
| id | 24addfcd-5506-405d-a59f-003644c3d16a |
| name | router |
| project_id | b1ebf33664df402693f729090cfab861 |
| routes | |
| status | ACTIVE |
+-----------------------+--------------------------------------+
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations when
creating a router.
#. The OVN mechanism driver translates the router into a logical
router object in the OVN northbound database.
.. code-block:: console
_uuid : 1c2e340d-dac9-496b-9e86-1065f9dab752
default_gw : []
enabled : []
external_ids : {"neutron:router_name"="router"}
name : "neutron-a24fd760-1a99-4eec-9f02-24bb284ff708"
ports : []
static_routes : []
#. The OVN northbound service translates this object into logical flows
and datapath bindings in the OVN southbound database.
* Datapath bindings
.. code-block:: console
_uuid : 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa
external_ids : {logical-router="1c2e340d-dac9-496b-9e86-1065f9dab752"}
tunnel_key : 3
* Logical flows
.. code-block:: console
Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: ingress
table= 0( lr_in_admission), priority= 100,
match=(vlan.present || eth.src[40]),
action=(drop;)
table= 1( lr_in_ip_input), priority= 100,
match=(ip4.mcast || ip4.src == 255.255.255.255 ||
ip4.src == 127.0.0.0/8 || ip4.dst == 127.0.0.0/8 ||
ip4.src == 0.0.0.0/8 || ip4.dst == 0.0.0.0/8),
action=(drop;)
table= 1( lr_in_ip_input), priority= 50, match=(ip4.mcast),
action=(drop;)
table= 1( lr_in_ip_input), priority= 50, match=(eth.bcast),
action=(drop;)
table= 1( lr_in_ip_input), priority= 30,
match=(ip4 && ip.ttl == {0, 1}), action=(drop;)
table= 1( lr_in_ip_input), priority= 0, match=(1),
action=(next;)
table= 2( lr_in_unsnat), priority= 0, match=(1),
action=(next;)
table= 3( lr_in_dnat), priority= 0, match=(1),
action=(next;)
table= 5( lr_in_arp_resolve), priority= 0, match=(1),
action=(get_arp(outport, reg0); next;)
table= 6( lr_in_arp_request), priority= 100,
match=(eth.dst == 00:00:00:00:00:00),
action=(arp { eth.dst = ff:ff:ff:ff:ff:ff; arp.spa = reg1;
arp.op = 1; output; };)
table= 6( lr_in_arp_request), priority= 0, match=(1),
action=(output;)
Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: egress
table= 0( lr_out_snat), priority= 0, match=(1),
action=(next;)
#. The OVN controller service on each compute node translates these objects
into flows on the integration bridge ``br-int``.
.. code-block:: console
# ovs-ofctl dump-flows br-int
cookie=0x0, duration=6.402s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=100,metadata=0x5,vlan_tci=0x1000/0x1000
actions=drop
cookie=0x0, duration=6.402s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=100,metadata=0x5,
dl_src=01:00:00:00:00:00/01:00:00:00:00:00
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x5,nw_dst=127.0.0.0/8
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x5,nw_dst=0.0.0.0/8
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x5,nw_dst=224.0.0.0/4
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=50,ip,metadata=0x5,nw_dst=224.0.0.0/4
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x5,nw_src=255.255.255.255
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x5,nw_src=127.0.0.0/8
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x5,nw_src=0.0.0.0/8
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=90,arp,metadata=0x5,arp_op=2
actions=push:NXM_NX_REG0[],push:NXM_OF_ETH_SRC[],
push:NXM_NX_ARP_SHA[],push:NXM_OF_ARP_SPA[],
pop:NXM_NX_REG0[],pop:NXM_OF_ETH_SRC[],
controller(userdata=00.00.00.01.00.00.00.00),
pop:NXM_OF_ETH_SRC[],pop:NXM_NX_REG0[]
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=50,metadata=0x5,dl_dst=ff:ff:ff:ff:ff:ff
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=30,ip,metadata=0x5,nw_ttl=0
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=30,ip,metadata=0x5,nw_ttl=1
actions=drop
cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x5
actions=resubmit(,18)
cookie=0x0, duration=6.402s, table=18, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x5
actions=resubmit(,19)
cookie=0x0, duration=6.402s, table=19, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x5
actions=resubmit(,20)
cookie=0x0, duration=6.402s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x5
actions=resubmit(,32)
cookie=0x0, duration=6.402s, table=48, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x5
actions=resubmit(,49)
Attach a self-service network to the router
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-service networks, or more precisely their subnets, must attach to a
router to enable connectivity with other self-service and provider
networks.
#. On the controller node, add the self-service network subnet
``selfservice-v4`` to the router ``router``.
.. code-block:: console
$ openstack router add subnet router selfservice-v4
.. note::
This command provides no output.
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations when
adding a subnet as an interface on a router.
#. The OVN mechanism driver translates the operation into logical
objects and devices in the OVN northbound database and performs a
series of operations on them.
* Create a logical port.
.. code-block:: console
_uuid : 4c9e70b1-fff0-4d0d-af8e-42d3896eb76f
addresses : ["fa:16:3e:0c:55:62 192.168.1.1"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "5b72d278-5b16-44a6-9aa0-9e513a429506"
options : {router-port="lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"}
parent_name : []
port_security : []
tag : []
type : router
up : false
* Add the logical port to the logical switch.
.. code-block:: console
_uuid : 0ab40684-7cf8-4d6c-ae8b-9d9143762d37
acls : []
external_ids : {"neutron:network_name"="selfservice"}
name : "neutron-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5"
ports : [1ed7c28b-dc69-42b8-bed6-46477bb8b539,
4c9e70b1-fff0-4d0d-af8e-42d3896eb76f,
ae10a5e0-db25-4108-b06a-d2d5c127d9c4]
* Create a logical router port object.
.. code-block:: console
_uuid : f60ccb93-7b3d-4713-922c-37104b7055dc
enabled : []
external_ids : {}
mac : "fa:16:3e:0c:55:62"
name : "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"
network : "192.168.1.1/24"
peer : []
* Add the logical router port to the logical router object.
.. code-block:: console
_uuid : 1c2e340d-dac9-496b-9e86-1065f9dab752
default_gw : []
enabled : []
external_ids : {"neutron:router_name"="router"}
name : "neutron-a24fd760-1a99-4eec-9f02-24bb284ff708"
ports : [f60ccb93-7b3d-4713-922c-37104b7055dc]
static_routes : []
#. The OVN northbound service translates these objects into logical flows,
datapath bindings, and the appropriate multicast groups in the OVN
southbound database.
* Logical flows in the logical router datapath
.. code-block:: console
Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: ingress
table= 0( lr_in_admission), priority= 50,
match=((eth.mcast || eth.dst == fa:16:3e:0c:55:62) &&
inport == "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"),
action=(next;)
table= 1( lr_in_ip_input), priority= 100,
match=(ip4.src == {192.168.1.1, 192.168.1.255}), action=(drop;)
table= 1( lr_in_ip_input), priority= 90,
match=(ip4.dst == 192.168.1.1 && icmp4.type == 8 &&
icmp4.code == 0),
action=(ip4.dst = ip4.src; ip4.src = 192.168.1.1; ip.ttl = 255;
icmp4.type = 0;
inport = ""; /* Allow sending out inport. */ next; )
table= 1( lr_in_ip_input), priority= 90,
match=(inport == "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506" &&
arp.tpa == 192.168.1.1 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:0c:55:62;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:0c:55:62; arp.tpa = arp.spa;
arp.spa = 192.168.1.1;
outport = "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506";
inport = ""; /* Allow sending out inport. */ output;)
table= 1( lr_in_ip_input), priority= 60,
match=(ip4.dst == 192.168.1.1), action=(drop;)
table= 4( lr_in_ip_routing), priority= 24,
match=(ip4.dst == 192.168.1.0/255.255.255.0),
action=(ip.ttl--; reg0 = ip4.dst; reg1 = 192.168.1.1;
eth.src = fa:16:3e:0c:55:62;
outport = "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506";
next;)
Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: egress
table= 1( lr_out_delivery), priority= 100,
match=(outport == "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"),
action=(output;)
* Logical flows in the logical switch datapath
.. code-block:: console
Datapath: 611d35e8-b1e1-442c-bc07-7c6192ad6216 Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "5b72d278-5b16-44a6-9aa0-9e513a429506"),
action=(next;)
table= 3( ls_in_pre_acl), priority= 110,
match=(ip && inport == "5b72d278-5b16-44a6-9aa0-9e513a429506"),
action=(next;)
table= 9( ls_in_arp_rsp), priority= 50,
match=(arp.tpa == 192.168.1.1 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:0c:55:62;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:0c:55:62; arp.tpa = arp.spa;
arp.spa = 192.168.1.1; outport = inport;
inport = ""; /* Allow sending out inport. */ output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:fa:76:8f),
action=(outport = "f112b99a-8ccc-4c52-8733-7593fa0966ea"; output;)
Datapath: 611d35e8-b1e1-442c-bc07-7c6192ad6216 Pipeline: egress
table= 1( ls_out_pre_acl), priority= 110,
match=(ip && outport == "f112b99a-8ccc-4c52-8733-7593fa0966ea"),
action=(next;)
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "f112b99a-8ccc-4c52-8733-7593fa0966ea"),
action=(output;)
* Port bindings
.. code-block:: console
_uuid : 0f86395b-a0d8-40fd-b22c-4c9e238a7880
chassis : []
datapath : 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa
logical_port : "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"
mac : []
options : {peer="5b72d278-5b16-44a6-9aa0-9e513a429506"}
parent_port : []
tag : []
tunnel_key : 1
type : patch
_uuid : 8d95ab8c-c2ea-4231-9729-7ecbfc2cd676
chassis : []
datapath : 4aef86e4-e54a-4c83-bb27-d65c670d4b51
logical_port : "5b72d278-5b16-44a6-9aa0-9e513a429506"
mac : ["fa:16:3e:0c:55:62 192.168.1.1"]
options : {peer="lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"}
parent_port : []
tag : []
tunnel_key : 3
type : patch
* Multicast groups
.. code-block:: console
_uuid : 4a6191aa-d8ac-4e93-8306-b0d8fbbe4e35
datapath : 4aef86e4-e54a-4c83-bb27-d65c670d4b51
name : _MC_flood
ports : [8d95ab8c-c2ea-4231-9729-7ecbfc2cd676,
be71fac3-9f04-41c9-9951-f3f7f1fa1ec5,
da5c1269-90b7-4df2-8d76-d4575754b02d]
tunnel_key : 65535
In addition, if the self-service network contains ports with IP addresses
(typically instances or DHCP servers), OVN creates a logical flow for
each port, similar to the following example.
.. code-block:: console
Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: ingress
table= 5( lr_in_arp_resolve), priority= 100,
match=(outport == "lrp-f112b99a-8ccc-4c52-8733-7593fa0966ea" &&
reg0 == 192.168.1.11),
action=(eth.dst = fa:16:3e:b6:91:70; next;)
#. On each compute node, the OVN controller service creates patch ports,
similar to the following example.
.. code-block:: console
7(patch-f112b99a-): addr:4e:01:91:2a:73:66
config: 0
state: 0
speed: 0 Mbps now, 0 Mbps max
8(patch-lrp-f112b): addr:be:9d:7b:31:bb:87
config: 0
state: 0
speed: 0 Mbps now, 0 Mbps max
#. On all compute nodes, the OVN controller service creates the
following additional flows:
.. code-block:: console
cookie=0x0, duration=6.667s, table=0, n_packets=0, n_bytes=0,
idle_age=6, priority=100,in_port=8
actions=load:0x9->OXM_OF_METADATA[],load:0x1->NXM_NX_REG6[],
resubmit(,16)
cookie=0x0, duration=6.667s, table=0, n_packets=0, n_bytes=0,
idle_age=6, priority=100,in_port=7
actions=load:0x7->OXM_OF_METADATA[],load:0x4->NXM_NX_REG6[],
resubmit(,16)
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg6=0x4,metadata=0x7
actions=resubmit(,17)
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg6=0x1,metadata=0x9,
dl_dst=fa:16:3e:fa:76:8f
actions=resubmit(,17)
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg6=0x1,metadata=0x9,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=resubmit(,17)
cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x9,nw_src=192.168.1.1
actions=drop
cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x9,nw_src=192.168.1.255
actions=drop
cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=90,arp,reg6=0x1,metadata=0x9,
arp_tpa=192.168.1.1,arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:fa:76:8f,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163efa768f->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80101->NXM_OF_ARP_SPA[],load:0x1->NXM_NX_REG7[],
load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=90,icmp,metadata=0x9,nw_dst=192.168.1.1,
icmp_type=8,icmp_code=0
actions=move:NXM_OF_IP_SRC[]->NXM_OF_IP_DST[],mod_nw_src:192.168.1.1,
load:0xff->NXM_NX_IP_TTL[],load:0->NXM_OF_ICMP_TYPE[],
load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,18)
cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=60,ip,metadata=0x9,nw_dst=192.168.1.1
actions=drop
cookie=0x0, duration=6.674s, table=20, n_packets=0, n_bytes=0,
idle_age=6, priority=24,ip,metadata=0x9,nw_dst=192.168.1.0/24
actions=dec_ttl(),move:NXM_OF_IP_DST[]->NXM_NX_REG0[],
load:0xc0a80101->NXM_NX_REG1[],mod_dl_src:fa:16:3e:fa:76:8f,
load:0x1->NXM_NX_REG7[],resubmit(,21)
cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg0=0xc0a80103,reg7=0x1,metadata=0x9
actions=mod_dl_dst:fa:16:3e:d5:00:02,resubmit(,22)
cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg0=0xc0a80102,reg7=0x1,metadata=0x9
actions=mod_dl_dst:fa:16:3e:82:8b:0e,resubmit(,22)
cookie=0x0, duration=6.673s, table=21, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg0=0xc0a8010b,reg7=0x1,metadata=0x9
actions=mod_dl_dst:fa:16:3e:b6:91:70,resubmit(,22)
cookie=0x0, duration=6.673s, table=25, n_packets=0, n_bytes=0,
idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.1,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:fa:76:8f,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163efa768f->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80101->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=6.674s, table=26, n_packets=0, n_bytes=0,
idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:fa:76:8f
actions=load:0x4->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=6.667s, table=33, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x4,metadata=0x7
actions=resubmit(,34)
cookie=0x0, duration=6.667s, table=33, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x1,metadata=0x9
actions=resubmit(,34)
cookie=0x0, duration=6.667s, table=34, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg6=0x4,reg7=0x4,metadata=0x7
actions=drop
cookie=0x0, duration=6.667s, table=34, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg6=0x1,reg7=0x1,metadata=0x9
actions=drop
cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=110,ipv6,reg7=0x4,metadata=0x7
actions=resubmit(,50)
cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=110,ip,reg7=0x4,metadata=0x7
actions=resubmit(,50)
cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x1,metadata=0x9
actions=resubmit(,64)
cookie=0x0, duration=6.673s, table=55, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg7=0x4,metadata=0x7
actions=resubmit(,64)
cookie=0x0, duration=6.667s, table=64, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x4,metadata=0x7
actions=output:7
cookie=0x0, duration=6.667s, table=64, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x1,metadata=0x9
actions=output:8
#. On compute nodes not containing a port on the network, the OVN controller
also creates additional flows.
.. code-block:: console
cookie=0x0, duration=6.673s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=100,metadata=0x7,
dl_src=01:00:00:00:00:00/01:00:00:00:00:00
actions=drop
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=100,metadata=0x7,vlan_tci=0x1000/0x1000
actions=drop
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg6=0x3,metadata=0x7,
dl_src=fa:16:3e:b6:91:70
actions=resubmit(,17)
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg6=0x2,metadata=0x7
actions=resubmit(,17)
cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg6=0x1,metadata=0x7
actions=resubmit(,17)
cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=90,ip,reg6=0x3,metadata=0x7,
dl_src=fa:16:3e:b6:91:70,nw_src=192.168.1.11
actions=resubmit(,18)
cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=90,udp,reg6=0x3,metadata=0x7,
dl_src=fa:16:3e:b6:91:70,nw_src=0.0.0.0,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=resubmit(,18)
cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=80,ip,reg6=0x3,metadata=0x7,
dl_src=fa:16:3e:b6:91:70
actions=drop
cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=80,ipv6,reg6=0x3,metadata=0x7,
dl_src=fa:16:3e:b6:91:70
actions=drop
cookie=0x0, duration=6.670s, table=17, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,18)
cookie=0x0, duration=6.674s, table=18, n_packets=0, n_bytes=0,
idle_age=6, priority=90,arp,reg6=0x3,metadata=0x7,
dl_src=fa:16:3e:b6:91:70,arp_spa=192.168.1.11,
arp_sha=fa:16:3e:b6:91:70
actions=resubmit(,19)
cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0,
idle_age=6, priority=80,icmp6,reg6=0x3,metadata=0x7,icmp_type=135,
icmp_code=0
actions=drop
cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0,
idle_age=6, priority=80,icmp6,reg6=0x3,metadata=0x7,icmp_type=136,
icmp_code=0
actions=drop
cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0,
idle_age=6, priority=80,arp,reg6=0x3,metadata=0x7
actions=drop
cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,19)
cookie=0x0, duration=6.673s, table=19, n_packets=0, n_bytes=0,
idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=136,icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=6.673s, table=19, n_packets=0, n_bytes=0,
idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=135,icmp_code=0
actions=resubmit(,20)
cookie=0x0, duration=6.674s, table=19, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x7
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=6.670s, table=19, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,metadata=0x7
actions=load:0x1->NXM_NX_REG0[0],resubmit(,20)
cookie=0x0, duration=6.674s, table=19, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,20)
cookie=0x0, duration=6.673s, table=20, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,21)
cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,reg0=0x1/0x1,metadata=0x7
actions=ct(table=22,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=6.670s, table=21, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,reg0=0x1/0x1,metadata=0x7
actions=ct(table=22,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,22)
cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,ct_state=-new+est-rel-inv+trk,metadata=0x7
actions=resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,ct_state=-new-est+rel-inv+trk,metadata=0x7
actions=resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,ct_state=+inv+trk,metadata=0x7
actions=drop
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=135,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=136,
icmp_code=0
actions=resubmit(,23)
cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,udp,reg6=0x3,metadata=0x7,
nw_dst=255.255.255.255,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,udp,reg6=0x3,metadata=0x7,
nw_dst=192.168.1.0/24,tp_src=68,tp_dst=67
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,ct_state=+new+trk,ipv6,reg6=0x3,metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,ct_state=+new+trk,ip,reg6=0x3,metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=2001,ip,reg6=0x3,metadata=0x7
actions=drop
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=2001,ipv6,reg6=0x3,metadata=0x7
actions=drop
cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=1,ipv6,metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=1,ip,metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23)
cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,23)
cookie=0x0, duration=6.673s, table=23, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,24)
cookie=0x0, duration=6.674s, table=24, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,reg0=0x2/0x2,metadata=0x7
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25)
cookie=0x0, duration=6.674s, table=24, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,reg0=0x2/0x2,metadata=0x7
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25)
cookie=0x0, duration=6.673s, table=24, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,reg0=0x4/0x4,metadata=0x7
actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=6.670s, table=24, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,reg0=0x4/0x4,metadata=0x7
actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=6.674s, table=24, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,25)
cookie=0x0, duration=6.673s, table=25, n_packets=0, n_bytes=0,
idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.11,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:b6:91:70,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163eb69170->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a8010b->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=6.670s, table=25, n_packets=0, n_bytes=0,
idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.3,arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:d5:00:02,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163ed50002->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80103->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=6.670s, table=25, n_packets=0, n_bytes=0,
idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.2,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:82:8b:0e,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163e828b0e->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80102->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=6.674s, table=25, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,26)
cookie=0x0, duration=6.674s, table=26, n_packets=0, n_bytes=0,
idle_age=6, priority=100,metadata=0x7,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=load:0xffff->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=6.674s, table=26, n_packets=0, n_bytes=0,
idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:d5:00:02
actions=load:0x2->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=6.673s, table=26, n_packets=0, n_bytes=0,
idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:b6:91:70
actions=load:0x3->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=6.670s, table=26, n_packets=0, n_bytes=0,
idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:82:8b:0e
actions=load:0x1->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=6.674s, table=32, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x3,metadata=0x7
actions=load:0x7->NXM_NX_TUN_ID[0..23],
set_field:0x3/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:3
cookie=0x0, duration=6.673s, table=32, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x2,metadata=0x7
actions=load:0x7->NXM_NX_TUN_ID[0..23],
set_field:0x2/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:3
cookie=0x0, duration=6.670s, table=32, n_packets=0, n_bytes=0,
idle_age=6, priority=100,reg7=0x1,metadata=0x7
actions=load:0x7->NXM_NX_TUN_ID[0..23],
set_field:0x1/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:5
cookie=0x0, duration=6.674s, table=48, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,49)
cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=135,icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=136,icmp_code=0
actions=resubmit(,50)
cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,metadata=0x7
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,metadata=0x7
actions=load:0x1->NXM_NX_REG0[0],resubmit(,50)
cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,50)
cookie=0x0, duration=6.674s, table=50, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,reg0=0x1/0x1,metadata=0x7
actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=6.673s, table=50, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,reg0=0x1/0x1,metadata=0x7
actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=6.673s, table=50, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,51)
cookie=0x0, duration=6.670s, table=51, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,52)
cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,ct_state=+inv+trk,metadata=0x7
actions=drop
cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,ct_state=-new+est-rel-inv+trk,metadata=0x7
actions=resubmit(,53)
cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,ct_state=-new-est+rel-inv+trk,metadata=0x7
actions=resubmit(,53)
cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=136,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=135,
icmp_code=0
actions=resubmit(,53)
cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,ct_state=+new+trk,ip,reg7=0x3,metadata=0x7,
nw_src=192.168.1.11
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=6.670s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,ct_state=+new+trk,ip,reg7=0x3,metadata=0x7,
nw_src=192.168.1.11
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=6.670s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,udp,reg7=0x3,metadata=0x7,
nw_src=192.168.1.0/24,tp_src=67,tp_dst=68
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=6.670s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3,
metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=2001,ip,reg7=0x3,metadata=0x7
actions=drop
cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=2001,ipv6,reg7=0x3,metadata=0x7
actions=drop
cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=1,ip,metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=1,ipv6,metadata=0x7
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,53)
cookie=0x0, duration=6.674s, table=53, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,reg0=0x4/0x4,metadata=0x7
actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=6.674s, table=53, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,reg0=0x4/0x4,metadata=0x7
actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=6.673s, table=53, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ipv6,reg0=0x2/0x2,metadata=0x7
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=6.673s, table=53, n_packets=0, n_bytes=0,
idle_age=6, priority=100,ip,reg0=0x2/0x2,metadata=0x7
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=6.674s, table=53, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,54)
cookie=0x0, duration=6.674s, table=54, n_packets=0, n_bytes=0,
idle_age=6, priority=90,ip,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:b6:91:70,nw_dst=255.255.255.255
actions=resubmit(,55)
cookie=0x0, duration=6.673s, table=54, n_packets=0, n_bytes=0,
idle_age=6, priority=90,ip,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:b6:91:70,nw_dst=192.168.1.11
actions=resubmit(,55)
cookie=0x0, duration=6.673s, table=54, n_packets=0, n_bytes=0,
idle_age=6, priority=90,ip,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:b6:91:70,nw_dst=224.0.0.0/4
actions=resubmit(,55)
cookie=0x0, duration=6.670s, table=54, n_packets=0, n_bytes=0,
idle_age=6, priority=80,ip,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:b6:91:70
actions=drop
cookie=0x0, duration=6.670s, table=54, n_packets=0, n_bytes=0,
idle_age=6, priority=80,ipv6,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:b6:91:70
actions=drop
cookie=0x0, duration=6.674s, table=54, n_packets=0, n_bytes=0,
idle_age=6, priority=0,metadata=0x7
actions=resubmit(,55)
cookie=0x0, duration=6.673s, table=55, n_packets=0, n_bytes=0,
idle_age=6, priority=100,metadata=0x7,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=resubmit(,64)
cookie=0x0, duration=6.674s, table=55, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg7=0x3,metadata=0x7,
dl_dst=fa:16:3e:b6:91:70
actions=resubmit(,64)
cookie=0x0, duration=6.673s, table=55, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg7=0x1,metadata=0x7
actions=resubmit(,64)
cookie=0x0, duration=6.670s, table=55, n_packets=0, n_bytes=0,
idle_age=6, priority=50,reg7=0x2,metadata=0x7
actions=resubmit(,64)
#. On compute nodes containing a port on the network, the OVN controller
also creates an additional flow.
.. code-block:: console
cookie=0x0, duration=13.358s, table=52, n_packets=0, n_bytes=0,
idle_age=13, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3,
metadata=0x7,ipv6_src=::
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
.. todo: Future commit
Attach the router to a second self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. todo: Add after NAT patches merge.
Attach the router to an external network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@ -1,526 +0,0 @@
.. _refarch-selfservice-networks:
Self-service networks
---------------------
A self-service (project) network includes only virtual components, thus
enabling projects to manage them without additional configuration of the
underlying physical network. The OVN mechanism driver supports Geneve
and VLAN network types with a preference toward Geneve. Projects can
choose to isolate self-service networks, connect two or more together
via routers, or connect them to provider networks via routers with
appropriate capabilities. Similar to provider networks, self-service
networks can use arbitrary names.
.. note::
Similar to provider networks, self-service VLAN networks map to a
unique bridge on each compute node that supports launching instances
on those networks. Self-service VLAN networks also require several
commands at the host and OVS levels. The following example assumes
use of Geneve self-service networks.
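If you do use self-service VLAN networks, the bridge mapping mentioned in
the note above is typically configured at the OVS level along these lines
(bridge and physical network names are illustrative):

.. code-block:: console

   # ovs-vsctl set open . external-ids:ovn-bridge-mappings=physnet1:br-vlan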
Create a self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Creating a self-service network involves several commands at the
Networking service level that yield a series of operations at the OVN
level to create the virtual network components. The following example
creates a Geneve self-service network and binds a subnet to it. The
subnet uses DHCP to distribute IP addresses to instances.
#. On the controller node, source the credentials for a regular
(non-privileged) project. The following example uses the ``demo``
project.
#. On the controller node, create a self-service network in the Networking
service.
.. code-block:: console
$ openstack network create selfservice
+-------------------------+--------------------------------------+
| Field | Value |
+-------------------------+--------------------------------------+
| admin_state_up | UP |
| availability_zone_hints | |
| availability_zones | |
| created_at | 2016-06-09T15:42:41 |
| description | |
| id | f49791f7-e653-4b43-99b1-0f5557c313e4 |
| ipv4_address_scope | None |
| ipv6_address_scope | None |
| mtu | 1442 |
| name | selfservice |
| port_security_enabled | True |
| project_id | 1ef26f483b9d44e8ac0c97388d6cb609 |
| router_external | Internal |
| shared | False |
| status | ACTIVE |
| subnets | |
| tags | [] |
| updated_at | 2016-06-09T15:42:41 |
+-------------------------+--------------------------------------+
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations
during creation of a self-service network.
#. The mechanism driver translates the network into a logical switch in
the OVN northbound database.
.. code-block:: console
_uuid               : 0ab40684-7cf8-4d6c-ae8b-9d9143762d37
acls : []
external_ids : {"neutron:network_name"="selfservice"}
name : "neutron-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5"
ports : []
#. The OVN northbound service translates this object into new datapath
bindings and logical flows in the OVN southbound database.
* Datapath bindings
.. code-block:: console
_uuid : 0b214af6-8910-489c-926a-fd0ed16a8251
external_ids : {logical-switch="15e2c80b-1461-4003-9869-80416cd97de5"}
tunnel_key : 5
* Logical flows
.. code-block:: console
Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251 Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 100, match=(eth.src[40]),
action=(drop;)
table= 0( ls_in_port_sec_l2), priority= 100, match=(vlan.present),
action=(drop;)
table= 1( ls_in_port_sec_ip), priority= 0, match=(1),
action=(next;)
table= 2( ls_in_port_sec_nd), priority= 0, match=(1),
action=(next;)
table= 3( ls_in_pre_acl), priority= 0, match=(1),
action=(next;)
table= 4( ls_in_pre_lb), priority= 0, match=(1),
action=(next;)
table= 5( ls_in_pre_stateful), priority= 100, match=(reg0[0] == 1),
action=(ct_next;)
table= 5( ls_in_pre_stateful), priority= 0, match=(1),
action=(next;)
table= 6( ls_in_acl), priority= 0, match=(1),
action=(next;)
table= 7( ls_in_lb), priority= 0, match=(1),
action=(next;)
table= 8( ls_in_stateful), priority= 100, match=(reg0[2] == 1),
action=(ct_lb;)
table= 8( ls_in_stateful), priority= 100, match=(reg0[1] == 1),
action=(ct_commit; next;)
table= 8( ls_in_stateful), priority= 0, match=(1),
action=(next;)
table= 9( ls_in_arp_rsp), priority= 0, match=(1),
action=(next;)
table=10( ls_in_l2_lkup), priority= 100, match=(eth.mcast),
action=(outport = "_MC_flood"; output;)
Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251 Pipeline: egress
table= 0( ls_out_pre_lb), priority= 0, match=(1),
action=(next;)
table= 1( ls_out_pre_acl), priority= 0, match=(1),
action=(next;)
table= 2(ls_out_pre_stateful), priority= 100, match=(reg0[0] == 1),
action=(ct_next;)
table= 2(ls_out_pre_stateful), priority= 0, match=(1),
action=(next;)
table= 3( ls_out_lb), priority= 0, match=(1),
action=(next;)
table= 4( ls_out_acl), priority= 0, match=(1),
action=(next;)
table= 5( ls_out_stateful), priority= 100, match=(reg0[1] == 1),
action=(ct_commit; next;)
table= 5( ls_out_stateful), priority= 100, match=(reg0[2] == 1),
action=(ct_lb;)
table= 5( ls_out_stateful), priority= 0, match=(1),
action=(next;)
table= 6( ls_out_port_sec_ip), priority= 0, match=(1),
action=(next;)
table= 7( ls_out_port_sec_l2), priority= 100, match=(eth.mcast),
action=(output;)
.. note::
These actions do not create flows on any nodes.
Create a subnet on the self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A self-service network requires at least one subnet. In most cases,
the environment provides suitable values for IP address allocation for
instances, default gateway IP address, and metadata such as name
resolution.
#. On the controller node, create a subnet bound to the self-service network
``selfservice``.
.. code-block:: console
$ openstack subnet create --network selfservice --subnet-range 192.168.1.0/24 selfservice-v4
+-------------------+--------------------------------------+
| Field | Value |
+-------------------+--------------------------------------+
| allocation_pools | 192.168.1.2-192.168.1.254 |
| cidr | 192.168.1.0/24 |
| created_at | 2016-06-16 00:19:08+00:00 |
| description | |
| dns_nameservers | |
| enable_dhcp | True |
| gateway_ip | 192.168.1.1 |
| headers | |
| host_routes | |
| id | 8f027f25-0112-45b9-a1b9-2f8097c57219 |
| ip_version | 4 |
| ipv6_address_mode | None |
| ipv6_ra_mode | None |
| name | selfservice-v4 |
| network_id | 8ed4e43b-63ef-41ed-808b-b59f1120aec0 |
| project_id | b1ebf33664df402693f729090cfab861 |
| subnetpool_id | None |
| updated_at | 2016-06-16 00:19:08+00:00 |
+-------------------+--------------------------------------+
If using DHCP to manage instance IP addresses, adding a subnet causes a series
of operations in the Networking service and OVN.
* The Networking service schedules the network on an appropriate number of
  DHCP agents. The example environment contains three DHCP agents.
* Each DHCP agent spawns a network namespace with a ``dnsmasq`` process using
  an IP address from the subnet allocation (see the example after this list).
* The OVN mechanism driver creates a logical switch port object in the OVN
northbound database for each ``dnsmasq`` process.
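For example, on a node hosting one of these DHCP agents, the namespace
spawned for the ``dnsmasq`` process is visible with ``ip netns`` (output
illustrative):

.. code-block:: console

   # ip netns list
   qdhcp-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5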
OVN operations
^^^^^^^^^^^^^^
The OVN mechanism driver and OVN perform the following operations
during creation of a subnet on a self-service network.
#. If the subnet uses DHCP for IP address management, the OVN mechanism
   driver creates logical ports for each DHCP agent serving the subnet and
   binds them to the logical switch. In this example, two DHCP agents serve
   the subnet.
.. code-block:: console
_uuid : 1ed7c28b-dc69-42b8-bed6-46477bb8b539
addresses : ["fa:16:3e:94:db:5e 192.168.1.2"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "0cfbbdca-ff58-4cf8-a7d3-77daaebe3056"
options : {}
parent_name : []
port_security : []
tag : []
type : ""
up : true
_uuid : ae10a5e0-db25-4108-b06a-d2d5c127d9c4
addresses : ["fa:16:3e:90:bd:f1 192.168.1.3"]
enabled : true
external_ids : {"neutron:port_name"=""}
name : "74930ace-d939-4bca-b577-fccba24c3fca"
options : {}
parent_name : []
port_security : []
tag : []
type : ""
up : true
_uuid : 0ab40684-7cf8-4d6c-ae8b-9d9143762d37
acls : []
external_ids : {"neutron:network_name"="selfservice"}
name : "neutron-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5"
ports : [1ed7c28b-dc69-42b8-bed6-46477bb8b539,
ae10a5e0-db25-4108-b06a-d2d5c127d9c4]
#. The OVN northbound service creates port bindings for these logical
ports and adds them to the appropriate multicast group.
* Port bindings
.. code-block:: console
_uuid : 3e463ca0-951c-46fd-b6cf-05392fa3aa1f
chassis : 6a9d0619-8818-41e6-abef-2f3d9a597c03
datapath : 0b214af6-8910-489c-926a-fd0ed16a8251
logical_port : "a203b410-97c1-4e4a-b0c3-558a10841c16"
mac : ["fa:16:3e:a1:dc:58 192.168.1.3"]
options : {}
parent_port : []
tag : []
tunnel_key : 2
type : ""
_uuid : fa7b294d-2a62-45ae-8de3-a41c002de6de
chassis : d63e8ae8-caf3-4a6b-9840-5c3a57febcac
datapath : 0b214af6-8910-489c-926a-fd0ed16a8251
logical_port : "39b23721-46f4-4747-af54-7e12f22b3397"
mac : ["fa:16:3e:1a:b4:23 192.168.1.2"]
options : {}
parent_port : []
tag : []
tunnel_key : 1
type : ""
* Multicast groups
.. code-block:: console
_uuid : c08d0102-c414-4a47-98d9-dd3fa9f9901c
datapath : 0b214af6-8910-489c-926a-fd0ed16a8251
name : _MC_flood
ports : [3e463ca0-951c-46fd-b6cf-05392fa3aa1f,
fa7b294d-2a62-45ae-8de3-a41c002de6de]
tunnel_key : 65535
#. The OVN northbound service translates the logical ports into logical flows
in the OVN southbound database.
.. code-block:: console
Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251 Pipeline: ingress
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "39b23721-46f4-4747-af54-7e12f22b3397"),
action=(next;)
table= 0( ls_in_port_sec_l2), priority= 50,
match=(inport == "a203b410-97c1-4e4a-b0c3-558a10841c16"),
action=(next;)
table= 9( ls_in_arp_rsp), priority= 50,
match=(arp.tpa == 192.168.1.2 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:1a:b4:23;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:1a:b4:23; arp.tpa = arp.spa;
arp.spa = 192.168.1.2; outport = inport;
inport = ""; /* Allow sending out inport. */ output;)
table= 9( ls_in_arp_rsp), priority= 50,
match=(arp.tpa == 192.168.1.3 && arp.op == 1),
action=(eth.dst = eth.src; eth.src = fa:16:3e:a1:dc:58;
arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
arp.sha = fa:16:3e:a1:dc:58; arp.tpa = arp.spa;
arp.spa = 192.168.1.3; outport = inport;
inport = ""; /* Allow sending out inport. */ output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:a1:dc:58),
action=(outport = "a203b410-97c1-4e4a-b0c3-558a10841c16"; output;)
table=10( ls_in_l2_lkup), priority= 50,
match=(eth.dst == fa:16:3e:1a:b4:23),
action=(outport = "39b23721-46f4-4747-af54-7e12f22b3397"; output;)
Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251 Pipeline: egress
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "39b23721-46f4-4747-af54-7e12f22b3397"),
action=(output;)
table= 7( ls_out_port_sec_l2), priority= 50,
match=(outport == "a203b410-97c1-4e4a-b0c3-558a10841c16"),
action=(output;)
#. For each compute node without a DHCP agent on the subnet:
* The OVN controller service translates these objects into flows on the
integration bridge ``br-int``.
.. code-block:: console
# ovs-ofctl dump-flows br-int
cookie=0x0, duration=9.054s, table=32, n_packets=0, n_bytes=0,
idle_age=9, priority=100,reg7=0xffff,metadata=0x5
actions=load:0x5->NXM_NX_TUN_ID[0..23],
set_field:0xffff/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],
output:4,output:3
#. For each compute node with a DHCP agent on the subnet:
* Creation of a DHCP network namespace adds a virtual switch port that
  connects the DHCP agent, with its ``dnsmasq`` process, to the integration
  bridge.
.. code-block:: console
# ovs-ofctl show br-int
OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
9(tap39b23721-46): addr:00:00:00:00:b0:5d
config: PORT_DOWN
state: LINK_DOWN
speed: 0 Mbps now, 0 Mbps max
* The OVN controller service translates these objects into flows on the
integration bridge.
.. code-block:: console
cookie=0x0, duration=21.074s, table=0, n_packets=8, n_bytes=648,
idle_age=11, priority=100,in_port=9
actions=load:0x2->NXM_NX_REG5[],load:0x5->OXM_OF_METADATA[],
load:0x1->NXM_NX_REG6[],resubmit(,16)
cookie=0x0, duration=21.076s, table=16, n_packets=0, n_bytes=0,
idle_age=21, priority=100,metadata=0x5,
dl_src=01:00:00:00:00:00/01:00:00:00:00:00
actions=drop
cookie=0x0, duration=21.075s, table=16, n_packets=0, n_bytes=0,
idle_age=21, priority=100,metadata=0x5,vlan_tci=0x1000/0x1000
actions=drop
cookie=0x0, duration=21.076s, table=16, n_packets=0, n_bytes=0,
idle_age=21, priority=50,reg6=0x2,metadata=0x5
actions=resubmit(,17)
cookie=0x0, duration=21.075s, table=16, n_packets=8, n_bytes=648,
idle_age=11, priority=50,reg6=0x1,metadata=0x5
actions=resubmit(,17)
cookie=0x0, duration=21.075s, table=17, n_packets=8, n_bytes=648,
idle_age=11, priority=0,metadata=0x5
actions=resubmit(,18)
cookie=0x0, duration=21.076s, table=18, n_packets=8, n_bytes=648,
idle_age=11, priority=0,metadata=0x5
actions=resubmit(,19)
cookie=0x0, duration=21.076s, table=19, n_packets=8, n_bytes=648,
idle_age=11, priority=0,metadata=0x5
actions=resubmit(,20)
cookie=0x0, duration=21.075s, table=20, n_packets=8, n_bytes=648,
idle_age=11, priority=0,metadata=0x5
actions=resubmit(,21)
cookie=0x0, duration=5.398s, table=21, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ipv6,reg0=0x1/0x1,metadata=0x5
actions=ct(table=22,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=5.398s, table=21, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ip,reg0=0x1/0x1,metadata=0x5
actions=ct(table=22,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=5.398s, table=22, n_packets=6, n_bytes=508,
idle_age=2, priority=0,metadata=0x5
actions=resubmit(,23)
cookie=0x0, duration=5.398s, table=23, n_packets=6, n_bytes=508,
idle_age=2, priority=0,metadata=0x5
actions=resubmit(,24)
cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ipv6,reg0=0x4/0x4,metadata=0x5
actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ip,reg0=0x4/0x4,metadata=0x5
actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ipv6,reg0=0x2/0x2,metadata=0x5
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25)
cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ip,reg0=0x2/0x2,metadata=0x5
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25)
cookie=0x0, duration=5.399s, table=24, n_packets=6, n_bytes=508,
idle_age=2, priority=0,metadata=0x5 actions=resubmit(,25)
cookie=0x0, duration=5.398s, table=25, n_packets=0, n_bytes=0,
idle_age=5, priority=50,arp,metadata=0x5,
arp_tpa=192.168.1.2,arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:82:8b:0e,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163e828b0e->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80102->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=5.378s, table=25, n_packets=0, n_bytes=0,
idle_age=5, priority=50,arp,metadata=0x5,arp_tpa=192.168.1.3,
arp_op=1
actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],
mod_dl_src:fa:16:3e:d5:00:02,load:0x2->NXM_OF_ARP_OP[],
move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],
load:0xfa163ed50002->NXM_NX_ARP_SHA[],
move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],
load:0xc0a80103->NXM_OF_ARP_SPA[],
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[],
load:0->NXM_OF_IN_PORT[],resubmit(,32)
cookie=0x0, duration=5.399s, table=25, n_packets=6, n_bytes=508,
idle_age=2, priority=0,metadata=0x5
actions=resubmit(,26)
cookie=0x0, duration=5.399s, table=26, n_packets=6, n_bytes=508,
idle_age=2, priority=100,metadata=0x5,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=load:0xffff->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=5.398s, table=26, n_packets=0, n_bytes=0,
idle_age=5, priority=50,metadata=0x5,dl_dst=fa:16:3e:d5:00:02
actions=load:0x2->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=5.398s, table=26, n_packets=0, n_bytes=0,
idle_age=5, priority=50,metadata=0x5,dl_dst=fa:16:3e:82:8b:0e
actions=load:0x1->NXM_NX_REG7[],resubmit(,32)
cookie=0x0, duration=21.038s, table=32, n_packets=0, n_bytes=0,
idle_age=21, priority=100,reg7=0x2,metadata=0x5
actions=load:0x5->NXM_NX_TUN_ID[0..23],
set_field:0x2/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:4
cookie=0x0, duration=21.038s, table=32, n_packets=8, n_bytes=648,
idle_age=11, priority=100,reg7=0xffff,metadata=0x5
actions=load:0x5->NXM_NX_TUN_ID[0..23],
set_field:0xffff/0xffffffff->tun_metadata0,
move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],
output:4,resubmit(,33)
cookie=0x0, duration=5.397s, table=33, n_packets=12, n_bytes=1016,
idle_age=2, priority=100,reg7=0xffff,metadata=0x5
actions=load:0x1->NXM_NX_REG7[],resubmit(,34),
load:0xffff->NXM_NX_REG7[]
cookie=0x0, duration=5.397s, table=33, n_packets=0, n_bytes=0,
idle_age=5, priority=100,reg7=0x1,metadata=0x5
actions=resubmit(,34)
cookie=0x0, duration=21.074s, table=34, n_packets=8, n_bytes=648,
idle_age=11, priority=100,reg6=0x1,reg7=0x1,metadata=0x5
actions=drop
cookie=0x0, duration=21.076s, table=48, n_packets=8, n_bytes=648,
idle_age=11, priority=0,metadata=0x5 actions=resubmit(,49)
cookie=0x0, duration=21.075s, table=49, n_packets=8, n_bytes=648,
idle_age=11, priority=0,metadata=0x5 actions=resubmit(,50)
cookie=0x0, duration=5.398s, table=50, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ipv6,reg0=0x1/0x1,metadata=0x5
actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=5.398s, table=50, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ip,reg0=0x1/0x1,metadata=0x5
actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=5.398s, table=50, n_packets=6, n_bytes=508,
idle_age=3, priority=0,metadata=0x5
actions=resubmit(,51)
cookie=0x0, duration=5.398s, table=51, n_packets=6, n_bytes=508,
idle_age=3, priority=0,metadata=0x5
actions=resubmit(,52)
cookie=0x0, duration=5.398s, table=52, n_packets=6, n_bytes=508,
idle_age=3, priority=0,metadata=0x5
actions=resubmit(,53)
cookie=0x0, duration=5.399s, table=53, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ipv6,reg0=0x4/0x4,metadata=0x5
actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=5.398s, table=53, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ip,reg0=0x4/0x4,metadata=0x5
actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=5.398s, table=53, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ip,reg0=0x2/0x2,metadata=0x5
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=5.398s, table=53, n_packets=0, n_bytes=0,
idle_age=5, priority=100,ipv6,reg0=0x2/0x2,metadata=0x5
actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=5.398s, table=53, n_packets=6, n_bytes=508,
idle_age=3, priority=0,metadata=0x5
actions=resubmit(,54)
cookie=0x0, duration=5.398s, table=54, n_packets=6, n_bytes=508,
idle_age=3, priority=0,metadata=0x5
actions=resubmit(,55)
cookie=0x0, duration=5.398s, table=55, n_packets=6, n_bytes=508,
idle_age=3, priority=100,metadata=0x5,
dl_dst=01:00:00:00:00:00/01:00:00:00:00:00
actions=resubmit(,64)
cookie=0x0, duration=5.398s, table=55, n_packets=0, n_bytes=0,
idle_age=5, priority=50,reg7=0x1,metadata=0x5
actions=resubmit(,64)
cookie=0x0, duration=5.398s, table=55, n_packets=0, n_bytes=0,
idle_age=5, priority=50,reg7=0x2,metadata=0x5
actions=resubmit(,64)
cookie=0x0, duration=5.397s, table=64, n_packets=6, n_bytes=508,
idle_age=3, priority=100,reg7=0x1,metadata=0x5
actions=output:9


@ -1,42 +0,0 @@
Troubleshooting
===============
The following section describes common problems that you might
encounter during or after installation of the OVN ML2 driver with
DevStack, and possible solutions to these problems.
Failure launching VMs
-----------------------
1. Disable AppArmor
On Ubuntu, you might encounter libvirt permission errors when trying to
create OVS ports after launching a VM (visible in the nova-compute log).
Disabling AppArmor might help with this problem; check out
https://help.ubuntu.com/community/AppArmor for instructions on how to
disable it.
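For example, one common way to disable only the libvirt profile (paths may
vary by release) is:

.. code-block:: console

   $ sudo ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/
   $ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd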
Multi-Node setup not working
-----------------------------
1. Geneve kernel module not supported:
By default, OVN creates tunnels between compute nodes using the Geneve
protocol. Older kernels (< 3.18) do not support the Geneve module, so
tunneling cannot work. You can check with the command
``lsmod | grep openvswitch`` (``geneve`` should show up in the result list).
For more information about which upstream kernel version is required for
support of each tunnel type, see the answer to "Why do tunnels not work
when using a kernel module other than the one packaged with Open vSwitch?"
in the OVS FAQ:
https://github.com/openvswitch/ovs/blob/master/FAQ.md
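For example, to check the running kernel version and whether the relevant
modules are loaded:

.. code-block:: console

   $ uname -r
   $ lsmod | grep openvswitch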
2. MTU configuration:
This problem is not unique to OVN, but it is amplified by the possibly
larger size of the Geneve header compared to other common tunneling
protocols (VXLAN). If you are using VMs as compute nodes, make sure that
you either lower the MTU on the virtual interface or enable fragmentation
on it.
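For example, to lower the MTU on a hypothetical virtual interface ``eth1``:

.. code-block:: console

   # ip link set dev eth1 mtu 1400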


@ -1,82 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'openstackdocstheme',
]
# openstackdocstheme options
repository_name = 'openstack/networking-ovn'
bug_project = 'networking-ovn'
bug_tag = ''
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'networking-ovn'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst

View File

@ -1,266 +0,0 @@
Mapping between Neutron and OVN data models
========================================================
The primary job of the Neutron OVN ML2 driver is to translate requests for
resources into OVN's data model. Resources are created in OVN by updating the
appropriate tables in the OVN northbound database (an ovsdb database). This
document looks at the mappings between the data that exists in Neutron and what
the resulting entries in the OVN northbound DB would look like.
Network
----------
::
Neutron Network:
id
name
subnets
admin_state_up
status
tenant_id
Once a network is created, we should create an entry in the Logical Switch
table.
::
OVN northbound DB Logical Switch:
external_ids: {
'neutron:network_name': network.name
}
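For illustration, a roughly equivalent manual operation with ``ovn-nbctl``
(the network UUID and the name ``private`` are hypothetical) would be::

    $ ovn-nbctl ls-add neutron-<network-uuid>
    $ ovn-nbctl set Logical_Switch neutron-<network-uuid> 'external_ids:"neutron:network_name"=private'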
Subnet
---------
::
Neutron Subnet:
id
name
ip_version
network_id
cidr
gateway_ip
allocation_pools
dns_nameservers
host_routes
tenant_id
enable_dhcp
ipv6_ra_mode
ipv6_address_mode
Once a subnet is created, we should create an entry in the DHCP Options table
with the DHCPv4 or DHCPv6 options.
::
OVN northbound DB DHCP_Options:
cidr
options
external_ids: {
'subnet_id': subnet.id
}
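As an illustration, a similar row could be created by hand with ``ovn-nbctl``
(the CIDR, option values and UUIDs here are hypothetical)::

    $ ovn-nbctl dhcp-options-create 10.0.0.0/24 external_ids:subnet_id=<subnet-uuid>
    $ ovn-nbctl dhcp-options-set-options <dhcp-options-uuid> \
          server_id=10.0.0.1 server_mac=fa:16:3e:01:02:03 lease_time=43200 router=10.0.0.1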
Port
-------
::
Neutron Port:
id
name
network_id
admin_state_up
mac_address
fixed_ips
device_id
device_owner
tenant_id
status
When a port is created, we should create an entry in the Logical Switch Ports
table in the OVN northbound DB.
::
OVN Northbound DB Logical Switch Port:
switch: reference to OVN Logical Switch
router_port: (empty)
name: port.id
up: (read-only)
macs: [port.mac_address]
port_security:
external_ids: {'neutron:port_name': port.name}
If the port has extra DHCP options defined, we should create an entry
in the DHCP Options table in the OVN northbound DB.
::
OVN northbound DB DHCP_Options:
cidr
options
external_ids: {
'subnet_id': subnet.id,
'port_id': port.id
}
Router
----------
::
Neutron Router:
id
name
admin_state_up
status
tenant_id
external_gw_info:
network_id
external_fixed_ips: list of dicts
ip_address
subnet_id
...
::
OVN Northbound DB Logical Router:
ip:
default_gw:
external_ids:
Router Port
--------------
...
::
OVN Northbound DB Logical Router Port:
router: (reference to Logical Router)
network: (reference to network this port is connected to)
mac:
external_ids:
Security Groups
----------------
::
Neutron Port:
id
security_group: id
network_id
Neutron Security Group
id
name
tenant_id
security_group_rules
Neutron Security Group Rule
id
tenant_id
security_group_id
direction
remote_group_id
ethertype
protocol
port_range_min
port_range_max
remote_ip_prefix
...
::
OVN Northbound DB ACL Rule:
lswitch: (reference to Logical Switch - port.network_id)
priority: (0..65535)
match: boolean expressions according to security rule
Translation map (sg_rule ==> match expression)
-----------------------------------------------
sg_rule.direction="Ingress" => "inport=port.id"
sg_rule.direction="Egress" => "outport=port.id"
sg_rule.ethertype => "eth.type"
sg_rule.protocol => "ip.proto"
sg_rule.port_range_min/port_range_max =>
"port_range_min &lt;= tcp.src &lt;= port_range_max"
"port_range_min &lt;= udp.src &lt;= port_range_max"
sg_rule.remote_ip_prefix => "ip4.src/mask, ip4.dst/mask, ipv6.src/mask, ipv6.dst/mask"
(all match options for ACL can be found here:
https://github.com/openvswitch/ovs/blob/ovn/ovn/ovn-sb.xml)
action: "allow-related"
log: true/false
external_ids: {'neutron:port_id': port.id}
{'neutron:security_rule_id': security_rule.id}
Security groups map three Neutron objects to one OVN-NB object; this
enables us to do the mapping in various ways, depending on OVN capabilities.
The current implementation will use the first option in this list for
simplicity, but all options are kept here for future reference.
1) For every <neutron port, security rule> pair, define an ACL entry::
Leads to many ACL entries.
acl.match = sg_rule converted
example: ((inport==port.id) && (ip.proto == "tcp") &&
(1024 <= tcp.src <= 4095) && (ip.src==192.168.0.1/16))
external_ids: {'neutron:port_id': port.id}
{'neutron:security_rule_id': security_rule.id}
2) For every <neutron port, security group> pair, define an ACL entry::
Reduce the number of ACL entries.
Means we have to manage the match field in case specific rule changes
example: (((inport==port.id) && (ip.proto == "tcp") &&
(1024 <= tcp.src <= 4095) && (ip.src==192.168.0.1/16)) ||
((outport==port.id) && (ip.proto == "udp") && (1024 <= tcp.src <= 4095)) ||
((inport==port.id) && (ip.proto == 6) ) ||
((inport==port.id) && (eth.type == 0x86dd)))
(This example is a security group with four security rules)
external_ids: {'neutron:port_id': port.id}
{'neutron:security_group_id': security_group.id}
3) For every <lswitch, security group> pair, define an ACL entry::
Reduce even more the number of ACL entries.
Manage complexity increase
example: (((inport==port.id) && (ip.proto == "tcp") && (1024 <= tcp.src <= 4095)
&& (ip.src==192.168.0.1/16)) ||
((outport==port.id) && (ip.proto == "udp") && (1024 <= tcp.src <= 4095)) ||
((inport==port.id) && (ip.proto == 6) ) ||
((inport==port.id) && (eth.type == 0x86dd))) ||
(((inport==port2.id) && (ip.proto == "tcp") && (1024 <= tcp.src <= 4095)
&& (ip.src==192.168.0.1/16)) ||
((outport==port2.id) && (ip.proto == "udp") && (1024 <= tcp.src <= 4095)) ||
((inport==port2.id) && (ip.proto == 6) ) ||
((inport==port2.id) && (eth.type == 0x86dd)))
external_ids: {'neutron:security_group': security_group.id}
Which option to pick depends on OVN match field length capabilities, and the
trade off between better performance due to less ACL entries compared to the
complexity to manage them.
If the default behaviour is not "drop" for unmatched entries, a rule with the
lowest priority must be added to drop all traffic ("match==1"); see the sketch
below.
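Such a fallback could hypothetically be added with ``ovn-nbctl acl-add``
(the switch name is illustrative)::

    $ ovn-nbctl acl-add neutron-<network-uuid> to-lport 0 "1" drop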
Spoofing protection rules are added by OVN internally, and we need to
ignore these automatically added rules in Neutron.

View File

@ -1,11 +0,0 @@
============
Design Notes
============
.. toctree::
:maxdepth: 1
data_model
native_dhcp
ovn_worker
metadata_api

View File

@ -1,355 +0,0 @@
OpenStack Metadata API and OVN
==============================
Introduction
------------
OpenStack Nova presents a metadata API to VMs similar to what is available on
Amazon EC2. Neutron is involved in this process because the source IP address
is not enough to uniquely identify the source of a metadata request since
networks can have overlapping IP addresses. Neutron is responsible for
intercepting metadata API requests and adding HTTP headers which uniquely
identify the source of the request before forwarding it to the metadata API
server.
The purpose of this document is to propose a design for how to enable this
functionality when OVN is used as the backend for OpenStack Neutron.
Neutron and Metadata Today
--------------------------
The following blog post describes how VMs access the metadata API through
Neutron today.
https://www.suse.com/communities/blog/vms-get-access-metadata-neutron/
In summary, we run a metadata proxy in either the router namespace or DHCP
namespace. The DHCP namespace can be used when there's no router connected to
the network. The one downside to the DHCP namespace approach is that it
requires pushing a static route to the VM through DHCP so that it knows to
route metadata requests to the DHCP server IP address.
* Instance sends a HTTP request for metadata to 169.254.169.254
* This request either hits the router or DHCP namespace depending on the route
in the instance
* The metadata proxy service in the namespace adds the following info to the
request:
* Instance IP (X-Forwarded-For header)
* Router or Network-ID (X-Neutron-Network-Id or X-Neutron-Router-Id header)
* The metadata proxy service sends this request to the metadata agent (outside
the namespace) via a UNIX domain socket.
* The neutron-metadata-agent service forwards the request to the Nova metadata
API service by adding some new headers (instance ID and Tenant ID) to the
request [0].
For proper operation, Neutron and Nova must be configured to communicate
together with a shared secret. Neutron uses this secret to sign the Instance-ID
header of the metadata request to prevent spoofing. This secret is configured
through metadata_proxy_shared_secret on both nova and neutron configuration
files (optional).
[0] https://github.com/openstack/neutron/blob/master/neutron/agent/metadata/agent.py#L167
Neutron and Metadata with OVN
-----------------------------
The current metadata API approach does not translate directly to OVN. There
are no Neutron agents in use with OVN. Further, OVN makes no use of its own
network namespaces that we could take advantage of like the original
implementation makes use of the router and dhcp namespaces.
We must use a modified approach that fits the OVN model. This section details
a proposed approach.
Overview of Proposed Approach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The proposed approach would be similar to the *isolated network* case in the
current ML2+OVS implementation. Therefore, we would be running a metadata
proxy (haproxy) instance on every hypervisor for each network a VM on that
host is connected to.
The downside of this approach is that we'll be running more metadata proxies
than we do now in the case of routed networks (one per virtual router), but
since haproxy is very lightweight and the proxies will be idle most of the
time, it shouldn't be a big issue overall. However, the major benefit of this
approach is that we don't have to implement any scheduling logic to distribute
metadata proxies across the nodes, nor any HA logic. This, however, can be
evolved in the future as explained below in this document.
Also, this approach relies on a new feature in OVN that we must implement
first so that an OVN port can be present on *every* chassis (similar to
*localnet* ports). This new type of logical port would be *localport* and we
will never forward packets over a tunnel for these ports. We would only send
packets to the local instance of a *localport*.
**Step 1** - Create a port for the metadata proxy
When using the DHCP agent today, Neutron automatically creates a port for the
DHCP agent to use. We could do the same thing for use with the metadata proxy
(haproxy). We'll create an OVN *localport* which will be present on every
chassis and this port will have the same MAC/IP address on every host.
Eventually, we can share the same neutron port for both DHCP and metadata.
**Step 2** - Routing metadata API requests to the correct Neutron port
This works similarly to the current approach.
We would program OVN to include a static route in DHCP responses that routes
metadata API requests to the *localport* that is hosting the metadata API
proxy.
Also, in case DHCP isn't enabled or the client ignores the route info, we
will program a static route in the OVN logical router which will still get
metadata requests directed to the right place.
If the DHCP route does not work and the network is isolated, VMs won't get
metadata, but this already happens with the current implementation so this
approach doesn't introduce a regression.
**Step 3** - Management of the namespaces and haproxy instances
We propose a new agent in networking-ovn called ``neutron-ovn-metadata-agent``.
We will run this agent on every hypervisor, and it will be responsible for
spawning and managing the OVS interfaces, network namespaces and haproxy
processes used to proxy metadata API requests.
**Step 4** - Metadata API request processing
Similar to the existing neutron metadata agent, ``neutron-ovn-metadata-agent``
must act as an intermediary between haproxy and the Nova metadata API service.
``neutron-ovn-metadata-agent`` is the process that will have access to the
host networks where the Nova metadata API exists. Each haproxy will be in a
network namespace not able to reach the appropriate host network. Haproxy
will add the necessary headers to the metadata API request and then forward it
to ``neutron-ovn-metadata-agent`` over a UNIX domain socket, which matches the
behavior of the current metadata agent.
Metadata Proxy Management Logic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In neutron-ovn-metadata-agent.
* On startup:
* Do a full sync. Ensure we have all the required metadata proxies running.
For that, the agent would watch the ``Port_Binding`` table of the OVN
Southbound database and look for all rows with the ``chassis`` column set
to the host the agent is running on. For all those entries, make sure a
metadata proxy instance is spawned for every ``datapath`` (Neutron
network) those ports are attached to. The agent will keep record of the
list of networks it currently has proxies running on by updating the
``external-ids`` key ``neutron-metadata-proxy-networks`` of the OVN
``Chassis`` record in the OVN Southbound database that corresponds to this
host. As an example, this key would look like
``neutron-metadata-proxy-networks=NET1_UUID,NET4_UUID`` meaning that this
chassis is hosting one or more VM's connected to networks 1 and 4 so we
should have a metadata proxy instance running for each. Ensure any running
metadata proxies no longer needed are torn down.
* Open and maintain a connection to the OVN Northbound database (using the
ovsdbapp library). On first connection, and anytime a reconnect happens:
* Do a full sync.
* Register a callback for creates/updates/deletes to Logical_Switch_Port rows
to detect when metadata proxies should be started or torn down.
``neutron-ovn-metadata-agent`` will watch OVN Southbound database
(``Port_Binding`` table) to detect when a port gets bound to its chassis. At
that point, the agent will make sure that there's a metadata proxy
attached to the OVN *localport* for the network which this port is connected
to.
* When a new network is created, we must create an OVN *localport* for use
as a metadata proxy.
* When a network is deleted, we must tear down the metadata proxy instance (if
present) on the host and delete the corresponding OVN *localport*.
Launching a metadata proxy includes:
* Creating a network namespace::
$ sudo ip netns add <ns-name>
* Creating a VETH pair (OVS upgrades that replace the kernel module will make
internal ports go away and then be brought back by OVS scripts. This may cause
some disruption. Therefore, veth pairs are preferred over internal ports)::
$ sudo ip link add <iface-name>0 type veth peer name <iface-name>1
* Creating an OVS interface and placing one end in that namespace::
$ sudo ovs-vsctl add-port br-int <iface-name>0
$ sudo ip link set <iface-name>1 netns <ns-name>
* Setting the IP and MAC addresses on that interface::
$ sudo ip netns exec <ns-name> \
> ip link set <iface-name>1 address <neutron-port-mac>
$ sudo ip netns exec <ns-name> \
> ip addr add <neutron-port-ip>/<netmask> dev <iface-name>1
* Bringing the VETH pair up::
$ sudo ip netns exec <ns-name> ip link set <iface-name>1 up
$ sudo ip link set <iface-name>0 up
* Set ``external-ids:iface-id=NEUTRON_PORT_UUID`` on the OVS interface so that
OVN is able to correlate this new OVS interface with the correct OVN logical
port::
$ sudo ovs-vsctl set Interface <iface-name>0 external_ids:iface-id=<neutron-port-uuid>
* Starting haproxy in this network namespace.
* Add the network UUID to ``external-ids:neutron-metadata-proxy-networks`` on
the Chassis table for our chassis in OVN Southbound database.
Tearing down a metadata proxy includes (a command-level sketch follows this list):
* Removing the network UUID from our chassis.
* Stopping haproxy.
* Deleting the OVS interface.
* Deleting the network namespace.
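A minimal sketch of the teardown commands, mirroring the launch steps above
(all names, PIDs and UUIDs are placeholders)::

    $ ovn-sbctl set Chassis <chassis-name> \
          external_ids:neutron-metadata-proxy-networks=<remaining-network-uuids>
    $ sudo kill <haproxy-pid>
    $ sudo ovs-vsctl del-port br-int <iface-name>0
    $ sudo ip netns del <ns-name>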
**Other considerations**
This feature will be enabled by default in ``networking-ovn``, but there
should be a way to disable it in case operators who don't need metadata don't
have to deal with the complexity of it (haproxy instances, network namespaces,
etcetera). In this case, the agent would not create the neutron ports needed
for metadata.
There could be a race condition when the first VM for a certain network boots
on a hypervisor if it does so before the metadata proxy instance has been
spawned.
Right now, the ``vif-plugged`` event to Nova is sent out when the ``up`` column
in the OVN Northbound database's Logical_Switch_Port table changes to True,
indicating that the VIF is now up. To overcome this race condition we want
to wait until all network UUIDs to which this VM is connected are present
in ``external-ids:neutron-metadata-proxy-networks`` on the Chassis table
for our chassis in the OVN Southbound database. This will delay the event to
Nova until the metadata proxy instance is up and running on the host, ensuring
the VM will be able to get the metadata on boot.
Alternatives Considered
-----------------------
Alternative 1: Build metadata support into ovn-controller
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We've been building some features useful to OpenStack directly into OVN. DHCP
and DNS are key examples of things we've replaced by building them into
ovn-controller. The metadata API case has some key differences that make this
a less attractive solution:
The metadata API is an OpenStack specific feature. DHCP and DNS by contrast
are more clearly useful outside of OpenStack. Building metadata API proxy
support into ovn-controller means embedding an HTTP and TCP stack into
ovn-controller. This is a significant degree of undesired complexity.
This option has been ruled out for these reasons.
Alternative 2: Distributed metadata and High Availability
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this approach, we would spawn a metadata proxy per virtual router or per
network (if isolated), thus, improving the number of metadata proxy instances
running in the cloud. However, scheduling and HA have to be considered. Also,
we wouldn't need the OVN *localport* implementation.
``neutron-ovn-metadata-agent`` would run on any host that we wish to be able
to host metadata API proxies. These hosts must also be running ovn-controller.
Each of these hosts will have a Chassis record in the OVN southbound database
created by ovn-controller. The Chassis table has a column called
``external_ids`` which can be used for general metadata however we see fit.
``neutron-ovn-metadata-agent`` will update its corresponding Chassis record
with an external-id of ``neutron-metadata-proxy-host=true`` to indicate that
this OVN chassis is one capable of hosting metadata proxy instances.
Once we have a way to determine hosts capable of hosting metadata API proxies,
we can add logic to the networking-ovn ML2 driver that schedules metadata API
proxies. This would be triggered by Neutron API requests.
The output of the scheduling process would be setting an ``external_ids`` key
on a Logical_Switch_Port in the OVN northbound database that corresponds with
a metadata proxy. The key could be something like
``neutron-metadata-proxy-chassis=CHASSIS_HOSTNAME``.
``neutron-ovn-metadata-agent`` on each host would also be watching for updates
to these Logical_Switch_Port rows. When it detects that a metadata proxy has
been scheduled locally, it will kick off the process to spawn the local
haproxy instance and get it plugged into OVN.
HA must also be considered. We must know when a host goes down so that all
metadata proxies scheduled to that host can be rescheduled. This is almost
the exact same problem we have with L3 HA. When a host goes down, we need to
trigger rescheduling gateways to other hosts. We should ensure that the
approach used for rescheduling L3 gateways can be utilized for rescheduling
metadata proxies, as well.
In neutron-server (networking-ovn).
Introduce a new networking-ovn configuration option:
* ``[ovn] isolated_metadata=[True|False]``
Events that trigger scheduling a new metadata proxy:
* If isolated_metadata is True
* When a new network is created, we must create an OVN logical port for use
as a metadata proxy and then schedule this to one of the
``neutron-ovn-metadata-agent`` instances.
* If isolated_metadata is False
* When a network is attached to or removed from a logical router, ensure
that at least one of the networks has a metadata proxy port already
created. If not, pick a network and create a metadata proxy port and then
schedule it to an agent. At this point, we need to update the static route
for metadata API.
Events that trigger unscheduling an existing metadata proxy:
* When a network is deleted, delete the metadata proxy port if it exists and
unschedule it from a ``neutron-ovn-metadata-agent``.
To schedule a new metadata proxy:
* Determine the list of available OVN Chassis that can host metadata proxies
by reading the ``Chassis`` table of the OVN Southbound database. Look for
chassis that have an external-id of ``neutron-metadata-proxy-host=true``.
* Of the available OVN chassis, choose the one “least loaded”, or currently
hosting the fewest number of metadata proxies.
* Set ``neutron-metadata-proxy-chassis=CHASSIS_HOSTNAME`` as an external-id on
the Logical_Switch_Port in the OVN Northbound database that corresponds to
the neutron port used for this metadata proxy. ``CHASSIS_HOSTNAME`` maps to
the hostname row of a Chassis record in the OVN Southbound database.
This approach has been ruled out for its complexity, although we have analyzed
the details deeply because, eventually and depending on the implementation of
L3 HA, we may want to evolve to it.
Other References
----------------
* Haproxy config --
https://review.openstack.org/#/c/431691/34/neutron/agent/metadata/driver.py
* https://engineeringblog.yelp.com/2015/04/true-zero-downtime-haproxy-reloads.html

View File

@ -1,50 +0,0 @@
Using the native DHCP feature provided by OVN
=============================================
DHCPv4
------
OVN implements native DHCPv4 support, which caters to the common use case of
providing an IP address to a booting instance via stateless replies to
DHCPv4 requests based on statically configured address mappings. To do this, it
allows a short list of DHCPv4 options to be configured and applied at each
compute host running ovn-controller.
OVN northbound db provides a table 'DHCP_Options' to store the DHCP options.
Logical switch port has a reference to this table.
When a subnet is created and enable_dhcp is True, a new entry is created in
this table. The 'options' column stores the DHCPv4 options. These DHCPv4
options are included in the DHCPv4 reply by the ovn-controller when the VIF
attached to the logical switch port sends a DHCPv4 request.
In order to map the DHCP_Options row with the subnet, the OVN ML2 driver
stores the subnet id in the 'external_ids' column.
When a new port is created, the 'dhcpv4_options' column of the logical switch
port refers to the DHCP_Options row created for the subnet of the port.
If the port has multiple IPv4 subnets, then the first subnet in the 'fixed_ips'
is used.
If the port has extra DHCPv4 options defined, then a new entry is created
in the DHCP_Options table for the port. The default DHCP options are obtained
from the subnet DHCP_Options table and the extra DHCPv4 options of the port
are overridden. In order to map the port DHCP_Options row with the port,
the OVN ML2 driver stores both the subnet id and port id in the 'external_ids'
column.
If an admin wants to disable native OVN DHCPv4 for any particular port, then the
admin needs to define 'dhcp_disabled' with the value 'true' in the extra
DHCP options.
Ex. neutron port-update <PORT_ID> \
--extra-dhcp-opt ip_version=4,opt_name=dhcp_disabled,opt_value=true
DHCPv6
------
OVN implements native DHCPv6 support similar to DHCPv4. When a v6 subnet is
created, the OVN ML2 driver will insert a new entry into the DHCP_Options table
only when the subnet 'ipv6_address_mode' is not 'slaac' and enable_dhcp is
True.

View File

@ -1,81 +0,0 @@
OVN Neutron Worker and Port status handling
===========================================
When the logical switch port's VIF is attached to or removed from the OVN
integration bridge, ovn-northd updates Logical_Switch_Port.up to 'True'
or 'False' accordingly.
In order for the OVN Neutron ML2 driver to update the corresponding neutron
port's status to 'ACTIVE' or 'DOWN' in the db, it needs to monitor the
OVN Northbound db. A neutron worker is created for this purpose.
The implementation of the ovn worker can be found here -
'networking_ovn.ovsdb.ovsdb_monitor.OvnWorker'.
The Neutron service will create 'n' API workers, 'm' RPC workers and one OVN
worker (all of these workers are separate processes).
Api workers and rpc workers will create ovsdb idl client object
('ovs.db.idl.Idl') to connect to the OVN_Northbound db.
See 'networking_ovn.ovsdb.impl_idl_ovn.OvsdbNbOvnIdl' and
'ovsdbapp.backend.ovs_idl.connection.Connection' classes for more details.
The OVN worker will create a 'networking_ovn.ovsdb.ovsdb_monitor.OvnIdl' class
object (which inherits from 'ovs.db.idl.Idl') to connect to the
OVN_Northbound db. On receiving OVN_Northbound db updates from the
ovsdb-server, the 'notify' function of 'OvnIdl' is called by the parent class
object.
OvnIdl.notify() function passes the received events to the
ovsdb_monitor.OvnDbNotifyHandler class.
ovsdb_monitor.OvnDbNotifyHandler checks for any changes in
the 'Logical_Switch_Port.up' and updates the neutron port's status accordingly.
If 'notify_nova_on_port_status_changes' configuration is set, then neutron
would notify nova on port status changes.
ovsdb locks
-----------
If there are multiple neutron servers running, then each neutron server will
have one ovn worker which listens for the notify events. When the
'Logical_Switch_Port.up' is updated by ovn-northd, we do not want all the
neutron servers to handle the event and update the neutron port status.
In order for only one neutron server to handle the events, ovsdb locks are
used.
At start, each neutron server's ovn worker will try to acquire a lock with id -
'neutron_ovn_event_lock'. The ovn worker which has acquired the lock will
handle the notify events.
In case the neutron server with the lock dies, ovsdb-server will assign the
lock to another neutron server in the queue.
More details about the ovsdb locks can be found here [1] and [2]
[1] - https://tools.ietf.org/html/draft-pfaff-ovsdb-proto-04#section-4.1.8
[2] - https://github.com/openvswitch/ovs/blob/branch-2.4/python/ovs/db/idl.py#L67
One thing to note is that the ovn worker (with OvnIdl) does not carry out any
transactions to the OVN Northbound db.
Since the api and rpc workers are not configured with any locks,
using the ovsdb lock on the OVN_Northbound and OVN_Southbound DBs by the ovn
workers will not have any side effects to the transactions done by these api
and rpc workers.
Handling port status changes when neutron server(s) are down
------------------------------------------------------------
When the neutron server starts, the ovn worker would receive a dump of all
logical switch ports as events. 'ovsdb_monitor.OvnDbNotifyHandler' would
sync up if there are any inconsistencies in the port status.
OVN Southbound DB Access
------------------------
The OVN Neutron ML2 driver needs to acquire chassis information (hostname
and physnets combinations). This is required initially to support routed
networks. Thus, the plugin will initiate and maintain a connection to the OVN
SB DB during startup.

View File

@ -1,10 +0,0 @@
=========================
Contributor Documentation
=========================
.. toctree::
:maxdepth: 2
contributing
testing
design/index

View File

@ -1,658 +0,0 @@
Testing with DevStack
=====================
This document describes how to test OpenStack with OVN using DevStack. We will
start by describing how to test on a single host.
Single Node Test Environment
----------------------------
1. Create a test system.
It's best to use a throwaway dev system for running DevStack. Your best bet is
to use either CentOS 7 or the latest Ubuntu LTS (16.04, Xenial).
2. Create the ``stack`` user.
::
$ git clone https://git.openstack.org/openstack-dev/devstack.git
$ sudo ./devstack/tools/create-stack-user.sh
3. Switch to the ``stack`` user and clone DevStack and networking-ovn.
::
$ sudo su - stack
$ git clone https://git.openstack.org/openstack-dev/devstack.git
$ git clone https://git.openstack.org/openstack/networking-ovn.git
4. Configure DevStack to use networking-ovn.
networking-ovn comes with a sample DevStack configuration file you can start
with. For example, you may want to set some values for the various PASSWORD
variables in that file so DevStack doesn't have to prompt you for them. Feel
free to edit it if you'd like, but it should work as-is.
::
$ cd devstack
$ cp ../networking-ovn/devstack/local.conf.sample local.conf
5. Run DevStack.
This is going to take a while. It installs a bunch of packages, clones a bunch
of git repos, and installs everything from these git repos.
::
$ ./stack.sh
Once DevStack completes successfully, you should see output that looks
something like this::
This is your host IP address: 172.16.189.6
This is your host IPv6 address: ::1
Horizon is now available at http://172.16.189.6/dashboard
Keystone is serving at http://172.16.189.6/identity/
The default users are: admin and demo
The password: password
2017-03-09 15:10:54.117 | stack.sh completed in 2110 seconds.
Environment Variables
---------------------
Once DevStack finishes successfully, we're ready to start interacting with
OpenStack APIs. OpenStack provides a set of command line tools for interacting
with these APIs. DevStack provides a file you can source to set up the right
environment variables to make the OpenStack command line tools work.
::
$ . openrc
If you're curious what environment variables are set, they generally start with
an OS prefix::
$ env | grep OS
OS_REGION_NAME=RegionOne
OS_IDENTITY_API_VERSION=2.0
OS_PASSWORD=password
OS_AUTH_URL=http://192.168.122.8:5000/v2.0
OS_USERNAME=demo
OS_TENANT_NAME=demo
OS_VOLUME_API_VERSION=2
OS_CACERT=/opt/stack/data/CA/int-ca/ca-chain.pem
OS_NO_CACHE=1
Default Network Configuration
-----------------------------
By default, DevStack creates networks called ``private`` and ``public``.
Run the following command to see the existing networks::
$ openstack network list
+--------------------------------------+---------+----------------------------------------------------------------------------+
| ID | Name | Subnets |
+--------------------------------------+---------+----------------------------------------------------------------------------+
| 40080dad-0064-480a-b1b0-592ae51c1471 | private | 5ff81545-7939-4ae0-8365-1658d45fa85c, da34f952-3bfc-45bb-b062-d2d973c1a751 |
| 7ec986dd-aae4-40b5-86cf-8668feeeab67 | public | 60d0c146-a29b-4cd3-bd90-3745603b1a4b, f010c309-09be-4af2-80d6-e6af9c78bae7 |
+--------------------------------------+---------+----------------------------------------------------------------------------+
A Neutron network is implemented as an OVN logical switch. networking-ovn
creates logical switches with a name in the format neutron-<network UUID>.
We can use ``ovn-nbctl`` to list the configured logical switches and see that
their names correlate with the output from ``openstack network list``::
$ ovn-nbctl ls-list
71206f5c-b0e6-49ce-b572-eb2e964b2c4e (neutron-40080dad-0064-480a-b1b0-592ae51c1471)
8d8270e7-fd51-416f-ae85-16565200b8a4 (neutron-7ec986dd-aae4-40b5-86cf-8668feeeab67)
$ ovn-nbctl get Logical_Switch neutron-40080dad-0064-480a-b1b0-592ae51c1471 external_ids
{"neutron:network_name"=private}
Booting VMs
-----------
In this section we'll go through the steps to create two VMs that have a
virtual NIC attached to the ``private`` Neutron network.
DevStack uses libvirt as the Nova backend by default. If KVM is available, it
will be used. Otherwise, it will just run QEMU-emulated guests. This is
perfectly fine for our testing, as we only need these VMs to be able to send
and receive a small amount of traffic, so performance is not very important.
1. Get the Network UUID.
Start by getting the UUID for the ``private`` network from the output of
``openstack network list`` from earlier and save it off::
$ PRIVATE_NET_ID=40080dad-0064-480a-b1b0-592ae51c1471
2. Create an SSH keypair.
Next create an SSH keypair in Nova. Later, when we boot a VM, we'll ask that
the public key be put in the VM so we can SSH into it.
::
$ openstack keypair create demo > id_rsa_demo
$ chmod 600 id_rsa_demo
3. Choose a flavor.
We need minimal resources for these test VMs, so the ``m1.nano`` flavor is
sufficient.
::
$ openstack flavor list
+----+-----------+-------+------+-----------+-------+-----------+
| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+-----------+-------+------+-----------+-------+-----------+
| 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
| 2 | m1.small | 2048 | 20 | 0 | 1 | True |
| 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
| 4 | m1.large | 8192 | 80 | 0 | 4 | True |
| 42 | m1.nano | 64 | 0 | 0 | 1 | True |
| 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
| 84 | m1.micro | 128 | 0 | 0 | 1 | True |
| c1 | cirros256 | 256 | 0 | 0 | 1 | True |
| d1 | ds512M | 512 | 5 | 0 | 1 | True |
| d2 | ds1G | 1024 | 10 | 0 | 1 | True |
| d3 | ds2G | 2048 | 10 | 0 | 2 | True |
| d4 | ds4G | 4096 | 20 | 0 | 4 | True |
+----+-----------+-------+------+-----------+-------+-----------+
$ FLAVOR_ID=42
4. Choose an image.
DevStack imports the CirrOS image by default, which is perfect for our testing.
It's a very small test image.
::
$ openstack image list
+--------------------------------------+--------------------------+--------+
| ID | Name | Status |
+--------------------------------------+--------------------------+--------+
| 849a8db2-3754-4cf6-9271-491fa4ff7195 | cirros-0.3.5-x86_64-disk | active |
+--------------------------------------+--------------------------+--------+
$ IMAGE_ID=849a8db2-3754-4cf6-9271-491fa4ff7195
5. Set up a security rule so that we can access the VMs we will boot up next.
By default, DevStack does not allow users to access VMs. To enable access, we
will need to add a rule. We will allow both ICMP and SSH.
::
$ openstack security group rule create --ingress --ethertype IPv4 --dst-port 22 --protocol tcp default
$ openstack security group rule create --ingress --ethertype IPv4 --protocol ICMP default
$ openstack security group rule list
+--------------------------------------+-------------+-----------+------------+--------------------------------------+--------------------------------------+
| ID | IP Protocol | IP Range | Port Range | Remote Security Group | Security Group |
+--------------------------------------+-------------+-----------+------------+--------------------------------------+--------------------------------------+
...
| ade97198-db44-429e-9b30-24693d86d9b1 | tcp | 0.0.0.0/0 | 22:22 | None | a47b14da-5607-404a-8de4-3a0f1ad3649c |
| d0861a98-f90e-4d1a-abfb-827b416bc2f6 | icmp | 0.0.0.0/0 | | None | a47b14da-5607-404a-8de4-3a0f1ad3649c |
...
+--------------------------------------+-------------+-----------+------------+--------------------------------------+--------------------------------------+
$ neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 22 --port-range-max 22 --protocol tcp default
$ neutron security-group-rule-create --direction ingress --ethertype IPv4 --protocol ICMP default
$ neutron security-group-rule-list
+--------------------------------------+----------------+-----------+-----------+---------------+-----------------+
| id | security_group | direction | ethertype | protocol/port | remote |
+--------------------------------------+----------------+-----------+-----------+---------------+-----------------+
| 8b2edbe6-790e-40ef-af54-c7b64ced8240 | default | ingress | IPv4 | 22/tcp | any |
| 5bee0179-807b-41d7-ab16-6de6ac051335 | default | ingress | IPv4 | icmp | any |
...
+--------------------------------------+----------------+-----------+-----------+---------------+-----------------+
6. Boot some VMs.
Now we will boot two VMs. We'll name them ``test1`` and ``test2``.
::
$ openstack server create --nic net-id=$PRIVATE_NET_ID --flavor $FLAVOR_ID --image $IMAGE_ID --key-name demo test1
+-----------------------------+-----------------------------------------------------------------+
| Field | Value |
+-----------------------------+-----------------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-STS:power_state | NOSTATE |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | None |
| OS-SRV-USG:terminated_at | None |
| accessIPv4 | |
| accessIPv6 | |
| addresses | |
| adminPass | BzAWWA6byGP6 |
| config_drive | |
| created | 2017-03-09T16:56:08Z |
| flavor | m1.nano (42) |
| hostId | |
| id | d8b8084e-58ff-44f4-b029-a57e7ef6ba61 |
| image | cirros-0.3.5-x86_64-disk (849a8db2-3754-4cf6-9271-491fa4ff7195) |
| key_name | demo |
| name | test1 |
| progress | 0 |
| project_id | b6522570f7344c06b1f24303abf3c479 |
| properties | |
| security_groups | name='default' |
| status | BUILD |
| updated | 2017-03-09T16:56:08Z |
| user_id | c68f77f1d85e43eb9e5176380a68ac1f |
| volumes_attached | |
+-----------------------------+-----------------------------------------------------------------+
$ openstack server create --nic net-id=$PRIVATE_NET_ID --flavor $FLAVOR_ID --image $IMAGE_ID --key-name demo test2
+-----------------------------+-----------------------------------------------------------------+
| Field | Value |
+-----------------------------+-----------------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-STS:power_state | NOSTATE |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | None |
| OS-SRV-USG:terminated_at | None |
| accessIPv4 | |
| accessIPv6 | |
| addresses | |
| adminPass | YB8dmt5v88JV |
| config_drive | |
| created | 2017-03-09T16:56:50Z |
| flavor | m1.nano (42) |
| hostId | |
| id | 170d4f37-9299-4a08-b48b-2b90fce8e09b |
| image | cirros-0.3.5-x86_64-disk (849a8db2-3754-4cf6-9271-491fa4ff7195) |
| key_name | demo |
| name | test2 |
| progress | 0 |
| project_id | b6522570f7344c06b1f24303abf3c479 |
| properties | |
| security_groups | name='default' |
| status | BUILD |
| updated | 2017-03-09T16:56:51Z |
| user_id | c68f77f1d85e43eb9e5176380a68ac1f |
| volumes_attached | |
+-----------------------------+-----------------------------------------------------------------+
Once both VMs have been started, they will have a status of ``ACTIVE``::
$ openstack server list
+--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+
| ID | Name | Status | Networks | Image Name |
+--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+
| 170d4f37-9299-4a08-b48b-2b90fce8e09b | test2 | ACTIVE | private=fd5d:9d1b:457c:0:f816:3eff:fe24:49df, 10.0.0.3 | cirros-0.3.5-x86_64-disk |
| d8b8084e-58ff-44f4-b029-a57e7ef6ba61 | test1 | ACTIVE | private=fd5d:9d1b:457c:0:f816:3eff:fe3f:953d, 10.0.0.10 | cirros-0.3.5-x86_64-disk |
+--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+
Our two VMs have addresses of ``10.0.0.3`` and ``10.0.0.10``. If we list
Neutron ports, there are two new ports with these addresses associated
with them::
$ openstack port list
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
| ID | Name | MAC Address | Fixed IP Addresses | Status |
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
...
| 97c970b0-485d-47ec-868d-783c2f7acde3 | | fa:16:3e:3f:95:3d | ip_address='10.0.0.10', subnet_id='da34f952-3bfc-45bb-b062-d2d973c1a751' | ACTIVE |
| | | | ip_address='fd5d:9d1b:457c:0:f816:3eff:fe3f:953d', subnet_id='5ff81545-7939-4ae0-8365-1658d45fa85c' | |
| e003044d-334a-4de3-96d9-35b2d2280454 | | fa:16:3e:24:49:df | ip_address='10.0.0.3', subnet_id='da34f952-3bfc-45bb-b062-d2d973c1a751' | ACTIVE |
| | | | ip_address='fd5d:9d1b:457c:0:f816:3eff:fe24:49df', subnet_id='5ff81545-7939-4ae0-8365-1658d45fa85c' | |
...
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
$ TEST1_PORT_ID=97c970b0-485d-47ec-868d-783c2f7acde3
$ TEST2_PORT_ID=e003044d-334a-4de3-96d9-35b2d2280454
Now we can look at OVN using ``ovn-nbctl`` to see the logical switch ports
that were created for these two Neutron ports. The first part of the output
is the OVN logical switch port UUID. The second part in parentheses is the
logical switch port name. Neutron sets the logical switch port name equal to
the Neutron port ID.
::
$ ovn-nbctl lsp-list neutron-$PRIVATE_NET_ID
...
fde1744b-e03b-46b7-b181-abddcbe60bf2 (97c970b0-485d-47ec-868d-783c2f7acde3)
7ce284a8-a48a-42f5-bf84-b2bca62cd0fe (e003044d-334a-4de3-96d9-35b2d2280454)
...
These two ports correspond to the two VMs we created.
VM Connectivity
---------------
We can connect to our VMs by associating a floating IP address from the public
network.
::
$ openstack floating ip create --port $TEST1_PORT_ID public
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| created_at | 2017-03-09T18:58:12Z |
| description | |
| fixed_ip_address | 10.0.0.10 |
| floating_ip_address | 172.24.4.8 |
| floating_network_id | 7ec986dd-aae4-40b5-86cf-8668feeeab67 |
| id | 24ff0799-5a72-4a5b-abc0-58b301c9aee5 |
| name | None |
| port_id | 97c970b0-485d-47ec-868d-783c2f7acde3 |
| project_id | b6522570f7344c06b1f24303abf3c479 |
| revision_number | 1 |
| router_id | ee51adeb-0dd8-4da0-ab6f-7ce60e00e7b0 |
| status | DOWN |
| updated_at | 2017-03-09T18:58:12Z |
+---------------------+--------------------------------------+
DevStack does not wire up the public network by default, so we must do
that before connecting to this floating IP address.
::
$ sudo ip link set br-ex up
$ sudo ip route add 172.24.4.0/24 dev br-ex
$ sudo ip addr add 172.24.4.1/24 dev br-ex
Now you should be able to connect to the VM via its floating IP address.
First, ping the address.
::
$ ping -c 1 172.24.4.8
PING 172.24.4.8 (172.24.4.8) 56(84) bytes of data.
64 bytes from 172.24.4.8: icmp_seq=1 ttl=63 time=0.823 ms
--- 172.24.4.8 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.823/0.823/0.823/0.000 ms
Now SSH to the VM::
$ ssh -i id_rsa_demo cirros@172.24.4.8 hostname
test1
Adding Another Compute Node
---------------------------
After completing the earlier instructions for setting up devstack, you can use
a second VM to emulate an additional compute node. This is important for OVN
testing as it exercises the tunnels created by OVN between the hypervisors.
Just as before, create a throwaway VM, but make sure that this VM has a
different host name. Having the same host name for both VMs will confuse Nova
and will not produce two hypervisors when you query the hypervisor list later.
Once the VM is setup, create the ``stack`` user::
$ git clone https://git.openstack.org/openstack-dev/devstack.git
$ sudo ./devstack/tools/create-stack-user.sh
Switch to the ``stack`` user and clone DevStack and networking-ovn::
$ sudo su - stack
$ git clone https://git.openstack.org/openstack-dev/devstack.git
$ git clone https://git.openstack.org/openstack/networking-ovn.git
networking-ovn comes with another sample configuration file that can be used
for this::
$ cd devstack
$ cp ../networking-ovn/devstack/computenode-local.conf.sample local.conf
You must set SERVICE_HOST in local.conf. The value should be the IP address of
the main DevStack host. You must also set HOST_IP to the IP address of this
new host. See the text in the sample configuration file for more
information. Once that is complete, run DevStack::
$ cd devstack
$ ./stack.sh
This should complete in less time than before, as it's only running a single
OpenStack service (nova-compute) along with OVN (ovn-controller, ovs-vswitchd,
ovsdb-server). The final output will look something like this::
This is your host IP address: 172.16.189.30
This is your host IPv6 address: ::1
2017-03-09 18:39:27.058 | stack.sh completed in 1149 seconds.
Now go back to your main DevStack host. You can use admin credentials to
verify that the additional hypervisor has been added to the deployment::
$ cd devstack
$ . openrc admin
$ openstack hypervisor list
+----+------------------------+-----------------+---------------+-------+
| ID | Hypervisor Hostname | Hypervisor Type | Host IP | State |
+----+------------------------+-----------------+---------------+-------+
| 1 | centos7-ovn-devstack | QEMU | 172.16.189.6 | up |
| 2 | centos7-ovn-devstack-2 | QEMU | 172.16.189.30 | up |
+----+------------------------+-----------------+---------------+-------+
You can also look at OVN and OVS to see that the second host has shown up. For
example, there will be a second entry in the Chassis table of the
OVN_Southbound database. You can use the ``ovn-sbctl`` utility to list
chassis, their configuration, and the ports bound to each of them::
$ ovn-sbctl show
Chassis "ddc8991a-d838-4758-8d15-71032da9d062"
hostname: "centos7-ovn-devstack"
Encap vxlan
ip: "172.16.189.6"
options: {csum="true"}
Encap geneve
ip: "172.16.189.6"
options: {csum="true"}
Port_Binding "97c970b0-485d-47ec-868d-783c2f7acde3"
Port_Binding "e003044d-334a-4de3-96d9-35b2d2280454"
Port_Binding "cr-lrp-08d1f28d-cc39-4397-b12b-7124080899a1"
Chassis "b194d07e-0733-4405-b795-63b172b722fd"
hostname: "centos7-ovn-devstack-2.os1.phx2.redhat.com"
Encap geneve
ip: "172.16.189.30"
options: {csum="true"}
Encap vxlan
ip: "172.16.189.30"
options: {csum="true"}
You can also see a tunnel created to the other compute node::
$ ovs-vsctl show
...
Bridge br-int
fail_mode: secure
...
Port "ovn-b194d0-0"
Interface "ovn-b194d0-0"
type: geneve
options: {csum="true", key=flow, remote_ip="172.16.189.30"}
...
...
Provider Networks
-----------------
Neutron has a "provider networks" API extension that lets you specify
some additional attributes on a network. These attributes let you
map a Neutron network to a physical network in your environment.
The OVN ML2 driver is adding support for this API extension. It currently
supports "flat" and "vlan" networks.
Here is how you can test it:
First you must create an OVS bridge that provides connectivity to the
provider network on every host running ovn-controller. For trivial
testing this could just be a dummy bridge. In a real environment, you
would want to add a local network interface to the bridge, as well.
::
$ ovs-vsctl add-br br-provider
ovn-controller on each host must be configured with a mapping between
a network name and the bridge that provides connectivity to that network.
In this case we'll create a mapping from the network name "providernet"
to the bridge "br-provider".
::
$ ovs-vsctl set open . \
external-ids:ovn-bridge-mappings=providernet:br-provider
Now create a Neutron provider network.
::
$ neutron net-create provider --shared \
--provider:physical_network providernet \
--provider:network_type flat
Alternatively, you can define connectivity to a VLAN instead of a flat network:
::
$ neutron net-create provider-101 --shared \
--provider:physical_network providernet \
--provider:network_type vlan \
--provider:segmentation_id 101
Observe that the OVN ML2 driver created a special logical switch port of type
localnet on the logical switch to model the connection to the physical network.
::
$ ovn-nbctl show
...
switch 5bbccbbd-f5ca-411b-bad9-01095d6f1316 (neutron-729dbbee-db84-4a3d-afc3-82c0b3701074)
port provnet-729dbbee-db84-4a3d-afc3-82c0b3701074
addresses: ["unknown"]
...
$ ovn-nbctl lsp-get-type provnet-729dbbee-db84-4a3d-afc3-82c0b3701074
localnet
$ ovn-nbctl lsp-get-options provnet-729dbbee-db84-4a3d-afc3-82c0b3701074
network_name=providernet
If VLAN is used, there will be a VLAN tag shown on the localnet port as well.
Finally, create a Neutron port on the provider network.
::
$ neutron port-create provider
or if you followed the VLAN example, it would be:
::
$ neutron port-create provider-101
Run Unit Tests
--------------
Run the unit tests in the local environment with ``tox``.
::
$ tox -e py27
$ tox -e py27 networking_ovn.tests.unit.test_ovn_db_sync
$ tox -e py27 networking_ovn.tests.unit.test_ovn_db_sync.TestOvnSbSyncML2
$ tox -e py27 networking_ovn.tests.unit.test_ovn_db_sync.TestOvnSbSyncML2
.test_ovn_sb_sync
Run Functional Tests
--------------------
You can run the functional tests with ``tox`` in your DevStack environment:
::
$ cd networking_ovn/tests/functional
$ tox -e dsvm-functional
$ tox -e dsvm-functional networking_ovn.tests.functional.test_mech_driver\
.TestPortBinding.test_port_binding_create_port
If you want to run functional tests in your local clean environment, you may
need a new working directory.
::
$ export BASE=/opt/stack
$ mkdir -p /opt/stack/new
$ cd /opt/stack/new
Next, get networking_ovn, neutron and devstack.
::
$ git clone https://git.openstack.org/openstack/networking-ovn.git
$ git clone https://git.openstack.org/openstack/neutron.git
$ git clone https://git.openstack.org/openstack-dev/devstack.git
Then execute the script to prepare the environment.
::
$ cd networking-ovn/
$ ./networking_ovn/tests/contrib/gate_hook.sh
Finally, run the functional tests with ``tox``
::
$ cd networking_ovn/tests/functional
$ tox -e dsvm-functional
$ tox -e dsvm-functional networking_ovn.tests.functional.test_mech_driver\
.TestPortBinding.test_port_binding_create_port
Skydive
-------
`Skydive <https://github.com/skydive-project/skydive>`_ is an open source
real-time network topology and protocols analyzer. It aims to provide a
comprehensive way of understanding what is happening in the network
infrastructure. Skydive works by utilizing agents to collect host-local
information and sending it to a central analyzer for further
analysis. It utilizes Elasticsearch to store the data.
To enable Skydive support with OVN and devstack, enable it on the control
and compute nodes.
On the control node, enable it as follows:
::
enable_plugin skydive https://github.com/skydive-project/skydive.git
enable_service skydive-analyzer
On the compute nodes, enable it as follows:
::
enable_plugin skydive https://github.com/skydive-project/skydive.git
enable_service skydive-agent
Troubleshooting
---------------
If you run into any problems, take a look at our :doc:`/admin/troubleshooting`
page.
Additional Resources
--------------------
See the documentation and other references linked
from the :doc:`/admin/ovn` page.

View File

@ -1,24 +0,0 @@
.. networking-ovn documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
.. the main title comes from README.rst
.. include:: ../../README.rst
Contents
--------
.. toctree::
:maxdepth: 2
admin/index
install/index
contributor/index
.. rubric:: Indices and tables
* :ref:`genindex`
* :ref:`search`

View File

@ -1,329 +0,0 @@
.. _installation:
Install & Configuration
=======================
The ``networking-ovn`` repository includes integration with DevStack that
enables creation of a simple Open Virtual Network (OVN) development and test
environment. This document discusses what is required for manual installation
or integration into a production OpenStack deployment tool of conventional
architectures that include the following types of nodes:
* Controller - Runs OpenStack control plane services such as REST APIs
and databases.
* Network - Runs the layer-2, layer-3 (routing), DHCP, and metadata agents
for the Networking service. Some agents are optional. Usually provides
connectivity between provider (public) and project (private) networks
via NAT and floating IP addresses.
.. note::
Some tools deploy these services on controller nodes.
* Compute - Runs the hypervisor and layer-2 agent for the Networking
service.
Packaging
---------
Open vSwitch (OVS) includes OVN beginning with version 2.5 and considers
it experimental. The Networking service integration for OVN uses an
independent package, typically ``networking-ovn``.
Building OVS from source automatically installs OVN. For deployment tools
using distribution packages, the ``openvswitch-ovn`` package for RHEL/CentOS
and compatible distributions automatically installs ``openvswitch`` as a
dependency. Ubuntu/Debian includes ``ovn-central``, ``ovn-host``,
``ovn-docker``, and ``ovn-common`` packages that pull in the appropriate Open
vSwitch dependencies as needed.
A ``python-networking-ovn`` RPM may be obtained for Fedora or CentOS from
the RDO project. A package based on the ``master`` branch of
``networking-ovn`` can be found at https://trunk.rdoproject.org/.
Fedora and CentOS RPM builds of OVS and OVN from the ``master`` branch of
``ovs`` can be found in this COPR repository:
https://copr.fedorainfracloud.org/coprs/leifmadsen/ovs-master/.
Controller nodes
----------------
Each controller node runs the OVS service (including dependent services such
as ``ovsdb-server``) and the ``ovn-northd`` service. However, only a single
instance of the ``ovsdb-server`` and ``ovn-northd`` services can operate in
a deployment. Deployment tools can, however, implement active/passive
high-availability using a management tool that monitors service health
and automatically starts these services on another node after failure of the
primary node. See the :ref:`faq` for more information.
#. Install the ``openvswitch-ovn`` and ``networking-ovn`` packages.
#. Start the OVS service. The central OVS service starts the ``ovsdb-server``
service that manages OVN databases.
Using the *systemd* unit:
.. code-block:: console
# systemctl start openvswitch
Using the ``ovs-ctl`` script:
.. code-block:: console
# /usr/share/openvswitch/scripts/ovs-ctl start --system-id="random"
#. Configure the ``ovsdb-server`` component. By default, the ``ovsdb-server``
service only permits local access to databases via Unix socket. However,
OVN services on compute nodes require access to these databases.
* Permit remote database access.
.. code-block:: console
# ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6640:IP_ADDRESS
Replace ``IP_ADDRESS`` with the IP address of the management network
interface on the controller node.
.. note::
Permit remote access to TCP port 6640 on any host firewall.
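For example, with ``firewalld`` this might look like the following
(one possibility; adapt to the firewall in use):

.. code-block:: console

   # firewall-cmd --permanent --add-port=6640/tcp
   # firewall-cmd --reload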
#. Start the ``ovn-northd`` service.
Using the *systemd* unit:
.. code-block:: console
# systemctl start ovn-northd
Using the ``ovn-ctl`` script:
.. code-block:: console
# /usr/share/openvswitch/scripts/ovn-ctl start_northd
Options for *start_northd*:
.. code-block:: console
# /usr/share/openvswitch/scripts/ovn-ctl start_northd --help
# ...
# DB_NB_SOCK="/usr/local/etc/openvswitch/nb_db.sock"
# DB_NB_PID="/usr/local/etc/openvswitch/ovnnb_db.pid"
# DB_SB_SOCK="/usr/local/etc/openvswitch/sb_db.sock"
# DB_SB_PID="/usr/local/etc/openvswitch/ovnsb_db.pid"
# ...
#. Configure the Networking server component. The Networking service
implements OVN as an ML2 driver. Edit the ``/etc/neutron/neutron.conf``
file:
* Enable the ML2 core plug-in.
.. code-block:: ini
[DEFAULT]
...
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
* Enable the OVN layer-3 service.
.. code-block:: ini
[DEFAULT]
...
service_plugins = networking_ovn.l3.l3_ovn.OVNL3RouterPlugin
#. Configure the ML2 plug-in. Edit the
``/etc/neutron/plugins/ml2/ml2_conf.ini`` file:
* Configure the OVN mechanism driver, network type drivers, self-service
(tenant) network types, and enable the port security extension.
.. code-block:: ini
[ml2]
...
mechanism_drivers = ovn
type_drivers = local,flat,vlan,geneve
tenant_network_types = geneve
extension_drivers = port_security
overlay_ip_version = 4
.. note::
To enable VLAN self-service networks, add ``vlan`` to the
``tenant_network_types`` option. The first network type
in the list becomes the default self-service network type.
To use IPv6 for all overlay (tunnel) network endpoints,
set the ``overlay_ip_version`` option to ``6``.
* Configure the Geneve ID range and maximum header size. The IP version
overhead (20 bytes for IPv4, the default, or 40 bytes for IPv6) is added
to the maximum header size based on the ML2 ``overlay_ip_version``
option.
.. code-block:: ini
[ml2_type_geneve]
...
vni_ranges = 1:65536
max_header_size = 38
.. note::
The Networking service uses the ``vni_ranges`` option to allocate
network segments. However, OVN ignores the actual values. Thus, the ID
range only determines the quantity of Geneve networks in the
environment. For example, a range of ``5001:6000`` defines a maximum
of 1000 Geneve networks.
* Optionally, enable support for VLAN provider and self-service
networks on one or more physical networks. If you specify only
the physical network, only administrative (privileged) users can
manage VLAN networks. Additionally specifying a VLAN ID range for
a physical network enables regular (non-privileged) users to
manage VLAN networks. The Networking service allocates the VLAN ID
for each self-service network using the VLAN ID range for the
physical network.
.. code-block:: ini
[ml2_type_vlan]
...
network_vlan_ranges = PHYSICAL_NETWORK:MIN_VLAN_ID:MAX_VLAN_ID
Replace ``PHYSICAL_NETWORK`` with the physical network name and
optionally define the minimum and maximum VLAN IDs. Use a comma
to separate each physical network.
For example, to enable support for administrative VLAN networks
on the ``physnet1`` network and self-service VLAN networks on
the ``physnet2`` network using VLAN IDs 1001 to 2000:
.. code-block:: ini
network_vlan_ranges = physnet1,physnet2:1001:2000
* Enable security groups.
.. code-block:: ini
[securitygroup]
...
enable_security_group = true
.. note::
The ``firewall_driver`` option under ``[securitygroup]`` is ignored
since the OVN ML2 driver itself handles security groups.
* Configure OVS database access and the L3 scheduler.
.. code-block:: ini
[ovn]
...
ovn_nb_connection = tcp:IP_ADDRESS:6641
ovn_sb_connection = tcp:IP_ADDRESS:6642
ovn_l3_scheduler = OVN_L3_SCHEDULER
.. note::
Replace ``IP_ADDRESS`` with the IP address of the controller node that
runs the ``ovsdb-server`` service. Replace ``OVN_L3_SCHEDULER`` with
``leastloaded`` if you want the scheduler to select a compute node with
the least number of gateway ports or ``chance`` if you want the
scheduler to randomly select a compute node from the available list of
compute nodes.
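   For illustration, a filled-in ``[ovn]`` section using the hypothetical
   management address ``192.0.2.50`` (an example value, not one this guide
   mandates) might look like:

   .. code-block:: ini

      [ovn]
      ...
      ovn_nb_connection = tcp:192.0.2.50:6641
      ovn_sb_connection = tcp:192.0.2.50:6642
      ovn_l3_scheduler = leastloaded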
#. Start the ``neutron-server`` service.
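   Using the *systemd* unit (the unit name can vary by distribution):

   .. code-block:: console

      # systemctl start neutron-server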
Network nodes
-------------
Deployments using OVN native layer-3 and DHCP services do not require
conventional network nodes because connectivity to external networks
(including VTEP gateways) and routing occur on compute nodes.
Compute nodes
-------------
Each compute node runs the OVS and ``ovn-controller`` services. The
``ovn-controller`` service replaces the conventional OVS layer-2 agent.
#. Install the ``openvswitch-ovn`` and ``networking-ovn`` packages.
#. Start the OVS service.
Using the *systemd* unit:
.. code-block:: console
# systemctl start openvswitch
Using the ``ovs-ctl`` script:
.. code-block:: console
# /usr/share/openvswitch/scripts/ovs-ctl start --system-id="random"
#. Configure the OVS service.
* Use OVS databases on the controller node.
.. code-block:: console
# ovs-vsctl set open . external-ids:ovn-remote=tcp:IP_ADDRESS:6642
Replace ``IP_ADDRESS`` with the IP address of the controller node
that runs the ``ovsdb-server`` service.
* Enable one or more overlay network protocols. At a minimum, OVN requires
enabling the ``geneve`` protocol. Deployments using VTEP gateways should
also enable the ``vxlan`` protocol.
.. code-block:: console
# ovs-vsctl set open . external-ids:ovn-encap-type=geneve,vxlan
.. note::
Deployments without VTEP gateways can safely enable both protocols.
* Configure the overlay network local endpoint IP address.
.. code-block:: console
# ovs-vsctl set open . external-ids:ovn-encap-ip=IP_ADDRESS
Replace ``IP_ADDRESS`` with the IP address of the overlay network
interface on the compute node.
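   As a quick sanity check, the configured keys can be read back from the
   local OVS database; the output below is illustrative and uses
   hypothetical addresses:

   .. code-block:: console

      # ovs-vsctl get open . external-ids
      {ovn-encap-ip="192.0.2.10", ovn-encap-type="geneve,vxlan", ovn-remote="tcp:192.0.2.50:6642"}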
#. Start the ``ovn-controller`` service.
Using the *systemd* unit:
.. code-block:: console
# systemctl start ovn-controller
Using the ``ovn-ctl`` script:
.. code-block:: console
# /usr/share/openvswitch/scripts/ovn-ctl start_controller
Verify operation
----------------
#. Each compute node should contain an ``ovn-controller`` instance.
.. code-block:: console
# ovn-sbctl show
<output>
View File
@ -1,178 +0,0 @@
[DEFAULT]
#
# From networking_ovn.metadata.agent
#
# Location for Metadata Proxy UNIX domain socket. (string value)
#metadata_proxy_socket = $state_path/metadata_proxy
# User (uid or name) running metadata proxy after its initialization (if empty:
# agent effective user). (string value)
#metadata_proxy_user =
# Group (gid or name) running metadata proxy after its initialization (if
# empty: agent effective group). (string value)
#metadata_proxy_group =
# Name of Open vSwitch bridge to use (string value)
#ovs_integration_bridge = br-int
# Certificate Authority public key (CA cert) file for ssl (string value)
#auth_ca_cert = <None>
# IP address or DNS name of Nova metadata server. (unknown value)
# Deprecated group/name - [DEFAULT]/nova_metadata_ip
#nova_metadata_host = 127.0.0.1
# TCP Port used by Nova metadata server. (port value)
# Minimum value: 0
# Maximum value: 65535
#nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses the same config key, but in [neutron] section.
# (string value)
#metadata_proxy_shared_secret =
# Protocol to access nova metadata, http or https (string value)
# Allowed values: http, https
#nova_metadata_protocol = http
# Allow to perform insecure SSL (https) requests to nova metadata (boolean
# value)
#nova_metadata_insecure = false
# Client certificate for nova metadata api server. (string value)
#nova_client_cert =
# Private key of client certificate. (string value)
#nova_client_priv_key =
# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce
# mode from metadata_proxy_user/group values, 'user': set metadata proxy socket
# mode to 0o644, to use when metadata_proxy_user is agent effective user or
# root, 'group': set metadata proxy socket mode to 0o664, to use when
# metadata_proxy_group is agent effective group or root, 'all': set metadata
# proxy socket mode to 0o666, to use otherwise. (string value)
# Allowed values: deduce, user, group, all
#metadata_proxy_socket_mode = deduce
# Number of separate worker processes for metadata server (defaults to half of
# the number of CPUs) (integer value)
#metadata_workers = 1
# Number of backlog requests to configure the metadata server socket with
# (integer value)
#metadata_backlog = 4096
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false
# Enable journald for logging. If running in a systemd environment you may wish
# to enable journal support. Doing so will use the journal native protocol
# which includes structured metadata in addition to log messages. This option is
# ignored if log_config_append is set. (boolean value)
#use_journal = false
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Interval, number of seconds, of log rate limiting. (integer value)
#rate_limit_interval = 0
# Maximum number of logged messages per rate_limit_interval. (integer value)
#rate_limit_burst = 0
# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
# or empty string. Logs with level greater or equal to rate_limit_except_level
# are not filtered. An empty string means that all levels are filtered. (string
# value)
#rate_limit_except_level = CRITICAL
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
View File
@ -1,6 +0,0 @@
[DEFAULT]
output_file = etc/metadata_agent.ini.sample
wrap_width = 79
namespace = networking_ovn.metadata.agent
namespace = oslo.log
View File
@ -1,40 +0,0 @@
Migration from ML2/OVS to ML2/OVN
=================================
A proof-of-concept Ansible playbook for migrating an OpenStack deployment
from ML2/OVS to ML2/OVN.
Prerequisites:
1. Ansible 2.2 or greater.
2. ML2/OVS must be using the OVS firewall driver.
To use:
1. Create an ansible inventory with the expected set of groups and variables
as indicated by the hosts-sample file.
2. Run the playbook::
$ ansible-playbook migrate-to-ovn.yml -i hosts
Testing Status:
- Tested on an RDO cloud on CentOS 7.3 based on Ocata.
- The cloud had 3 controller nodes and 6 compute nodes.
- Observed network downtime was 10 seconds.
- The "--forks 10" option was used with ansible-playbook to ensure
that commands could be run across the entire environment in parallel.
MTU:
- If migrating an ML2/OVS deployment using VXLAN tenant networks
to an OVN deployment using Geneve for tenant networks, there is
an unresolved issue around MTU. The VXLAN overhead is 30 bytes,
while OVN with Geneve has an overhead of 38 bytes. The tenant
network MTU must be adjusted for OVN, and all VMs must receive
the updated MTU value through DHCP, before the migration can take
place (see the worked example below). For testing purposes, we
simply patched the Neutron code to report the VXLAN overhead as
38 bytes instead of 30, bypassing the issue at migration time.
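- As a worked example (assuming a typical 1500-byte physical MTU and
IPv4 tunnel endpoints; the arithmetic uses the overheads above plus
the 20-byte IPv4 header): a VXLAN tenant network gets an MTU of
1500 - 20 - 30 = 1450, while the same network on OVN/Geneve gets
1500 - 20 - 38 = 1442. Each VM therefore has to lower its MTU by
8 bytes before the migration can proceed.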
View File
@ -1,37 +0,0 @@
# All controller nodes running OpenStack control services, particularly
# neutron-api. Also indicate which controller you'd like to have run
# the OVN central control services.
[controller]
overcloud-controller-0 ovn_central=true
overcloud-controller-1
overcloud-controller-2
# All compute nodes. We will replace the openvswitch agent
# with ovn-controller on these nodes.
#
# The ovn_encap_ip variable should be filled in with the IP
# address that other compute hosts should use as the tunnel
# endpoint for tunnels to that host.
[compute]
overcloud-novacompute-0 ovn_encap_ip=192.0.2.10
overcloud-novacompute-1 ovn_encap_ip=192.0.2.11
overcloud-novacompute-2 ovn_encap_ip=192.0.2.12
overcloud-novacompute-3 ovn_encap_ip=192.0.2.13
overcloud-novacompute-4 ovn_encap_ip=192.0.2.14
overcloud-novacompute-5 ovn_encap_ip=192.0.2.15
# Configure bridge mappings to be used on compute hosts.
[compute:vars]
ovn_bridge_mappings=net1:br-em1
is_compute_node=true
[overcloud:children]
controller
compute
# Fill in "ovn_db_ip" with an IP address on a management network
# that the controller and compute nodes should reach. This address
# should not be reachable otherwise.
[overcloud:vars]
ovn_db_ip=192.0.2.50
remote_user=heat-admin
View File
@ -1,187 +0,0 @@
# Migrate a Neutron deployment using ML2/OVS to OVN.
#
# See hosts-sample for expected contents of the ansible inventory.
---
- hosts: compute
remote_user: "{{ remote_user }}"
become: true
tasks:
- name: Ensure OVN packages are installed on compute nodes.
yum:
name: openvswitch-ovn-host
state: present
# TODO to make ansible-lint happy, all of these commands should be conditionally run
# only if the config value needs to be changed.
- name: Configure ovn-encap-type.
command: "ovs-vsctl set open . external_ids:ovn-encap-type=geneve"
- name: Configure ovn-encap-ip.
command: "ovs-vsctl set open . external_ids:ovn-encap-ip={{ ovn_encap_ip }}"
- name: Configure ovn-remote.
command: "ovs-vsctl set open . external_ids:ovn-remote=tcp:{{ ovn_db_ip }}:6642"
# TODO We could discover the appropriate value for ovn-bridge-mappings based on
# the openvswitch agent configuration instead of requiring it to be configured
# in the inventory.
- name: Configure ovn-bridge-mappings.
command: "ovs-vsctl set open . external_ids:ovn-bridge-mappings={{ ovn_bridge_mappings }}"
- name: Get hostname
shell: hostname -f
register: hostname
check_mode: no
- name: Set host name
command: "ovs-vsctl set Open_vSwitch . external-ids:hostname={{ hostname.stdout }}"
# TODO ansible has an "iptables" module, but it does not allow you specify a "rule number"
# which we require here.
- name: Open Geneve UDP port for tunneling.
command: iptables -I INPUT 10 -m state --state NEW -p udp --dport 6081 -j ACCEPT
- name: Persist our iptables changes after a reboot
shell: iptables-save > /etc/sysconfig/iptables.save
# TODO Remove this once the metadata API is supported.
# https://bugs.launchpad.net/networking-ovn/+bug/1562132
- name: Force config drive until the metadata API is supported.
ini_file:
dest: /etc/nova/nova.conf
section: DEFAULT
option: force_config_drive
value: true
- name: Restart nova-compute service to reflect force_config_drive value.
systemd:
name: openstack-nova-compute
state: restarted
enabled: yes
- hosts: controller
remote_user: "{{ remote_user }}"
become: true
tasks:
- name: Ensure OVN packages are installed on the central OVN host.
when: ovn_central is defined
yum:
name: openvswitch-ovn-central
state: present
# TODO Set up SSL for OVN databases
# TODO ansible has an "iptables" module, but it does not allow you specify a "rule number"
# which we require here.
- name: Open OVN database ports.
command: "iptables -I INPUT 10 -m state --state NEW -p tcp --dport {{ item }} -j ACCEPT"
with_items: [ 6641, 6642 ]
- name: Persist our iptables changes after a reboot
shell: iptables-save > /etc/sysconfig/iptables.save
# TODO Integrate HA support for the OVN control services.
- name: Start ovn-northd and the OVN databases.
when: ovn_central is defined
systemd:
name: ovn-northd
state: started
enabled: yes
- name: Enable remote access to the northbound database.
command: "ovn-nbctl set-connection ptcp:6641:{{ ovn_db_ip }}"
when: ovn_central is defined
- name: Enable remote access to the southbound database.
command: "ovn-sbctl set-connection ptcp:6642:{{ ovn_db_ip }}"
when: ovn_central is defined
- name: Ensure the Neutron ML2 plugin is installed on neutron-api hosts.
yum:
name: python-networking-ovn
state: present
- name: Update Neutron configuration files
ini_file: dest={{ item.dest }} section={{ item.section }} option={{ item.option }} value={{ item.value }}
with_items:
- { dest: '/etc/neutron/neutron.conf', section: 'DEFAULT', option: 'service_plugins', value: 'qos,networking_ovn.l3.l3_ovn.OVNL3RouterPlugin' }
- { dest: '/etc/neutron/neutron.conf', section: 'DEFAULT', option: 'notification_drivers', value: 'ovn-qos' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2', option: 'mechanism_drivers', value: 'ovn' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2', option: 'type_drivers', value: 'geneve,vxlan,vlan,flat' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2', option: 'tenant_network_types', value: 'geneve' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2_type_geneve', option: 'vni_ranges', value: '1:65536' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2_type_geneve', option: 'max_header_size', value: '38' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovn_nb_connection', value: '"tcp:{{ ovn_db_ip }}:6641"' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovn_sb_connection', value: '"tcp:{{ ovn_db_ip }}:6642"' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovsdb_connection_timeout', value: '180' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'neutron_sync_mode', value: 'repair' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovn_l3_mode', value: 'true' }
- { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'vif_type', value: 'ovs' }
- name: Note that API downtime begins now.
debug:
msg: NEUTRON API DOWNTIME STARTING NOW FOR THIS HOST
- name: Shut down neutron-server so that we can begin data sync to OVN.
systemd:
name: neutron-server
state: stopped
- hosts: controller
remote_user: "{{ remote_user }}"
become: true
tasks:
- name: Sync Neutron state to OVN.
when: ovn_central is defined
command: neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
- hosts: overcloud
remote_user: "{{ remote_user }}"
become: true
tasks:
- name: Note that data plane impact starts now.
debug:
msg: DATA PLANE IMPACT BEGINS NOW.
- name: Stop metadata, DHCP, L3 and openvswitch agent if needed.
systemd: name={{ item.name }} state={{ item.state }} enabled=no
with_items:
- { name: 'neutron-metadata-agent', state: 'stopped' }
- { name: 'neutron-dhcp-agent', state: 'stopped' }
- { name: 'neutron-l3-agent', state: 'stopped' }
- { name: 'neutron-openvswitch-agent', state: 'stopped' }
- hosts: compute
remote_user: "{{ remote_user }}"
become: true
tasks:
- name: Note that data plane is being restored.
debug:
msg: DATA PLANE IS NOW BEING RESTORED.
- name: Delete br-tun as it is no longer used.
command: "ovs-vsctl del-br br-tun"
- name: Reset OpenFlow protocol version before ovn-controller takes over.
with_items: [ br-int, br-ex ]
command: "ovs-vsctl set Bridge {{ item }} protocols=[]"
ignore_errors: True
- name: Start ovn-controller.
systemd:
name: ovn-controller
state: started
enabled: yes
- hosts: controller
remote_user: "{{ remote_user }}"
become: true
tasks:
# TODO The sync util scheduling gateway routers depends on this patch:
# https://review.openstack.org/#/c/427020/
# If the patch is not merged, this command is harmless, but the gateway
# routers won't get scheduled until later when neutron-server starts.
- name: Schedule gateway routers by running the sync util.
when: ovn_central is defined
command: neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
- hosts: overcloud
remote_user: "{{ remote_user }}"
become: true
tasks:
# TODO Make this smarter so that it only deletes net namespaces that were
# created by neutron. In the simple case, this is fine, but will break
# once containers are in use on the overcloud.
- name: Delete network namespaces.
command: ip -all netns delete
- hosts: controller
remote_user: "{{ remote_user }}"
become: true
tasks:
- name: Note that the Neutron API is coming back online.
debug:
msg: THE NEUTRON API IS NOW BEING RESTORED.
- name: Start neutron-server.
systemd:
name: neutron-server
state: started
# TODO In our grenade script we had to restart rabbitmq. Is that needed?
View File
@ -1,19 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'networking_ovn').version_string()
View File
@ -1,32 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
DOMAIN = "networking_ovn"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
View File
@ -1,374 +0,0 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.common import utils
from neutron_lib import constants as n_const
from oslo_concurrency import lockutils
from oslo_log import log
from ovsdbapp.backend.ovs_idl import vlog
import six
from networking_ovn.agent.metadata import driver as metadata_driver
from networking_ovn.agent.metadata import ovsdb
from networking_ovn.agent.metadata import server as metadata_server
from networking_ovn.common import config
from networking_ovn.common import constants as ovn_const
from networking_ovn.ovsdb import row_event
LOG = log.getLogger(__name__)
_SYNC_STATE_LOCK = lockutils.ReaderWriterLock()
NS_PREFIX = 'ovnmeta-'
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
MAC_PATTERN = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
MetadataPortInfo = collections.namedtuple('MetadataPortInfo', ['mac',
'ip_addresses'])
def _sync_lock(f):
"""Decorator to block all operations for a global sync call."""
@six.wraps(f)
def wrapped(*args, **kwargs):
with _SYNC_STATE_LOCK.write_lock():
return f(*args, **kwargs)
return wrapped
def _wait_if_syncing(f):
"""Decorator to wait if any sync operations are in progress."""
@six.wraps(f)
def wrapped(*args, **kwargs):
with _SYNC_STATE_LOCK.read_lock():
return f(*args, **kwargs)
return wrapped
class PortBindingChassisEvent(row_event.RowEvent):
def __init__(self, metadata_agent):
self.agent = metadata_agent
table = 'Port_Binding'
events = (self.ROW_UPDATE,)
super(PortBindingChassisEvent, self).__init__(
events, table, None)
self.event_name = 'PortBindingChassisEvent'
@_wait_if_syncing
def run(self, event, row, old):
# Check if the port has been bound/unbound to our chassis and update
# the metadata namespace accordingly.
# Type must be empty to make sure it's a VIF.
if row.type != "":
return
new_chassis = getattr(row, 'chassis', [])
old_chassis = getattr(old, 'chassis', [])
if new_chassis and new_chassis[0].name == self.agent.chassis:
LOG.info("Port %s in datapath %s bound to our chassis",
row.logical_port, str(row.datapath.uuid))
self.agent.update_datapath(str(row.datapath.uuid))
elif old_chassis and old_chassis[0].name == self.agent.chassis:
LOG.info("Port %s in datapath %s unbound from our chassis",
row.logical_port, str(row.datapath.uuid))
self.agent.update_datapath(str(row.datapath.uuid))
class ChassisCreateEvent(row_event.RowEvent):
"""Row create event - Chassis name == our_chassis.
On connection, we get a dump of all chassis so if we catch a creation
of our own chassis it has to be a reconnection. In this case, we need
to do a full sync to make sure that we capture all changes while the
connection to OVSDB was down.
"""
def __init__(self, metadata_agent):
self.agent = metadata_agent
self.first_time = True
table = 'Chassis'
events = (self.ROW_CREATE,)
super(ChassisCreateEvent, self).__init__(
events, table, (('name', '=', self.agent.chassis),))
self.event_name = 'ChassisCreateEvent'
def run(self, event, row, old):
if self.first_time:
self.first_time = False
else:
LOG.info("Connection to OVSDB established, doing a full sync")
self.agent.sync()
class MetadataAgent(object):
def __init__(self, conf):
self.conf = conf
vlog.use_python_logger(max_level=config.get_ovn_ovsdb_log_level())
self._process_monitor = external_process.ProcessMonitor(
config=self.conf,
resource_type='metadata')
def start(self):
# Launch the server that will act as a proxy between the VMs and Nova.
proxy = metadata_server.UnixDomainMetadataProxy(self.conf)
proxy.run()
# Open the connection to OVS database
self.ovs_idl = ovsdb.MetadataAgentOvsIdl().start()
self.chassis = self._get_own_chassis_name()
# Open the connection to OVN SB database.
self.sb_idl = ovsdb.MetadataAgentOvnSbIdl(
[PortBindingChassisEvent(self), ChassisCreateEvent(self)]).start()
# Do the initial sync.
self.sync()
proxy.wait()
def _get_own_chassis_name(self):
"""Return the external_ids:system-id value of the Open_vSwitch table.
As long as ovn-controller is running on this node, the key is
guaranteed to exist and will include the chassis name.
"""
ext_ids = self.ovs_idl.db_get(
'Open_vSwitch', '.', 'external_ids').execute()
return ext_ids['system-id']
@_sync_lock
def sync(self):
"""Agent sync.
This function will make sure that all networks with ports in our
chassis are serving metadata. Also, it will tear down those namespaces
which were serving metadata but are no longer needed.
"""
metadata_namespaces = self.ensure_all_networks_provisioned()
system_namespaces = ip_lib.IPWrapper().get_namespaces()
unused_namespaces = [ns for ns in system_namespaces if
ns.startswith(NS_PREFIX) and
ns not in metadata_namespaces]
for ns in unused_namespaces:
self.teardown_datapath(self._get_datapath_name(ns))
@staticmethod
def _get_veth_name(datapath):
return ['{}{}{}'.format(n_const.TAP_DEVICE_PREFIX,
datapath[:10], i) for i in [0, 1]]
@staticmethod
def _get_datapath_name(namespace):
return namespace[len(NS_PREFIX):]
@staticmethod
def _get_namespace_name(datapath):
return NS_PREFIX + datapath
def teardown_datapath(self, datapath):
"""Unprovision this datapath to stop serving metadata.
This function will shutdown metadata proxy if it's running and delete
the VETH pair, the OVS port and the namespace.
"""
self.update_chassis_metadata_networks(datapath, remove=True)
namespace = self._get_namespace_name(datapath)
ip = ip_lib.IPWrapper(namespace)
# If the namespace doesn't exist, return
if not ip.netns.exists(namespace):
return
LOG.info("Cleaning up %s namespace which is not needed anymore",
namespace)
metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
self._process_monitor, datapath, self.conf, namespace)
veth_name = self._get_veth_name(datapath)
if ip_lib.device_exists(veth_name[0]):
ip_lib.IPWrapper().del_veth(veth_name[0])
self.ovs_idl.del_port('br-int', veth_name[0]).execute()
ip.garbage_collect_namespace()
def update_datapath(self, datapath):
"""Update the metadata service for this datapath.
This function will:
* Provision the namespace if it wasn't already in place.
* Update the namespace if it was already serving metadata (for example,
after binding/unbinding the first/last port of a subnet in our
chassis).
* Tear down the namespace if there are no more ports in our chassis
for this datapath.
"""
ports = self.sb_idl.get_ports_on_chassis(self.chassis)
datapath_ports = [p for p in ports if p.type == '' and
str(p.datapath.uuid) == datapath]
if datapath_ports:
self.provision_datapath(datapath)
else:
self.teardown_datapath(datapath)
def provision_datapath(self, datapath):
"""Provision the datapath so that it can serve metadata.
This function will create the namespace and VETH pair if needed
and assign the IP addresses to the interface corresponding to the
metadata port of the network. It will also remove existing IP
addresses that are no longer needed.
:return: The metadata namespace name of this datapath
"""
LOG.debug("Provisioning datapath %s", datapath)
port = self.sb_idl.get_metadata_port_network(datapath)
# If there's no metadata port, or it has no MAC or IP addresses
# configured, tear the namespace down if needed. This might happen
# when no subnets have been created yet, so the metadata port doesn't
# have an IP address.
if not (port and port.mac and
port.external_ids.get(ovn_const.OVN_CIDRS_EXT_ID_KEY, None)):
LOG.debug("There is no metadata port for datapath %s or it has no "
"MAC or IP addresses configured, tearing the namespace "
"down if needed", datapath)
self.teardown_datapath(datapath)
return
# First entry of the mac field must be the MAC address.
match = MAC_PATTERN.match(port.mac[0].split(' ')[0])
# If it is not, we can't provision the namespace. Tear it down if
# needed and log the error.
if not match:
LOG.error("Metadata port for datapath %s doesn't have a MAC "
"address, tearing the namespace down if needed",
datapath)
self.teardown_datapath(datapath)
return
mac = match.group()
ip_addresses = set(
port.external_ids[ovn_const.OVN_CIDRS_EXT_ID_KEY].split(' '))
ip_addresses.add(METADATA_DEFAULT_CIDR)
metadata_port = MetadataPortInfo(mac, ip_addresses)
# Create the VETH pair if it's not created. Also the add_veth function
# will create the namespace for us.
namespace = self._get_namespace_name(datapath)
veth_name = self._get_veth_name(datapath)
ip1 = ip_lib.IPDevice(veth_name[0])
if ip_lib.device_exists(veth_name[1], namespace):
ip2 = ip_lib.IPDevice(veth_name[1], namespace)
else:
LOG.debug("Creating VETH %s in %s namespace", veth_name[1],
namespace)
# Might happen that the end in the root namespace exists even
# though the other end doesn't. Make sure we delete it first if
# that's the case.
if ip1.exists():
ip1.link.delete()
ip1, ip2 = ip_lib.IPWrapper().add_veth(
veth_name[0], veth_name[1], namespace)
# Make sure both ends of the VETH are up
ip1.link.set_up()
ip2.link.set_up()
# Configure the MAC address.
ip2.link.set_address(metadata_port.mac)
dev_info = ip2.addr.list()
# Configure the IP addresses on the VETH pair and remove those
# that we no longer need.
current_cidrs = {dev['cidr'] for dev in dev_info}
for ipaddr in current_cidrs - metadata_port.ip_addresses:
ip2.addr.delete(ipaddr)
for ipaddr in metadata_port.ip_addresses - current_cidrs:
# NOTE(dalvarez): metadata only works on IPv4. We're doing this
# extra check here because it could be that the metadata port has
# an IPv6 address if there's an IPv6 subnet with SLAAC in its
# network. Neutron IPAM will autoallocate an IPv6 address for every
# port in the network.
if utils.get_ip_version(ipaddr) == 4:
ip2.addr.add(ipaddr)
# Configure the OVS port and add external_ids:iface-id so that it
# can be tracked by OVN.
self.ovs_idl.add_port('br-int', veth_name[0]).execute()
self.ovs_idl.db_set(
'Interface', veth_name[0],
('external_ids', {'iface-id': port.logical_port})).execute()
# Spawn metadata proxy if it's not already running.
metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
self._process_monitor, namespace, METADATA_PORT,
self.conf, network_id=datapath)
self.update_chassis_metadata_networks(datapath)
return namespace
def ensure_all_networks_provisioned(self):
"""Ensure that all datapaths are provisioned.
This function will make sure that all datapaths with ports bound to
our chassis have its namespace, VETH pair and OVS port created and
metadata proxy is up and running.
:return: A list with the namespaces that are currently serving
metadata
"""
# Retrieve all ports in our Chassis with type == ''
ports = self.sb_idl.get_ports_on_chassis(self.chassis)
datapaths = {str(p.datapath.uuid) for p in ports if p.type == ''}
namespaces = []
# Make sure that all those datapaths are serving metadata
for datapath in datapaths:
netns = self.provision_datapath(datapath)
if netns:
namespaces.append(netns)
return namespaces
def update_chassis_metadata_networks(self, datapath, remove=False):
"""Update metadata networks hosted in this chassis.
Add or remove a datapath from the list of current datapaths that
we're currently serving metadata.
"""
current_dps = self.sb_idl.get_chassis_metadata_networks(self.chassis)
updated = False
if remove:
if datapath in current_dps:
current_dps.remove(datapath)
updated = True
else:
if datapath not in current_dps:
current_dps.append(datapath)
updated = True
if updated:
with self.sb_idl.create_transaction(check_error=True) as txn:
txn.add(self.sb_idl.set_chassis_metadata_networks(
self.chassis, current_dps))
View File
@ -1,217 +0,0 @@
# Copyright 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import grp
import os
import pwd
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent.linux import external_process
from neutron.common import exceptions
LOG = logging.getLogger(__name__)
METADATA_SERVICE_NAME = 'metadata-proxy'
PROXY_CONFIG_DIR = "ns-metadata-proxy"
_HAPROXY_CONFIG_TEMPLATE = """
global
log /dev/log local0 %(log_level)s
user %(user)s
group %(group)s
maxconn 1024
pidfile %(pidfile)s
daemon
defaults
log global
mode http
option httplog
option dontlognull
option http-server-close
option forwardfor
retries 3
timeout http-request 30s
timeout connect 30s
timeout client 32s
timeout server 32s
timeout http-keep-alive 30s
listen listener
bind 0.0.0.0:%(port)s
server metadata %(unix_socket_path)s
http-request add-header X-OVN-%(res_type)s-ID %(res_id)s
"""
class InvalidUserOrGroupException(Exception):
pass
class HaproxyConfigurator(object):
def __init__(self, network_id, router_id, unix_socket_path, port, user,
group, state_path, pid_file):
self.network_id = network_id
self.router_id = router_id
if network_id is None and router_id is None:
raise exceptions.NetworkIdOrRouterIdRequiredError()
self.port = port
self.user = user
self.group = group
self.state_path = state_path
self.unix_socket_path = unix_socket_path
self.pidfile = pid_file
self.log_level = (
'debug' if logging.is_debug_enabled(cfg.CONF) else 'info')
def create_config_file(self):
"""Create the config file for haproxy."""
# Need to convert uid/gid into username/group
try:
username = pwd.getpwuid(int(self.user)).pw_name
except (ValueError, KeyError):
try:
username = pwd.getpwnam(self.user).pw_name
except KeyError:
raise InvalidUserOrGroupException(
_("Invalid user/uid: '%s'") % self.user)
try:
groupname = grp.getgrgid(int(self.group)).gr_name
except (ValueError, KeyError):
try:
groupname = grp.getgrnam(self.group).gr_name
except KeyError:
raise InvalidUserOrGroupException(
_("Invalid group/gid: '%s'") % self.group)
cfg_info = {
'port': self.port,
'unix_socket_path': self.unix_socket_path,
'user': username,
'group': groupname,
'pidfile': self.pidfile,
'log_level': self.log_level
}
if self.network_id:
cfg_info['res_type'] = 'Network'
cfg_info['res_id'] = self.network_id
else:
cfg_info['res_type'] = 'Router'
cfg_info['res_id'] = self.router_id
haproxy_cfg = _HAPROXY_CONFIG_TEMPLATE % cfg_info
LOG.debug("haproxy_cfg = %s", haproxy_cfg)
cfg_dir = self.get_config_path(self.state_path)
# uuid has to be included somewhere in the command line so that it can
# be tracked by process_monitor.
self.cfg_path = os.path.join(cfg_dir, "%s.conf" % cfg_info['res_id'])
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
with open(self.cfg_path, "w") as cfg_file:
cfg_file.write(haproxy_cfg)
@staticmethod
def get_config_path(state_path):
return os.path.join(state_path or cfg.CONF.state_path,
PROXY_CONFIG_DIR)
@staticmethod
def cleanup_config_file(uuid, state_path):
"""Delete config file created when metadata proxy was spawned."""
# Delete config file if it exists
cfg_path = os.path.join(
HaproxyConfigurator.get_config_path(state_path),
"%s.conf" % uuid)
try:
os.unlink(cfg_path)
except OSError as ex:
# It can happen that this function is called but metadata proxy
# was never spawned so its config file won't exist
if ex.errno != errno.ENOENT:
raise
class MetadataDriver(object):
monitors = {}
@classmethod
def _get_metadata_proxy_user_group(cls, conf):
user = conf.metadata_proxy_user or str(os.geteuid())
group = conf.metadata_proxy_group or str(os.getegid())
return user, group
@classmethod
def _get_metadata_proxy_callback(cls, port, conf, network_id=None,
router_id=None):
def callback(pid_file):
metadata_proxy_socket = conf.metadata_proxy_socket
user, group = (
cls._get_metadata_proxy_user_group(conf))
haproxy = HaproxyConfigurator(network_id,
router_id,
metadata_proxy_socket,
port,
user,
group,
conf.state_path,
pid_file)
haproxy.create_config_file()
proxy_cmd = ['haproxy',
'-f', haproxy.cfg_path]
return proxy_cmd
return callback
@classmethod
def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
network_id=None, router_id=None):
uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
port, conf, network_id=network_id, router_id=router_id)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
# Track the process manager under the same uuid used to register it
# (network_id or router_id) so destroy_monitored_metadata_proxy can
# find and remove it later.
cls.monitors[uuid] = pm
@classmethod
def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name):
monitor.unregister(uuid, METADATA_SERVICE_NAME)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name)
pm.disable()
# Delete metadata proxy config file
HaproxyConfigurator.cleanup_config_file(uuid, cfg.CONF.state_path)
cls.monitors.pop(uuid, None)
@classmethod
def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None,
callback=None):
return external_process.ProcessManager(
conf=conf,
uuid=router_id,
namespace=ns_name,
default_cmd_callback=callback)
View File
@ -1,59 +0,0 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ovs.db import idl
from ovsdbapp.backend.ovs_idl import connection
from ovsdbapp.backend.ovs_idl import idlutils
from ovsdbapp.schema.open_vswitch import impl_idl as idl_ovs
from networking_ovn.common import config
from networking_ovn.ovsdb import impl_idl_ovn as idl_ovn
from networking_ovn.ovsdb import ovsdb_monitor
class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl):
SCHEMA = 'OVN_Southbound'
def __init__(self, events=None):
connection_string = config.get_ovn_sb_connection()
helper = idlutils.get_schema_helper(connection_string, self.SCHEMA)
tables = ('Chassis', 'Port_Binding', 'Datapath_Binding')
for table in tables:
helper.register_table(table)
super(MetadataAgentOvnSbIdl, self).__init__(
None, connection_string, helper)
if events:
self.notify_handler.watch_events(events)
def start(self):
ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA)
conn = connection.Connection(
self, timeout=config.get_ovn_ovsdb_timeout())
return idl_ovn.OvsdbSbOvnIdl(conn)
class MetadataAgentOvsIdl(object):
def start(self):
connection_string = config.cfg.CONF.ovs.ovsdb_connection
helper = idlutils.get_schema_helper(connection_string,
'Open_vSwitch')
tables = ('Open_vSwitch', 'Bridge', 'Port', 'Interface')
for table in tables:
helper.register_table(table)
ovs_idl = idl.Idl(connection_string, helper)
conn = connection.Connection(
ovs_idl, timeout=config.cfg.CONF.ovs.ovsdb_connection_timeout)
return idl_ovs.OvsdbIdl(conn)
View File
@ -1,190 +0,0 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import httplib2
from neutron._i18n import _
from neutron.agent.linux import utils as agent_utils
from neutron.conf.agent.metadata import config
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
import six.moves.urllib.parse as urlparse
import webob
from networking_ovn.agent.metadata import ovsdb
from networking_ovn.common import constants as ovn_const
LOG = logging.getLogger(__name__)
MODE_MAP = {
config.USER_MODE: 0o644,
config.GROUP_MODE: 0o664,
config.ALL_MODE: 0o666,
}
class MetadataProxyHandler(object):
def __init__(self, conf):
self.conf = conf
self.subscribe()
def subscribe(self):
registry.subscribe(self.post_fork_initialize,
resources.PROCESS,
events.AFTER_INIT)
def post_fork_initialize(self, resource, event, trigger, **kwargs):
# We need to open a connection to OVN SouthBound database for
# each worker so that we can process the metadata requests.
self.sb_idl = ovsdb.MetadataAgentOvnSbIdl().start()
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
try:
LOG.debug("Request: %s", req)
instance_id, project_id = self._get_instance_and_project_id(req)
if instance_id:
return self._proxy_request(instance_id, project_id, req)
else:
return webob.exc.HTTPNotFound()
except Exception:
LOG.exception("Unexpected error.")
msg = _('An unknown error has occurred. '
'Please try your request again.')
explanation = six.text_type(msg)
return webob.exc.HTTPInternalServerError(explanation=explanation)
def _get_instance_and_project_id(self, req):
remote_address = req.headers.get('X-Forwarded-For')
network_id = req.headers.get('X-OVN-Network-ID')
ports = self.sb_idl.get_network_port_bindings_by_ip(network_id,
remote_address)
if len(ports) == 1:
external_ids = ports[0].external_ids
return (external_ids[ovn_const.OVN_DEVID_EXT_ID_KEY],
external_ids[ovn_const.OVN_PROJID_EXT_ID_KEY])
return None, None
def _proxy_request(self, instance_id, tenant_id, req):
headers = {
'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
'X-Instance-ID': str(instance_id),
'X-Tenant-ID': str(tenant_id),
'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
}
nova_host_port = '%s:%s' % (self.conf.nova_metadata_host,
self.conf.nova_metadata_port)
LOG.debug('Request to Nova at %s', nova_host_port)
LOG.debug(headers)
url = urlparse.urlunsplit((
self.conf.nova_metadata_protocol,
nova_host_port,
req.path_info,
req.query_string,
''))
h = httplib2.Http(
ca_certs=self.conf.auth_ca_cert,
disable_ssl_certificate_validation=self.conf.nova_metadata_insecure
)
if self.conf.nova_client_cert and self.conf.nova_client_priv_key:
h.add_certificate(self.conf.nova_client_priv_key,
self.conf.nova_client_cert,
nova_host_port)
resp, content = h.request(url, method=req.method, headers=headers,
body=req.body)
if resp.status == 200:
req.response.content_type = resp['content-type']
req.response.body = content
LOG.debug(str(resp))
return req.response
elif resp.status == 403:
LOG.warning(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
)
return webob.exc.HTTPForbidden()
elif resp.status == 400:
return webob.exc.HTTPBadRequest()
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.debug(msg)
explanation = six.text_type(msg)
return webob.exc.HTTPInternalServerError(explanation=explanation)
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
def _sign_instance_id(self, instance_id):
secret = self.conf.metadata_proxy_shared_secret
secret = encodeutils.to_utf8(secret)
instance_id = encodeutils.to_utf8(instance_id)
return hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
class UnixDomainMetadataProxy(object):
def __init__(self, conf):
self.conf = conf
agent_utils.ensure_directory_exists_without_file(
cfg.CONF.metadata_proxy_socket)
def _get_socket_mode(self):
mode = self.conf.metadata_proxy_socket_mode
if mode == config.DEDUCE_MODE:
user = self.conf.metadata_proxy_user
if (not user or user == '0' or user == 'root'
or agent_utils.is_effective_user(user)):
# user is agent effective user or root => USER_MODE
mode = config.USER_MODE
else:
group = self.conf.metadata_proxy_group
if not group or agent_utils.is_effective_group(group):
# group is agent effective group => GROUP_MODE
mode = config.GROUP_MODE
else:
# otherwise => ALL_MODE
mode = config.ALL_MODE
return MODE_MAP[mode]
def run(self):
self.server = agent_utils.UnixDomainWSGIServer(
'networking-ovn-metadata-agent')
self.server.start(MetadataProxyHandler(self.conf),
self.conf.metadata_proxy_socket,
workers=self.conf.metadata_workers,
backlog=self.conf.metadata_backlog,
mode=self._get_socket_mode())
def wait(self):
self.server.wait()
View File
@ -1,37 +0,0 @@
# Copyright 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron.common import config
from neutron.common import utils
from oslo_config import cfg
from oslo_log import log as logging
from networking_ovn.agent.metadata import agent
from networking_ovn.conf.agent.metadata import config as meta
LOG = logging.getLogger(__name__)
def main():
meta.register_meta_conf_opts(meta.SHARED_OPTS)
meta.register_meta_conf_opts(meta.UNIX_DOMAIN_METADATA_PROXY_OPTS)
meta.register_meta_conf_opts(meta.METADATA_PROXY_HANDLER_OPTS)
meta.register_meta_conf_opts(meta.OVS_OPTS, group='ovs')
config.init(sys.argv[1:])
config.setup_logging()
utils.log_opt_values(LOG)
agt = agent.MetadataAgent(cfg.CONF)
agt.start()
View File
@ -1,15 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import eventlet_utils
eventlet_utils.monkey_patch()
View File
@ -1,17 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_ovn.agent import metadata_agent
def main():
metadata_agent.main()
View File
@ -1,123 +0,0 @@
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
from neutron.conf.agent import securitygroups_rpc
from neutron import manager
from neutron import opts as neutron_options
from neutron.plugins.ml2 import plugin as ml2_plugin
from networking_ovn.common import config as ovn_config
from networking_ovn.ml2 import mech_driver
from networking_ovn import ovn_db_sync
from networking_ovn.ovsdb import impl_idl_ovn
LOG = logging.getLogger(__name__)
class Ml2Plugin(ml2_plugin.Ml2Plugin):
def _setup_dhcp(self):
pass
def _start_rpc_notifiers(self):
pass
class OVNMechanismDriver(mech_driver.OVNMechanismDriver):
def subscribe(self):
pass
def post_fork_initialize(self, resource, event, trigger, **kwargs):
pass
def setup_conf():
conf = cfg.CONF
ml2_group, ml2_opts = neutron_options.list_ml2_conf_opts()[0]
cfg.CONF.register_cli_opts(ml2_opts, ml2_group)
cfg.CONF.register_cli_opts(securitygroups_rpc.security_group_opts,
'SECURITYGROUP')
ovn_group, ovn_opts = ovn_config.list_opts()[0]
cfg.CONF.register_cli_opts(ovn_opts, group=ovn_group)
db_group, neutron_db_opts = db_options.list_opts()[0]
cfg.CONF.register_cli_opts(neutron_db_opts, db_group)
return conf
def main():
"""Main method for syncing neutron networks and ports with ovn nb db.
The utility syncs neutron db with ovn nb db.
"""
conf = setup_conf()
# if no config file is passed or no configuration options are passed
# then load configuration from /etc/neutron/neutron.conf
try:
conf(project='neutron')
except TypeError:
LOG.error('Error parsing the configuration values. Please verify.')
return
logging.setup(conf, 'neutron_ovn_db_sync_util')
LOG.info('Started Neutron OVN db sync')
mode = ovn_config.get_ovn_neutron_sync_mode()
if mode not in [ovn_db_sync.SYNC_MODE_LOG, ovn_db_sync.SYNC_MODE_REPAIR]:
LOG.error(
'Invalid sync mode : ["%s"]. Should be "log" or "repair"', mode)
return
# Validate and modify core plugin and ML2 mechanism drivers for syncing.
if (cfg.CONF.core_plugin.endswith('.Ml2Plugin') or
cfg.CONF.core_plugin == 'ml2'):
cfg.CONF.core_plugin = (
'networking_ovn.cmd.neutron_ovn_db_sync_util.Ml2Plugin')
if not cfg.CONF.ml2.mechanism_drivers:
LOG.error('please use --config-file to specify '
'neutron and ml2 configuration file.')
return
if 'ovn' not in cfg.CONF.ml2.mechanism_drivers:
LOG.error('No "ovn" mechanism driver found : "%s".',
cfg.CONF.ml2.mechanism_drivers)
return
cfg.CONF.set_override('mechanism_drivers', ['ovn-sync'], 'ml2')
conf.service_plugins = ['networking_ovn.l3.l3_ovn.OVNL3RouterPlugin']
else:
LOG.error('Invalid core plugin : ["%s"].', cfg.CONF.core_plugin)
return
try:
conn = impl_idl_ovn.get_connection(impl_idl_ovn.OvsdbNbOvnIdl)
ovn_api = impl_idl_ovn.OvsdbNbOvnIdl(conn)
except RuntimeError:
LOG.error('Invalid --ovn-ovn_nb_connection parameter provided.')
return
manager.init()
core_plugin = directory.get_plugin()
ovn_driver = core_plugin.mechanism_manager.mech_drivers['ovn-sync'].obj
ovn_driver._nb_ovn = ovn_api
synchronizer = ovn_db_sync.OvnNbSynchronizer(
core_plugin, ovn_api, mode, ovn_driver)
LOG.info('Sync started with mode : %s', mode)
synchronizer.do_sync()
LOG.info('Sync completed')
View File
@ -1,373 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from neutron_lib import constants as const
from neutron_lib import exceptions as n_exceptions
from oslo_config import cfg
from networking_ovn._i18n import _
from networking_ovn.common import constants as ovn_const
from networking_ovn.common import utils
# Map protocol names to protocol numbers (as strings), since Neutron
# may pass us the protocol as either a name or a number.
PROTOCOL_NAME_TO_NUM_MAP = {k: str(v) for k, v in
const.IP_PROTOCOL_MAP.items()}
# Create a map from protocol numbers to names
PROTOCOL_NUM_TO_NAME_MAP = {v: k for k, v in
PROTOCOL_NAME_TO_NUM_MAP.items()}
# Group of transport protocols supported
TRANSPORT_PROTOCOLS = (const.PROTO_NAME_TCP,
const.PROTO_NAME_UDP,
const.PROTO_NAME_SCTP,
PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_TCP],
PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_UDP],
PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_SCTP])
# Group of versions of the ICMP protocol supported
ICMP_PROTOCOLS = (const.PROTO_NAME_ICMP,
const.PROTO_NAME_IPV6_ICMP,
const.PROTO_NAME_IPV6_ICMP_LEGACY,
PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_ICMP],
PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_IPV6_ICMP],
PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_IPV6_ICMP_LEGACY])
class ProtocolNotSupported(n_exceptions.NeutronException):
message = _('The protocol "%(protocol)s" is not supported. Valid '
            'protocols are: %(valid_protocols)s; or protocol '
            'numbers ranging from 0 to 255.')
def is_sg_enabled():
return cfg.CONF.SECURITYGROUP.enable_security_group
def acl_direction(r, port):
if r['direction'] == 'ingress':
portdir = 'outport'
else:
portdir = 'inport'
return '%s == "%s"' % (portdir, port['id'])
def acl_ethertype(r):
match = ''
ip_version = None
icmp = None
if r['ethertype'] == 'IPv4':
match = ' && ip4'
ip_version = 'ip4'
icmp = 'icmp4'
elif r['ethertype'] == 'IPv6':
match = ' && ip6'
ip_version = 'ip6'
icmp = 'icmp6'
return match, ip_version, icmp
def acl_remote_ip_prefix(r, ip_version):
if not r['remote_ip_prefix']:
return ''
src_or_dst = 'src' if r['direction'] == 'ingress' else 'dst'
return ' && %s.%s == %s' % (ip_version, src_or_dst,
r['remote_ip_prefix'])
def _get_protocol_number(protocol):
if protocol is None:
return
try:
protocol = int(protocol)
if 0 <= protocol <= 255:
return str(protocol)
except (ValueError, TypeError):
protocol = PROTOCOL_NAME_TO_NUM_MAP.get(protocol)
if protocol is not None:
return protocol
raise ProtocolNotSupported(
protocol=protocol, valid_protocols=', '.join(PROTOCOL_NAME_TO_NUM_MAP))
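# Editorial worked example (assuming neutron_lib's IP_PROTOCOL_MAP maps
# 'tcp' to 6):
#   _get_protocol_number(None)  -> None
#   _get_protocol_number('6')   -> '6'
#   _get_protocol_number('tcp') -> '6'   (int('tcp') raises ValueError,
#                                         so the name map is consulted)
#   _get_protocol_number(300)   -> raises ProtocolNotSupported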
def acl_protocol_and_ports(r, icmp):
match = ''
protocol = _get_protocol_number(r.get('protocol'))
if protocol is None:
return match
min_port = r.get('port_range_min')
max_port = r.get('port_range_max')
if protocol in TRANSPORT_PROTOCOLS:
protocol = PROTOCOL_NUM_TO_NAME_MAP[protocol]
match += ' && %s' % protocol
if min_port is not None and min_port == max_port:
match += ' && %s.dst == %d' % (protocol, min_port)
else:
if min_port is not None:
match += ' && %s >= %d' % (protocol, min_port)
if max_port is not None:
match += ' && %s <= %d' % (protocol, max_port)
elif protocol in ICMP_PROTOCOLS:
protocol = icmp
match += ' && %s' % protocol
if min_port is not None:
match += ' && %s.type == %d' % (protocol, min_port)
if max_port is not None:
match += ' && %s.code == %d' % (protocol, max_port)
else:
match += ' && ip.proto == %s' % protocol
return match
def drop_all_ip_traffic_for_port(port):
acl_list = []
for direction, p in (('from-lport', 'inport'),
('to-lport', 'outport')):
lswitch = utils.ovn_name(port['network_id'])
lport = port['id']
acl = {"lswitch": lswitch, "lport": lport,
"priority": ovn_const.ACL_PRIORITY_DROP,
"action": ovn_const.ACL_ACTION_DROP,
"log": False,
"direction": direction,
"match": '%s == "%s" && ip' % (p, port['id']),
"external_ids": {'neutron:lport': port['id']}}
acl_list.append(acl)
return acl_list
def add_sg_rule_acl_for_port(port, r, match):
dir_map = {
'ingress': 'to-lport',
'egress': 'from-lport',
}
acl = {"lswitch": utils.ovn_name(port['network_id']),
"lport": port['id'],
"priority": ovn_const.ACL_PRIORITY_ALLOW,
"action": ovn_const.ACL_ACTION_ALLOW_RELATED,
"log": False,
"direction": dir_map[r['direction']],
"match": match,
"external_ids": {'neutron:lport': port['id']}}
return acl
def add_acl_dhcp(port, subnet, ovn_dhcp=True):
# Allow DHCP requests for OVN native DHCP service, while responses are
# allowed in ovn-northd.
# Allow both DHCP requests and responses to pass for other DHCP services.
# We do this even if DHCP isn't enabled for the subnet
acl_list = []
if not ovn_dhcp:
acl = {"lswitch": utils.ovn_name(port['network_id']),
"lport": port['id'],
"priority": ovn_const.ACL_PRIORITY_ALLOW,
"action": ovn_const.ACL_ACTION_ALLOW,
"log": False,
"direction": 'to-lport',
"match": ('outport == "%s" && ip4 && ip4.src == %s && '
'udp && udp.src == 67 && udp.dst == 68'
) % (port['id'], subnet['cidr']),
"external_ids": {'neutron:lport': port['id']}}
acl_list.append(acl)
acl = {"lswitch": utils.ovn_name(port['network_id']),
"lport": port['id'],
"priority": ovn_const.ACL_PRIORITY_ALLOW,
"action": ovn_const.ACL_ACTION_ALLOW,
"log": False,
"direction": 'from-lport',
"match": ('inport == "%s" && ip4 && '
'ip4.dst == {255.255.255.255, %s} && '
'udp && udp.src == 68 && udp.dst == 67'
) % (port['id'], subnet['cidr']),
"external_ids": {'neutron:lport': port['id']}}
acl_list.append(acl)
return acl_list
def _get_subnet_from_cache(plugin, admin_context, subnet_cache, subnet_id):
if subnet_id in subnet_cache:
return subnet_cache[subnet_id]
else:
subnet = plugin.get_subnet(admin_context, subnet_id)
if subnet:
subnet_cache[subnet_id] = subnet
return subnet
def _get_sg_ports_from_cache(plugin, admin_context, sg_ports_cache, sg_id):
if sg_id in sg_ports_cache:
return sg_ports_cache[sg_id]
else:
filters = {'security_group_id': [sg_id]}
sg_ports = plugin._get_port_security_group_bindings(
admin_context, filters)
if sg_ports:
sg_ports_cache[sg_id] = sg_ports
return sg_ports
def _get_sg_from_cache(plugin, admin_context, sg_cache, sg_id):
if sg_id in sg_cache:
return sg_cache[sg_id]
else:
sg = plugin.get_security_group(admin_context, sg_id)
if sg:
sg_cache[sg_id] = sg
return sg
def acl_remote_group_id(r, ip_version):
if not r['remote_group_id']:
return ''
src_or_dst = 'src' if r['direction'] == 'ingress' else 'dst'
addrset_name = utils.ovn_addrset_name(r['remote_group_id'],
ip_version)
return ' && %s.%s == $%s' % (ip_version, src_or_dst, addrset_name)
def _add_sg_rule_acl_for_port(port, r):
# Update the match based on which direction this rule is for (ingress
# or egress).
match = acl_direction(r, port)
# Update the match for IPv4 vs IPv6.
ip_match, ip_version, icmp = acl_ethertype(r)
match += ip_match
# Update the match if an IPv4 or IPv6 prefix was specified.
match += acl_remote_ip_prefix(r, ip_version)
# Update the match if remote group id was specified.
match += acl_remote_group_id(r, ip_version)
# Update the match for the protocol (tcp, udp, icmp) and port/type
# range if specified.
match += acl_protocol_and_ports(r, icmp)
# Finally, create the ACL entry for the direction specified.
return add_sg_rule_acl_for_port(port, r, match)
def update_acls_for_security_group(plugin,
admin_context,
ovn,
security_group_id,
security_group_rule,
sg_ports_cache=None,
is_add_acl=True):
# Skip ACLs if security groups aren't enabled
if not is_sg_enabled():
return
# Get the security group ports.
sg_ports_cache = sg_ports_cache or {}
sg_ports = _get_sg_ports_from_cache(plugin,
admin_context,
sg_ports_cache,
security_group_id)
# ACLs associated with a security group may span logical switches
sg_port_ids = [binding['port_id'] for binding in sg_ports]
sg_port_ids = list(set(sg_port_ids))
port_list = plugin.get_ports(admin_context,
filters={'id': sg_port_ids})
acl_new_values_dict = {}
update_port_list = []
# NOTE(lizk): We can directly locate the affected acl records,
# so no need to compare new acl values with existing acl objects.
for port in port_list:
# Skip trusted port
if utils.is_lsp_trusted(port):
continue
update_port_list.append(port)
acl = _add_sg_rule_acl_for_port(port, security_group_rule)
# Remove lport and lswitch since we don't need them
acl.pop('lport')
acl.pop('lswitch')
acl_new_values_dict[port['id']] = acl
if not update_port_list:
return
lswitch_names = {p['network_id'] for p in update_port_list}
ovn.update_acls(list(lswitch_names),
iter(update_port_list),
acl_new_values_dict,
need_compare=False,
is_add_acl=is_add_acl).execute(check_error=True)
def add_acls(plugin, admin_context, port, sg_cache, subnet_cache):
acl_list = []
# Skip ACLs if security groups aren't enabled
if not is_sg_enabled():
return acl_list
sec_groups = utils.get_lsp_security_groups(port)
if not sec_groups:
return acl_list
# Drop all IP traffic to and from the logical port by default.
acl_list += drop_all_ip_traffic_for_port(port)
# Add DHCP ACLs.
port_subnet_ids = set()
for ip in port['fixed_ips']:
if netaddr.IPNetwork(ip['ip_address']).version != 4:
continue
subnet = _get_subnet_from_cache(plugin,
admin_context,
subnet_cache,
ip['subnet_id'])
# Ignore duplicate DHCP ACLs for the subnet.
if subnet['id'] not in port_subnet_ids:
acl_list += add_acl_dhcp(port, subnet, True)
port_subnet_ids.add(subnet['id'])
# We create an ACL entry for each rule on each security group applied
# to this port.
for sg_id in sec_groups:
sg = _get_sg_from_cache(plugin,
admin_context,
sg_cache,
sg_id)
for r in sg['security_group_rules']:
acl = _add_sg_rule_acl_for_port(port, r)
if acl not in acl_list:
acl_list.append(acl)
return acl_list
def acl_port_ips(port):
# Skip ACLs if security groups aren't enabled
if not is_sg_enabled():
return {'ip4': [], 'ip6': []}
ip_addresses = {4: [], 6: []}
for fixed_ip in port['fixed_ips']:
ip_version = netaddr.IPNetwork(fixed_ip['ip_address']).version
ip_addresses[ip_version].append(fixed_ip['ip_address'])
return {'ip4': ip_addresses[4],
'ip6': ip_addresses[6]}
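# --- Editorial worked example, not part of the retired file ----------------
# How a security group rule becomes an OVN ACL via the helpers above; the
# UUIDs are placeholders and the function itself is a sketch:
def _example_sg_rule_to_acl():
    rule = {'direction': 'ingress', 'ethertype': 'IPv4',
            'remote_ip_prefix': '10.0.0.0/24', 'remote_group_id': None,
            'protocol': 'tcp', 'port_range_min': 443, 'port_range_max': 443}
    port = {'id': 'PORT_UUID', 'network_id': 'NET_UUID'}
    acl = _add_sg_rule_acl_for_port(port, rule)
    # acl['match'] comes out as:
    #   outport == "PORT_UUID" && ip4 && ip4.src == 10.0.0.0/24
    #   && tcp && tcp.dst == 443
    # with lswitch 'neutron-NET_UUID', direction 'to-lport',
    # priority 1002 and action 'allow-related'.
    return acl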

View File

@ -1,210 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ovsdbapp.backend.ovs_idl import vlog
from networking_ovn._i18n import _
from neutron_lib.api.definitions import portbindings
VLOG_LEVELS = {'CRITICAL': vlog.CRITICAL, 'ERROR': vlog.ERROR, 'WARNING':
vlog.WARN, 'INFO': vlog.INFO, 'DEBUG': vlog.DEBUG}
ovn_opts = [
cfg.StrOpt('ovn_nb_connection',
default='tcp:127.0.0.1:6641',
help=_('The connection string for the OVN_Northbound OVSDB.\n'
'Use tcp:IP:PORT for TCP connection.\n'
'Use ssl:IP:PORT for SSL connection. The '
'ovn_nb_private_key, ovn_nb_certificate and '
'ovn_nb_ca_cert are mandatory.\n'
'Use unix:FILE for unix domain socket connection.')),
cfg.StrOpt('ovn_nb_private_key',
default='',
help=_('The PEM file with private key for SSL connection to '
'OVN-NB-DB')),
cfg.StrOpt('ovn_nb_certificate',
default='',
help=_('The PEM file with certificate that certifies the '
'private key specified in ovn_nb_private_key')),
cfg.StrOpt('ovn_nb_ca_cert',
default='',
help=_('The PEM file with CA certificate that OVN should use to'
' verify certificates presented to it by SSL peers')),
cfg.StrOpt('ovn_sb_connection',
default='tcp:127.0.0.1:6642',
help=_('The connection string for the OVN_Southbound OVSDB.\n'
'Use tcp:IP:PORT for TCP connection.\n'
'Use ssl:IP:PORT for SSL connection. The '
'ovn_sb_private_key, ovn_sb_certificate and '
'ovn_sb_ca_cert are mandatory.\n'
'Use unix:FILE for unix domain socket connection.')),
cfg.StrOpt('ovn_sb_private_key',
default='',
help=_('The PEM file with private key for SSL connection to '
'OVN-SB-DB')),
cfg.StrOpt('ovn_sb_certificate',
default='',
help=_('The PEM file with certificate that certifies the '
'private key specified in ovn_sb_private_key')),
cfg.StrOpt('ovn_sb_ca_cert',
default='',
help=_('The PEM file with CA certificate that OVN should use to'
' verify certificates presented to it by SSL peers')),
cfg.IntOpt('ovsdb_connection_timeout',
default=180,
help=_('Timeout in seconds for the OVSDB '
'connection transaction')),
cfg.IntOpt('ovsdb_probe_interval',
min=0,
default=0,
help=_('The probe interval for the OVSDB session in '
'milliseconds. If this is zero, it disables the '
'connection keepalive feature. If non-zero the value '
'will be forced to at least 1000 milliseconds. Probing '
'is disabled by default.')),
cfg.StrOpt('neutron_sync_mode',
default='log',
choices=('off', 'log', 'repair'),
help=_('The synchronization mode of OVN_Northbound OVSDB '
'with Neutron DB.\n'
'off - synchronization is off \n'
'log - during neutron-server startup, '
'check to see if OVN is in sync with '
'the Neutron database. '
' Log warnings for any inconsistencies found so'
' that an admin can investigate \n'
'repair - during neutron-server startup, automatically'
' create resources found in Neutron but not in OVN.'
' Also remove resources from OVN'
' that are no longer in Neutron.')),
cfg.BoolOpt('ovn_l3_mode',
default=True,
deprecated_for_removal=True,
deprecated_reason="This option is no longer used. Native L3 "
"support in OVN is always used.",
help=_('Whether to use OVN native L3 support. Do not change '
'the value for existing deployments that contain '
'routers.')),
cfg.StrOpt("ovn_l3_scheduler",
default='leastloaded',
choices=('leastloaded', 'chance'),
help=_('The OVN L3 Scheduler type used to schedule router '
'gateway ports on hypervisors/chassis. \n'
'leastloaded - chassis with fewest gateway ports '
'selected \n'
'chance - chassis randomly selected')),
cfg.StrOpt("vif_type",
deprecated_for_removal=True,
deprecated_reason="The port VIF type is now determined based "
"on the OVN chassis information when the "
"port is bound to a host.",
default=portbindings.VIF_TYPE_OVS,
help=_("Type of VIF to be used for ports valid values are "
"(%(ovs)s, %(dpdk)s) default %(ovs)s") % {
"ovs": portbindings.VIF_TYPE_OVS,
"dpdk": portbindings.VIF_TYPE_VHOST_USER},
choices=[portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_VHOST_USER]),
cfg.StrOpt("vhost_sock_dir",
default="/var/run/openvswitch",
help=_("The directory in which vhost virtio socket "
"is created by all the vswitch daemons")),
cfg.IntOpt('dhcp_default_lease_time',
default=(12 * 60 * 60),
help=_('Default lease time (in seconds) to use with '
'OVN\'s native DHCP service.')),
cfg.StrOpt("ovsdb_log_level",
default="INFO",
choices=list(VLOG_LEVELS.keys()),
help=_("The log level used for OVSDB")),
cfg.BoolOpt('ovn_metadata_enabled',
default=True,
help=_('Whether to use the metadata service.'))
]
cfg.CONF.register_opts(ovn_opts, group='ovn')
def list_opts():
return [
('ovn', ovn_opts),
]
def get_ovn_nb_connection():
return cfg.CONF.ovn.ovn_nb_connection
def get_ovn_nb_private_key():
return cfg.CONF.ovn.ovn_nb_private_key
def get_ovn_nb_certificate():
return cfg.CONF.ovn.ovn_nb_certificate
def get_ovn_nb_ca_cert():
return cfg.CONF.ovn.ovn_nb_ca_cert
def get_ovn_sb_connection():
return cfg.CONF.ovn.ovn_sb_connection
def get_ovn_sb_private_key():
return cfg.CONF.ovn.ovn_sb_private_key
def get_ovn_sb_certificate():
return cfg.CONF.ovn.ovn_sb_certificate
def get_ovn_sb_ca_cert():
return cfg.CONF.ovn.ovn_sb_ca_cert
def get_ovn_ovsdb_timeout():
return cfg.CONF.ovn.ovsdb_connection_timeout
def get_ovn_ovsdb_probe_interval():
return cfg.CONF.ovn.ovsdb_probe_interval
def get_ovn_neutron_sync_mode():
return cfg.CONF.ovn.neutron_sync_mode
def is_ovn_l3():
return cfg.CONF.ovn.ovn_l3_mode
def get_ovn_l3_scheduler():
return cfg.CONF.ovn.ovn_l3_scheduler
def get_ovn_vhost_sock_dir():
return cfg.CONF.ovn.vhost_sock_dir
def get_ovn_dhcp_default_lease_time():
return cfg.CONF.ovn.dhcp_default_lease_time
def get_ovn_ovsdb_log_level():
return VLOG_LEVELS[cfg.CONF.ovn.ovsdb_log_level]
def is_ovn_metadata_enabled():
return cfg.CONF.ovn.ovn_metadata_enabled
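# --- Editorial sketch, not part of the retired file ------------------------
# The accessors above are thin proxies over cfg.CONF.ovn; the [ovn] options
# are registered at import time, so a caller could do something like:
def _example_read_sync_mode():
    cfg.CONF([], project='neutron')  # parse no CLI args; sketch only
    cfg.CONF.set_override('neutron_sync_mode', 'repair', group='ovn')
    return get_ovn_neutron_sync_mode()  # -> 'repair'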

View File

@ -1,69 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as const
import six
OVN_ML2_MECH_DRIVER_NAME = 'ovn'
OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name'
OVN_PORT_NAME_EXT_ID_KEY = 'neutron:port_name'
OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name'
OVN_SG_NAME_EXT_ID_KEY = 'neutron:security_group_name'
OVN_PHYSNET_EXT_ID_KEY = 'neutron:provnet-physical-network'
OVN_NETTYPE_EXT_ID_KEY = 'neutron:provnet-network-type'
OVN_SEGID_EXT_ID_KEY = 'neutron:provnet-segmentation-id'
OVN_PROJID_EXT_ID_KEY = 'neutron:project_id'
OVN_DEVID_EXT_ID_KEY = 'neutron:device_id'
OVN_CIDRS_EXT_ID_KEY = 'neutron:cidrs'
OVN_PORT_BINDING_PROFILE = portbindings.PROFILE
OVN_PORT_BINDING_PROFILE_PARAMS = [{'parent_name': six.string_types,
'tag': six.integer_types},
{'vtep-physical-switch': six.string_types,
'vtep-logical-switch': six.string_types}]
OVN_ROUTER_PORT_OPTION_KEYS = ['router-port', 'nat-addresses']
OVN_GATEWAY_CHASSIS_KEY = 'redirect-chassis'
OVN_PROVNET_PORT_NAME_PREFIX = 'provnet-'
OVN_NEUTRON_OWNER_TO_PORT_TYPE = {const.DEVICE_OWNER_DHCP: 'localport'}
# OVN ACLs have priorities. The highest priority ACL that matches is the one
# that takes effect. Our choice of priority numbers is arbitrary, but it
# leaves room above and below the ACLs we create. We only need two priorities.
# The first is for all the things we allow. The second is for dropping traffic
# by default.
ACL_PRIORITY_ALLOW = 1002
ACL_PRIORITY_DROP = 1001
ACL_ACTION_DROP = 'drop'
ACL_ACTION_ALLOW_RELATED = 'allow-related'
ACL_ACTION_ALLOW = 'allow'
# When an OVN L3 gateway is created, it needs to be bound to a chassis. In
# case a chassis is not found OVN_GATEWAY_INVALID_CHASSIS will be set in
# the options column of the Logical Router. This value is used to detect
# unhosted router gateways to schedule.
OVN_GATEWAY_INVALID_CHASSIS = 'neutron-ovn-invalid-chassis'
SUPPORTED_DHCP_OPTS = {
4: ['netmask', 'router', 'dns-server', 'log-server',
'lpr-server', 'swap-server', 'ip-forward-enable',
'policy-filter', 'default-ttl', 'mtu', 'router-discovery',
'router-solicitation', 'arp-timeout', 'ethernet-encap',
'tcp-ttl', 'tcp-keepalive', 'nis-server', 'ntp-server',
'tftp-server'],
6: ['server-id', 'dns-server', 'domain-search']}
DHCPV6_STATELESS_OPT = 'dhcpv6_stateless'
CHASSIS_DATAPATH_NETDEV = 'netdev'
CHASSIS_IFACE_DPDKVHOSTUSER = 'dpdkvhostuser'

View File

@ -1,60 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(russellb) This remains in its own file (vs constants.py) because we want
# to be able to easily import it and export the info without any dependencies
# on external imports.
# NOTE(russellb) If you update these lists, please also update
# doc/source/features.rst and the current release note.
ML2_SUPPORTED_API_EXTENSIONS_NEUTRON_L3 = [
'dns-integration',
'dvr',
'extraroute',
'ext-gw-mode',
'l3-ha',
'l3_agent_scheduler',
'router',
'router_availability_zone',
]
ML2_SUPPORTED_API_EXTENSIONS_OVN_L3 = [
'router',
'extraroute',
'ext-gw-mode'
]
ML2_SUPPORTED_API_EXTENSIONS = [
'address-scope',
'agent',
'allowed-address-pairs',
'auto-allocated-topology',
'availability_zone',
'binding',
'default-subnetpools',
'dhcp_agent_scheduler',
'external-net',
'extra_dhcp_opt',
'multi-provider',
'net-mtu',
'network_availability_zone',
'network-ip-availability',
'port-security',
'provider',
'quotas',
'rbac-policies',
'revisions',
'security-group',
'standard-attr-description',
'subnet_allocation',
'tag',
'timestamp_core',
]

File diff suppressed because it is too large.

View File

@ -1,181 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import l3
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net as n_utils
from networking_ovn._i18n import _
from networking_ovn.common import constants
def ovn_name(id):
# The name of the OVN entry will be neutron-<UUID>.
# This is because the OVN application checks whether the name is a
# UUID; if it is, no matches will be found. Prefixing the UUID lets
# us keep using the Neutron UUID when updating, deleting, etc.
return 'neutron-%s' % id
def ovn_lrouter_port_name(id):
# The name of the OVN lrouter port entry will be lrp-<UUID>
# This is to distinguish it from the name of the connected lswitch
# patch port, which is named with the neutron port uuid, so that OVS
# patch ports are generated properly. The paired patch port names will be:
# - patch-lrp-<UUID>-to-<UUID>
# - patch-<UUID>-to-lrp-<UUID>
# lrp stands for Logical Router Port
return 'lrp-%s' % id
def ovn_provnet_port_name(network_id):
# The name of OVN lswitch provider network port entry will be
# provnet-<Network-UUID>. The port is created for network having
# provider:physical_network attribute.
return constants.OVN_PROVNET_PORT_NAME_PREFIX + '%s' % network_id
def ovn_vhu_sockpath(sock_dir, port_id):
# Build the socket path for a vhost-user virtio socket.
return os.path.join(
sock_dir,
# this parameter will become the virtio port name,
# so it should not exceed IFNAMSIZ(16).
(const.VHOST_USER_DEVICE_PREFIX + port_id)[:14])
def ovn_addrset_name(sg_id, ip_version):
# The name of the address set for the given security group id and ip
# version. The format is:
# as-<ip version>-<security group uuid>
# with all '-' replaced with '_'. This replacement is necessary
# because OVN doesn't support '-' in an address set name.
return ('as-%s-%s' % (ip_version, sg_id)).replace('-', '_')
def get_lsp_dhcp_opts(port, ip_version):
# Get dhcp options from Neutron port, for setting DHCP_Options row
# in OVN.
lsp_dhcp_disabled = False
lsp_dhcp_opts = {}
if port['device_owner'].startswith(const.DEVICE_OWNER_PREFIXES):
lsp_dhcp_disabled = True
else:
for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
if edo['ip_version'] != ip_version:
continue
if edo['opt_name'] == 'dhcp_disabled' and (
edo['opt_value'] in ['True', 'true']):
# OVN native DHCP is disabled on this port
lsp_dhcp_disabled = True
# Make sure the return value does not depend on the order and
# content of the extra DHCP options for the port.
lsp_dhcp_opts.clear()
break
if edo['opt_name'] not in (
constants.SUPPORTED_DHCP_OPTS[ip_version]):
continue
opt = edo['opt_name'].replace('-', '_')
lsp_dhcp_opts[opt] = edo['opt_value']
return (lsp_dhcp_disabled, lsp_dhcp_opts)
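# Editorial worked example (assuming 'compute:nova' is not covered by
# const.DEVICE_OWNER_PREFIXES, so DHCP stays enabled for the port):
#   port = {'device_owner': 'compute:nova',
#           'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'mtu',
#                                'opt_value': '1442'}]}
#   get_lsp_dhcp_opts(port, 4) -> (False, {'mtu': '1442'})
# An option named 'dhcp_disabled' with value 'true' would instead
# yield (True, {}).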
def is_lsp_trusted(port):
return n_utils.is_port_trusted(port) if port.get('device_owner') else False
def get_lsp_security_groups(port, skip_trusted_port=True):
# In other agents, such as OVS, skipping trusted ports is handled in
# the security groups RPC. We don't have that step, so we do it here.
return [] if (skip_trusted_port and is_lsp_trusted(port)
) else port.get('security_groups', [])
def is_snat_enabled(router):
return router.get(l3.EXTERNAL_GW_INFO, {}).get('enable_snat', True)
def validate_and_get_data_from_binding_profile(port):
if (constants.OVN_PORT_BINDING_PROFILE not in port or
not validators.is_attr_set(
port[constants.OVN_PORT_BINDING_PROFILE])):
return {}
param_set = {}
param_dict = {}
for param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS:
param_keys = param_set.keys()
for param_key in param_keys:
try:
param_dict[param_key] = (port[
constants.OVN_PORT_BINDING_PROFILE][param_key])
except KeyError:
pass
if len(param_dict) == 0:
continue
if len(param_dict) != len(param_keys):
msg = _('Invalid binding:profile. %s are all '
'required.') % param_keys
raise n_exc.InvalidInput(error_message=msg)
if (len(port[constants.OVN_PORT_BINDING_PROFILE]) != len(
param_keys)):
msg = _('Invalid binding:profile. Too many parameters.')
raise n_exc.InvalidInput(error_message=msg)
break
if not param_dict:
return {}
for param_key, param_type in param_set.items():
if param_type is None:
continue
param_value = param_dict[param_key]
if not isinstance(param_value, param_type):
msg = _('Invalid binding:profile. The %(key)s value %(value)s '
        'has an invalid type') % {'key': param_key,
                                  'value': param_value}
raise n_exc.InvalidInput(error_message=msg)
# Make sure we can successfully look up the port indicated by
# parent_name. Just let it raise the right exception if there is a
# problem.
if 'parent_name' in param_set:
plugin = directory.get_plugin()
plugin.get_port(n_context.get_admin_context(),
param_dict['parent_name'])
if 'tag' in param_set:
tag = int(param_dict['tag'])
if tag < 0 or tag > 4095:
msg = _('Invalid binding:profile. tag "%s" must be '
'an integer between 0 and 4095, inclusive') % tag
raise n_exc.InvalidInput(error_message=msg)
return param_dict
def is_dhcp_options_ignored(subnet):
# Don't insert DHCP_Options entry for v6 subnet with 'SLAAC' as
# 'ipv6_address_mode', since DHCPv6 shouldn't work for this mode.
return (subnet['ip_version'] == const.IP_VERSION_6 and
subnet.get('ipv6_address_mode') == const.IPV6_SLAAC)
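# --- Editorial worked example, not part of the retired file ----------------
# The naming helpers above are pure string formatting:
def _example_ovn_names():
    assert ovn_name('UUID') == 'neutron-UUID'
    assert ovn_lrouter_port_name('UUID') == 'lrp-UUID'
    # '-' becomes '_' because OVN address set names may not contain '-':
    assert ovn_addrset_name('a-b', 'ip4') == 'as_ip4_a_b'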

View File

@ -1,137 +0,0 @@
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from neutron._i18n import _
from neutron_lib.utils import host
from oslo_config import cfg
DEDUCE_MODE = 'deduce'
USER_MODE = 'user'
GROUP_MODE = 'group'
ALL_MODE = 'all'
SOCKET_MODES = (DEDUCE_MODE, USER_MODE, GROUP_MODE, ALL_MODE)
SHARED_OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket.')),
cfg.StrOpt('metadata_proxy_user',
default='',
help=_("User (uid or name) running metadata proxy after "
"its initialization (if empty: agent effective "
"user).")),
cfg.StrOpt('metadata_proxy_group',
default='',
help=_("Group (gid or name) running metadata proxy after "
"its initialization (if empty: agent effective "
"group).")),
cfg.StrOpt('ovs_integration_bridge',
default='br-int',
help=_('Name of Open vSwitch bridge to use'))
]
METADATA_PROXY_HANDLER_OPTS = [
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file for ssl")),
cfg.HostAddressOpt('nova_metadata_host',
default='127.0.0.1',
deprecated_name='nova_metadata_ip',
help=_("IP address or DNS name of Nova metadata "
"server.")),
cfg.PortOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('When proxying metadata requests, Neutron signs the '
'Instance-ID header with a shared secret to prevent '
'spoofing. You may select any string for a secret, '
'but it must match here and in the configuration used '
'by the Nova Metadata Server. NOTE: Nova uses the same '
'config key, but in [neutron] section.'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
UNIX_DOMAIN_METADATA_PROXY_OPTS = [
cfg.StrOpt('metadata_proxy_socket_mode',
default=DEDUCE_MODE,
choices=SOCKET_MODES,
help=_("Metadata Proxy UNIX domain socket mode, 4 values "
"allowed: "
"'deduce': deduce mode from metadata_proxy_user/group "
"values, "
"'user': set metadata proxy socket mode to 0o644, to "
"use when metadata_proxy_user is agent effective user "
"or root, "
"'group': set metadata proxy socket mode to 0o664, to "
"use when metadata_proxy_group is agent effective "
"group or root, "
"'all': set metadata proxy socket mode to 0o666, to use "
"otherwise.")),
cfg.IntOpt('metadata_workers',
default=host.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server (defaults to half of the number of CPUs)')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
OVS_OPTS = [
cfg.StrOpt('ovsdb_connection',
default='unix:/usr/local/var/run/openvswitch/db.sock',
help=_('The connection string for the native OVSDB backend.\n'
'Use tcp:IP:PORT for TCP connection.\n'
'Use unix:FILE for unix domain socket connection.')),
cfg.IntOpt('ovsdb_connection_timeout',
default=180,
help=_('Timeout in seconds for the OVSDB '
'connection transaction'))
]
def register_meta_conf_opts(opts, cfg=cfg.CONF, group=None):
cfg.register_opts(opts, group=group)
def list_metadata_agent_opts():
return [
('DEFAULT',
itertools.chain(
SHARED_OPTS,
METADATA_PROXY_HANDLER_OPTS,
UNIX_DOMAIN_METADATA_PROXY_OPTS)
),
('ovs', OVS_OPTS)
]
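# --- Editorial sketch, not part of the retired file ------------------------
# How an agent entry point might consume these helpers; the 'ovs' group name
# mirrors list_metadata_agent_opts() above:
def _example_register_opts():
    register_meta_conf_opts(SHARED_OPTS)
    register_meta_conf_opts(OVS_OPTS, group='ovs')
    return cfg.CONF.ovs.ovsdb_connection  # the default unix socket path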

View File

@ -1,141 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from neutron.db import api as db_api
from oslo_db import api as oslo_db_api
from sqlalchemy import asc
from sqlalchemy import func
from networking_ovn.db import models
from networking_ovn.journal import constants as journal_const
#
# Journal functions
#
# Retry deadlock exception for Galera DB.
# If two (or more) different threads call this method at the same time, they
# might both succeed in changing the same row to processing, but at least
# one of them will get a deadlock from Galera and will have to retry.
@db_api.retry_db_errors
def get_oldest_pending_db_row_with_lock(session):
with session.begin():
row = session.query(models.OVNJournal).filter_by(
state=journal_const.PENDING).order_by(
asc(models.OVNJournal.last_retried)).with_for_update(
).first()
if row:
update_db_row_state(session, row, journal_const.PROCESSING)
return row
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
def update_db_row_state(session, row, state):
row.state = state
session.merge(row)
session.flush()
def update_pending_db_row_retry(session, row, retry_count):
if row.retry_count >= retry_count:
update_db_row_state(session, row, journal_const.FAILED)
else:
row.retry_count += 1
update_db_row_state(session, row, journal_const.PENDING)
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
def create_pending_row(session, object_type, object_uuid,
operation, data):
row = models.OVNJournal(object_type=object_type,
object_uuid=object_uuid,
operation=operation, data=data,
created_at=func.now(),
state=journal_const.PENDING)
session.add(row)
session.flush()
#
# Journal maintenance functions
#
@db_api.retry_db_errors
def _update_maintenance_state(session, expected_state, state):
with session.begin():
row = session.query(models.OVNMaintenance).filter_by(
state=expected_state).with_for_update().one_or_none()
if row is None:
return False
row.state = state
return True
def lock_maintenance(session):
return _update_maintenance_state(session, journal_const.PENDING,
journal_const.PROCESSING)
def unlock_maintenance(session):
return _update_maintenance_state(session, journal_const.PROCESSING,
journal_const.PENDING)
def update_maintenance_operation(session, operation=None):
"""Update the current maintenance operation details.
The function assumes the lock is held, so it mustn't be run outside
of a locked context.
"""
op_text = None
if operation:
op_text = operation.__name__
with session.begin():
row = session.query(models.OVNMaintenance).one_or_none()
row.processing_operation = op_text
#
# Journal clean up functions
#
def delete_rows_by_state_and_time(session, state, time_delta):
with session.begin():
now = session.execute(func.now()).scalar()
session.query(models.OVNJournal).filter(
models.OVNJournal.state == state,
models.OVNJournal.last_retried < now - time_delta).delete(
synchronize_session=False)
session.expire_all()
def reset_processing_rows(session, max_timedelta):
with session.begin():
now = session.execute(func.now()).scalar()
max_timedelta = datetime.timedelta(seconds=max_timedelta)
rows = session.query(models.OVNJournal).filter(
models.OVNJournal.last_retried < now - max_timedelta,
models.OVNJournal.state == journal_const.PROCESSING,
).update({'state': journal_const.PENDING})
return rows
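# --- Editorial sketch, not part of the retired file ------------------------
# The journal row lifecycle implied by the functions above; assumes the
# Neutron of the era exposes db_api.get_writer_session():
def _example_journal_lifecycle():
    session = db_api.get_writer_session()
    create_pending_row(session, 'port', 'PORT_UUID', 'create', {'name': 'p1'})
    row = get_oldest_pending_db_row_with_lock(session)  # PENDING -> PROCESSING
    if row is not None:
        # On failure, retry (back to PENDING) until retry_count is exceeded,
        # after which the row is marked FAILED:
        update_pending_db_row_retry(session, row, retry_count=5)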

View File

@ -1,22 +0,0 @@
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_ovn.db import models # noqa
from neutron.db.migration.models import head
def get_metadata():
return head.model_base.BASEV2.metadata
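# Editorial note: alembic's migration environment would typically consume
# this as
#     target_metadata = get_metadata()
# so autogenerate can diff the models against the live schema.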

View File

@ -1 +0,0 @@
This directory contains the migration scripts for the networking_ovn project.

Some files were not shown because too many files have changed in this diff.