
VMware: fix gitreview

Point .gitreview at the correct repository

Change-Id: I1deed42fb003f06bc97634e3908c6d82c8620e85
Gerrit ref: changes/90/143290/8
Gary Kotton, 7 years ago. Parent commit: 3a96a43c53
Changed files, with lines changed per file:

  1. .gitreview (2)
  2. .testr.conf (2)
  3. MANIFEST.in (6)
  4. doc/source/conf.py (94)
  5. doc/source/contributing.rst (1)
  6. doc/source/history.rst (1)
  7. doc/source/index.rst (28)
  8. doc/source/installation.rst (12)
  9. doc/source/readme.rst (1)
  10. doc/source/usage.rst (7)
  11. etc/api-paste.ini (30)
  12. etc/dhcp_agent.ini (91)
  13. etc/init.d/neutron-server (68)
  14. etc/l3_agent.ini (102)
  15. etc/metadata_agent.ini (59)
  16. etc/metering_agent.ini (18)
  17. etc/neutron.conf (656)
  18. etc/neutron/plugins/vmware/nsx.ini (203)
  19. etc/neutron/rootwrap.d/cisco-apic.filters (16)
  20. etc/neutron/rootwrap.d/debug.filters (14)
  21. etc/neutron/rootwrap.d/dhcp.filters (35)
  22. etc/neutron/rootwrap.d/ipset-firewall.filters (12)
  23. etc/neutron/rootwrap.d/iptables-firewall.filters (21)
  24. etc/neutron/rootwrap.d/l3.filters (48)
  25. etc/neutron/rootwrap.d/lbaas-haproxy.filters (26)
  26. etc/neutron/rootwrap.d/linuxbridge-plugin.filters (19)
  27. etc/neutron/rootwrap.d/nec-plugin.filters (12)
  28. etc/neutron/rootwrap.d/ofagent.filters (16)
  29. etc/neutron/rootwrap.d/openvswitch-plugin.filters (22)
  30. etc/neutron/rootwrap.d/vpnaas.filters (13)
  31. etc/policy.json (139)
  32. etc/rootwrap.conf (34)
  33. etc/services.conf (43)
  34. run_tests.sh (4)
  35. setup.cfg (205)
  36. test-requirements.txt (3)
  37. tools/i18n_cfg.py (4)
  38. tools/pretty_tox.sh (2)
  39. tox.ini (26)
  40. vmware-nsx/neutron/plugins/vmware/plugins/service.py (1833)
  41. vmware-nsx/neutron/plugins/vmware/vshield/vcns_driver.py (49)
  42. vmware-nsx/neutron/tests/unit/vmware/vshield/__init__.py (0)
  43. vmware-nsx/neutron/tests/unit/vmware/vshield/test_edge_router.py (293)
  44. vmware-nsx/neutron/tests/unit/vmware/vshield/test_firewall_driver.py (375)
  45. vmware-nsx/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py (682)
  46. vmware-nsx/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py (517)
  47. vmware-nsx/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py (338)
  48. vmware-nsx/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py (394)
  49. vmware_nsx/__init__.py (1)
  50. vmware_nsx/neutron/__init__.py (0)
  51. vmware_nsx/neutron/plugins/__init__.py (0)
  52. vmware_nsx/neutron/plugins/vmware/__init__.py (0)
  53. vmware_nsx/neutron/plugins/vmware/api_client/__init__.py (0)
  54. vmware_nsx/neutron/plugins/vmware/api_client/base.py (0)
  55. vmware_nsx/neutron/plugins/vmware/api_client/client.py (0)
  56. vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py (0)
  57. vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py (0)
  58. vmware_nsx/neutron/plugins/vmware/api_client/exception.py (0)
  59. vmware_nsx/neutron/plugins/vmware/api_client/request.py (0)
  60. vmware_nsx/neutron/plugins/vmware/api_client/version.py (0)
  61. vmware_nsx/neutron/plugins/vmware/check_nsx_config.py (0)
  62. vmware_nsx/neutron/plugins/vmware/common/__init__.py (0)
  63. vmware_nsx/neutron/plugins/vmware/common/config.py (0)
  64. vmware_nsx/neutron/plugins/vmware/common/exceptions.py (0)
  65. vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py (0)
  66. vmware_nsx/neutron/plugins/vmware/common/securitygroups.py (0)
  67. vmware_nsx/neutron/plugins/vmware/common/sync.py (0)
  68. vmware_nsx/neutron/plugins/vmware/common/utils.py (0)
  69. vmware_nsx/neutron/plugins/vmware/dbexts/__init__.py (0)
  70. vmware_nsx/neutron/plugins/vmware/dbexts/db.py (0)
  71. vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py (0)
  72. vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py (0)
  73. vmware_nsx/neutron/plugins/vmware/dbexts/models.py (0)
  74. vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py (0)
  75. vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py (0)
  76. vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py (2)
  77. vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py (0)
  78. vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py (0)
  79. vmware_nsx/neutron/plugins/vmware/dhcp_meta/__init__.py (0)
  80. vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py (0)
  81. vmware_nsx/neutron/plugins/vmware/dhcp_meta/constants.py (0)
  82. vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py (0)
  83. vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py (0)
  84. vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py (0)
  85. vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py (0)
  86. vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py (0)
  87. vmware_nsx/neutron/plugins/vmware/extensions/__init__.py (0)
  88. vmware_nsx/neutron/plugins/vmware/extensions/lsn.py (0)
  89. vmware_nsx/neutron/plugins/vmware/extensions/maclearning.py (0)
  90. vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py (0)
  91. vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py (0)
  92. vmware_nsx/neutron/plugins/vmware/extensions/qos.py (0)
  93. vmware_nsx/neutron/plugins/vmware/extensions/servicerouter.py (0)
  94. vmware_nsx/neutron/plugins/vmware/nsx_cluster.py (0)
  95. vmware_nsx/neutron/plugins/vmware/nsxlib/__init__.py (0)
  96. vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py (0)
  97. vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py (0)
  98. vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py (0)
  99. vmware_nsx/neutron/plugins/vmware/nsxlib/router.py (0)
  100. vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py (0)

.gitreview (2)

@@ -1,4 +1,4 @@
 [gerrit]
 host=review.openstack.org
 port=29418
-project=openstack/neutron.git
+project=stackforge/vmware-nsx.git
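
For illustration only, a minimal sketch (Python 3 stdlib; git-review does its
own config handling) of reading the updated value back from the INI layout
above, run from the repository root:

    # Hedged sketch: .gitreview is plain INI, so configparser can read it.
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('.gitreview')
    print(cfg['gerrit']['project'])  # -> stackforge/vmware-nsx.git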

.testr.conf (2)

@@ -1,4 +1,4 @@
 [DEFAULT]
-test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./vmware_nsx/neutron/tests/unit} $LISTOPT $IDOPTION
 test_id_option=--load-list $IDFILE
 test_list_option=--list
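
The only functional change above is the default discovery path. A minimal
sketch of the same discovery using stdlib unittest in place of subunit.run
(which accepts the same discover arguments), assuming it runs from the
repository root:

    import unittest

    loader = unittest.TestLoader()
    # '-t ./' corresponds to top_level_dir; the start dir mirrors the new
    # OS_TEST_PATH default.
    suite = loader.discover(start_dir='vmware_nsx/neutron/tests/unit',
                            top_level_dir='.')
    print(suite.countTestCases())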

MANIFEST.in (6)

@@ -2,11 +2,7 @@ include AUTHORS
 include README.rst
 include ChangeLog
 include LICENSE
-include neutron/db/migration/README
-include neutron/db/migration/alembic.ini
-include neutron/db/migration/alembic_migrations/script.py.mako
-include neutron/db/migration/alembic_migrations/versions/README
-recursive-include neutron/locale *
+recursive-include vmware_nsx/neutron/locale *
 exclude .gitignore
 exclude .gitreview

doc/source/conf.py (94)

@@ -0,0 +1,94 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import fileinput
import fnmatch

sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# A list of glob-style patterns that should be excluded when looking for source
# files.
exclude_patterns = [
    'api/tests.*',         # avoid docs generation from tests
    'api/oslo.vmware._*',  # skip private modules
]

# Prune the excluded patterns from the autoindex
PATH = 'api/autoindex.rst'
if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
    for line in fileinput.input(PATH, inplace=True):
        found = False
        for pattern in exclude_patterns:
            if fnmatch.fnmatch(line, '*' + pattern[4:]):
                found = True
        if not found:
            print line,

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'oslo.vmware'
copyright = u'2014, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
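
The pruning loop above matches each autoindex line against the exclude
pattern with its leading 'api/' stripped (the pattern[4:] slice). A
standalone check of that matching, with a hypothetical autoindex entry:

    import fnmatch

    exclude_patterns = ['api/tests.*', 'api/oslo.vmware._*']
    line = '   api/oslo.vmware._private_module\n'  # hypothetical entry
    matched = any(fnmatch.fnmatch(line, '*' + p[4:])
                  for p in exclude_patterns)
    print(matched)  # True -> this line would be dropped from the autoindex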

doc/source/contributing.rst (1)

@@ -0,0 +1 @@
.. include:: ../../CONTRIBUTING.rst

doc/source/history.rst (1)

@@ -0,0 +1 @@
.. include:: ../../ChangeLog

doc/source/index.rst (28)

@@ -0,0 +1,28 @@
Welcome to oslo.vmware's documentation!
=======================================
Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing
   history

Code Documentation
==================

.. toctree::
   :maxdepth: 1

   api/autoindex

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

doc/source/installation.rst (12)

@@ -0,0 +1,12 @@
============
Installation
============
At the command line::

    $ pip install

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv
    $ pip install

doc/source/readme.rst (1)

@@ -0,0 +1 @@
.. include:: ../README.rst

doc/source/usage.rst (7)

@@ -0,0 +1,7 @@
========
Usage
========
To use in a project::

    import vmware

etc/api-paste.ini (30)

@@ -0,0 +1,30 @@
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = oslo.middleware:RequestId.factory
[filter:catch_errors]
paste.filter_factory = oslo.middleware:CatchErrors.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory
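
The [composite:neutron] section above maps '/' to the versions app and
'/v2.0' to the middleware pipeline. A minimal sketch (assuming PasteDeploy
and the referenced neutron/keystonemiddleware packages are importable) of
loading that composite app by name:

    from paste.deploy import loadapp

    # Returns a WSGI callable with the request_id/catch_errors/authtoken
    # filters wired in front of the v2.0 API router.
    app = loadapp('config:/etc/neutron/api-paste.ini', name='neutron')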

etc/dhcp_agent.ini (91)

@@ -0,0 +1,91 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
# enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
# dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
# dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Use broadcast in DHCP replies
# dhcp_broadcast_reply = False
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
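
Every commented line above documents a default; uncommenting it overrides
the registered option. A minimal sketch (assuming oslo.config is installed;
the option subset below is illustrative, not the agent's full list) of how
such a file is consumed:

    from oslo_config import cfg

    opts = [
        cfg.IntOpt('resync_interval', default=5),
        cfg.StrOpt('dhcp_driver',
                   default='neutron.agent.linux.dhcp.Dnsmasq'),
        cfg.BoolOpt('use_namespaces', default=True),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)
    conf(['--config-file', '/etc/neutron/dhcp_agent.ini'])
    print(conf.resync_interval)  # 5 unless overridden in the file

The same pattern applies to l3_agent.ini, metadata_agent.ini, and
metering_agent.ini below.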

etc/init.d/neutron-server (68)

@@ -0,0 +1,68 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: neutron-server
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: neutron-server
# Description: Provides the Neutron networking service
### END INIT INFO
set -e
PIDFILE=/var/run/neutron/neutron-server.pid
LOGFILE=/var/log/neutron/neutron-server.log
DAEMON=/usr/bin/neutron-server
DAEMON_ARGS="--log-file=$LOGFILE"
DAEMON_DIR=/var/run
ENABLED=true
if test -f /etc/default/neutron-server; then
    . /etc/default/neutron-server
fi

mkdir -p /var/run/neutron
mkdir -p /var/log/neutron

. /lib/lsb/init-functions

export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
export TMPDIR=/var/lib/neutron/tmp

if [ ! -x ${DAEMON} ] ; then
    exit 0
fi

case "$1" in
    start)
        test "$ENABLED" = "true" || exit 0
        log_daemon_msg "Starting neutron server" "neutron-server"
        start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS
        log_end_msg $?
        ;;
    stop)
        test "$ENABLED" = "true" || exit 0
        log_daemon_msg "Stopping neutron server" "neutron-server"
        start-stop-daemon --stop --oknodo --pidfile ${PIDFILE}
        log_end_msg $?
        ;;
    restart|force-reload)
        test "$ENABLED" = "true" || exit 1
        $0 stop
        sleep 1
        $0 start
        ;;
    status)
        test "$ENABLED" = "true" || exit 0
        status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $?
        ;;
    *)
        log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}"
        exit 1
        ;;
esac

exit 0

etc/l3_agent.ini (102)

@@ -0,0 +1,102 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# If use_namespaces is set to False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow the L3 agent to support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
# handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to an
# empty value for the Linux bridge. When this parameter is set, each L3 agent
# can be associated with no more than one external network.
# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
# send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
# periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
# periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
# The working mode for the agent. Allowed values are:
# - legacy: this preserves the existing behavior where the L3 agent is
# deployed on a centralized networking node to provide L3 services
# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
# - dvr: this mode enables DVR functionality, and must be used for an L3
# agent that runs on a compute host.
# - dvr_snat: this enables centralized SNAT support in conjunction with
# DVR. This mode must be used for an L3 agent running on a centralized
# node (or in single-host deployments, e.g. devstack).
# agent_mode = legacy
# Location to store keepalived and all HA configurations
# ha_confs_path = $state_path/ha_confs
# VRRP authentication type AH/PASS
# ha_vrrp_auth_type = PASS
# VRRP authentication password
# ha_vrrp_auth_password =
# The advertisement interval in seconds
# ha_vrrp_advert_int = 2
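
The three agent modes described above form a closed set, which can be
expressed as a constrained option. A minimal sketch (assuming oslo.config;
this is not the agent's actual option definition):

    from oslo_config import cfg

    agent_mode_opt = cfg.StrOpt('agent_mode', default='legacy',
                                choices=['legacy', 'dvr', 'dvr_snat'],
                                help='Working mode for the L3 agent.')
    conf = cfg.ConfigOpts()
    conf.register_opts([agent_mode_opt])
    conf([])                  # no config file: the default applies
    print(conf.agent_mode)    # legacy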

etc/metadata_agent.ini (59)

@@ -0,0 +1,59 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://localhost:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
# nova_metadata_ip = 127.0.0.1
# TCP Port used by Nova metadata server
# nova_metadata_port = 8775
# Which protocol to use for requests to Nova metadata server, http or https
# nova_metadata_protocol = http
# Whether insecure SSL connection should be accepted for Nova metadata server
# requests
# nova_metadata_insecure = False
# Client certificate for nova api, needed when nova api requires client
# certificates
# nova_client_cert =
# Private key for nova client certificate
# nova_client_priv_key =
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
# metadata_proxy_shared_secret =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server. Defaults to
# half the number of CPU cores
# metadata_workers =
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 4096
# URL to connect to the cache backend.
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5
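
The shared secret above keys an HMAC over the instance ID, which the proxy
sends alongside metadata requests. A minimal sketch of that signing (the
secret and instance UUID below are hypothetical; HMAC-SHA256 and the
X-Instance-ID-Signature header name are assumptions about the proxy's
behavior, not taken from this commit):

    import hashlib
    import hmac

    secret = b'%METADATA_SECRET%'  # metadata_proxy_shared_secret (hypothetical)
    instance_id = b'8e040b62-5c58-4a63-8734-0d8a3f68c0a4'  # hypothetical UUID
    signature = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
    print(signature)  # verified against the Nova side's shared secret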

etc/metering_agent.ini (18)

@@ -0,0 +1,18 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# Default driver:
# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
# Example of non-default driver
# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
# Interval between two metering measures
# measure_interval = 30
# Interval between two metering reports
# report_interval = 300
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# use_namespaces = True

etc/neutron.conf (656)

@@ -0,0 +1,656 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
# verbose = False
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
# core_plugin =
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
# force_gateway_on_subnet = True
# Default maximum number of items returned in a single response. The value
# 'infinite' or a negative integer means no limit; otherwise the value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, the server will return only pagination_max_limit
# items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Allow automatic rescheduling of routers from dead L3 agents with
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
# Enable high availability for virtual routers.
# l3_ha = False
#
# Maximum number of l3 agents which a HA router will be scheduled on. If it
# is set to 0 the router will be scheduled on every agent.
# max_l3_agents_per_router = 3
#
# Minimum number of l3 agents which a HA router will be scheduled on. The
# default value is 2.
# min_l3_agents_per_router = 2
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
# =========== end of items for l3 extension =======
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# The name of the admin nova tenant. If the uuid of the admin nova tenant
# is set, this is optional. Useful for cases where the uuid of the admin
# nova tenant is not available when configuration is being done.
# nova_admin_tenant_name =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
#
# Options defined in oslo.messaging
#
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
#amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
#rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
#qpid_hostname=localhost
# Qpid broker port. (integer value)
#qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
#qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
#qpid_username=
# Password for Qpid connection. (string value)
#qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
#qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
#qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
#qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
#qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
#qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
#kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
#kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
#kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
#kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
#kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
#rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
#rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
#rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
#rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
#rabbit_userid=guest
# The RabbitMQ password. (string value)
#rabbit_password=guest
# the RabbitMQ login method (string value)
#rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
#rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
#rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
#rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
#rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
#rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
#fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
#rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
#rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
#rpc_zmq_topic_backlog=<None>
# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
#rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
#rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
#matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
#matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
#rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
#notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
#notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
#transport_url=<None>
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
#rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
#control_exchange=openstack
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
#host=127.0.0.1
# Use this port to connect to redis host. (integer value)
#port=6379
# Password for Redis server (optional). (string value)
#password=<None>
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
#ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end a member is a resource consumer,
# which is why a quota is still possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end a health monitor is a
# resource consumer, which is why a quota is still possible.
# quota_health_monitor = -1
# Number of loadbalancers allowed per tenant. A negative value means unlimited.
# quota_loadbalancer = 10
# Number of listeners allowed per tenant. A negative value means unlimited.
# quota_listener = -1
# Number of v2 health monitors allowed per tenant. A negative value means
# unlimited. These health monitors exist under the lbaas v2 API
# quota_healthmonitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
# root_helper = sudo
# Set to true to add comments to generated iptables rules that describe
# each rule's purpose. (System must support the iptables comments module.)
# comment_iptables_rules = True
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron_lbaas.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron_vpnaas.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron_lbaas.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
# service_provider = LOADBALANCER:A10Networks:neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
# service_provider = LOADBALANCERV2:LoggingNoop:neutron_lbaas.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
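The service_provider lines above follow the documented
<service_type>:<name>:<driver>[:default] shape. A standalone parsing sketch
(not Neutron's actual provider loader; it relies on driver paths containing
no colons):

    def parse_service_provider(value):
        parts = value.split(':')
        is_default = parts[-1] == 'default'
        if is_default:
            parts = parts[:-1]
        service_type, name, driver = parts
        return service_type, name, driver, is_default

    print(parse_service_provider(
        'LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.'
        'haproxy.plugin_driver.HaproxyOnHostPluginDriver:default'))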

etc/neutron/plugins/vmware/nsx.ini (203)

@@ -0,0 +1,203 @@
[DEFAULT]
# User name for NSX controller
# nsx_user = admin
# Password for NSX controller
# nsx_password = admin
# Time before aborting a request on an unresponsive controller (Seconds)
# http_timeout = 75
# Maximum number of times a particular request should be retried
# retries = 2
# Maximum number of times a redirect response should be followed
# redirects = 2
# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =
# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified if tenants should be offered a predefined gateway for
# connecting their networks.
# default_l2_gw_service_uuid =
# (Optional) UUID for the default service cluster. A service cluster is introduced to
# represent a group of gateways and it is needed in order to use Logical Services like
# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
# default_service_cluster_uuid =
# Name of the default interface name to be used on network-gateway. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified
# default_interface_name = breth0
# Reconnect connection to nsx if not used within this amount of time.
# conn_idle_timeout = 900
[quotas]
# number of network gateways allowed per tenant, -1 means unlimited
# quota_network_gateway = 5
[vcns]
# URL for VCNS manager
# manager_uri = https://management_ip
# User name for VCNS manager
# user = admin
# Password for VCNS manager
# password = default
# (Optional) Datacenter ID for Edge deployment
# datacenter_moid =
# (Optional) Deployment Container ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =
# (Optional) Resource pool ID for NSX Edge deployment
# resource_pool_id =
# (Optional) Datastore ID for NSX Edge deployment
# datastore_id =
# (Required) UUID of logic switch for physical network connectivity
# external_network =
# (Optional) Asynchronous task status check interval
# default is 2000 (millisecond)
# task_status_check_interval = 2000
[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000
# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256
# Number of connections to each controller node.
# default is 10
# concurrent_connections = 10
# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
# nsx_gen_timeout = -1
# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces otherwise access_network should be used.
# metadata_mode = access_network
# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt
# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode going forward. With this mode,
# existing networks keep being served by the existing infrastructure (thus
# preserving backward compatibility), whereas new networks will be served by the
# new infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent
# Specifies which mode packet replication should be done in. If set to service
# a service node is required in order to perform packet replication. This can
# also be set to source if one wants replication to be performed locally (NOTE:
# usually only useful for testing if one does not want to deploy a service node).
# In order to leverage distributed routers, replication_mode should be set to
# "service".
# replication_mode = service
[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should be therefore large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous is completed.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 10
# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0
# Minimum delay, in seconds, between two status synchronization requests for NSX.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might be quite
# increased by them. This parameter allows specifying a minimum
# interval between two subsequent requests.
# The value for this parameter must never exceed state_sync_interval.
# If this does, an error will be raised at startup.
# min_sync_req_delay = 1
# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay
# min_chunk_size = 500
# Enable this option to allow punctual state synchronization on show
# operations. In this way, show operations will always fetch the operational
# status of the resource from the NSX backend, and this might have
# a considerable impact on overall performance.
# always_read_status = False
[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful to rebuild the local store in case of
# server recovery
# sync_on_missing_data = False
[nsx_dhcp]
# (Optional) Comma separated list of additional dns servers. Default is an empty list
# extra_domain_name_servers =
# Domain to use for building the hostnames
# domain_name = openstacklocal
# Default DHCP lease time
# default_lease_time = 43200
[nsx_metadata]
# IP address used by Metadata server
# metadata_server_address = 127.0.0.1
# TCP Port used by Metadata server
# metadata_server_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match with the configuration used by the Metadata server
# metadata_shared_secret =
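
The [nsx_sync] section above states that min_sync_req_delay must never
exceed state_sync_interval, or an error is raised at startup. A standalone
sketch of that constraint (illustrative only, not the plugin's actual
validation code):

    def validate_sync_options(state_sync_interval, min_sync_req_delay):
        if state_sync_interval and min_sync_req_delay > state_sync_interval:
            raise ValueError('min_sync_req_delay (%d) must not exceed '
                             'state_sync_interval (%d)'
                             % (min_sync_req_delay, state_sync_interval))

    validate_sync_options(10, 1)     # the defaults above pass
    # validate_sync_options(10, 30)  # would raise at startup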

etc/neutron/rootwrap.d/cisco-apic.filters (16)

@@ -0,0 +1,16 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# cisco-apic filters
lldpctl: CommandFilter, lldpctl, root
# ip_lib filters
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
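
The "format seems to be" note above can be made concrete. A standalone
sketch of the line layout (oslo.rootwrap performs the real parsing; this
only illustrates the field order):

    def parse_filter(line):
        name, _, rest = line.partition(':')
        fields = [f.strip() for f in rest.split(',')]
        filter_name, command, user = fields[0], fields[1], fields[2]
        args = fields[3:]  # optional trailing arguments
        return name.strip(), filter_name, command, user, args

    print(parse_filter('ip_exec: IpNetnsExecFilter, ip, root'))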

etc/neutron/rootwrap.d/debug.filters (14)

@@ -0,0 +1,14 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# This is needed because we should ping
# from inside a namespace which requires root
ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+

etc/neutron/rootwrap.d/dhcp.filters (35)

@@ -0,0 +1,35 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# dhcp-agent
dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID=
# dhcp-agent uses kill as well, that's handled by the generic KillFilter
# it looks like these are the only signals needed, per
# neutron/agent/linux/dhcp.py
kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP
kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
ivs-ctl: CommandFilter, ivs-ctl, root
mm-ctl: CommandFilter, mm-ctl, root
dhcp_release: CommandFilter, dhcp_release, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
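
The kill_dnsmasq entries restrict both the signal and the target executable. Conceptually (and only conceptually; oslo.rootwrap's KillFilter handles more cases), the check looks like this on Linux, where /proc/<pid>/exe reveals the running binary:

    import os

    ALLOWED_SIGNALS = {'-9', '-HUP'}
    ALLOWED_EXES = {'/sbin/dnsmasq', '/usr/sbin/dnsmasq'}

    def kill_allowed(pid, signal_flag):
        # Resolve the target's executable via /proc; refuse if the process
        # is gone or is not one of the expected dnsmasq binaries.
        try:
            exe = os.readlink('/proc/%d/exe' % pid)
        except OSError:
            return False
        return signal_flag in ALLOWED_SIGNALS and exe in ALLOWED_EXES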

12
etc/neutron/rootwrap.d/ipset-firewall.filters

@@ -0,0 +1,12 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_firewall.py
# "ipset", "-A", ...
ipset: CommandFilter, ipset, root

21
etc/neutron/rootwrap.d/iptables-firewall.filters

@@ -0,0 +1,21 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_manager.py
# "iptables-save", ...
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# neutron/agent/linux/iptables_manager.py
# "iptables", "-A", ...
iptables: CommandFilter, iptables, root
ip6tables: CommandFilter, ip6tables, root
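
iptables-save is filtered together with iptables-restore because the agent rewrites rules in bulk rather than issuing one iptables call per rule. A rough sketch of that save/modify/restore round trip, assuming root privileges (which is exactly what rootwrap grants); neutron's real iptables_manager is far more careful about chains, counters, and locking:

    import subprocess

    def add_rule_atomically(rule_line):
        # Dump the current ruleset, splice one rule in before the first
        # COMMIT, and load the whole table back in a single restore.
        current = subprocess.check_output(['iptables-save']).decode()
        updated = current.replace('COMMIT', rule_line + '\nCOMMIT', 1)
        subprocess.run(['iptables-restore'], input=updated.encode(),
                       check=True)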

48
etc/neutron/rootwrap.d/l3.filters

@@ -0,0 +1,48 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# arping
arping: CommandFilter, arping, root
# l3_agent
sysctl: CommandFilter, sysctl, root
route: CommandFilter, route, root
radvd: CommandFilter, radvd, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the binary will live in
# /usr/local/bin instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
# ovs_lib (if OVSInterfaceDriver is used)
ovs-vsctl: CommandFilter, ovs-vsctl, root
# iptables_manager
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# Keepalived
keepalived: CommandFilter, keepalived, root
kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
# used by the l3 agent to delete a floating IP's conntrack state
conntrack: CommandFilter, conntrack, root
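
A note on ip_exec: IpNetnsExecFilter does not blanket-allow everything under "ip netns exec". As far as I understand the filter, the wrapped command must itself be accepted by another filter in these files; a simplified sketch of that chaining:

    def netns_exec_allowed(argv, inner_command_allowed):
        # Only "ip netns exec <namespace> <command...>" is considered, and
        # the wrapped <command...> must pass some other filter, abstracted
        # here as the inner_command_allowed predicate.
        if argv[:3] != ['ip', 'netns', 'exec'] or len(argv) < 5:
            return False
        return inner_command_allowed(argv[4:])  # argv[3] is the namespace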

26
etc/neutron/rootwrap.d/lbaas-haproxy.filters

@@ -0,0 +1,26 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# haproxy
haproxy: CommandFilter, haproxy, root
# lbaas-agent uses kill as well; that's handled by the generic KillFilter
kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
mm-ctl: CommandFilter, mm-ctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
route: CommandFilter, route, root
# arping
arping: CommandFilter, arping, root

19
etc/neutron/rootwrap.d/linuxbridge-plugin.filters

@@ -0,0 +1,19 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# linuxbridge-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
brctl: CommandFilter, brctl, root
bridge: CommandFilter, bridge, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

12
etc/neutron/rootwrap.d/nec-plugin.filters

@@ -0,0 +1,12 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# nec_neutron_agent
ovs-vsctl: CommandFilter, ovs-vsctl, root

16
etc/neutron/rootwrap.d/ofagent.filters

@@ -0,0 +1,16 @@
# neutron-rootwrap command filters for nodes on which
# neutron-ofagent-agent is expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# ovs_lib
ovs-vsctl: CommandFilter, ovs-vsctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

22
etc/neutron/rootwrap.d/openvswitch-plugin.filters

@@ -0,0 +1,22 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# openvswitch-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root
xe: CommandFilter, xe, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

13
etc/neutron/rootwrap.d/vpnaas.filters

@@ -0,0 +1,13 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
openswan: CommandFilter, ipsec, root

139
etc/policy.json

@@ -0,0 +1,139 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"create_port": "",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",