VMware: fix gitreview

Point .gitreview at the correct repo

Change-Id: I1deed42fb003f06bc97634e3908c6d82c8620e85
Gary Kotton 2014-12-20 23:20:48 -08:00
parent 68b46468b0
commit 3a96a43c53
184 changed files with 1916 additions and 4728 deletions

.gitreview

@@ -1,4 +1,4 @@
 [gerrit]
 host=review.openstack.org
 port=29418
-project=openstack/neutron.git
+project=stackforge/vmware-nsx.git
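For reference, git-review reads this file to build the Gerrit remote, so the project value above is what `git review` pushes to. A minimal sketch of that lookup, assuming Python 3's configparser (git-review's actual internals may differ):

    # Sketch: derive the Gerrit remote URL from .gitreview.
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('.gitreview')

    host = cfg.get('gerrit', 'host')        # review.openstack.org
    port = cfg.get('gerrit', 'port')        # 29418
    project = cfg.get('gerrit', 'project')  # stackforge/vmware-nsx.git

    print('ssh://%s:%s/%s' % (host, port, project))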

.testr.conf

@@ -1,4 +1,4 @@
 [DEFAULT]
-test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./vmware_nsx/neutron/tests/unit} $LISTOPT $IDOPTION
 test_id_option=--load-list $IDFILE
 test_list_option=--list
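The only change here is the discovery root handed to subunit.run. As a sketch, the stdlib equivalent of the new discovery step (assuming the tests live under vmware_nsx/neutron/tests/unit, as the new test_command states):

    # Sketch: unittest-level equivalent of the new test_command's discovery.
    import unittest

    loader = unittest.TestLoader()
    suite = loader.discover('vmware_nsx/neutron/tests/unit',
                            top_level_dir='./')
    print(suite.countTestCases())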

MANIFEST.in

@@ -2,11 +2,7 @@ include AUTHORS
 include README.rst
 include ChangeLog
 include LICENSE
-include neutron/db/migration/README
-include neutron/db/migration/alembic.ini
-include neutron/db/migration/alembic_migrations/script.py.mako
-include neutron/db/migration/alembic_migrations/versions/README
-recursive-include neutron/locale *
+recursive-include vmware_nsx/neutron/locale *
 exclude .gitignore
 exclude .gitreview
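One way to confirm the relocated locale tree actually lands in the source distribution is to list the sdist members; a stdlib sketch (assumes a tarball already built with `python setup.py sdist`; the filename is hypothetical):

    # Sketch: list sdist members under the new recursive-include path.
    # 'dist/vmware-nsx-0.1.0.tar.gz' is a hypothetical filename.
    import tarfile

    with tarfile.open('dist/vmware-nsx-0.1.0.tar.gz') as sdist:
        for name in sdist.getnames():
            if 'vmware_nsx/neutron/locale' in name:
                print(name)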

doc/source/conf.py Normal file (94 lines)

@@ -0,0 +1,94 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import fileinput
import fnmatch
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# A list of glob-style patterns that should be excluded when looking for source
# files.
exclude_patterns = [
    'api/tests.*',          # avoid generating docs from tests
    'api/oslo.vmware._*',   # skip private modules
]
# Prune the excluded patterns from the autoindex
PATH = 'api/autoindex.rst'
if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
    for line in fileinput.input(PATH, inplace=True):
        found = False
        for pattern in exclude_patterns:
            if fnmatch.fnmatch(line, '*' + pattern[4:]):
                found = True
        if not found:
            print line,
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'oslo.vmware'
copyright = u'2014, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
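The pruning loop above drops matching entries from api/autoindex.rst; note that pattern[4:] strips the leading 'api/' so the glob is applied to the module part of each toctree line. A small standalone check of that logic (the sample lines are made up):

    # Sketch: what the autoindex pruning above keeps and drops.
    import fnmatch

    exclude_patterns = ['api/tests.*', 'api/oslo.vmware._*']
    lines = ['   api/tests.test_foo.rst\n',
             '   api/oslo.vmware._private.rst\n',
             '   api/oslo.vmware.api.rst\n']

    for line in lines:
        dropped = any(fnmatch.fnmatch(line, '*' + p[4:])
                      for p in exclude_patterns)
        print('drop' if dropped else 'keep', line.strip())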

doc/source/contributing.rst Normal file (1 line)

@@ -0,0 +1 @@
.. include:: ../../CONTRIBUTING.rst

doc/source/history.rst Normal file (1 line)

@@ -0,0 +1 @@
.. include:: ../../ChangeLog

doc/source/index.rst Normal file (28 lines)

@@ -0,0 +1,28 @@
Welcome to oslo.vmware's documentation!
=======================================

Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing
   history

Code Documentation
==================

.. toctree::
   :maxdepth: 1

   api/autoindex

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

doc/source/installation.rst Normal file (12 lines)

@@ -0,0 +1,12 @@
============
Installation
============

At the command line::

    $ pip install

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv
    $ pip install

doc/source/readme.rst Normal file (1 line)

@@ -0,0 +1 @@
.. include:: ../README.rst

doc/source/usage.rst Normal file (7 lines)

@@ -0,0 +1,7 @@
========
Usage
========

To use in a project::

    import vmware

etc/api-paste.ini Normal file (30 lines)

@@ -0,0 +1,30 @@
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = oslo.middleware:RequestId.factory
[filter:catch_errors]
paste.filter_factory = oslo.middleware:CatchErrors.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory
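Each filter line above resolves to a paste.deploy factory. A minimal sketch of that factory protocol (the middleware body here is illustrative, not the oslo.middleware implementation):

    # Sketch of the paste.deploy filter_factory protocol used above.
    import uuid


    class RequestIdLikeMiddleware(object):
        # Illustrative stand-in for oslo.middleware's RequestId.
        def __init__(self, app):
            self.app = app

        def __call__(self, environ, start_response):
            # Tag every request with an id, then delegate to the wrapped app.
            environ['openstack.request_id'] = 'req-%s' % uuid.uuid4()
            return self.app(environ, start_response)


    def filter_factory(global_conf, **local_conf):
        def _filter(app):
            return RequestIdLikeMiddleware(app)
        return _filter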

etc/dhcp_agent.ini Normal file (91 lines)

@@ -0,0 +1,91 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
# enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
# dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
# dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Use broadcast in DHCP replies
# dhcp_broadcast_reply = False
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
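All of the commented values above are oslo.config options; a sketch of how an agent registers and reads two of them (option names taken from the file; the modern import path is oslo_config, while 2014-era code used `from oslo.config import cfg`):

    # Sketch: load a couple of the [DEFAULT] options above with oslo.config.
    from oslo_config import cfg

    OPTS = [
        cfg.IntOpt('resync_interval', default=5,
                   help='Seconds between DHCP agent state resyncs.'),
        cfg.BoolOpt('use_namespaces', default=True,
                    help='Allow overlapping IPs via network namespaces.'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(OPTS)
    CONF(['--config-file', 'etc/dhcp_agent.ini'])
    print(CONF.resync_interval, CONF.use_namespaces)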

etc/init.d/neutron-server Executable file (68 lines)

@@ -0,0 +1,68 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: neutron-server
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: neutron-server
# Description: Provides the Neutron networking service
### END INIT INFO
set -e
PIDFILE=/var/run/neutron/neutron-server.pid
LOGFILE=/var/log/neutron/neutron-server.log
DAEMON=/usr/bin/neutron-server
DAEMON_ARGS="--log-file=$LOGFILE"
DAEMON_DIR=/var/run
ENABLED=true
if test -f /etc/default/neutron-server; then
    . /etc/default/neutron-server
fi
mkdir -p /var/run/neutron
mkdir -p /var/log/neutron
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
export TMPDIR=/var/lib/neutron/tmp
if [ ! -x ${DAEMON} ] ; then
    exit 0
fi

case "$1" in
    start)
        test "$ENABLED" = "true" || exit 0
        log_daemon_msg "Starting neutron server" "neutron-server"
        start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS
        log_end_msg $?
        ;;
    stop)
        test "$ENABLED" = "true" || exit 0
        log_daemon_msg "Stopping neutron server" "neutron-server"
        start-stop-daemon --stop --oknodo --pidfile ${PIDFILE}
        log_end_msg $?
        ;;
    restart|force-reload)
        test "$ENABLED" = "true" || exit 1
        $0 stop
        sleep 1
        $0 start
        ;;
    status)
        test "$ENABLED" = "true" || exit 0
        status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $?
        ;;
    *)
        log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}"
        exit 1
        ;;
esac
exit 0

etc/l3_agent.ini Normal file (102 lines)

@@ -0,0 +1,102 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# If use_namespaces is set as False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
# handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
# send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
# periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
# periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
# The working mode for the agent. Allowed values are:
# - legacy: this preserves the existing behavior where the L3 agent is
# deployed on a centralized networking node to provide L3 services
# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
# - dvr: this mode enables DVR functionality, and must be used for an L3
# agent that runs on a compute host.
# - dvr_snat: this enables centralized SNAT support in conjunction with
# DVR. This mode must be used for an L3 agent running on a centralized
# node (or in single-host deployments, e.g. devstack).
# agent_mode = legacy
# Location to store keepalived and all HA configurations
# ha_confs_path = $state_path/ha_confs
# VRRP authentication type AH/PASS
# ha_vrrp_auth_type = PASS
# VRRP authentication password
# ha_vrrp_auth_password =
# The advertisement interval in seconds
# ha_vrrp_advert_int = 2

etc/metadata_agent.ini Normal file (59 lines)

@@ -0,0 +1,59 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://localhost:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
# nova_metadata_ip = 127.0.0.1
# TCP Port used by Nova metadata server
# nova_metadata_port = 8775
# Which protocol to use for requests to Nova metadata server, http or https
# nova_metadata_protocol = http
# Whether insecure SSL connection should be accepted for Nova metadata server
# requests
# nova_metadata_insecure = False
# Client certificate for nova api, needed when nova api requires client
# certificates
# nova_client_cert =
# Private key for nova client certificate
# nova_client_priv_key =
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
# metadata_proxy_shared_secret =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server. Defaults to
# half the number of CPU cores
# metadata_workers =
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 4096
# URL to connect to the cache backend.
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5
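The shared-secret mechanism described above is an HMAC-SHA256 over the Instance-ID header, recomputed on the Nova side. A minimal sketch of that signing and check (the secret and instance id are sample values):

    # Sketch: sign and verify the X-Instance-ID header, as the
    # metadata_proxy_shared_secret comment above describes.
    import hashlib
    import hmac

    secret = b'shared-secret'                              # sample value
    instance_id = b'a3f2b1c4-0000-4000-8000-123456789abc'  # sample value

    signature = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()

    # The receiver recomputes and compares in constant time.
    expected = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
    assert hmac.compare_digest(signature, expected)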

etc/metering_agent.ini Normal file (18 lines)

@@ -0,0 +1,18 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# Default driver:
# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
# Example of non-default driver
# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
# Interval between two metering measures
# measure_interval = 30
# Interval between two metering reports
# report_interval = 300
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# use_namespaces = True

etc/neutron.conf Normal file (656 lines)

@@ -0,0 +1,656 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
# verbose = False
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
# core_plugin =
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
# force_gateway_on_subnet = True
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Allow automatic rescheduling of routers from dead L3 agents with
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
# Enable high availability for virtual routers.
# l3_ha = False
#
# Maximum number of l3 agents which a HA router will be scheduled on. If it
# is set to 0 the router will be scheduled on every agent.
# max_l3_agents_per_router = 3
#
# Minimum number of l3 agents which a HA router will be scheduled on. The
# default value is 2.
# min_l3_agents_per_router = 2
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
# =========== end of items for l3 extension =======
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# The name of the admin nova tenant. If the uuid of the admin nova tenant
# is set, this is optional. Useful for cases where the uuid of the admin
# nova tenant is not available when configuration is being done.
# nova_admin_tenant_name =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
#
# Options defined in oslo.messaging
#
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
#amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
#rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
#qpid_hostname=localhost
# Qpid broker port. (integer value)
#qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
#qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
#qpid_username=
# Password for Qpid connection. (string value)
#qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
#qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
#qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
#qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
#qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
#qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
#kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
#kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
#kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
#kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
#kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
#rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
#rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
#rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
#rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
#rabbit_userid=guest
# The RabbitMQ password. (string value)
#rabbit_password=guest
# the RabbitMQ login method (string value)
#rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
#rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
#rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
#rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
#rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
#rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
#fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
#rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
#rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
#rpc_zmq_topic_backlog=<None>
# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
#rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
#rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
#matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
#matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
#rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
#notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
#notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
#transport_url=<None>
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
#rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
#control_exchange=openstack
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
#host=127.0.0.1
# Use this port to connect to redis host. (integer value)
#port=6379
# Password for Redis server (optional). (string value)
#password=<None>
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
#ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back-end, a health monitor is a
# resource consumer and that is the reason why quota is possible.
# quota_health_monitor = -1
# Number of loadbalancers allowed per tenant. A negative value means unlimited.
# quota_loadbalancer = 10
# Number of listeners allowed per tenant. A negative value means unlimited.
# quota_listener = -1
# Number of v2 health monitors allowed per tenant. A negative value means
# unlimited. These health monitors exist under the lbaas v2 API
# quota_healthmonitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
# root_helper = sudo
# Set to true to add comments to generated iptables rules that describe
# each rule's purpose. (System must support the iptables comments module.)
# comment_iptables_rules = True
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron_lbaas.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron_vpnaas.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron_lbaas.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
# service_provider = LOADBALANCER:A10Networks:neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
# service_provider = LOADBALANCERV2:LoggingNoop:neutron_lbaas.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
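The service_provider format documented above is <service_type>:<name>:<driver>[:default], with the driver itself a dotted path. A small parsing sketch applied to the active haproxy line:

    # Sketch: split a service_provider value into its documented fields.
    def parse_service_provider(value):
        parts = value.split(':')
        default = parts[-1] == 'default'
        if default:
            parts = parts[:-1]
        service_type, name = parts[0], parts[1]
        driver = ':'.join(parts[2:])
        return service_type, name, driver, default

    line = ('LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.'
            'drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default')
    print(parse_service_provider(line))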


@@ -0,0 +1,203 @@
[DEFAULT]
# User name for NSX controller
# nsx_user = admin
# Password for NSX controller
# nsx_password = admin
# Time before aborting a request on an unresponsive controller (Seconds)
# http_timeout = 75
# Maximum number of times a particular request should be retried
# retries = 2
# Maximum number of times a redirect response should be followed
# redirects = 2
# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80
# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =
# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified for providing a predefined gateway tenant for connecting their networks.
# default_l2_gw_service_uuid =
# (Optional) UUID for the default service cluster. A service cluster is introduced to
# represent a group of gateways and it is needed in order to use Logical Services like
# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
# default_service_cluster_uuid =
# Name of the default interface name to be used on network-gateway. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified
# default_interface_name = breth0
# Reconnect connection to nsx if not used within this amount of time.
# conn_idle_timeout = 900
[quotas]
# number of network gateways allowed per tenant, -1 means unlimited
# quota_network_gateway = 5
[vcns]
# URL for VCNS manager
# manager_uri = https://management_ip
# User name for VCNS manager
# user = admin
# Password for VCNS manager
# password = default
# (Optional) Datacenter ID for Edge deployment
# datacenter_moid =
# (Optional) Deployment Container ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =
# (Optional) Resource pool ID for NSX Edge deployment
# resource_pool_id =
# (Optional) Datastore ID for NSX Edge deployment
# datastore_id =
# (Required) UUID of logic switch for physical network connectivity
# external_network =
# (Optional) Asynchronous task status check interval
# default is 2000 (millisecond)
# task_status_check_interval = 2000
[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000
# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256
# Number of connections to each controller node.
# default is 10
# concurrent_connections = 10
# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
# nsx_gen_timeout = -1
# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces; otherwise, access_network should be used.
# metadata_mode = access_network
# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt
# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode going forward. With this mode,
# existing networks keep being served by the existing infrastructure (thus preserving
# backward compatibility), whereas new networks will be served by the new infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent
# Specifies which mode packet replication should be done in. If set to service
# a service node is required in order to perform packet replication. This can
# also be set to source if one wants replication to be performed locally (NOTE:
# usually only useful for testing if one does not want to deploy a service node).
# In order to leverage distributed routers, replication_mode should be set to
# "service".
# replication_mode = service
[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should be therefore large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous is completed.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 10
# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0
# Minimum delay, in seconds, between two status synchronization requests for NSX.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might be quite
# increased by them. This parameter allows specifying a minimum
# interval between two subsequent requests.
# The value for this parameter must never exceed state_sync_interval.
# If this does, an error will be raised at startup.
# min_sync_req_delay = 1
# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay
# min_chunk_size = 500
# Enable this option to allow punctual state synchronization on show
# operations. In this way, show operations will always fetch the operational
# status of the resource from the NSX backend, and this might have
# a considerable impact on overall performance.
# always_read_status = False
[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful to rebuild the local store in case of
# server recovery
# sync_on_missing_data = False
[nsx_dhcp]
# (Optional) Comma separated list of additional dns servers. Default is an empty list
# extra_domain_name_servers =
# Domain to use for building the hostnames
# domain_name = openstacklocal
# Default DHCP lease time
# default_lease_time = 43200
[nsx_metadata]
# IP address used by Metadata server
# metadata_server_address = 127.0.0.1
# TCP Port used by Metadata server
# metadata_server_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match with the configuration used by the Metadata server
# metadata_shared_secret =
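The [nsx_sync] section above states that min_sync_req_delay must never exceed state_sync_interval, with an error raised at startup otherwise. A sketch of that validation using the defaults from this file:

    # Sketch: the startup constraint described in [nsx_sync] above.
    state_sync_interval = 10  # seconds; 0 disables the sync thread
    min_sync_req_delay = 1    # seconds

    if state_sync_interval and min_sync_req_delay > state_sync_interval:
        raise ValueError('min_sync_req_delay must not exceed '
                         'state_sync_interval')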


@@ -0,0 +1,16 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# cisco-apic filters
lldpctl: CommandFilter, lldpctl, root
# ip_lib filters
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root


@@ -0,0 +1,14 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# This is needed because we should ping
# from inside a namespace which requires root
ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
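A RegExpFilter matches the whole argument vector, one anchored regex per argument; a model of how the ping rule above is checked (the real matching lives in oslo.rootwrap, so this is only an approximation):

    # Sketch: per-argument matching in the spirit of rootwrap's RegExpFilter.
    import re

    filter_args = ['ping', '-w', r'\d+', '-c', r'\d+', r'[0-9\.]+']

    def matches(cmd):
        # Every argument must fully match its corresponding pattern.
        if len(cmd) != len(filter_args):
            return False
        return all(re.match(pat + '$', arg)
                   for pat, arg in zip(filter_args, cmd))

    print(matches(['ping', '-w', '10', '-c', '3', '192.0.2.1']))  # True
    print(matches(['ping', '-w', '10', '-c', '3', 'evil;rm']))    # False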


@@ -0,0 +1,35 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# dhcp-agent
dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID=
# dhcp-agent uses kill as well, that's handled by the generic KillFilter
# it looks like these are the only signals needed, per
# neutron/agent/linux/dhcp.py
kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP
kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
ivs-ctl: CommandFilter, ivs-ctl, root
mm-ctl: CommandFilter, mm-ctl, root
dhcp_release: CommandFilter, dhcp_release, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root


@@ -0,0 +1,12 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_firewall.py
# "ipset", "-A", ...
ipset: CommandFilter, ipset, root


@@ -0,0 +1,21 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_manager.py
# "iptables-save", ...
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# neutron/agent/linux/iptables_manager.py
# "iptables", "-A", ...
iptables: CommandFilter, iptables, root
ip6tables: CommandFilter, ip6tables, root


@@ -0,0 +1,48 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# arping
arping: CommandFilter, arping, root
# l3_agent
sysctl: CommandFilter, sysctl, root
route: CommandFilter, route, root
radvd: CommandFilter, radvd, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
# ovs_lib (if OVSInterfaceDriver is used)
ovs-vsctl: CommandFilter, ovs-vsctl, root
# iptables_manager
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# Keepalived
keepalived: CommandFilter, keepalived, root
kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
# l3 agent to delete floatingip's conntrack state
conntrack: CommandFilter, conntrack, root


@@ -0,0 +1,26 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# haproxy
haproxy: CommandFilter, haproxy, root
# lbaas-agent uses kill as well, that's handled by the generic KillFilter
kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
mm-ctl: CommandFilter, mm-ctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
route: CommandFilter, route, root
# arping
arping: CommandFilter, arping, root


@@ -0,0 +1,19 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# linuxbridge-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
brctl: CommandFilter, brctl, root
bridge: CommandFilter, bridge, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root


@@ -0,0 +1,12 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# nec_neutron_agent
ovs-vsctl: CommandFilter, ovs-vsctl, root


@@ -0,0 +1,16 @@
# neutron-rootwrap command filters for nodes on which
# neutron-ofagent-agent is expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# ovs_lib
ovs-vsctl: CommandFilter, ovs-vsctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root


@@ -0,0 +1,22 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# openvswitch-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root
xe: CommandFilter, xe, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root


@@ -0,0 +1,13 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
openswan: CommandFilter, ipsec, root

etc/policy.json

@ -0,0 +1,139 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"create_port": "",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only"
}
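Every value in this file is a boolean expression over the caller's credentials and the target resource. A toy evaluator for the two primitive forms that dominate the file, role checks and field comparisons such as tenant_id:%(tenant_id)s, combined with 'or' (an illustrative sketch only; rule: references, field: checks and the rest are handled by the real policy engine):

def check(rule, creds, target):
    # Split on 'or' and succeed if any branch succeeds.
    if ' or ' in rule:
        return any(check(part, creds, target) for part in rule.split(' or '))
    kind, _, match = rule.partition(':')
    if kind == 'role':
        return match in creds.get('roles', [])
    # e.g. 'tenant_id:%(tenant_id)s' compares the caller's tenant id
    # against the tenant id of the object being operated on.
    return creds.get(kind) == (match % target)

creds = {'roles': ['member'], 'tenant_id': 't1'}
print(check('role:admin or tenant_id:%(tenant_id)s', creds, {'tenant_id': 't1'}))  # True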

etc/rootwrap.conf

@ -0,0 +1,34 @@
# Configuration for neutron-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
[xenapi]
# XenAPI configuration is only required by the L2 agent if it is to
# target a XenServer/XCP compute host's dom0.
xenapi_connection_url=<None>
xenapi_connection_username=root
xenapi_connection_password=<None>
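When a filter names a bare command (ip, conntrack, haproxy, ...), rootwrap looks it up in exec_dirs in order and runs the first executable match; with exec_dirs set, there is no fallback to the caller's PATH. Roughly, as an illustrative sketch rather than the actual resolver:

import os

EXEC_DIRS = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']  # from exec_dirs above

def resolve_executable(name):
    # First executable match wins; None means the command is rejected.
    for d in EXEC_DIRS:
        candidate = os.path.join(d, name)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

print(resolve_executable('ip'))  # e.g. /sbin/ip on most distributions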

etc/services.conf

@ -0,0 +1,43 @@
[radware]
#vdirect_address = 0.0.0.0
#ha_secondary_address=
#vdirect_user = vDirect
#vdirect_password = radware
#service_ha_pair = False
#service_throughput = 1000
#service_ssl_throughput = 200
#service_compression_throughput = 100
#service_cache = 20
#service_adc_type = VA
#service_adc_version=
#service_session_mirroring_enabled = False
#service_isl_vlan = -1
#service_resource_pool_ids = []
#actions_to_skip = 'setup_l2_l3'
#l4_action_name = 'BaseCreate'
#l2_l3_workflow_name = openstack_l2_l3
#l4_workflow_name = openstack_l4
#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
[netscaler_driver]
#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api
#netscaler_ncc_username = admin
#netscaler_ncc_password = secret
[heleoslb]
#esm_mgmt =
#admin_username =
#admin_password =
#lb_image =
#inband_id =
#oob_id =
#mgmt_id =
#dummy_utif_id =
#resource_pool_id =
#async_requests =
#lb_flavor = small
#sync_interval = 60
[haproxy]
#jinja_config_template = /opt/stack/neutron/neutron/services/drivers/haproxy/templates/haproxy_v1.4.template


@ -114,7 +114,7 @@ function run_tests {
if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then
# Default to running all tests if specific test is not
# provided.
testargs="discover ./neutron/tests"
testargs="discover ./vmware_nsx/neutron/tests"
fi
${wrapper} python -m testtools.run $testopts $testargs
@ -134,7 +134,7 @@ function run_tests {
set +e
testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'"
OS_TEST_PATH=`echo $testargs|grep -o 'neutron\.tests[^[:space:]:]\+'|tr . /`
OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsx\.neutron\.tests[^[:space:]:]\+'|tr . /`
if [ -n "$OS_TEST_PATH" ]; then
os_test_dir=$(dirname "$OS_TEST_PATH")
else
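In effect, the grep|tr pipeline extracts the dotted test path from $testargs and turns the dots into slashes. The same transformation in Python, purely as an illustration (the input string here is hypothetical):

import re

testargs = 'vmware_nsx.neutron.tests.unit.test_nsx_opts'  # hypothetical
match = re.search(r'vmware_nsx\.neutron\.tests[^\s:]+', testargs)
os_test_path = match.group(0).replace('.', '/') if match else ''
print(os_test_path)  # vmware_nsx/neutron/tests/unit/test_nsx_opts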

setup.cfg

@ -1,12 +1,11 @@
[metadata]
name = neutron
version = 2015.1
summary = OpenStack Networking
name = vmware-nsx
summary = VMware NSX library for OpenStack projects
description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
home-page = http://launchpad.net/vmware-nsx
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@ -16,193 +15,39 @@ classifier =
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
[files]
packages =
neutron
data_files =
etc/neutron =
etc/api-paste.ini
etc/dhcp_agent.ini
etc/fwaas_driver.ini
etc/l3_agent.ini
etc/lbaas_agent.ini
etc/metadata_agent.ini
etc/metering_agent.ini
etc/policy.json
etc/neutron.conf
etc/rootwrap.conf
etc/vpn_agent.ini
etc/neutron/rootwrap.d =
etc/neutron/rootwrap.d/debug.filters
etc/neutron/rootwrap.d/dhcp.filters
etc/neutron/rootwrap.d/iptables-firewall.filters
etc/neutron/rootwrap.d/ipset-firewall.filters
etc/neutron/rootwrap.d/l3.filters
etc/neutron/rootwrap.d/lbaas-haproxy.filters
etc/neutron/rootwrap.d/linuxbridge-plugin.filters
etc/neutron/rootwrap.d/nec-plugin.filters
etc/neutron/rootwrap.d/ofagent.filters
etc/neutron/rootwrap.d/openvswitch-plugin.filters
etc/neutron/rootwrap.d/vpnaas.filters
etc/init.d = etc/init.d/neutron-server
etc/neutron/plugins/bigswitch =
etc/neutron/plugins/bigswitch/restproxy.ini
etc/neutron/plugins/bigswitch/ssl/ca_certs =
etc/neutron/plugins/bigswitch/ssl/ca_certs/README
etc/neutron/plugins/bigswitch/ssl/host_certs =
etc/neutron/plugins/bigswitch/ssl/host_certs/README
etc/neutron/plugins/brocade = etc/neutron/plugins/brocade/brocade.ini
etc/neutron/plugins/cisco =
etc/neutron/plugins/cisco/cisco_cfg_agent.ini
etc/neutron/plugins/cisco/cisco_plugins.ini
etc/neutron/plugins/cisco/cisco_router_plugin.ini
etc/neutron/plugins/cisco/cisco_vpn_agent.ini
etc/neutron/plugins/embrane = etc/neutron/plugins/embrane/heleos_conf.ini
etc/neutron/plugins/hyperv = etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
etc/neutron/plugins/ibm = etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini
etc/neutron/plugins/linuxbridge = etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini
etc/neutron/plugins/metaplugin = etc/neutron/plugins/metaplugin/metaplugin.ini
etc/neutron/plugins/midonet = etc/neutron/plugins/midonet/midonet.ini
etc/neutron/plugins/ml2 =
etc/neutron/plugins/bigswitch/restproxy.ini
etc/neutron/plugins/ml2/ml2_conf.ini
etc/neutron/plugins/ml2/ml2_conf_arista.ini
etc/neutron/plugins/ml2/ml2_conf_brocade.ini
etc/neutron/plugins/ml2/ml2_conf_cisco.ini
etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
etc/neutron/plugins/ml2/ml2_conf_ncs.ini
etc/neutron/plugins/ml2/ml2_conf_odl.ini
etc/neutron/plugins/ml2/ml2_conf_ofa.ini
etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
etc/neutron/plugins/ml2/ml2_conf_sriov.ini
etc/neutron/plugins/nuage/nuage_plugin.ini
etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini
etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini
etc/neutron/plugins/nuage = etc/neutron/plugins/nuage/nuage_plugin.ini
etc/neutron/plugins/oneconvergence = etc/neutron/plugins/oneconvergence/nvsdplugin.ini
etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini
etc/neutron/plugins/vmware = etc/neutron/plugins/vmware/nsx.ini
etc/neutron/plugins/opencontrail = etc/neutron/plugins/opencontrail/contrailplugin.ini
scripts =
bin/neutron-rootwrap
bin/neutron-rootwrap-xen-dom0
[global]
setup-hooks =
pbr.hooks.setup_hook
neutron.hooks.setup_hook
[entry_points]
console_scripts =
neutron-cisco-cfg-agent = neutron.plugins.cisco.cfg_agent.cfg_agent:main
neutron-check-nsx-config = neutron.plugins.vmware.check_nsx_config:main
neutron-db-manage = neutron.db.migration.cli:main
neutron-debug = neutron.debug.shell:main
neutron-dhcp-agent = neutron.agent.dhcp_agent:main
neutron-hyperv-agent = neutron.plugins.hyperv.agent.hyperv_neutron_agent:main
neutron-ibm-agent = neutron.plugins.ibm.agent.sdnve_neutron_agent:main
neutron-l3-agent = neutron.agent.l3.agent:main
neutron-lbaas-agent = neutron.services.loadbalancer.agent.agent:main
neutron-linuxbridge-agent = neutron.plugins.linuxbridge.agent.linuxbridge_neutron_agent:main
neutron-metadata-agent = neutron.agent.metadata.agent:main
neutron-mlnx-agent = neutron.plugins.mlnx.agent.eswitch_neutron_agent:main
neutron-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main
neutron-netns-cleanup = neutron.agent.netns_cleanup_util:main
neutron-ns-metadata-proxy = neutron.agent.metadata.namespace_proxy:main
neutron-nsx-manage = neutron.plugins.vmware.shell:main
neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main
neutron-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main
neutron-ovs-cleanup = neutron.agent.ovs_cleanup_util:main
neutron-restproxy-agent = neutron.plugins.bigswitch.agent.restproxy_agent:main
neutron-server = neutron.server:main
neutron-rootwrap = oslo.rootwrap.cmd:main
neutron-usage-audit = neutron.cmd.usage_audit:main
neutron-vpn-agent = neutron.services.vpn.agent:main
neutron-metering-agent = neutron.services.metering.agents.metering_agent:main
neutron-ofagent-agent = neutron.plugins.ofagent.agent.main:main
neutron-sriov-nic-agent = neutron.plugins.sriovnicagent.sriov_nic_agent:main
neutron-sanity-check = neutron.cmd.sanity_check:main
neutron.core_plugins =
bigswitch = neutron.plugins.bigswitch.plugin:NeutronRestProxyV2
brocade = neutron.plugins.brocade.NeutronPlugin:BrocadePluginV2
cisco = neutron.plugins.cisco.network_plugin:PluginV2
embrane = neutron.plugins.embrane.plugins.embrane_ml2_plugin:EmbraneMl2Plugin
hyperv = neutron.plugins.hyperv.hyperv_neutron_plugin:HyperVNeutronPlugin
ibm = neutron.plugins.ibm.sdnve_neutron_plugin:SdnvePluginV2
midonet = neutron.plugins.midonet.plugin:MidonetPluginV2
ml2 = neutron.plugins.ml2.plugin:Ml2Plugin
nec = neutron.plugins.nec.nec_plugin:NECPluginV2
nuage = neutron.plugins.nuage.plugin:NuagePlugin
metaplugin = neutron.plugins.metaplugin.meta_neutron_plugin:MetaPluginV2
oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2
plumgrid = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin:NeutronPluginPLUMgridV2
vmware = neutron.plugins.vmware.plugin:NsxPlugin
neutron.service_plugins =
dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin
router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin
bigswitch_l3 = neutron.plugins.bigswitch.l3_router_plugin:L3RestProxy
firewall = neutron.services.firewall.fwaas_plugin:FirewallPlugin
lbaas = neutron.services.loadbalancer.plugin:LoadBalancerPlugin
vpnaas = neutron.services.vpn.plugin:VPNDriverPlugin
metering = neutron.services.metering.metering_plugin:MeteringPlugin
neutron.ml2.type_drivers =
flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
neutron.ml2.mechanism_drivers =
opendaylight = neutron.plugins.ml2.drivers.mechanism_odl:OpenDaylightMechanismDriver
logger = neutron.tests.unit.ml2.drivers.mechanism_logger:LoggerMechanismDriver
test = neutron.tests.unit.ml2.drivers.mechanism_test:TestMechanismDriver
bulkless = neutron.tests.unit.ml2.drivers.mechanism_bulkless:BulklessMechanismDriver
linuxbridge = neutron.plugins.ml2.drivers.mech_linuxbridge:LinuxbridgeMechanismDriver
openvswitch = neutron.plugins.ml2.drivers.mech_openvswitch:OpenvswitchMechanismDriver
hyperv = neutron.plugins.ml2.drivers.mech_hyperv:HypervMechanismDriver
ncs = neutron.plugins.ml2.drivers.mechanism_ncs:NCSMechanismDriver
arista = neutron.plugins.ml2.drivers.arista.mechanism_arista:AristaDriver
cisco_nexus = neutron.plugins.ml2.drivers.cisco.nexus.mech_cisco_nexus:CiscoNexusMechanismDriver
cisco_apic = neutron.plugins.ml2.drivers.cisco.apic.mechanism_apic:APICMechanismDriver
l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
bigswitch = neutron.plugins.ml2.drivers.mech_bigswitch.driver:BigSwitchMechanismDriver
ofagent = neutron.plugins.ml2.drivers.mech_ofagent:OfagentMechanismDriver
mlnx = neutron.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver
brocade = neutron.plugins.ml2.drivers.brocade.mechanism_brocade:BrocadeMechanism
fslsdn = neutron.plugins.ml2.drivers.freescale.mechanism_fslsdn:FslsdnMechanismDriver
sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver:SriovNicSwitchMechanismDriver
nuage = neutron.plugins.ml2.drivers.mech_nuage.driver:NuageMechanismDriver
neutron.ml2.extension_drivers =
test = neutron.tests.unit.ml2.test_extension_driver_api:TestExtensionDriver
neutron.openstack.common.cache.backends =
memory = neutron.openstack.common.cache._backends.memory:MemoryBackend
# These are for backwards compat with Icehouse notification_driver configuration values
oslo.messaging.notify.drivers =
neutron.openstack.common.notifier.log_notifier = oslo.messaging.notify._impl_log:LogDriver
neutron.openstack.common.notifier.no_op_notifier = oslo.messaging.notify._impl_noop:NoOpDriver
neutron.openstack.common.notifier.rpc_notifier2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver
neutron.openstack.common.notifier.rpc_notifier = oslo.messaging.notify._impl_messaging:MessagingDriver
neutron.openstack.common.notifier.test_notifier = oslo.messaging.notify._impl_test:TestDriver
vmware_nsx
namespace_packages =
vmware_nsx
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
build-dir = doc/build
all_files = 1
[upload_sphinx]
upload-dir = doc/build/html
[compile_catalog]
directory = vmware_nsx/locale
domain = vmware_nsx
[update_catalog]
domain = vmware_nsx
output_dir = vmware_nsx/locale
input_file = vmware_nsx/locale/vmware_nsx.pot
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = neutron/locale/neutron.pot
output_file = vmware_nsx/locale/vmware_nsx.pot
[compile_catalog]
directory = neutron/locale
domain = neutron
[update_catalog]
domain = neutron
output_dir = neutron/locale
input_file = neutron/locale/neutron.pot
[pbr]
autodoc_index_modules = 1
[wheel]
universal = 1
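Listing vmware_nsx under namespace_packages allows other distributions to install modules into the same top-level package. The conventional setuptools idiom this implies for the package's top-level __init__.py is a single declaration (shown as the standard convention of the era, assumed rather than copied from the repo):

# vmware_nsx/__init__.py -- namespace package declaration
__import__('pkg_resources').declare_namespace(__name__)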


@ -1,6 +1,9 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-e git://git.openstack.org/openstack/neutron.git#egg=neutron
hacking>=0.9.2,<0.10
cliff>=1.7.0 # Apache-2.0


@ -92,6 +92,4 @@ msg_format_checkers = [
]
file_black_list = ["./neutron/tests/unit",
"./neutron/openstack",
"./neutron/plugins/bigswitch/tests"]
file_black_list = ["./vmware_nsx/neutron/tests/unit"]


@ -3,4 +3,4 @@
TESTRARGS=$1
exec 3>&1
status=$(exec 4>&1 >&3; ( python -m neutron.openstack.common.lockutils python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | subunit-trace -f) && exit $status
status=$(exec 4>&1 >&3; ( python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | subunit-trace -f) && exit $status

tox.ini

@ -1,5 +1,5 @@
[tox]
envlist = py27,py33,py34,pep8
envlist = py27,pep8
minversion = 1.6
skipsdist = True
@ -13,10 +13,7 @@ install_command = pip install -U {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = sh
commands =
sh tools/pretty_tox.sh '{posargs}'
# there is also secret magic in pretty_tox.sh which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environmental variable.
commands = python setup.py testr --slowest --testr-args='{posargs}'
[testenv:hashtest]
# This is the same as default environment, but with a random PYTHONHASHSEED.
@ -25,13 +22,10 @@ commands =
setenv = VIRTUAL_ENV={envdir}
[testenv:functional]
setenv = OS_TEST_PATH=./neutron/tests/functional
OS_TEST_TIMEOUT=90
setenv = OS_TEST_TIMEOUT=90
[testenv:dsvm-functional]
setenv = OS_TEST_PATH=./neutron/tests/functional
OS_SUDO_TESTING=1
OS_ROOTWRAP_CMD=sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
setenv = OS_SUDO_TESTING=1
OS_FAIL_ON_MISSING_DEPS=1
OS_TEST_TIMEOUT=90
sitepackages=True
@ -44,16 +38,14 @@ downloadcache = ~/cache/pip
commands =
sh ./tools/check_bash.sh
flake8
neutron-db-manage check_migration
sh -c "find neutron -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"
whitelist_externals = sh
[testenv:i18n]
commands = python ./tools/check_i18n.py ./neutron ./tools/i18n_cfg.py
commands = python ./tools/check_i18n.py ./vmware_nsx/neutron
[testenv:cover]
commands =
python -m neutron.openstack.common.lockutils python setup.py testr --coverage --testr-args='{posargs}'
python setup.py testr --coverage --testr-args='{posargs}'
[testenv:venv]
commands = {posargs}
@ -76,16 +68,14 @@ commands = python setup.py build_sphinx
ignore = E125,E126,E128,E129,E265,H305,H307,H402,H404,H405,H904
show-source = true
builtins = _
# TODO(dougw) neutron/tests/unit/vmware exclusion is a temporary services split hack
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios,neutron/tests/unit/vmware*
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject
[testenv:pylint]
deps =
{[testenv]deps}
pylint
commands =
pylint --rcfile=.pylintrc --output-format=colorized {posargs:neutron}
pylint --rcfile=.pylintrc --output-format=colorized {posargs:vmware_nsx/neutron}
[hacking]
import_exceptions = neutron.i18n
local-check-factory = neutron.hacking.checks.factory

File diff suppressed because it is too large


@ -1,49 +0,0 @@
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import config # noqa
from neutron.plugins.vmware.vshield import edge_appliance_driver
from neutron.plugins.vmware.vshield import edge_firewall_driver
from neutron.plugins.vmware.vshield import edge_ipsecvpn_driver
from neutron.plugins.vmware.vshield import edge_loadbalancer_driver
from neutron.plugins.vmware.vshield.tasks import tasks
from neutron.plugins.vmware.vshield import vcns
LOG = logging.getLogger(__name__)
class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
edge_firewall_driver.EdgeFirewallDriver,
edge_loadbalancer_driver.EdgeLbDriver,
edge_ipsecvpn_driver.EdgeIPsecVpnDriver):
def __init__(self, callbacks):
super(VcnsDriver, self).__init__()
self.callbacks = callbacks
self.vcns_uri = cfg.CONF.vcns.manager_uri
self.vcns_user = cfg.CONF.vcns.user
self.vcns_passwd = cfg.CONF.vcns.password
self.datacenter_moid = cfg.CONF.vcns.datacenter_moid
self.deployment_container_id = cfg.CONF.vcns.deployment_container_id
self.resource_pool_id = cfg.CONF.vcns.resource_pool_id
self.datastore_id = cfg.CONF.vcns.datastore_id
self.external_network = cfg.CONF.vcns.external_network
interval = cfg.CONF.vcns.task_status_check_interval
self.task_manager = tasks.TaskManager(interval)
self.task_manager.start()
self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)


@ -1,293 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from eventlet import greenthread
import mock
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.extensions import l3
from neutron import manager as n_manager
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.plugins import service as nsp
from neutron.tests import base
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware import test_nsx_plugin
from neutron.tests.unit.vmware.vshield import fake_vcns
_uuid = uuidutils.generate_uuid
class ServiceRouterTestExtensionManager(object):
def get_resources(self):
# If l3 resources have been loaded and updated by main API
# router, update the map in the l3 extension so it will load
# the same attributes as the API router
l3_attr_map = copy.deepcopy(l3.RESOURCE_ATTRIBUTE_MAP)
for res in l3.RESOURCE_ATTRIBUTE_MAP.keys():
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res)
if attr_info:
l3.RESOURCE_ATTRIBUTE_MAP[res] = attr_info
resources = l3.L3.get_resources()
# restore the original resources once the controllers are created
l3.RESOURCE_ATTRIBUTE_MAP = l3_attr_map
return resources
def get_actions(self):
return []
def get_request_extensions(self):
return []
class ServiceRouterTest(test_nsx_plugin.L3NatTest,
test_l3_plugin.L3NatTestCaseMixin):
def vcns_patch(self):
instance = self.mock_vcns.start()
self.vcns_instance = instance
instance.return_value.deploy_edge.side_effect = self.fc2.deploy_edge
instance.return_value.get_edge_id.side_effect = self.fc2.get_edge_id
instance.return_value.get_edge_deploy_status.side_effect = (
self.fc2.get_edge_deploy_status)
instance.return_value.delete_edge.side_effect = self.fc2.delete_edge
instance.return_value.update_interface.side_effect = (
self.fc2.update_interface)
instance.return_value.get_nat_config.side_effect = (
self.fc2.get_nat_config)
instance.return_value.update_nat_config.side_effect = (
self.fc2.update_nat_config)
instance.return_value.delete_nat_rule.side_effect = (
self.fc2.delete_nat_rule)
instance.return_value.get_edge_status.side_effect = (
self.fc2.get_edge_status)
instance.return_value.get_edges.side_effect = self.fc2.get_edges
instance.return_value.update_routes.side_effect = (
self.fc2.update_routes)
instance.return_value.create_lswitch.side_effect = (
self.fc2.create_lswitch)
instance.return_value.delete_lswitch.side_effect = (
self.fc2.delete_lswitch)
instance.return_value.get_loadbalancer_config.side_effect = (
self.fc2.get_loadbalancer_config)
instance.return_value.enable_service_loadbalancer.side_effect = (
self.fc2.enable_service_loadbalancer)
def setUp(self, ext_mgr=None, service_plugins=None):
cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
cfg.CONF.set_override('task_status_check_interval', 200, group="vcns")
# vcns does not support duplicated router name, ignore router name
# validation for unit-test cases
self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
self.vcns_patch()
mock_proxy = mock.patch(
"%s.%s" % (vmware.SERVICE_PLUGIN_NAME,
'_set_create_lswitch_proxy'))
mock_proxy.start()
ext_mgr = ext_mgr or ServiceRouterTestExtensionManager()
super(ServiceRouterTest, self).setUp(
plugin=vmware.SERVICE_PLUGIN_NAME,
service_plugins=service_plugins,
ext_mgr=ext_mgr)
self.fc2.set_fake_nsx_api(self.fc)
self.addCleanup(self.fc2.reset_all)
def tearDown(self):
plugin = n_manager.NeutronManager.get_plugin()
manager = plugin.vcns_driver.task_manager
# wait max ~10 seconds for all tasks to be finished
for i in range(100):
if not manager.has_pending_task():
break
greenthread.sleep(0.1)
if manager.has_pending_task():
manager.show_pending_tasks()
raise Exception(_("Tasks not completed"))
manager.stop()
# Ensure the manager thread has been stopped
self.assertIsNone(manager._thread)
super(ServiceRouterTest, self).tearDown()
def _create_router(self, fmt, tenant_id, name=None,
admin_state_up=None, set_context=False,
arg_list=None, **kwargs):
data = {'router': {'tenant_id': tenant_id}}
if name:
data['router']['name'] = name
if admin_state_up:
data['router']['admin_state_up'] = admin_state_up
for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
# Arg must be present and not empty
if arg in kwargs and kwargs[arg]:
data['router'][arg] = kwargs[arg]
data['router']['service_router'] = True
router_req = self.new_create_request('routers', data, fmt)
if set_context and tenant_id:
# create a specific auth context for this request
router_req.environ['neutron.context'] = context.Context(
'', tenant_id)
return router_req.get_response(self.ext_api)
class ServiceRouterTestCase(ServiceRouterTest,
test_nsx_plugin.TestL3NatTestCase):
def test_router_create(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True),
('external_gateway_info', None),
('service_router', True)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
expected_value_1 = expected_value + [('status', 'PENDING_CREATE')]
for k, v in expected_value_1:
self.assertEqual(router['router'][k], v)
# wait max ~10 seconds for router status update
for i in range(20):
greenthread.sleep(0.5)
res = self._show('routers', router['router']['id'])
if res['router']['status'] == 'ACTIVE':
break
expected_value_2 = expected_value + [('status', 'ACTIVE')]
for k, v in expected_value_2:
self.assertEqual(res['router'][k], v)
# check an integration lswitch is created
lswitch_name = "%s-ls" % name
for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
if lswitch['display_name'] == lswitch_name:
break
else:
self.fail("Integration lswitch not found")
# check an integration lswitch is deleted
lswitch_name = "%s-ls" % name
for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
if lswitch['display_name'] == lswitch_name:
self.fail("Integration switch is not deleted")
def test_router_delete_after_plugin_restart(self):
name = 'router1'
tenant_id = _uuid()
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id):
# clear router type cache to mimic plugin restart
plugin = n_manager.NeutronManager.get_plugin()
plugin._router_type = {}
# check an integration lswitch is deleted
lswitch_name = "%s-ls" % name
for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
if lswitch['display_name'] == lswitch_name:
self.fail("Integration switch is not deleted")
def test_router_show(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True),
('status', 'PENDING_CREATE'),
('external_gateway_info', None),
('service_router', True)]
with self.router(name='router1', admin_state_up=True,
tenant_id=tenant_id) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None):
super(ServiceRouterTestCase,
self)._test_router_create_with_gwinfo_and_l3_ext_net(
vlan_id, validate_ext_gw=False)
def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None):
super(ServiceRouterTestCase,
self)._test_router_update_gateway_on_l3_ext_net(
vlan_id, validate_ext_gw=False)
def test_floatingip_update(self):
self._test_floatingip_update(constants.FLOATINGIP_STATUS_ACTIVE)
class TestProxyCreateLswitch(base.BaseTestCase):
def setUp(self):
super(TestProxyCreateLswitch, self).setUp()
self.tenant_id = "foo_tenant"
self.display_name = "foo_network"
self.tz_config = [
{'zone_uuid': 'foo_zone',
'transport_type': 'stt'}
]
self.tags = utils.get_tags(quantum_net_id='foo_id',
os_tid=self.tenant_id)
self.cluster = None
def test_create_lswitch_with_basic_args(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config)
self.assertEqual(self.display_name, result[0])
self.assertEqual(self.tz_config, result[1])
self.assertEqual(sorted(self.tags), sorted(result[2]))
def test_create_lswitch_with_shared_as_kwarg(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config,
shared=True)
expected = self.tags + [{'scope': 'shared', 'tag': 'true'}]
self.assertEqual(sorted(expected), sorted(result[2]))
def test_create_lswitch_with_shared_as_arg(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config,
True)
additional_tags = [{'scope': 'shared', 'tag': 'true'}]
expected = self.tags + additional_tags
self.assertEqual(sorted(expected), sorted(result[2]))
def test_create_lswitch_with_additional_tags(self):
more_tags = [{'scope': 'foo_scope', 'tag': 'foo_tag'}]
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config,
tags=more_tags)
expected = self.tags + more_tags
self.assertEqual(sorted(expected), sorted(result[2]))


@ -1,375 +0,0 @@
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import mock
import webob.exc
from neutron import context
from neutron.db.firewall import firewall_db
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc
from neutron.plugins.vmware.vshield import vcns_driver
from neutron.tests.unit.db.firewall import test_db_firewall
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware.vshield import fake_vcns
_uuid = uuidutils.generate_uuid
VSE_ID = 'edge-1'
ROUTER_ID = '42f95450-5cc9-44e4-a744-1320e592a9d5'
VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test")
class VcnsDriverTestCase(test_db_firewall.FirewallPluginDbTestCase,
firewall_db.Firewall_db_mixin):
def vcns_firewall_patch(self):
instance = self.mock_vcns.start()
instance.return_value.update_firewall.side_effect = (
self.fc2.update_firewall)
instance.return_value.delete_firewall.side_effect = (
self.fc2.delete_firewall)
instance.return_value.update_firewall_rule.side_effect = (
self.fc2.update_firewall_rule)
instance.return_value.delete_firewall_rule.side_effect = (
self.fc2.delete_firewall_rule)
instance.return_value.add_firewall_rule_above.side_effect = (
self.fc2.add_firewall_rule_above)
instance.return_value.add_firewall_rule.side_effect = (
self.fc2.add_firewall_rule)
instance.return_value.get_firewall.side_effect = (
self.fc2.get_firewall)
instance.return_value.get_firewall_rule.side_effect = (
self.fc2.get_firewall_rule)
def setUp(self):
self.config_parse(args=['--config-file', VCNS_CONFIG_FILE])
# mock vcns
self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
self.vcns_firewall_patch()
self.driver = vcns_driver.VcnsDriver(mock.Mock())
super(VcnsDriverTestCase, self).setUp()
self.addCleanup(self.fc2.reset_all)
self.addCleanup(self.mock_vcns.stop)
self.tenant_id = _uuid()
self.subnet_id = _uuid()
class TestEdgeFwDriver(VcnsDriverTestCase):
def _make_firewall_dict_with_rules(self, context, firewall_id):
fw = self.get_firewall(context, firewall_id)
fw_policy_id = fw['firewall_policy_id']
if fw_policy_id:
firewall_policy_db = self._get_firewall_policy(
context, fw_policy_id)
fw['firewall_rule_list'] = [
self._make_firewall_rule_dict(fw_rule_db)
for fw_rule_db in firewall_policy_db['firewall_rules']
]
return fw
def _compare_firewall_rule_lists(self, firewall_policy_id,
list1, list2):
for r1, r2 in zip(list1, list2):
rule = r1['firewall_rule']
rule['firewall_policy_id'] = firewall_policy_id
for k in rule:
self.assertEqual(rule[k], r2[k])
def test_create_and_get_firewall(self):
ctx = context.get_admin_context()
name = 'firewall'
with contextlib.nested(self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False),
self.firewall_rule(name='fwr3',
do_delete=False)) as fr:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
with self.firewall_policy(firewall_rules=fw_rule_ids,
do_delete=False) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name=name,
firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_expect)
fw_get = self.driver.get_firewall(ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
def test_update_firewall_with_rules(self):
ctx = context.get_admin_context()
name = 'new_firewall'
with contextlib.nested(self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False),
self.firewall_rule(name='fwr3',
do_delete=False)) as fr:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
with self.firewall_policy(firewall_rules=fw_rule_ids,
do_delete=False) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name=name,
firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_create = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_create)
data = {'firewall_rule': {'name': name,
'source_port': '10:20',
'destination_port': '30:40'}}
self.new_update_request('firewall_rules', data,
fr[0]['firewall_rule']['id'])
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_expect)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
def test_delete_firewall(self):
ctx = context.get_admin_context()
name = 'firewall'
with contextlib.nested(self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False),
self.firewall_rule(name='fwr3',
do_delete=False)) as fr:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
with self.firewall_policy(firewall_rules=fw_rule_ids,
do_delete=False) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name=name,
firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_expect)
self.driver.delete_firewall(ctx, VSE_ID)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self.assertFalse(fw_get['firewall_rule_list'])
def test_update_firewall_rule(self):
ctx = context.get_admin_context()
name = 'new_firewall'
with contextlib.nested(self.firewall_rule(name='fwr1',
do_delete=False)) as fr:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
with self.firewall_policy(firewall_rules=fw_rule_ids,
do_delete=False) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name=name,
firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_create = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_create)
data = {'firewall_rule': {'name': name,
'source_port': '10:20',
'destination_port': '30:40'}}
req = self.new_update_request(
'firewall_rules', data,
fr[0]['firewall_rule']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
rule_expect = res['firewall_rule']
rule_expect['edge_id'] = VSE_ID
self.driver.update_firewall_rule(
ctx, rule_expect['id'], VSE_ID, rule_expect)
rule_get = self.driver.get_firewall_rule(
ctx, rule_expect['id'], VSE_ID)
for k, v in rule_get['firewall_rule'].items():
self.assertEqual(rule_expect[k], v)
def test_delete_firewall_rule(self):
ctx = context.get_admin_context()
name = 'new_firewall'
with contextlib.nested(self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False)) as fr:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
with self.firewall_policy(firewall_rules=fw_rule_ids,
do_delete=False) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name=name,
firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_create = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_create)
fr[0]['firewall_rule']['edge_id'] = VSE_ID
self.driver.delete_firewall_rule(
ctx, fr[0]['firewall_rule']['id'],
VSE_ID)
self.assertRaises(vcns_exc.VcnsNotFound,
self.driver.get_firewall_rule,
ctx, fr[0]['firewall_rule']['id'],
VSE_ID)
def test_insert_rule(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_create = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_create)
with contextlib.nested(self.firewall_rule(name='fwr0',
do_delete=False),
self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False),
self.firewall_rule(name='fwr3',
do_delete=False),
self.firewall_rule(name='fwr4',
do_delete=False),
self.firewall_rule(name='fwr5',
do_delete=False),
self.firewall_rule(
name='fwr6',
do_delete=False)) as fwr:
# test insert when rule list is empty
fwr0_id = fwr[0]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr0_id,
insert_before=None,
insert_after=None,
expected_code=webob.exc.HTTPOk.code)
fw_update = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_update)
# test insert at top of list above existing rule
fwr1_id = fwr[1]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr1_id,
insert_before=fwr0_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr1_id,
'insert_before': fwr0_id,
'insert_after': None}
rule = fwr[1]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert at bottom of list
fwr2_id = fwr[2]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr2_id,
insert_before=None,
insert_after=fwr0_id,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr2_id,
'insert_before': None,
'insert_after': fwr0_id}
rule = fwr[2]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert in the middle of the list using
# insert_before
fwr3_id = fwr[3]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr3_id,
insert_before=fwr2_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr3_id,
'insert_before': fwr2_id,
'insert_after': None}
rule = fwr[3]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert in the middle of the list using
# insert_after
fwr4_id = fwr[4]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr4_id,
insert_before=None,
insert_after=fwr3_id,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr4_id,
'insert_before': None,
'insert_after': fwr3_id}
rule = fwr[4]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert when both insert_before and
# insert_after are set
fwr5_id = fwr[5]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr5_id,
insert_before=fwr4_id,
insert_after=fwr4_id,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr5_id,
'insert_before': fwr4_id,
'insert_after': fwr4_id}
rule = fwr[5]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])


@ -1,682 +0,0 @@
# Copyright 2013 VMware, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import copy
import webob.exc
from neutron.api.v2 import attributes
from neutron import context
from neutron.extensions import firewall
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as const
from neutron.tests.unit.db.firewall import test_db_firewall
from neutron.tests.unit.vmware.vshield import test_edge_router
_uuid = uuidutils.generate_uuid
FW_PLUGIN_CLASS = "neutron.plugins.vmware.plugin.NsxServicePlugin"
class FirewallTestExtensionManager(
test_edge_router.ServiceRouterTestExtensionManager):
def get_resources(self):
# If l3 resources have been loaded and updated by main API
# router, update the map in the l3 extension so it will load
# the same attributes as the API router
resources = super(FirewallTestExtensionManager, self).get_resources()
firewall_attr_map = copy.deepcopy(firewall.RESOURCE_ATTRIBUTE_MAP)
for res in firewall.RESOURCE_ATTRIBUTE_MAP.keys():
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res)
if attr_info:
firewall.RESOURCE_ATTRIBUTE_MAP[res] = attr_info
fw_resources = firewall.Firewall.get_resources()
# restore the original resources once the controllers are created
firewall.RESOURCE_ATTRIBUTE_MAP = firewall_attr_map
resources.extend(fw_resources)
return resources
def get_actions(self):
return []
def get_request_extensions(self):
return []
class FirewallPluginTestCase(test_db_firewall.FirewallPluginDbTestCase,
test_edge_router.ServiceRouterTest):
def vcns_firewall_patch(self):
self.vcns_instance.return_value.update_firewall.side_effect = (
self.fc2.update_firewall)
self.vcns_instance.return_value.delete_firewall.side_effect = (
self.fc2.delete_firewall)
self.vcns_instance.return_value.update_firewall_rule.side_effect = (
self.fc2.update_firewall_rule)
self.vcns_instance.return_value.delete_firewall_rule.side_effect = (
self.fc2.delete_firewall_rule)
self.vcns_instance.return_value.add_firewall_rule_above.side_effect = (
self.fc2.add_firewall_rule_above)
self.vcns_instance.return_value.add_firewall_rule.side_effect = (
self.fc2.add_firewall_rule)
self.vcns_instance.return_value.get_firewall.side_effect = (
self.fc2.get_firewall)
self.vcns_instance.return_value.get_firewall_rule.side_effect = (
self.fc2.get_firewall_rule)
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
super(FirewallPluginTestCase, self).setUp(
ext_mgr=FirewallTestExtensionManager(),
fw_plugin=FW_PLUGIN_CLASS)
self.vcns_firewall_patch()
self.plugin = manager.NeutronManager.get_plugin()
def tearDown(self):
super(FirewallPluginTestCase, self).tearDown()
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
self.ext_api = None
self.plugin = None
def _create_and_get_router(self):
req = self._create_router(self.fmt, self._tenant_id)
res = self.deserialize(self.fmt, req)
return res['router']['id']
def _create_firewall(self, fmt, name, description, firewall_policy_id,
admin_state_up=True, expected_res_status=None,
**kwargs):
data = {'firewall': {'name': name,
'description': description,
'firewall_policy_id': firewall_policy_id,
'router_id': kwargs.get('router_id'),
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
firewall_req = self.new_create_request('firewalls', data, fmt)
firewall_res = firewall_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(firewall_res.status_int, expected_res_status)
return firewall_res
def test_create_firewall(self):
name = "new_fw"
attrs = self._get_test_firewall_attrs(name)
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
attrs['router_id'] = self._create_and_get_router()
with self.firewall(
name=name,
firewall_policy_id=fwp_id,
router_id=attrs['router_id'],
admin_state_up=test_db_firewall.ADMIN_STATE_UP,
expected_res_status=201
) as fw:
attrs = self._replace_firewall_status(
attrs, const.PENDING_CREATE, const.ACTIVE)
for k, v in attrs.iteritems():
self.assertEqual(fw['firewall'][k], v)
def test_create_firewall_without_policy(self):
name = "new_fw"
attrs = self._get_test_firewall_attrs(name)
attrs['router_id'] = self._create_and_get_router()
with self.firewall(
name=name,
router_id=attrs['router_id'],
admin_state_up=test_db_firewall.ADMIN_STATE_UP,
expected_res_status=201
) as fw:
attrs = self._replace_firewall_status(
attrs, const.PENDING_CREATE, const.ACTIVE)
for k, v in attrs.iteritems():
self.assertEqual(fw['firewall'][k], v)
def test_update_firewall(self):
name = "new_fw"
attrs = self._get_test_firewall_attrs(name)
attrs['router_id'] = self._create_and_get_router()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(
firewall_policy_id=fwp_id, router_id=attrs['router_id'],
admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw:
fw_id = fw['firewall']['id']
new_data = {'firewall': {'name': name}}
req = self.new_update_request('firewalls', new_data, fw_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 200)
res_json = self.deserialize(
self.fmt, res)
attrs = self._replace_firewall_status(
attrs, const.PENDING_CREATE, const.ACTIVE)
for k, v in attrs.iteritems():
self.assertEqual(res_json['firewall'][k], v)
def test_delete_firewall(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(
firewall_policy_id=fwp_id,
router_id=self._create_and_get_router(),
admin_state_up=test_db_firewall.ADMIN_STATE_UP,
do_delete=False) as fw:
fw_id = fw['firewall']['id']
with ctx.session.begin(subtransactions=True):
req = self.new_delete_request('firewalls', fw_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
self.assertRaises(
firewall.FirewallNotFound,
self.plugin.get_firewall, ctx, fw_id)
def test_delete_router_in_use_by_fwservice(self):
router_id = self._create_and_get_router()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(
name='fw',
firewall_policy_id=fwp_id,
router_id=router_id,
admin_state_up=test_db_firewall.ADMIN_STATE_UP,
expected_res_status=201
):
self._delete('routers', router_id,
expected_code=webob.exc.HTTPConflict.code)
def test_show_firewall(self):
name = "firewall1"
attrs = self._get_test_firewall_attrs(name)
attrs['router_id'] = self._create_and_get_router()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(
name=name,
firewall_policy_id=fwp_id, router_id=attrs['router_id'],
admin_state_up=test_db_firewall.ADMIN_STATE_UP) as firewall:
req = self.new_show_request('firewalls',
firewall['firewall']['id'],
fmt=self.fmt)
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
attrs = self._replace_firewall_status(
attrs, const.PENDING_CREATE, const.ACTIVE)
for k, v in attrs.iteritems():
self.assertEqual(res['firewall'][k], v)
def test_list_firewalls(self):
keys_list = []
for i in range(3):
keys_list.append({'name': "fw" + str(i),
'router_id': self._create_and_get_router(),
'admin_state_up': True,
'status': "ACTIVE"})
with contextlib.nested(
self.firewall(
name='fw0', router_id=keys_list[0]['router_id'],
admin_state_up=True, description='fw'),
self.firewall(
name='fw1', router_id=keys_list[1]['router_id'],
admin_state_up=True, description='fw'),
self.firewall(
name='fw2', router_id=keys_list[2]['router_id'],
admin_state_up=True, description='fw'),
) as (fw1, fw2, fw3):
self._test_list_resources(
'firewall', (fw1, fw2, fw3),
query_params='description=fw')
req = self.new_list_request('firewalls')
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res['firewalls']), 3)
for index in range(len(res['firewalls'])):
for k, v in keys_list[index].items():
self.assertEqual(res['firewalls'][index][k], v)
def test_create_firewall_with_rules(self):
ctx = context.get_admin_context()
with contextlib.nested(self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3')) as fr:
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request(
'firewall_policies', data, fwp_id)
req.get_response(self.ext_api)
attrs = self._get_test_firewall_attrs()
attrs['firewall_policy_id'] = fwp_id
with self.firewall(
firewall_policy_id=fwp_id,
router_id=self._create_and_get_router(),
admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw:
rule_list = (
self.plugin._make_firewall_rule_list_by_policy_id(
ctx, fw['firewall']['firewall_policy_id']))
self._compare_firewall_rule_lists(
fwp_id, fr, rule_list)
def test_update_firewall_policy_with_no_firewall(self):
name = "new_firewall_policy1"
attrs = self._get_test_firewall_policy_attrs(name, audited=False)
with self.firewall_policy(shared=test_db_firewall.SHARED,
firewall_rules=None,
audited=test_db_firewall.AUDITED) as fwp:
data = {'firewall_policy': {'name': name}}
req = self.new_update_request('firewall_policies', data,
fwp['firewall_policy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in attrs.iteritems():
self.assertEqual(res['firewall_policy'][k], v)
def test_update_firewall_policy_with_firewall(self):
name = "new_firewall_policy1"
attrs = self._get_test_firewall_policy_attrs(name, audited=False)
with self.firewall_policy(shared=test_db_firewall.SHARED,
firewall_rules=None,
audited=test_db_firewall.AUDITED) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(
firewall_policy_id=fwp_id,
router_id=self._create_and_get_router(),
admin_state_up=test_db_firewall.ADMIN_STATE_UP
):
data = {'firewall_policy': {'name': name}}
req = self.new_update_request(
'firewall_policies', data, fwp['firewall_policy']['id'])
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
for k, v in attrs.iteritems():
self.assertEqual(res['firewall_policy'][k], v)
def test_update_firewall_rule_with_no_firewall(self):
name = "new_firewall_rule1"
attrs = self._get_test_firewall_rule_attrs(name)
attrs['source_port'] = '10:20'
attrs['destination_port'] = '30:40'
with self.firewall_rule() as fwr:
data = {'firewall_rule': {'name': name,
'source_port': '10:20',
'destination_port': '30:40'}}
req = self.new_update_request(
'firewall_rules', data, fwr['firewall_rule']['id'])
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
for k, v in attrs.iteritems():
self.assertEqual(res['firewall_rule'][k], v)
attrs['source_port'] = '10000'
attrs['destination_port'] = '80'
with self.firewall_rule() as fwr:
data = {'firewall_rule': {'name': name,
'source_port': 10000,
'destination_port': 80}}
req = self.new_update_request('firewall_rules', data,
fwr['firewall_rule']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in attrs.iteritems():
self.assertEqual(res['firewall_rule'][k], v)
attrs['source_port'] = None
attrs['destination_port'] = None
with self.firewall_rule() as fwr:
data = {'firewall_rule': {'name': name,
'source_port': None,
'destination_port': None}}
req = self.new_update_request(
'firewall_rules', data, fwr['firewall_rule']['id'])
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
for k, v in attrs.iteritems():
self.assertEqual(res['firewall_rule'][k], v)
def test_update_firewall_rule_with_firewall(self):
name = "new_firewall_rule1"
attrs = self._get_test_firewall_rule_attrs(name)
with self.firewall_rule() as fwr:
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(
firewall_policy_id=fwp_id,
router_id=self._create_and_get_router(),
admin_state_up=test_db_firewall.ADMIN_STATE_UP
):
fwr_id = fwr['firewall_rule']['id']
data = {'firewall_policy': {'firewall_rules': [fwr_id]}}
req = self.new_update_request(
'firewall_policies', data,
fwp['firewall_policy']['id'])
req.get_response(self.ext_api)
data = {'firewall_rule': {'name': name}}
req = self.new_update_request(
'firewall_rules', data,
fwr['firewall_rule']['id'])
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
attrs['firewall_policy_id'] = fwp_id
for k, v in attrs.iteritems():
self.assertEqual(res['firewall_rule'][k], v)
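    # The insert/remove cases below exercise _rule_action(); note from the
    # expected orderings that when both insert_before and insert_after are
    # supplied, insert_before takes precedence.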
def test_insert_rule_with_no_firewall(self):
attrs = self._get_test_firewall_policy_attrs()
attrs['audited'] = False
attrs['firewall_list'] = []
with contextlib.nested(self.firewall_rule(name='fwr0'),
self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3'),
self.firewall_rule(name='fwr4'),
self.firewall_rule(name='fwr5'),
self.firewall_rule(name='fwr6')) as fwr:
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['id'] = fwp_id
# test insert when rule list is empty
fwr0_id = fwr[0]['firewall_rule']['id']
attrs['firewall_rules'].insert(0, fwr0_id)
self._rule_action('insert', fwp_id, fwr0_id,
insert_before=None,
insert_after=None,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert at top of rule list, insert_before and
# insert_after not provided
fwr1_id = fwr[1]['firewall_rule']['id']
attrs['firewall_rules'].insert(0, fwr1_id)
insert_data = {'firewall_rule_id': fwr1_id}
self._rule_action('insert', fwp_id, fwr0_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs, body_data=insert_data)
# test insert at top of list above existing rule
fwr2_id = fwr[2]['firewall_rule']['id']
attrs['firewall_rules'].insert(0, fwr2_id)
self._rule_action('insert', fwp_id, fwr2_id,
insert_before=fwr1_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert at bottom of list
fwr3_id = fwr[3]['firewall_rule']['id']
attrs['firewall_rules'].append(fwr3_id)
self._rule_action('insert', fwp_id, fwr3_id,
insert_before=None,
insert_after=fwr0_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert in the middle of the list using
# insert_before
fwr4_id = fwr[4]['firewall_rule']['id']
attrs['firewall_rules'].insert(1, fwr4_id)
self._rule_action('insert', fwp_id, fwr4_id,
insert_before=fwr1_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert in the middle of the list using
# insert_after
fwr5_id = fwr[5]['firewall_rule']['id']
attrs['firewall_rules'].insert(1, fwr5_id)
self._rule_action('insert', fwp_id, fwr5_id,
insert_before=None,
insert_after=fwr2_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert when both insert_before and
# insert_after are set
fwr6_id = fwr[6]['firewall_rule']['id']
attrs['firewall_rules'].insert(1, fwr6_id)
self._rule_action('insert', fwp_id, fwr6_id,
insert_before=fwr5_id,
insert_after=fwr5_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
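    # Same insert matrix as above, but with a firewall bound to the
    # policy, so expected_body must also carry the firewall id in
    # firewall_list.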
def test_insert_rule_with_firewall(self):
attrs = self._get_test_firewall_policy_attrs()
attrs['audited'] = False
attrs['firewall_list'] = []
with contextlib.nested(self.firewall_rule(name='fwr0'),
self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3'),
self.firewall_rule(name='fwr4'),
self.firewall_rule(name='fwr5'),
self.firewall_rule(name='fwr6')) as fwr:
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['id'] = fwp_id
with self.firewall(router_id=self._create_and_get_router(),
firewall_policy_id=fwp_id) as fw:
# test insert when rule list is empty
fwr0_id = fwr[0]['firewall_rule']['id']
attrs['firewall_rules'].insert(0, fwr0_id)
attrs['firewall_list'].insert(0, fw['firewall']['id'])
self._rule_action('insert', fwp_id, fwr0_id,
insert_before=None,
insert_after=None,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert at top of rule list, insert_before and
# insert_after not provided
fwr1_id = fwr[1]['firewall_rule']['id']
attrs['firewall_rules'].insert(0, fwr1_id)
insert_data = {'firewall_rule_id': fwr1_id}
self._rule_action(
'insert', fwp_id, fwr0_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs, body_data=insert_data)
# test insert at top of list above existing rule
fwr2_id = fwr[2]['firewall_rule']['id']
attrs['firewall_rules'].insert(0, fwr2_id)
self._rule_action('insert', fwp_id, fwr2_id,
insert_before=fwr1_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert at bottom of list
fwr3_id = fwr[3]['firewall_rule']['id']
attrs['firewall_rules'].append(fwr3_id)
self._rule_action('insert', fwp_id, fwr3_id,
insert_before=None,
insert_after=fwr0_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert in the middle of the list using
# insert_before
fwr4_id = fwr[4]['firewall_rule']['id']
attrs['firewall_rules'].insert(1, fwr4_id)
self._rule_action('insert', fwp_id, fwr4_id,
insert_before=fwr1_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert in the middle of the list using
# insert_after
fwr5_id = fwr[5]['firewall_rule']['id']
attrs['firewall_rules'].insert(1, fwr5_id)
self._rule_action('insert', fwp_id, fwr5_id,
insert_before=None,
insert_after=fwr2_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
# test insert when both insert_before and
# insert_after are set
fwr6_id = fwr[6]['firewall_rule']['id']
attrs['firewall_rules'].insert(1, fwr6_id)
self._rule_action('insert', fwp_id, fwr6_id,
insert_before=fwr5_id,
insert_after=fwr5_id,
expected_code=webob.exc.HTTPOk.code,
expected_body=attrs)
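    # Removal cases: an unknown policy id yields 404, removing a rule that
    # is not associated with the policy yields 400, and successful removes
    # shrink the rule list in order.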
def test_remove_rule_with_no_firewall(self):
attrs = self._get_test_firewall_policy_attrs()
attrs['audited'] = False
attrs['firewall_list'] = []
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['id'] = fwp_id
with contextlib.nested(self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3')) as fr1:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr1]
attrs['firewall_rules'] = fw_rule_ids[:]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request('firewall_policies', data,
fwp_id)
req.get_response(self.ext_api)
# test removing a rule from a policy that does not exist
self._rule_action('remove', '123', fw_rule_ids[1],
expected_code=webob.exc.HTTPNotFound.code,
expected_body=None)
# test removing a rule in the middle of the list
attrs['firewall_rules'].remove(fw_rule_ids[1])
self._rule_action('remove', fwp_id, fw_rule_ids[1],
expected_body=attrs)
# test removing a rule at the top of the list
attrs['firewall_rules'].remove(fw_rule_ids[0])
self._rule_action('remove', fwp_id, fw_rule_ids[0],
expected_body=attrs)
# test removing remaining rule in the list
attrs['firewall_rules'].remove(fw_rule_ids[2])
self._rule_action('remove', fwp_id, fw_rule_ids[2],
expected_body=attrs)
# test removing rule that is not associated with the policy
self._rule_action('remove', fwp_id, fw_rule_ids[2],
expected_code=webob.exc.HTTPBadRequest.code,
expected_body=None)
def test_remove_rule_with_firewall(self):
attrs = self._get_test_firewall_policy_attrs()
attrs['audited'] = False
attrs['firewall_list'] = []
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['id'] = fwp_id
with self.firewall(router_id=self._create_and_get_router(),
firewall_policy_id=fwp_id) as fw:
attrs['firewall_list'].insert(0, fw['firewall']['id'])
with contextlib.nested(self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3')) as fr1:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr1]
attrs['firewall_rules'] = fw_rule_ids[:]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request(
'firewall_policies', data, fwp_id)
req.get_response(self.ext_api)
# test removing a rule from a policy that does not exist
self._rule_action(
'remove', '123',
fw_rule_ids[1],
expected_code=webob.exc.HTTPNotFound.code,
expected_body=None)
# test removing a rule in the middle of the list
attrs['firewall_rules'].remove(fw_rule_ids[1])
self._rule_action('remove', fwp_id, fw_rule_ids[1],
expected_body=attrs)
# test removing a rule at the top of the list
attrs['firewall_rules'].remove(fw_rule_ids[0])
self._rule_action('remove', fwp_id, fw_rule_ids[0],
expected_body=attrs)
# test removing remaining rule in the list
attrs['firewall_rules'].remove(fw_rule_ids[2])
self._rule_action('remove', fwp_id, fw_rule_ids[2],
expected_body=attrs)
                    # test removing a rule that is not
                    # associated with the policy
self._rule_action(
'remove', fwp_id, fw_rule_ids[2],
expected_code=webob.exc.HTTPBadRequest.code,
expected_body=None)
def test_remove_rule_with_firewalls(self):
attrs = self._get_test_firewall_policy_attrs()
attrs['audited'] = False
attrs['firewall_list'] = []
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['id'] = fwp_id
with contextlib.nested(
self.firewall(router_id=self._create_and_get_router(),
firewall_policy_id=fwp_id),
self.firewall(router_id=self._create_and_get_router(),
firewall_policy_id=fwp_id)) as (fw1, fw2):
attrs['firewall_list'].insert(0, fw1['firewall']['id'])
attrs['firewall_list'].insert(1, fw2['firewall']['id'])
with contextlib.nested(self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3')) as fr1:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr1]
attrs['firewall_rules'] = fw_rule_ids[:]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request(
'firewall_policies', data, fwp_id)
req.get_response(self.ext_api)
# test removing a rule from a policy that does not exist
self._rule_action(
'remove', '123',
fw_rule_ids[1],
expected_code=webob.exc.HTTPNotFound.code,
expected_body=None)
# test removing a rule in the middle of the list
attrs['firewall_rules'].remove(fw_rule_ids[1])
self._rule_action('remove', fwp_id, fw_rule_ids[1],
expected_body=attrs)
# test removing a rule at the top of the list
attrs['firewall_rules'].remove(fw_rule_ids[0])
self._rule_action('remove', fwp_id, fw_rule_ids[0],
expected_body=attrs)
# test removing remaining rule in the list
attrs['firewall_rules'].remove(fw_rule_ids[2])
self._rule_action('remove', fwp_id, fw_rule_ids[2],
expected_body=attrs)
                    # test removing a rule that is not
                    # associated with the policy
self._rule_action(
'remove', fwp_id, fw_rule_ids[2],
expected_code=webob.exc.HTTPBadRequest.code,
expected_body=None)

View File

@ -1,517 +0,0 @@
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import testtools
from webob import exc as web_exc
from neutron.api.v2 import attributes
from neutron import context
from neutron.extensions import loadbalancer as lb
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron.tests.unit.vmware.vshield import test_edge_router
_uuid = uuidutils.generate_uuid
LBAAS_PLUGIN_CLASS = "neutron.plugins.vmware.plugin.NsxServicePlugin"
class LoadBalancerTestExtensionManager(
test_edge_router.ServiceRouterTestExtensionManager):
def get_resources(self):
# If l3 resources have been loaded and updated by main API
# router, update the map in the l3 extension so it will load
# the same attributes as the API router
resources = super(LoadBalancerTestExtensionManager,
self).get_resources()
lb_attr_map = lb.RESOURCE_ATTRIBUTE_MAP.copy()
for res in lb.RESOURCE_ATTRIBUTE_MAP.keys():
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res)
if attr_info:
lb.RESOURCE_ATTRIBUTE_MAP[res] = attr_info
lb_resources = lb.Loadbalancer.get_resources()
# restore the original resources once the controllers are created
lb.RESOURCE_ATTRIBUTE_MAP = lb_attr_map
resources.extend(lb_resources)
return resources
class TestLoadbalancerPlugin(
test_db_loadbalancer.LoadBalancerPluginDbTestCase,
test_edge_router.ServiceRouterTest):
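    # Route every mocked VCNS call to the fake backend (self.fc2) so the
    # load balancer operations are served in memory, without a real edge
    # appliance.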
def vcns_loadbalancer_patch(self):
instance = self.vcns_instance
instance.return_value.create_vip.side_effect = (
self.fc2.create_vip)
instance.return_value.get_vip.side_effect = (
self.fc2.get_vip)
instance.return_value.update_vip.side_effect = (
self.fc2.update_vip)
instance.return_value.delete_vip.side_effect = (
self.fc2.delete_vip)
instance.return_value.create_pool.side_effect = (
self.fc2.create_pool)
instance.return_value.get_pool.side_effect = (
self.fc2.get_pool)
instance.return_value.update_pool.side_effect = (
self.fc2.update_pool)
instance.return_value.delete_pool.side_effect = (
self.fc2.delete_pool)
instance.return_value.create_health_monitor.side_effect = (
self.fc2.create_health_monitor)
instance.return_value.get_health_monitor.side_effect = (
self.fc2.get_health_monitor)
instance.return_value.update_health_monitor.side_effect = (
self.fc2.update_health_monitor)
instance.return_value.delete_health_monitor.side_effect = (
self.fc2.delete_health_monitor)
instance.return_value.create_app_profile.side_effect = (
self.fc2.create_app_profile)
instance.return_value.update_app_profile.side_effect = (
self.fc2.update_app_profile)
instance.return_value.delete_app_profile.side_effect = (
self.fc2.delete_app_profile)
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
super(TestLoadbalancerPlugin, self).setUp(
ext_mgr=LoadBalancerTestExtensionManager(),
lb_plugin=LBAAS_PLUGIN_CLASS)
self.vcns_loadbalancer_patch()
self.plugin = manager.NeutronManager.get_plugin()
def tearDown(self):
super(TestLoadbalancerPlugin, self).tearDown()
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
self.ext_api = None
self.plugin = None
def _create_and_get_router(self):
req = self._create_router(self.fmt, self._tenant_id)
res = self.deserialize(self.fmt, req)
return res['router']['id']
def _get_vip_optional_args(self):
args = super(TestLoadbalancerPlugin, self)._get_vip_optional_args()
return args + ('router_id',)
def test_update_healthmonitor(self):
keys = [('type', "TCP"),
('tenant_id', self._tenant_id),
('delay', 20),
('timeout', 20),
('max_retries', 2),
('admin_state_up', False)]
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, health_mon, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
with self.vip(
router_id=self._create_and_get_router(),
pool=pool, subnet=subnet):
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
health_mon, pool['pool']['id']
)
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
req = self.new_update_request(
"health_monitors",
data,
health_mon['health_monitor']['id'])
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['health_monitor'][k], v)
def test_create_vip(self, **extras):
expected = {
'name': 'vip1',
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': 'ACTIVE',
'router_id': self._create_and_get_router(),
'tenant_id': self._tenant_id,
}
expected.update(extras)
name = expected['name']
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
expected['pool_id'] = pool['pool']['id']
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=expected['router_id'], name=name,
pool=pool, subnet=subnet, **extras) as vip:
for k in ('id', 'address', 'port_id', 'pool_id'):
self.assertTrue(vip['vip'].get(k, None))
self.assertEqual(
dict((k, v)
for k, v in vip['vip'].items() if k in expected),
expected
)
def test_create_vip_with_session_persistence(self):
self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'})
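    # Cookie-based persistence is only valid for HTTP vips, so requesting
    # HTTP_COOKIE on a TCP vip must be rejected.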
def test_create_vip_with_invalid_persistence_method(self):
with testtools.ExpectedException(web_exc.HTTPClientError):
self.test_create_vip(
protocol='TCP',
session_persistence={'type': 'HTTP_COOKIE'})
def test_create_vips_with_same_names(self):
new_router_id = self._create_and_get_router()
with self.subnet() as subnet:
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
with contextlib.nested(
self.vip(
name='vip',
router_id=new_router_id,
subnet=subnet, protocol_port=80),
self.vip(
name='vip',
router_id=new_router_id,
subnet=subnet, protocol_port=81),
self.vip(
name='vip',
router_id=new_router_id,
subnet=subnet, protocol_port=82),
) as (vip1, vip2, vip3):
req = self.new_list_request('vips')
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
for index in range(len(res['vips'])):
self.assertEqual(res['vips'][index]['name'], 'vip')
def test_update_vip(self):
name = 'new_vip'
router_id = self._create_and_get_router()
keys = [('router_id', router_id),
('name', name),
('address', "10.0.0.2"),
('protocol_port', 80),
('connection_limit', 100),
('admin_state_up', False),
('status', 'ACTIVE')]
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=router_id, name=name,
pool=pool, subnet=subnet) as vip:
keys.append(('subnet_id', vip['vip']['subnet_id']))
data = {'vip': {'name': name,
'connection_limit': 100,
'session_persistence':
{'type': "APP_COOKIE",
'cookie_name': "jesssionId"},
'admin_state_up': False}}
req = self.new_update_request(
'vips', data, vip['vip']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['vip'][k], v)
def test_delete_vip(self):
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=self._create_and_get_router(),
pool=pool, subnet=subnet, do_delete=False) as vip:
req = self.new_delete_request('vips', vip['vip']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_delete_router_in_use_by_lbservice(self):
router_id = self._create_and_get_router()
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=router_id,
pool=pool, subnet=subnet):
self._delete('routers', router_id,
expected_code=web_exc.HTTPConflict.code)
def test_show_vip(self):
router_id = self._create_and_get_router()
name = "vip_show"
keys = [('name', name),
('protocol_port', 80),
('protocol', 'HTTP'),
('connection_limit', -1),
('admin_state_up', True),
('status', 'ACTIVE'),
('router_id', router_id)]
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=router_id, name=name,
pool=pool, subnet=subnet) as vip:
req = self.new_show_request('vips',
vip['vip']['id'])
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['vip'][k], v)
def test_list_vips(self):
keys_list = []
for i in range(3):
keys_list.append({'name': "vip" + str(i),
'router_id': self._create_and_get_router(),
'protocol_port': 80 + i,
'protocol': "HTTP",
'status': "ACTIVE",
'admin_state_up': True})
with self.subnet() as subnet:
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
with contextlib.nested(
self.vip(
router_id=keys_list[0]['router_id'], name='vip0',
subnet=subnet, protocol_port=80),
self.vip(
router_id=keys_list[1]['router_id'], name='vip1',
subnet=subnet, protocol_port=81),
self.vip(
router_id=keys_list[2]['router_id'], name='vip2',
subnet=subnet, protocol_port=82),
) as (vip1, vip2, vip3):
self._test_list_with_sort(
'vip',
(vip1, vip2, vip3),
[('protocol_port', 'asc'), ('name', 'desc')]
)
req = self.new_list_request('vips')
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res['vips']), 3)
for index in range(len(res['vips'])):
for k, v in keys_list[index].items():
self.assertEqual(res['vips'][index][k], v)
def test_update_pool(self):
data = {'pool': {'name': "new_pool",
'admin_state_up': False}}
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=self._create_and_get_router(),
pool=pool, subnet=subnet):
req = self.new_update_request(
'pools', data, pool['pool']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
for k, v in data['pool'].items():
self.assertEqual(res['pool'][k], v)
def test_create_member(self):
router_id = self._create_and_get_router()
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
pool_id = pool['pool']['id']
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=router_id,
pool=pool, subnet=subnet):
with contextlib.nested(
self.member(address='192.168.1.100',
protocol_port=80,
pool_id=pool_id),
self.member(router_id=router_id,
address='192.168.1.101',
protocol_port=80,
pool_id=pool_id)) as (member1, member2):
req = self.new_show_request('pools',
pool_id,
fmt=self.fmt)
pool_update = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
self.assertIn(member1['member']['id'],
pool_update['pool']['members'])
self.assertIn(member2['member']['id'],
pool_update['pool']['members'])
def _show_pool(self, pool_id):
req = self.new_show_request('pools', pool_id, fmt=self.fmt)
res = req.get_response(self.ext_api)
self.assertEqual(web_exc.HTTPOk.code, res.status_int)
return self.deserialize(self.fmt, res)
def test_update_member(self):
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool(name="pool1"),
self.pool(name="pool2")
) as (subnet, monitor, pool1, pool2):
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool1['pool']['id']
)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool2['pool']['id']
)
with self.vip(
router_id=self._create_and_get_router(),
pool=pool1, subnet=subnet):
keys = [('address', "192.168.1.100"),
('tenant_id', self._tenant_id),
('protocol_port', 80),
('weight', 10),
('pool_id', pool2['pool']['id']),
('admin_state_up', False),
('status', 'ACTIVE')]
with self.member(
pool_id=pool1['pool']['id']) as member:
                    pool1_update = self._show_pool(pool1['pool']['id'])
                    self.assertEqual(len(pool1_update['pool']['members']), 1)
                    pool2_update = self._show_pool(pool2['pool']['id'])
                    self.assertFalse(pool2_update['pool']['members'])
data = {'member': {'pool_id': pool2['pool']['id'],
'weight': 10,
'admin_state_up': False}}
req = self.new_update_request('members',
data,
member['member']['id'])
raw_res = req.get_response(self.ext_api)
self.assertEqual(web_exc.HTTPOk.code, raw_res.status_int)
res = self.deserialize(self.fmt, raw_res)
for k, v in keys:
self.assertEqual(res['member'][k], v)
pool1_update = self._show_pool(pool1['pool']['id'])
pool2_update = self._show_pool(pool2['pool']['id'])
self.assertEqual(len(pool2_update['pool']['members']), 1)
self.assertFalse(pool1_update['pool']['members'])
def test_delete_member(self):
with contextlib.nested(
self.subnet(),
self.health_monitor(),
self.pool()
) as (subnet, monitor, pool):
pool_id = pool['pool']['id']
net_id = subnet['subnet']['network_id']
self._set_net_external(net_id)
self.plugin.create_pool_health_monitor(
context.get_admin_context(),
monitor, pool['pool']['id']
)
with self.vip(
router_id=self._create_and_get_router(),
pool=pool, subnet=subnet):
with self.member(pool_id=pool_id,
do_delete=False) as member:
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
pool_update = self._show_pool(pool['pool']['id'])
self.assertFalse(pool_update['pool']['members'])

View File

@ -1,338 +0,0 @@
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc
from neutron.plugins.vmware.vshield import vcns_driver
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware.vshield import fake_vcns
from neutron_lbaas.services.loadbalancer import constants as lb_constants
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
_uuid = uuidutils.generate_uuid
VSE_ID = 'edge-1'
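# Binding between a neutron pool and its counterpart on the edge
# appliance; each test fills in pool_id before adding the binding to the
# DB.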
POOL_MAP_INFO = {
'pool_id': None,
'edge_id': VSE_ID,
'pool_vseid': 'pool-1'}
VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test")
class VcnsDriverTestCase(test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def vcns_loadbalancer_patch(self):
instance = self.mock_vcns.start()
instance.return_value.create_vip.side_effect = (
self.fc2.create_vip)
instance.return_value.get_vip.side_effect = (
self.fc2.get_vip)
instance.return_value.update_vip.side_effect = (
self.fc2.update_vip)
instance.return_value.delete_vip.side_effect = (
self.fc2.delete_vip)
instance.return_value.create_pool.side_effect = (
self.fc2.create_pool)
instance.return_value.get_pool.side_effect = (
self.fc2.get_pool)
instance.return_value.update_pool.side_effect = (
self.fc2.update_pool)
instance.return_value.delete_pool.side_effect = (
self.fc2.delete_pool)
instance.return_value.create_health_monitor.side_effect = (
self.fc2.create_health_monitor)
instance.return_value.get_health_monitor.side_effect = (
self.fc2.get_health_monitor)
instance.return_value.update_health_monitor.side_effect = (
self.fc2.update_health_monitor)
instance.return_value.delete_health_monitor.side_effect = (
self.fc2.delete_health_monitor)
instance.return_value.create_app_profile.side_effect = (
self.fc2.create_app_profile)
instance.return_value.update_app_profile.side_effect = (
self.fc2.update_app_profile)
instance.return_value.delete_app_profile.side_effect = (
self.fc2.delete_app_profile)
self.pool_id = None
self.vip_id = None
def setUp(self):
self.config_parse(args=['--config-file', VCNS_CONFIG_FILE])
# mock vcns
self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
self.vcns_loadbalancer_patch()
self.driver = vcns_driver.VcnsDriver(mock.Mock())
super(VcnsDriverTestCase, self).setUp()
self.addCleanup(self.fc2.reset_all)
self.addCleanup(self.mock_vcns.stop)
def tearDown(self):
super(VcnsDriverTestCase, self).tearDown()
class TestEdgeLbDriver(VcnsDriverTestCase):
def test_create_and_get_vip(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as pool:
self.pool_id = pool['pool']['id']
POOL_MAP_INFO['pool_id'] = pool['pool']['id']
vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
with self.vip(pool=pool) as res:
vip_create = res['vip']
self.driver.create_vip(ctx, VSE_ID, vip_create)
vip_get = self.driver.get_vip(ctx, vip_create['id'])
for k, v in vip_get.iteritems():
self.assertEqual(vip_create[k], v)
def test_create_two_vips_with_same_name(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as pool:
self.pool_id = pool['pool']['id']
POOL_MAP_INFO['pool_id'] = pool['pool']['id']
vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
with self.vip(pool=pool) as res:
vip_create = res['vip']
self.driver.create_vip(ctx, VSE_ID, vip_create)
self.assertRaises(vcns_exc.Forbidden,
self.driver.create_vip,
ctx, VSE_ID, vip_create)
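    # _convert_app_profile maps a neutron session_persistence spec onto a
    # VCNS application profile: cookie persistence is HTTP-only, and any
    # type other than SOURCE_IP is rejected for HTTPS and TCP.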
def test_convert_app_profile(self):
app_profile_name = 'app_profile_name'
sess_persist1 = {'type': "SOURCE_IP"}
sess_persist2 = {'type': "HTTP_COOKIE"}
sess_persist3 = {'type': "APP_COOKIE",
'cookie_name': "app_cookie_name"}
# protocol is HTTP and type is SOURCE_IP
expect_vcns_app_profile1 = {
'insertXForwardedFor': False,
'name': app_profile_name,
'serverSslEnabled': False,
'sslPassthrough': False,
'template': lb_constants.PROTOCOL_HTTP,
'persistence': {'method': 'sourceip'}}
vcns_app_profile = self.driver._convert_app_profile(
app_profile_name, sess_persist1, lb_constants.PROTOCOL_HTTP)
for k, v in expect_vcns_app_profile1.iteritems():
self.assertEqual(vcns_app_profile[k], v)
# protocol is HTTP and type is HTTP_COOKIE and APP_COOKIE
expect_vcns_app_profile2 = {
'insertXForwardedFor': False,
'name': app_profile_name,
'serverSslEnabled': False,
'sslPassthrough': False,
'template': lb_constants.PROTOCOL_HTTP,
'persistence': {'method': 'cookie',
'cookieName': 'default_cookie_name',
'cookieMode': 'insert'}}
vcns_app_profile = self.driver._convert_app_profile(
app_profile_name, sess_persist2, lb_constants.PROTOCOL_HTTP)
for k, v in expect_vcns_app_profile2.iteritems():
self.assertEqual(vcns_app_profile[k], v)
expect_vcns_app_profile3 = {
'insertXForwardedFor': False,
'name': app_profile_name,
'serverSslEnabled': False,
'sslPassthrough': False,
'template': lb_constants.PROTOCOL_HTTP,
'persistence': {'method': 'cookie',
'cookieName': sess_persist3['cookie_name'],
'cookieMode': 'app'}}
vcns_app_profile = self.driver._convert_app_profile(
app_profile_name, sess_persist3, lb_constants.PROTOCOL_HTTP)
for k, v in expect_vcns_app_profile3.iteritems():
self.assertEqual(vcns_app_profile[k], v)
# protocol is HTTPS and type is SOURCE_IP
expect_vcns_app_profile1 = {
'insertXForwardedFor': False,
'name': app_profile_name,
'serverSslEnabled': False,
'sslPassthrough': True,
'template': lb_constants.PROTOCOL_HTTPS,
'persistence': {'method': 'sourceip'}}
vcns_app_profile = self.driver._convert_app_profile(
app_profile_name, sess_persist1, lb_constants.PROTOCOL_HTTPS)
for k, v in expect_vcns_app_profile1.iteritems():
self.assertEqual(vcns_app_profile[k], v)
# protocol is HTTPS, and type isn't SOURCE_IP
self.assertRaises(vcns_exc.VcnsBadRequest,
self.driver._convert_app_profile,
app_profile_name,
sess_persist2, lb_constants.PROTOCOL_HTTPS)
self.assertRaises(vcns_exc.VcnsBadRequest,
self.driver._convert_app_profile,
app_profile_name,
sess_persist3, lb_constants.PROTOCOL_HTTPS)
# protocol is TCP and type is SOURCE_IP
expect_vcns_app_profile1 = {
'insertXForwardedFor': False,
'name': app_profile_name,
'serverSslEnabled': False,
'sslPassthrough': False,
'template': lb_constants.PROTOCOL_TCP,
'persistence': {'method': 'sourceip'}}
vcns_app_profile = self.driver._convert_app_profile(
app_profile_name, sess_persist1, lb_constants.PROTOCOL_TCP)
for k, v in expect_vcns_app_profile1.iteritems():
self.assertEqual(vcns_app_profile[k], v)
# protocol is TCP, and type isn't SOURCE_IP
self.assertRaises(vcns_exc.VcnsBadRequest,
self.driver._convert_app_profile,
app_profile_name,
sess_persist2, lb_constants.PROTOCOL_TCP)
self.assertRaises(vcns_exc.VcnsBadRequest,
self.driver._convert_app_profile,
app_profile_name,
sess_persist3, lb_constants.PROTOCOL_TCP)
def test_update_vip(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as pool:
self.pool_id = pool['pool']['id']
POOL_MAP_INFO['pool_id'] = pool['pool']['id']
vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
with self.vip(pool=pool) as res:
vip_create = res['vip']
self.driver.create_vip(ctx, VSE_ID, vip_create)
vip_update = {'id': vip_create['id'],
'pool_id': pool['pool']['id'],
'name': 'update_name',
'description': 'description',
'address': 'update_address',
'port_id': 'update_port_id',
'protocol_port': 'protocol_port',
'protocol': 'update_protocol'}
self.driver.update_vip(ctx, vip_update)
vip_get = self.driver.get_vip(ctx, vip_create['id'])
for k, v in vip_get.iteritems():
if k in vip_update:
self.assertEqual(vip_update[k], v)
def test_delete_vip(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as pool:
self.pool_id = pool['pool']['id']
POOL_MAP_INFO['pool_id'] = pool['pool']['id']
vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
with self.vip(pool=pool) as res:
vip_create = res['vip']
self.driver.create_vip(ctx, VSE_ID, vip_create)
self.driver.delete_vip(ctx, vip_create['id'])
self.assertRaises(vcns_exc.VcnsNotFound,
self.driver.get_vip,
ctx,
vip_create['id'])
    # Test Pool Operation
def test_create_and_get_pool(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as p:
self.pool_id = p['pool']['id']
pool_create = p['pool']
self.driver.create_pool(ctx, VSE_ID, pool_create, [])
pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID)
for k, v in pool_get.iteritems():
self.assertEqual(pool_create[k], v)
def test_create_two_pools_with_same_name(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as p:
self.pool_id = p['pool']['id']
pool_create = p['pool']
self.driver.create_pool(ctx, VSE_ID, pool_create, [])
self.assertRaises(vcns_exc.Forbidden,
self.driver.create_pool,
ctx, VSE_ID, pool_create, [])
def test_update_pool(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as p:
self.pool_id = p['pool']['id']
pool_create = p['pool']
self.driver.create_pool(ctx, VSE_ID, pool_create, [])
pool_update = {'id': pool_create['id'],
'lb_method': 'lb_method',
'name': 'update_name',
'members': [],
'health_monitors': []}
self.driver.update_pool(ctx, VSE_ID, pool_update, [])
pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID)
for k, v in pool_get.iteritems():
if k in pool_update:
self.assertEqual(pool_update[k], v)
def test_delete_pool(self):
ctx = context.get_admin_context()
with self.pool(do_delete=False) as p:
self.pool_id = p['pool']['id']
pool_create = p['pool']
self.driver.create_pool(ctx, VSE_ID, pool_create, [])
self.driver.delete_pool(ctx, pool_create['id'], VSE_ID)
self.assertRaises(vcns_exc.VcnsNotFound,
self.driver.get_pool,
ctx,
pool_create['id'],
VSE_ID)
def test_create_and_get_monitor(self):
ctx = context.get_admin_context()
with self.health_monitor(do_delete=False) as m:
monitor_create = m['health_monitor']
self.driver.create_health_monitor(ctx, VSE_ID, monitor_create)
monitor_get = self.driver.get_health_monitor(
ctx, monitor_create['id'], VSE_ID)
for k, v in monitor_get.iteritems():
self.assertEqual(monitor_create[k], v)
def test_update_health_monitor(self):
ctx = context.get_admin_context()
with self.health_monitor(do_delete=False) as m:
monitor_create = m['health_monitor']
self.driver.create_health_monitor(
ctx, VSE_ID, monitor_create)
monitor_update = {'id': monitor_create['id'],
'delay': 'new_delay',
'timeout': "new_timeout",
'type': 'type',
'max_retries': "max_retries"}
self.driver.update_health_monitor(
ctx, VSE_ID, monitor_create, monitor_update)
monitor_get = self.driver.get_health_monitor(
ctx, monitor_create['id'], VSE_ID)
for k, v in monitor_get.iteritems():
if k in monitor_update:
self.assertEqual(monitor_update[k], v)
def test_delete_health_monitor(self):
ctx = context.get_admin_context()
with self.health_monitor(do_delete=False) as m:
monitor_create = m['health_monitor']
self.driver.create_health_monitor(ctx, VSE_ID, monitor_create)
self.driver.delete_health_monitor(
ctx, monitor_create['id'], VSE_ID)
self.assertRaises(vcns_exc.VcnsNotFound,
self.driver.get_health_monitor,
ctx,
monitor_create['id'],
VSE_ID)

View File

@ -1,394 +0,0 @@
# Copyright 2014 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import webob.exc
from neutron.api.v2 import attributes
from neutron.db.vpn import vpn_db
from neutron.extensions import vpnaas
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests.unit.db.vpn import test_db_vpnaas
from neutron.tests.unit.vmware.vshield import test_edge_router
_uuid = uuidutils.generate_uuid
class VPNTestExtensionManager(
test_edge_router.ServiceRouterTestExtensionManager):
def get_resources(self):
# If l3 resources have been loaded and updated by main API
# router, update the map in the l3 extension so it will load
# the same attributes as the API router
resources = super(VPNTestExtensionManager, self).get_resources()
vpn_attr_map = copy.deepcopy(vpnaas.RESOURCE_ATTRIBUTE_MAP)
for res in vpnaas.RESOURCE_ATTRIBUTE_MAP.keys():
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res)
if attr_info:
vpnaas.RESOURCE_ATTRIBUTE_MAP[res] = attr_info
vpn_resources = vpnaas.Vpnaas.get_resources()
# restore the original resources once the controllers are created
vpnaas.RESOURCE_ATTRIBUTE_MAP = vpn_attr_map
resources.extend(vpn_resources)
return resources
class TestVpnPlugin(test_db_vpnaas.VPNTestMixin,
test_edge_router.ServiceRouterTest):
def vcns_vpn_patch(self):
instance = self.vcns_instance
instance.return_value.update_ipsec_config.side_effect = (
self.fc2.update_ipsec_config)
instance.return_value.get_ipsec_config.side_effect = (
self.fc2.get_ipsec_config)
instance.return_value.delete_ipsec_config.side_effect = (
self.fc2.delete_ipsec_config)
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
self.saved_attr_map[resource] = attrs.copy()
super(TestVpnPlugin, self).setUp(ext_mgr=VPNTestExtensionManager())
self.vcns_vpn_patch()
self.plugin = manager.NeutronManager.get_plugin()
self.router_id = None
def tearDown(self):
super(TestVpnPlugin, self).tearDown()
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
self.ext_api = None
self.plugin = None
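    # All VPN tests run against an advanced service router: the fixture
    # below creates it with 'service_router' enabled and an external
    # gateway attached.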
@contextlib.contextmanager
def router(self, vlan_id=None):
with self._create_l3_ext_network(vlan_id) as net:
with self.subnet(cidr='100.0.0.0/24', network=net) as s:
data = {'router': {'tenant_id': self._tenant_id}}
data['router']['service_router'] = True
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
router['router']['id'],
s['subnet']['network_id'])
router = self._show('routers', router['router']['id'])
yield router
self._delete('routers', router['router']['id'])
def test_create_vpnservice(self, **extras):
"""Test case to create a vpnservice."""
description = 'my-vpn-service'
expected = {'name': 'vpnservice1',
'description': 'my-vpn-service',
'admin_state_up': True,
'status': 'ACTIVE',
'tenant_id': self._tenant_id, }
expected.update(extras)
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
expected['router_id'] = router['router']['id']
expected['subnet_id'] = subnet['subnet']['id']
name = expected['name']
with self.vpnservice(name=name,
subnet=subnet,
router=router,
description=description,
**extras) as vpnservice:
self.assertEqual(dict((k, v) for k, v in
vpnservice['vpnservice'].items()
if k in expected),
expected)
def test_create_vpnservices_with_same_router(self, **extras):
"""Test case to create two vpnservices with same router."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router):
res = self._create_vpnservice(
'json', 'vpnservice2', True,
router_id=(router['router']['id']),
subnet_id=(subnet['subnet']['id']))
self.assertEqual(
res.status_int, webob.exc.HTTPConflict.code)
def test_update_vpnservice(self):
"""Test case to update a vpnservice."""
name = 'new_vpnservice1'
expected = [('name', name)]
with contextlib.nested(
self.subnet(cidr='10.2.0.0/24'),
self.router()) as (subnet, router):
with self.vpnservice(name=name,
subnet=subnet,
router=router) as vpnservice:
expected.append(('subnet_id',
vpnservice['vpnservice']['subnet_id']))
expected.append(('router_id',
vpnservice['vpnservice']['router_id']))
data = {'vpnservice': {'name': name,
'admin_state_up': False}}
expected.append(('admin_state_up', False))
self._set_active(vpn_db.VPNService,
vpnservice['vpnservice']['id'])
req = self.new_update_request(
'vpnservices',
data,
vpnservice['vpnservice']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
for k, v in expected:
self.assertEqual(res['vpnservice'][k], v)
def test_delete_vpnservice(self):
"""Test case to delete a vpnservice."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice',
subnet=subnet,
router=router,
do_delete=False) as vpnservice:
req = self.new_delete_request(
'vpnservices', vpnservice['vpnservice']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_delete_router_in_use_by_vpnservice(self):
"""Test delete router in use by vpn service."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self._delete('routers', router['router']['id'],
expected_code=webob.exc.HTTPConflict.code)
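    # Helper: build IKE/IPsec policies, a subnet/router pair and a
    # vpnservice, then try to create the ipsec site connection and check
    # the result against expected_status_int.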
def _test_create_ipsec_site_connection(self, key_overrides=None,
ike_key_overrides=None,
ipsec_key_overrides=None,
setup_overrides=None,
expected_status_int=200):
"""Create ipsec_site_connection and check results."""
params = {'ikename': 'ikepolicy1',
'ipsecname': 'ipsecpolicy1',
'vpnsname': 'vpnservice1',
'subnet_cidr': '10.2.0.0/24',
'subnet_version': 4}
if setup_overrides:
params.update(setup_overrides)
expected = {'name': 'connection1',
'description': 'my-ipsec-connection',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'ACTIVE',
'admin_state_up': True}
if key_overrides:
expected.update(key_overrides)
ike_expected = {'name': params['ikename'],
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'ike_version': 'v1',
'pfs': 'group5'}
if ike_key_overrides:
ike_expected.update(ike_key_overrides)
ipsec_expected = {'name': params['ipsecname'],
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'group5'}
if ipsec_key_overrides:
ipsec_expected.update(ipsec_key_overrides)
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
with contextlib.nested(
self.ikepolicy(self.fmt, ike_expected['name'],
ike_expected['auth_algorithm'],
ike_expected['encryption_algorithm'],
ike_version=ike_expected['ike_version'],
pfs=ike_expected['pfs']),
self.ipsecpolicy(self.fmt, ipsec_expected['name'],
ipsec_expected['auth_algorithm'],
ipsec_expected['encryption_algorithm'],
pfs=ipsec_expected['pfs']),
self.subnet(cidr=params['subnet_cidr'],
ip_version=params['subnet_version']),
self.router()) as (
ikepolicy, ipsecpolicy, subnet, router):
with self.vpnservice(name=params['vpnsname'], subnet=subnet,
router=router) as vpnservice1:
expected['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
expected['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
expected['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
try:
with self.ipsec_site_connection(
self.fmt,
expected['name'],
expected['peer_address'],
expected['peer_id'],
expected['peer_cidrs'],
expected['mtu'],
expected['psk'],
expected['initiator'],
dpd['action'],
dpd['interval'],
dpd['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
expected['admin_state_up'],
description=expected['description']
) as ipsec_site_connection:
if expected_status_int != 200:
self.fail("Expected failure on create")
self._check_ipsec_site_connection(
ipsec_site_connection['ipsec_site_connection'],
expected,
dpd)
except webob.exc.HTTPClientError as ce:
self.assertEqual(ce.code, expected_status_int)
def test_create_ipsec_site_connection(self, **extras):
"""Test case to create an ipsec_site_connection."""
self._test_create_ipsec_site_connection(key_overrides=extras)
def test_create_ipsec_site_connection_invalid_ikepolicy(self):
self._test_create_ipsec_site_connection(
ike_key_overrides={'ike_version': 'v2'},
expected_status_int=400)
def test_create_ipsec_site_connection_invalid_ipsecpolicy(self):
self._test_create_ipsec_site_connection(
ipsec_key_overrides={'encryption_algorithm': 'aes-192'},
expected_status_int=400)
self._test_create_ipsec_site_connection(
ipsec_key_overrides={'pfs': 'group14'},
expected_status_int=400)
    def _test_update_ipsec_site_connection(self,
                                           update=None,
                                           overrides=None,
                                           expected_status_int=200):
        """Creates and then updates an ipsec_site_connection."""
        if update is None:
            update = {'name': 'new name'}
expected = {'name': 'new_ipsec_site_connection',
'ikename': 'ikepolicy1',
'ipsecname': 'ipsecpolicy1',
'vpnsname': 'vpnservice1',
'description': 'my-ipsec-connection',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'ACTIVE',
'admin_state_up': True,
'action': 'hold',
'interval': 40,
'timeout': 120,
'subnet_cidr': '10.2.0.0/24',
'subnet_version': 4,
'make_active': True}
if overrides:
expected.update(overrides)
with contextlib.nested(
self.ikepolicy(name=expected['ikename']),
self.ipsecpolicy(name=expected['ipsecname']),
self.subnet(cidr=expected['subnet_cidr'],
ip_version=expected['subnet_version']),
self.router()
) as (ikepolicy, ipsecpolicy, subnet, router):
with self.vpnservice(name=expected['vpnsname'], subnet=subnet,
router=router) as vpnservice1:
expected['vpnservice_id'] = vpnservice1['vpnservice']['id']
expected['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
expected['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
with self.ipsec_site_connection(
self.fmt,
expected['name'],
expected['peer_address'],
expected['peer_id'],
expected['peer_cidrs'],
expected['mtu'],
expected['psk'],
expected['initiator'],
expected['action'],
expected['interval'],
expected['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
expected['admin_state_up'],
description=expected['description']
) as ipsec_site_connection:
data = {'ipsec_site_connection': update}
if expected.get('make_active'):
self._set_active(
vpn_db.IPsecSiteConnection,
(ipsec_site_connection['ipsec_site_connection']
['id']))
req = self.new_update_request(
'ipsec-site-connections',
data,
ipsec_site_connection['ipsec_site_connection']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(expected_status_int, res.status_int)
if expected_status_int == 200:
res_dict = self.deserialize(self.fmt, res)
for k, v in update.items():
self.assertEqual(
res_dict['ipsec_site_connection'][k], v)
def test_update_ipsec_site_connection(self):
"""Test case for valid updates to IPSec site connection."""
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
self._test_update_ipsec_site_connection(update={'dpd': dpd})
self._test_update_ipsec_site_connection(update={'mtu': 2000})
def test_delete_ipsec_site_connection(self):
"""Test case to delete a ipsec_site_connection."""
with self.ipsec_site_connection(
do_delete=False) as ipsec_site_connection:
req = self.new_delete_request(
'ipsec-site-connections',
ipsec_site_connection['ipsec_site_connection']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)

1
vmware_nsx/__init__.py Normal file
View File

@ -0,0 +1 @@
__import__('pkg_resources').declare_namespace(__name__)

View File

@ -14,7 +14,7 @@
#
from neutron.db import l3_dvr_db
from neutron.plugins.vmware.extensions import servicerouter
from vmware_nsx.neutron.plugins.vmware.extensions import servicerouter
class ServiceRouter_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin):

Some files were not shown because too many files have changed in this diff