diff --git a/.gitreview b/.gitreview index 184583f0d6..b22070e80b 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.openstack.org port=29418 -project=openstack/neutron.git +project=stackforge/vmware-nsx.git diff --git a/.testr.conf b/.testr.conf index c180b0319f..d6c25c2a08 100644 --- a/.testr.conf +++ b/.testr.conf @@ -1,4 +1,4 @@ [DEFAULT] -test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION +test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./vmware_nsx/neutron/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list diff --git a/MANIFEST.in b/MANIFEST.in index 4e527c7fae..96ac0f6bc4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,11 +2,7 @@ include AUTHORS include README.rst include ChangeLog include LICENSE -include neutron/db/migration/README -include neutron/db/migration/alembic.ini -include neutron/db/migration/alembic_migrations/script.py.mako -include neutron/db/migration/alembic_migrations/versions/README -recursive-include neutron/locale * +recursive-include vmware_nsx/neutron/locale * exclude .gitignore exclude .gitreview diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000000..90faf0b784 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,94 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fileinput +import fnmatch +import os +import sys + +sys.path.insert(0, os.path.abspath('../..')) +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + #'sphinx.ext.intersphinx', + 'oslosphinx' +] + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# A list of glob-style patterns that should be excluded when looking for source +# files. +exclude_patterns = [ + 'api/tests.*', # avoid generating docs from tests + 'api/vmware_nsx._*', # skip private modules +] + +# Prune the excluded patterns from the autoindex +PATH = 'api/autoindex.rst' +if os.path.isfile(PATH) and os.access(PATH, os.R_OK): + for line in fileinput.input(PATH, inplace=True): + found = False + for pattern in exclude_patterns: + if fnmatch.fnmatch(line, '*' + pattern[4:]): + found = True + if not found: + print line, + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'vmware-nsx' +copyright = u'2014, OpenStack Foundation' + +# If true, '()' will be appended to :func: etc. cross-reference text.
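+# (For instance, a reference written as :func:`os.path.join` would then +# render as ``os.path.join()``.)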
+add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' +# html_static_path = ['static'] + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', 'manual'), +] + +# Example configuration for intersphinx: refer to the Python standard library. +#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst new file mode 100644 index 0000000000..8cb3146fef --- /dev/null +++ b/doc/source/contributing.rst @@ -0,0 +1 @@ +.. include:: ../../CONTRIBUTING.rst \ No newline at end of file diff --git a/doc/source/history.rst b/doc/source/history.rst new file mode 100644 index 0000000000..69ed4fe6c2 --- /dev/null +++ b/doc/source/history.rst @@ -0,0 +1 @@ +.. include:: ../../ChangeLog diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000000..65a7168525 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,28 @@ +Welcome to vmware-nsx's documentation! +======================================= + +Contents: + +.. toctree:: + :maxdepth: 2 + + readme + installation + usage + contributing + history + +Code Documentation +================== + +.. toctree:: + :maxdepth: 1 + + api/autoindex + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/source/installation.rst b/doc/source/installation.rst new file mode 100644 index 0000000000..5c01c7ae5f --- /dev/null +++ b/doc/source/installation.rst @@ -0,0 +1,12 @@ +============ +Installation +============ + +At the command line:: + + $ pip install vmware-nsx
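+ +Or, to install from source (assuming the stackforge repository named in +.gitreview):: + + $ git clone https://git.openstack.org/stackforge/vmware-nsx + $ cd vmware-nsx + $ pip install .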
+ +Or, if you have virtualenvwrapper installed:: + + $ mkvirtualenv vmware-nsx + $ pip install vmware-nsx \ No newline at end of file diff --git a/doc/source/readme.rst b/doc/source/readme.rst new file mode 100644 index 0000000000..6b2b3ec68c --- /dev/null +++ b/doc/source/readme.rst @@ -0,0 +1 @@ +.. include:: ../README.rst \ No newline at end of file diff --git a/doc/source/usage.rst b/doc/source/usage.rst new file mode 100644 index 0000000000..66a594bca1 --- /dev/null +++ b/doc/source/usage.rst @@ -0,0 +1,7 @@ +======== +Usage +======== + +To use in a project:: + + import vmware_nsx \ No newline at end of file diff --git a/etc/api-paste.ini b/etc/api-paste.ini new file mode 100644 index 0000000000..29f01e20b2 --- /dev/null +++ b/etc/api-paste.ini @@ -0,0 +1,30 @@ +[composite:neutron] +use = egg:Paste#urlmap +/: neutronversions +/v2.0: neutronapi_v2_0 + +[composite:neutronapi_v2_0] +use = call:neutron.auth:pipeline_factory +noauth = request_id catch_errors extensions neutronapiapp_v2_0 +keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 + +[filter:request_id] +paste.filter_factory = oslo.middleware:RequestId.factory + +[filter:catch_errors] +paste.filter_factory = oslo.middleware:CatchErrors.factory + +[filter:keystonecontext] +paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:extensions] +paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory + +[app:neutronversions] +paste.app_factory = neutron.api.versions:Versions.factory + +[app:neutronapiapp_v2_0] +paste.app_factory = neutron.api.v2.router:APIRouter.factory diff --git a/etc/dhcp_agent.ini b/etc/dhcp_agent.ini new file mode 100644 index 0000000000..0f99878944 --- /dev/null +++ b/etc/dhcp_agent.ini @@ -0,0 +1,91 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is the number of +# seconds between attempts. +# resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and an +# iproute2 package that supports namespaces). +# use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +# enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request.
In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +# enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +# dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +# dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Use broadcast in DHCP replies +# dhcp_broadcast_reply = False + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/etc/init.d/neutron-server b/etc/init.d/neutron-server new file mode 100755 index 0000000000..98e5da6104 --- /dev/null +++ b/etc/init.d/neutron-server @@ -0,0 +1,68 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: neutron-server +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: neutron-server +# Description: Provides the Neutron networking service +### END INIT INFO + +set -e + +PIDFILE=/var/run/neutron/neutron-server.pid +LOGFILE=/var/log/neutron/neutron-server.log + +DAEMON=/usr/bin/neutron-server +DAEMON_ARGS="--log-file=$LOGFILE" +DAEMON_DIR=/var/run + +ENABLED=true + +if test -f /etc/default/neutron-server; then + . /etc/default/neutron-server +fi + +mkdir -p /var/run/neutron +mkdir -p /var/log/neutron + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" +export TMPDIR=/var/lib/neutron/tmp + +if [ ! -x ${DAEMON} ] ; then + exit 0 +fi + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting neutron server" "neutron-server" + start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS + log_end_msg $? + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping neutron server" "neutron-server" + start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} + log_end_msg $? + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + $0 stop + sleep 1 + $0 start + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $? 
+ ;; + *) + log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/etc/l3_agent.ini b/etc/l3_agent.ini new file mode 100644 index 0000000000..94c9714754 --- /dev/null +++ b/etc/l3_agent.ini @@ -0,0 +1,102 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False + +# The L3 agent requires that an interface driver be set. Choose the one that +# best matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that support the L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and an +# iproute2 package that supports namespaces). +# use_namespaces = True + +# If use_namespaces is set to False, the agent can only configure one router. +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow the L3 agent to support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway. +# handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to an +# empty value for the Linux bridge. When this parameter is set, each L3 agent +# can be associated with no more than one external network. +# external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +# metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. +# send_arp_for_ha = 3 + +# Seconds between re-syncing routers' data, if needed +# periodic_interval = 40 + +# Seconds to wait after agent start before syncing routers' data +# periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 + +# The working mode for the agent.
Allowed values are: +# - legacy: this preserves the existing behavior where the L3 agent is +# deployed on a centralized networking node to provide L3 services +# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR. +# - dvr: this mode enables DVR functionality, and must be used for an L3 +# agent that runs on a compute host. +# - dvr_snat: this enables centralized SNAT support in conjunction with +# DVR. This mode must be used for an L3 agent running on a centralized +# node (or in single-host deployments, e.g. devstack). +# agent_mode = legacy + +# Location to store keepalived and all HA configurations +# ha_confs_path = $state_path/ha_confs + +# VRRP authentication type AH/PASS +# ha_vrrp_auth_type = PASS + +# VRRP authentication password +# ha_vrrp_auth_password = + +# The advertisement interval in seconds +# ha_vrrp_advert_int = 2 diff --git a/etc/metadata_agent.ini b/etc/metadata_agent.ini new file mode 100644 index 0000000000..84442ea1ac --- /dev/null +++ b/etc/metadata_agent.ini @@ -0,0 +1,59 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# The Neutron user information for accessing the Neutron API. +auth_url = http://localhost:5000/v2.0 +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +# nova_metadata_ip = 127.0.0.1 + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 + +# Which protocol to use for requests to Nova metadata server, http or https +# nova_metadata_protocol = http + +# Whether insecure SSL connection should be accepted for Nova metadata server +# requests +# nova_metadata_insecure = False + +# Client certificate for nova api, needed when nova api requires client +# certificates +# nova_client_cert = + +# Private key for nova client certificate +# nova_client_priv_key = + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +# metadata_proxy_shared_secret = + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server. Defaults to +# half the number of CPU cores +# metadata_workers = + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 4096 + +# URL to connect to the cache backend. +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. 
+# cache_url = memory://?default_ttl=5 diff --git a/etc/metering_agent.ini b/etc/metering_agent.ini new file mode 100644 index 0000000000..88826ce79f --- /dev/null +++ b/etc/metering_agent.ini @@ -0,0 +1,18 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# Default driver: +# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver +# Example of non-default driver +# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver + +# Interval between two metering measures +# measure_interval = 30 + +# Interval between two metering reports +# report_interval = 300 + +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# use_namespaces = True diff --git a/etc/neutron.conf b/etc/neutron.conf new file mode 100644 index 0000000000..623456c310 --- /dev/null +++ b/etc/neutron.conf @@ -0,0 +1,656 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +# verbose = False + +# =========Start Global Config Option for Distributed L3 Router=============== +# Setting the "router_distributed" flag to "True" will default to the creation +# of distributed tenant routers. The admin can override this flag by specifying +# the type of the router on the create request (admin-only attribute). Default +# value is "False" to support legacy mode (centralized) routers. +# +# router_distributed = False +# +# ===========End Global Config Option for Distributed L3 Router=============== + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +# debug = False + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +# state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not use_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server to +# bind_host = 0.0.0.0 + +# Port to bind the API server to +# bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +# core_plugin = +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
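+# For the NSX plugin carried in this tree, a typical core plugin setting +# would be the 'vmware' entrypoint name (assuming it is registered in the +# accompanying setup.cfg): +# core_plugin = vmware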
+ +# Paste configuration file +# api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone' (default), 'noauth'. +# auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# DVR Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be randomly +# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to +# avoid mixing them up with MACs allocated for tenant ports. +# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00 +# The default is 3 octet +# dvr_base_mac = fa:16:3f:00:00:00 + +# Maximum number of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds). Use -1 to +# tell dnsmasq to use infinite lease times. +# dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +# allow_overlapping_ips = False +# Ensure that configured gateway is on subnet. For IPv6, validate only if +# gateway is not a link local address. Deprecated, to be removed during the +# K release, at which point the check will be mandatory. +# force_gateway_on_subnet = True + +# Default maximum number of items returned in a single response. A value of +# 'infinite' or a negative integer means no limit; otherwise the value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, the server will just return pagination_max_limit +# items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# Maximum number of routes per router +# max_routes = 30 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent.
It will schedule non-hosted +# networks to the first DHCP agent that sends a get_active_networks message to +# the neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to the first L3 agent that sends a sync_routers message to the +# neutron server +# router_auto_schedule = True + +# Allow automatic rescheduling of routers from dead L3 agents with +# admin_state_up set to True to alive agents. +# allow_automatic_l3agent_failover = False + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== items for l3 extension ============== +# Enable high availability for virtual routers. +# l3_ha = False +# +# Maximum number of l3 agents which a HA router will be scheduled on. If it +# is set to 0 the router will be scheduled on every agent. +# max_l3_agents_per_router = 3 +# +# Minimum number of l3 agents which a HA router will be scheduled on. The +# default value is 2. +# min_l3_agents_per_router = 2 +# +# CIDR of the administrative network if HA mode is enabled +# l3_ha_net_cidr = 169.254.192.0/18 +# =========== end of items for l3 extension ======= + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +# notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update its cache. +# notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +# nova_url = http://127.0.0.1:8774/v2 + +# Name of nova region to use.
Useful if keystone manages more than one region +# nova_region_name = + +# Username for connection to nova in admin context +# nova_admin_username = + +# The uuid of the admin nova tenant +# nova_admin_tenant_id = + +# The name of the admin nova tenant. If the uuid of the admin nova tenant +# is set, this is optional. Useful for cases where the uuid of the admin +# nova tenant is not available when configuration is being done. +# nova_admin_tenant_name = + +# Password for connection to nova in admin context. +# nova_admin_password = + +# Authorization URL for connection to nova in admin context. +# nova_admin_auth_url = + +# CA file for novaclient to verify server certificates +# nova_ca_certificates_file = + +# Boolean to control ignoring SSL errors on the nova url +# nova_api_insecure = False + +# Number of seconds between sending events to nova if there are any events to send +# send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. 
(string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=oslo + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). 
(string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + +[quotas] +# Default driver to use for quota checks +# quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +# quota_items = network,subnet,port + +# Default number of resources allowed per tenant. A negative value means +# unlimited. +# default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +# quota_network = 10 + +# Number of subnets allowed per tenant. A negative value means unlimited. +# quota_subnet = 10 + +# Number of ports allowed per tenant. A negative value means unlimited. +# quota_port = 50 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +# quota_security_group = 10 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +# quota_security_group_rule = 100 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on OpenStack. However, on the back end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on OpenStack. However, on the back end, a health monitor is a +# resource consumer and that is the reason why quota is possible. +# quota_health_monitor = -1 + +# Number of loadbalancers allowed per tenant. A negative value means unlimited. +# quota_loadbalancer = 10 + +# Number of listeners allowed per tenant. A negative value means unlimited. +# quota_listener = -1 + +# Number of v2 health monitors allowed per tenant. A negative value means +# unlimited. These health monitors exist under the lbaas v2 API +# quota_healthmonitor = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +# Number of firewalls allowed per tenant. A negative value means unlimited. +# quota_firewall = 1 + +# Number of firewall policies allowed per tenant. A negative value means +# unlimited. +# quota_firewall_policy = 1 + +# Number of firewall rules allowed per tenant. A negative value means +# unlimited. +# quota_firewall_rule = 100 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the command directly +# root_helper = sudo
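+# In production deployments this is typically the rootwrap invocation +# quoted above, e.g.: +# root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf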
+ +# Set to true to add comments to generated iptables rules that describe +# each rule's purpose. (System must support the iptables comments module.) +# comment_iptables_rules = True + +# =========== items for agent management extension ============= +# Seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +# report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +# connection = sqlite:// +# NOTE: In deployment the [database] section and its connection attribute may +# be set in the corresponding core plugin '.ini' file. However, it is suggested +# to put the [database] section and its connection attribute in this +# configuration file. + +# Database engine for which script will be generated when using offline +# migration +# engine = + +# The SQLAlchemy connection string used to connect to the slave database +# slave_connection = + +# Database reconnection retry times in the event connectivity is lost; +# setting to -1 implies an infinite retry count +# max_retries = 10 + +# Database reconnection interval in seconds, used if the initial connection +# to the database fails +# retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +# min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# max_pool_size = 10 + +# Timeout in seconds before idle SQL connections are reaped +# idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +# max_overflow = 20 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +# connection_debug = 0 + +# Add python stack traces to SQL as comment strings +# connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=<service_type>:<name>:<driver>[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of <service type> and <name> must be unique; <driver> must also be unique +# This is a multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron_lbaas.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# Uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
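+# Note that two providers of the same service type may coexist as long as +# their names differ, e.g. (illustrative only, using the drivers listed +# above): +# service_provider=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +# service_provider=LOADBALANCER:NetScaler:neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver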
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron_vpnaas.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as the Load Balancer service provider. +# service_provider=LOADBALANCER:Embrane:neutron_lbaas.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default +# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'. +# service_provider = LOADBALANCER:A10Networks:neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default +# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend +# service_provider = LOADBALANCERV2:LoggingNoop:neutron_lbaas.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini new file mode 100644 index 0000000000..a9bf5c5e00 --- /dev/null +++ b/etc/neutron/plugins/vmware/nsx.ini @@ -0,0 +1,203 @@ +[DEFAULT] +# User name for NSX controller +# nsx_user = admin + +# Password for NSX controller +# nsx_password = admin + +# Time before aborting a request on an unresponsive controller (seconds) +# http_timeout = 75 + +# Maximum number of times a particular request should be retried +# retries = 2 + +# Maximum number of times a redirect response should be followed +# redirects = 2 + +# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port +# is omitted, 443 is assumed. This option MUST be specified, e.g.: +# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80 + +# UUID of the pre-existing default NSX Transport zone to be used for creating +# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.: +# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53 + +# (Optional) UUID for the default l3 gateway service to use with this cluster. +# To be specified if planning to use logical routers with external gateways. +# default_l3_gw_service_uuid = + +# (Optional) UUID for the default l2 gateway service to use with this cluster. +# To be specified to provide tenants with a predefined gateway for connecting +# their networks. +# default_l2_gw_service_uuid = + +# (Optional) UUID for the default service cluster. A service cluster is introduced to +# represent a group of gateways and it is needed in order to use Logical Services like +# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this +# config parameter *MUST BE* set to a valid pre-existing service cluster uuid. +# default_service_cluster_uuid = + +# Default interface name to be used on network-gateway. This value +# will be used for any device associated with a network gateway for which an +# interface name was not specified +# default_interface_name = breth0 + +# Reconnect connection to NSX if not used within this amount of time. +# conn_idle_timeout = 900
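+ +# A minimal working [DEFAULT] section, reusing the sample values above +# (addresses and UUIDs are placeholders): +# nsx_user = admin +# nsx_password = admin +# nsx_controllers = xx.yy.zz.ww:443 +# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53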
+ +[quotas] +# Number of network gateways allowed per tenant, -1 means unlimited +# quota_network_gateway = 5 + +[vcns] +# URL for VCNS manager +# manager_uri = https://management_ip + +# User name for VCNS manager +# user = admin + +# Password for VCNS manager +# password = default + +# (Optional) Datacenter ID for Edge deployment +# datacenter_moid = + +# (Optional) Deployment Container ID for NSX Edge deployment +# If not specified, either a default global container will be used, or +# the resource pool and datastore specified below will be used +# deployment_container_id = + +# (Optional) Resource pool ID for NSX Edge deployment +# resource_pool_id = + +# (Optional) Datastore ID for NSX Edge deployment +# datastore_id = + +# (Required) UUID of logical switch for physical network connectivity +# external_network = + +# (Optional) Asynchronous task status check interval +# default is 2000 (milliseconds) +# task_status_check_interval = 2000 + +[nsx] +# Maximum number of ports for each bridged logical switch +# The recommended value for this parameter varies with NSX version +# Please use: +# NSX 2.x -> 64 +# NSX 3.0, 3.1 -> 5000 +# NSX 3.2 -> 10000 +# max_lp_per_bridged_ls = 5000 + +# Maximum number of ports for each overlay (stt, gre) logical switch +# max_lp_per_overlay_ls = 256 + +# Number of connections to each controller node. +# default is 10 +# concurrent_connections = 10 + +# Number of seconds a generation id should be valid for (default -1 meaning do not time out) +# nsx_gen_timeout = -1 + +# Acceptable values for 'metadata_mode' are: +# - 'access_network': this enables a dedicated connection to the metadata +# proxy for metadata server access via Neutron router. +# - 'dhcp_host_route': this enables host route injection via the dhcp agent. +# This option is only useful if running on a host that does not support +# namespaces; otherwise access_network should be used. +# metadata_mode = access_network + +# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt) +# default_transport_type = stt + +# Specifies in which mode the plugin needs to operate in order to provide DHCP and +# metadata proxy services to tenant instances. If 'agent' is chosen (default) +# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to +# provide such services. In this mode, the plugin supports API extensions 'agent' +# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse), +# the plugin will use NSX logical services for DHCP and metadata proxy. This +# simplifies the deployment model for Neutron, in that the plugin no longer requires +# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode +# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above. +# Furthermore, a 'combined' mode is also provided and is used to support existing +# deployments that want to adopt the agentless mode going forward. With this mode, +# existing networks keep being served by the existing infrastructure (thus preserving +# backward compatibility), whereas new networks will be served by the new infrastructure. +# Migration tools are provided to 'move' one network from one model to another; with +# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is +# ignored, as new networks will no longer be scheduled to existing dhcp agents. +# agent_mode = agent + +# Specifies which mode packet replication should be done in. If set to +# 'service', a service node is required in order to perform packet replication. +# This can also be set to 'source' if one wants replication to be performed +# locally (NOTE: usually only useful for testing if one does not want to +# deploy a service node). In order to leverage distributed routers, +# replication_mode should be set to "service". +# replication_mode = service
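+ +# For example, an agentless deployment (supported from NSX 4.2, per the +# note above) would set: +# agent_mode = agentless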
+ +[nsx_sync] +# Interval in seconds between runs of the status synchronization task. +# The plugin will aim at resynchronizing operational status for all +# resources in this interval, and it should be therefore large enough +# to ensure the task is feasible. Otherwise the plugin will be +# constantly synchronizing resource status, i.e. a new task is started +# as soon as the previous is completed. +# If this value is set to 0, the state synchronization thread for this +# Neutron instance will be disabled. +# state_sync_interval = 10 + +# Random additional delay between two runs of the state synchronization task. +# An additional wait time between 0 and max_random_sync_delay seconds +# will be added on top of state_sync_interval. +# max_random_sync_delay = 0 + +# Minimum delay, in seconds, between two status synchronization requests for NSX. +# Depending on chunk size, controller load, and other factors, state +# synchronization requests might be pretty heavy. This means the +# controller might take time to respond, and its load might be quite +# increased by them. This parameter allows specifying a minimum +# interval between two subsequent requests. +# The value for this parameter must never exceed state_sync_interval. +# If it does, an error will be raised at startup. +# min_sync_req_delay = 1 + +# Minimum number of resources to be retrieved from NSX in a single status +# synchronization request. +# The actual size of the chunk will increase if the number of resources is such +# that using the minimum chunk size will cause the interval between two +# requests to be less than min_sync_req_delay +# min_chunk_size = 500 + +# Enable this option to allow punctual state synchronization on show +# operations. In this way, show operations will always fetch the operational +# status of the resource from the NSX backend, and this might have +# a considerable impact on overall performance. +# always_read_status = False + +[nsx_lsn] +# Pull LSN information from NSX in case it is missing from the local +# data store. This is useful to rebuild the local store in case of +# server recovery. +# sync_on_missing_data = False + +[nsx_dhcp] +# (Optional) Comma-separated list of additional DNS servers. Default is an empty list +# extra_domain_name_servers = + +# Domain to use for building the hostnames +# domain_name = openstacklocal + +# Default DHCP lease time +# default_lease_time = 43200 + +[nsx_metadata] +# IP address used by Metadata server +# metadata_server_address = 127.0.0.1 + +# TCP Port used by Metadata server +# metadata_server_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing.
You may select any string for a secret, +# but it MUST match with the configuration used by the Metadata server +# metadata_shared_secret = diff --git a/etc/neutron/rootwrap.d/cisco-apic.filters b/etc/neutron/rootwrap.d/cisco-apic.filters new file mode 100644 index 0000000000..69e4afcc89 --- /dev/null +++ b/etc/neutron/rootwrap.d/cisco-apic.filters @@ -0,0 +1,16 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# cisco-apic filters +lldpctl: CommandFilter, lldpctl, root + +# ip_lib filters +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/debug.filters b/etc/neutron/rootwrap.d/debug.filters new file mode 100644 index 0000000000..b61d960178 --- /dev/null +++ b/etc/neutron/rootwrap.d/debug.filters @@ -0,0 +1,14 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# This is needed because we should ping +# from inside a namespace which requires root +ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ +ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ diff --git a/etc/neutron/rootwrap.d/dhcp.filters b/etc/neutron/rootwrap.d/dhcp.filters new file mode 100644 index 0000000000..0712ec131d --- /dev/null +++ b/etc/neutron/rootwrap.d/dhcp.filters @@ -0,0 +1,35 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# dhcp-agent +dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID= +# dhcp-agent uses kill as well, that's handled by the generic KillFilter +# it looks like these are the only signals needed, per +# neutron/agent/linux/dhcp.py +kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP +kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +ivs-ctl: CommandFilter, ivs-ctl, root +mm-ctl: CommandFilter, mm-ctl, root +dhcp_release: CommandFilter, dhcp_release, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. +metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/ipset-firewall.filters b/etc/neutron/rootwrap.d/ipset-firewall.filters new file mode 100644 index 0000000000..52c66373b2 --- /dev/null +++ b/etc/neutron/rootwrap.d/ipset-firewall.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] +# neutron/agent/linux/iptables_firewall.py +# "ipset", "-A", ... 
+ipset: CommandFilter, ipset, root diff --git a/etc/neutron/rootwrap.d/iptables-firewall.filters b/etc/neutron/rootwrap.d/iptables-firewall.filters new file mode 100644 index 0000000000..b8a6ab5b3b --- /dev/null +++ b/etc/neutron/rootwrap.d/iptables-firewall.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# neutron/agent/linux/iptables_manager.py +# "iptables-save", ... +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# neutron/agent/linux/iptables_manager.py +# "iptables", "-A", ... +iptables: CommandFilter, iptables, root +ip6tables: CommandFilter, ip6tables, root diff --git a/etc/neutron/rootwrap.d/l3.filters b/etc/neutron/rootwrap.d/l3.filters new file mode 100644 index 0000000000..be69b32c57 --- /dev/null +++ b/etc/neutron/rootwrap.d/l3.filters @@ -0,0 +1,48 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# arping +arping: CommandFilter, arping, root + +# l3_agent +sysctl: CommandFilter, sysctl, root +route: CommandFilter, route, root +radvd: CommandFilter, radvd, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. 
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 +kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP +kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root + +# ovs_lib (if OVSInterfaceDriver is used) +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# iptables_manager +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# Keepalived +keepalived: CommandFilter, keepalived, root +kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9 + +# l3 agent to delete floatingip's conntrack state +conntrack: CommandFilter, conntrack, root diff --git a/etc/neutron/rootwrap.d/lbaas-haproxy.filters b/etc/neutron/rootwrap.d/lbaas-haproxy.filters new file mode 100644 index 0000000000..b4e1ecba20 --- /dev/null +++ b/etc/neutron/rootwrap.d/lbaas-haproxy.filters @@ -0,0 +1,26 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# haproxy +haproxy: CommandFilter, haproxy, root + +# lbaas-agent uses kill as well, that's handled by the generic KillFilter +kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +mm-ctl: CommandFilter, mm-ctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +route: CommandFilter, route, root + +# arping +arping: CommandFilter, arping, root diff --git a/etc/neutron/rootwrap.d/linuxbridge-plugin.filters b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters new file mode 100644 index 0000000000..03df39592c --- /dev/null +++ b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters @@ -0,0 +1,19 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# linuxbridge-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +brctl: CommandFilter, brctl, root +bridge: CommandFilter, bridge, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/nec-plugin.filters b/etc/neutron/rootwrap.d/nec-plugin.filters new file mode 100644 index 0000000000..89c4cfe355 --- /dev/null +++ b/etc/neutron/rootwrap.d/nec-plugin.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# nec_neutron_agent +ovs-vsctl: CommandFilter, ovs-vsctl, root diff --git a/etc/neutron/rootwrap.d/ofagent.filters b/etc/neutron/rootwrap.d/ofagent.filters new file mode 100644 index 0000000000..11e4256483 --- /dev/null +++ b/etc/neutron/rootwrap.d/ofagent.filters @@ -0,0 +1,16 @@ +# neutron-rootwrap command filters for nodes on which +# neutron-ofagent-agent is 
expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# ovs_lib +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/etc/neutron/rootwrap.d/openvswitch-plugin.filters new file mode 100644 index 0000000000..b63a83b943 --- /dev/null +++ b/etc/neutron/rootwrap.d/openvswitch-plugin.filters @@ -0,0 +1,22 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# openvswitch-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +ovs-vsctl: CommandFilter, ovs-vsctl, root +ovs-ofctl: CommandFilter, ovs-ofctl, root +kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 +ovsdb-client: CommandFilter, ovsdb-client, root +xe: CommandFilter, xe, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/vpnaas.filters b/etc/neutron/rootwrap.d/vpnaas.filters new file mode 100644 index 0000000000..7848136b9f --- /dev/null +++ b/etc/neutron/rootwrap.d/vpnaas.filters @@ -0,0 +1,13 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +openswan: CommandFilter, ipsec, root diff --git a/etc/policy.json b/etc/policy.json new file mode 100644 index 0000000000..1664c8d7af --- /dev/null +++ b/etc/policy.json @@ -0,0 +1,139 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "context_is_advsvc": "role:advsvc", + "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + "shared": "field:networks:shared=True", + "shared_firewalls": "field:firewalls:shared=True", + "external": "field:networks:router:external=True", + "default": "rule:admin_or_owner", + + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + + "create_network": "", + "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", + "get_network:router:external": "rule:regular_user", + "get_network:segments": "rule:admin_only", + "get_network:provider:network_type": "rule:admin_only", + "get_network:provider:physical_network": "rule:admin_only", + "get_network:provider:segmentation_id": "rule:admin_only", + "get_network:queue_id": "rule:admin_only", + "create_network:shared": "rule:admin_only", + "create_network:router:external": "rule:admin_only", + "create_network:segments": "rule:admin_only", + "create_network:provider:network_type": "rule:admin_only", + "create_network:provider:physical_network": "rule:admin_only", + "create_network:provider:segmentation_id": "rule:admin_only", + "update_network": "rule:admin_or_owner", + "update_network:segments": "rule:admin_only", + 
"update_network:shared": "rule:admin_only", + "update_network:provider:network_type": "rule:admin_only", + "update_network:provider:physical_network": "rule:admin_only", + "update_network:provider:segmentation_id": "rule:admin_only", + "update_network:router:external": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc", + "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", + "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", + "get_port": "rule:admin_or_owner or rule:context_is_advsvc", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "update_port": "rule:admin_or_owner or rule:context_is_advsvc", + "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", + "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", + "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", + + "get_router:ha": "rule:admin_only", + "create_router": "rule:regular_user", + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "create_router:distributed": "rule:admin_only", + "create_router:ha": "rule:admin_only", + "get_router": "rule:admin_or_owner", + "get_router:distributed": "rule:admin_only", + "update_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:distributed": "rule:admin_only", + "update_router:ha": "rule:admin_only", + "delete_router": "rule:admin_or_owner", + + "add_router_interface": "rule:admin_or_owner", + "remove_router_interface": "rule:admin_or_owner", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + "create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "update_firewall:shared": "rule:admin_only", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": 
"rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + "update_policy_profiles": "rule:admin_only", + "get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf new file mode 100644 index 0000000000..dee1dd94b1 --- /dev/null +++ b/etc/rootwrap.conf @@ -0,0 +1,34 @@ +# Configuration for neutron-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR + +[xenapi] +# XenAPI configuration is only required by the L2 agent if it is to +# target a XenServer/XCP compute host's dom0. 
+xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/etc/services.conf b/etc/services.conf new file mode 100644 index 0000000000..262c120827 --- /dev/null +++ b/etc/services.conf @@ -0,0 +1,43 @@ +[radware] +#vdirect_address = 0.0.0.0 +#ha_secondary_address= +#vdirect_user = vDirect +#vdirect_password = radware +#service_ha_pair = False +#service_throughput = 1000 +#service_ssl_throughput = 200 +#service_compression_throughput = 100 +#service_cache = 20 +#service_adc_type = VA +#service_adc_version= +#service_session_mirroring_enabled = False +#service_isl_vlan = -1 +#service_resource_pool_ids = [] +#actions_to_skip = 'setup_l2_l3' +#l4_action_name = 'BaseCreate' +#l2_l3_workflow_name = openstack_l2_l3 +#l4_workflow_name = openstack_l4 +#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True +#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2 + +[netscaler_driver] +#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api +#netscaler_ncc_username = admin +#netscaler_ncc_password = secret + +[heleoslb] +#esm_mgmt = +#admin_username = +#admin_password = +#lb_image = +#inband_id = +#oob_id = +#mgmt_id = +#dummy_utif_id = +#resource_pool_id = +#async_requests = +#lb_flavor = small +#sync_interval = 60 + +[haproxy] +#jinja_config_template = /opt/stack/neutron/neutron/services/drivers/haproxy/templates/haproxy_v1.4.template diff --git a/run_tests.sh b/run_tests.sh index 51a1046cbb..b2d5e84d96 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -114,7 +114,7 @@ function run_tests { if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then # Default to running all tests if specific test is not # provided. - testargs="discover ./neutron/tests" + testargs="discover ./vmware_nsx/neutron/tests" fi ${wrapper} python -m testtools.run $testopts $testargs @@ -134,7 +134,7 @@ function run_tests { set +e testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'" - OS_TEST_PATH=`echo $testargs|grep -o 'neutron\.tests[^[:space:]:]\+'|tr . /` + OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsx\.neutron\.tests[^[:space:]:]\+'|tr . 
/` if [ -n "$OS_TEST_PATH" ]; then os_test_dir=$(dirname "$OS_TEST_PATH") else diff --git a/setup.cfg b/setup.cfg index 03504469ac..73e91edfec 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,12 +1,11 @@ [metadata] -name = neutron -version = 2015.1 -summary = OpenStack Networking +name = vmware-nsx +summary = VMware NSX library for OpenStack projects description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org -home-page = http://www.openstack.org/ +home-page = http://launchpad.net/vmware-nsx classifier = Environment :: OpenStack Intended Audience :: Information Technology @@ -16,193 +15,39 @@ classifier = Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.3 [files] packages = - neutron -data_files = - etc/neutron = - etc/api-paste.ini - etc/dhcp_agent.ini - etc/fwaas_driver.ini - etc/l3_agent.ini - etc/lbaas_agent.ini - etc/metadata_agent.ini - etc/metering_agent.ini - etc/policy.json - etc/neutron.conf - etc/rootwrap.conf - etc/vpn_agent.ini - etc/neutron/rootwrap.d = - etc/neutron/rootwrap.d/debug.filters - etc/neutron/rootwrap.d/dhcp.filters - etc/neutron/rootwrap.d/iptables-firewall.filters - etc/neutron/rootwrap.d/ipset-firewall.filters - etc/neutron/rootwrap.d/l3.filters - etc/neutron/rootwrap.d/lbaas-haproxy.filters - etc/neutron/rootwrap.d/linuxbridge-plugin.filters - etc/neutron/rootwrap.d/nec-plugin.filters - etc/neutron/rootwrap.d/ofagent.filters - etc/neutron/rootwrap.d/openvswitch-plugin.filters - etc/neutron/rootwrap.d/vpnaas.filters - etc/init.d = etc/init.d/neutron-server - etc/neutron/plugins/bigswitch = - etc/neutron/plugins/bigswitch/restproxy.ini - etc/neutron/plugins/bigswitch/ssl/ca_certs = - etc/neutron/plugins/bigswitch/ssl/ca_certs/README - etc/neutron/plugins/bigswitch/ssl/host_certs = - etc/neutron/plugins/bigswitch/ssl/host_certs/README - etc/neutron/plugins/brocade = etc/neutron/plugins/brocade/brocade.ini - etc/neutron/plugins/cisco = - etc/neutron/plugins/cisco/cisco_cfg_agent.ini - etc/neutron/plugins/cisco/cisco_plugins.ini - etc/neutron/plugins/cisco/cisco_router_plugin.ini - etc/neutron/plugins/cisco/cisco_vpn_agent.ini - etc/neutron/plugins/embrane = etc/neutron/plugins/embrane/heleos_conf.ini - etc/neutron/plugins/hyperv = etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini - etc/neutron/plugins/ibm = etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini - etc/neutron/plugins/linuxbridge = etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini - etc/neutron/plugins/metaplugin = etc/neutron/plugins/metaplugin/metaplugin.ini - etc/neutron/plugins/midonet = etc/neutron/plugins/midonet/midonet.ini - etc/neutron/plugins/ml2 = - etc/neutron/plugins/bigswitch/restproxy.ini - etc/neutron/plugins/ml2/ml2_conf.ini - etc/neutron/plugins/ml2/ml2_conf_arista.ini - etc/neutron/plugins/ml2/ml2_conf_brocade.ini - etc/neutron/plugins/ml2/ml2_conf_cisco.ini - etc/neutron/plugins/ml2/ml2_conf_mlnx.ini - etc/neutron/plugins/ml2/ml2_conf_ncs.ini - etc/neutron/plugins/ml2/ml2_conf_odl.ini - etc/neutron/plugins/ml2/ml2_conf_ofa.ini - etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini - etc/neutron/plugins/ml2/ml2_conf_sriov.ini - etc/neutron/plugins/nuage/nuage_plugin.ini - etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini - etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini - etc/neutron/plugins/nuage = etc/neutron/plugins/nuage/nuage_plugin.ini - etc/neutron/plugins/oneconvergence = 
etc/neutron/plugins/oneconvergence/nvsdplugin.ini - etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini - etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini - etc/neutron/plugins/vmware = etc/neutron/plugins/vmware/nsx.ini - etc/neutron/plugins/opencontrail = etc/neutron/plugins/opencontrail/contrailplugin.ini -scripts = - bin/neutron-rootwrap - bin/neutron-rootwrap-xen-dom0 - -[global] -setup-hooks = - pbr.hooks.setup_hook - neutron.hooks.setup_hook - -[entry_points] -console_scripts = - neutron-cisco-cfg-agent = neutron.plugins.cisco.cfg_agent.cfg_agent:main - neutron-check-nsx-config = neutron.plugins.vmware.check_nsx_config:main - neutron-db-manage = neutron.db.migration.cli:main - neutron-debug = neutron.debug.shell:main - neutron-dhcp-agent = neutron.agent.dhcp_agent:main - neutron-hyperv-agent = neutron.plugins.hyperv.agent.hyperv_neutron_agent:main - neutron-ibm-agent = neutron.plugins.ibm.agent.sdnve_neutron_agent:main - neutron-l3-agent = neutron.agent.l3.agent:main - neutron-lbaas-agent = neutron.services.loadbalancer.agent.agent:main - neutron-linuxbridge-agent = neutron.plugins.linuxbridge.agent.linuxbridge_neutron_agent:main - neutron-metadata-agent = neutron.agent.metadata.agent:main - neutron-mlnx-agent = neutron.plugins.mlnx.agent.eswitch_neutron_agent:main - neutron-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main - neutron-netns-cleanup = neutron.agent.netns_cleanup_util:main - neutron-ns-metadata-proxy = neutron.agent.metadata.namespace_proxy:main - neutron-nsx-manage = neutron.plugins.vmware.shell:main - neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main - neutron-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main - neutron-ovs-cleanup = neutron.agent.ovs_cleanup_util:main - neutron-restproxy-agent = neutron.plugins.bigswitch.agent.restproxy_agent:main - neutron-server = neutron.server:main - neutron-rootwrap = oslo.rootwrap.cmd:main - neutron-usage-audit = neutron.cmd.usage_audit:main - neutron-vpn-agent = neutron.services.vpn.agent:main - neutron-metering-agent = neutron.services.metering.agents.metering_agent:main - neutron-ofagent-agent = neutron.plugins.ofagent.agent.main:main - neutron-sriov-nic-agent = neutron.plugins.sriovnicagent.sriov_nic_agent:main - neutron-sanity-check = neutron.cmd.sanity_check:main -neutron.core_plugins = - bigswitch = neutron.plugins.bigswitch.plugin:NeutronRestProxyV2 - brocade = neutron.plugins.brocade.NeutronPlugin:BrocadePluginV2 - cisco = neutron.plugins.cisco.network_plugin:PluginV2 - embrane = neutron.plugins.embrane.plugins.embrane_ml2_plugin:EmbraneMl2Plugin - hyperv = neutron.plugins.hyperv.hyperv_neutron_plugin:HyperVNeutronPlugin - ibm = neutron.plugins.ibm.sdnve_neutron_plugin:SdnvePluginV2 - midonet = neutron.plugins.midonet.plugin:MidonetPluginV2 - ml2 = neutron.plugins.ml2.plugin:Ml2Plugin - nec = neutron.plugins.nec.nec_plugin:NECPluginV2 - nuage = neutron.plugins.nuage.plugin:NuagePlugin - metaplugin = neutron.plugins.metaplugin.meta_neutron_plugin:MetaPluginV2 - oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2 - plumgrid = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin:NeutronPluginPLUMgridV2 - vmware = neutron.plugins.vmware.plugin:NsxPlugin -neutron.service_plugins = - dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin - router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin - bigswitch_l3 = 
neutron.plugins.bigswitch.l3_router_plugin:L3RestProxy - firewall = neutron.services.firewall.fwaas_plugin:FirewallPlugin - lbaas = neutron.services.loadbalancer.plugin:LoadBalancerPlugin - vpnaas = neutron.services.vpn.plugin:VPNDriverPlugin - metering = neutron.services.metering.metering_plugin:MeteringPlugin -neutron.ml2.type_drivers = - flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver - local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver - vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver - gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver - vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver -neutron.ml2.mechanism_drivers = - opendaylight = neutron.plugins.ml2.drivers.mechanism_odl:OpenDaylightMechanismDriver - logger = neutron.tests.unit.ml2.drivers.mechanism_logger:LoggerMechanismDriver - test = neutron.tests.unit.ml2.drivers.mechanism_test:TestMechanismDriver - bulkless = neutron.tests.unit.ml2.drivers.mechanism_bulkless:BulklessMechanismDriver - linuxbridge = neutron.plugins.ml2.drivers.mech_linuxbridge:LinuxbridgeMechanismDriver - openvswitch = neutron.plugins.ml2.drivers.mech_openvswitch:OpenvswitchMechanismDriver - hyperv = neutron.plugins.ml2.drivers.mech_hyperv:HypervMechanismDriver - ncs = neutron.plugins.ml2.drivers.mechanism_ncs:NCSMechanismDriver - arista = neutron.plugins.ml2.drivers.arista.mechanism_arista:AristaDriver - cisco_nexus = neutron.plugins.ml2.drivers.cisco.nexus.mech_cisco_nexus:CiscoNexusMechanismDriver - cisco_apic = neutron.plugins.ml2.drivers.cisco.apic.mechanism_apic:APICMechanismDriver - l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver - bigswitch = neutron.plugins.ml2.drivers.mech_bigswitch.driver:BigSwitchMechanismDriver - ofagent = neutron.plugins.ml2.drivers.mech_ofagent:OfagentMechanismDriver - mlnx = neutron.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver - brocade = neutron.plugins.ml2.drivers.brocade.mechanism_brocade:BrocadeMechanism - fslsdn = neutron.plugins.ml2.drivers.freescale.mechanism_fslsdn:FslsdnMechanismDriver - sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver:SriovNicSwitchMechanismDriver - nuage = neutron.plugins.ml2.drivers.mech_nuage.driver:NuageMechanismDriver -neutron.ml2.extension_drivers = - test = neutron.tests.unit.ml2.test_extension_driver_api:TestExtensionDriver -neutron.openstack.common.cache.backends = - memory = neutron.openstack.common.cache._backends.memory:MemoryBackend -# These are for backwards compat with Icehouse notification_driver configuration values -oslo.messaging.notify.drivers = - neutron.openstack.common.notifier.log_notifier = oslo.messaging.notify._impl_log:LogDriver - neutron.openstack.common.notifier.no_op_notifier = oslo.messaging.notify._impl_noop:NoOpDriver - neutron.openstack.common.notifier.rpc_notifier2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver - neutron.openstack.common.notifier.rpc_notifier = oslo.messaging.notify._impl_messaging:MessagingDriver - neutron.openstack.common.notifier.test_notifier = oslo.messaging.notify._impl_test:TestDriver + vmware_nsx +namespace_packages = + vmware_nsx [build_sphinx] -all_files = 1 -build-dir = doc/build source-dir = doc/source +build-dir = doc/build +all_files = 1 + +[upload_sphinx] +upload-dir = doc/build/html + +[compile_catalog] +directory = vmware_nsx/locale +domain = vmware_nsx + +[update_catalog] +domain = vmware_nsx +output_dir = vmware_nsx/locale +input_file = vmware_nsx/locale/vmware_nsx.pot [extract_messages] keywords 
= _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg -output_file = neutron/locale/neutron.pot +output_file = vmware_nsx/locale/vmware_nsx.pot -[compile_catalog] -directory = neutron/locale -domain = neutron - -[update_catalog] -domain = neutron -output_dir = neutron/locale -input_file = neutron/locale/neutron.pot +[pbr] +autodoc_index_modules = 1 [wheel] universal = 1 diff --git a/test-requirements.txt b/test-requirements.txt index c46402904a..bb51f87d26 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,9 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. + +-e git://git.openstack.org/openstack/neutron.git#egg=neutron + hacking>=0.9.2,<0.10 cliff>=1.7.0 # Apache-2.0 diff --git a/tools/i18n_cfg.py b/tools/i18n_cfg.py index 5ad1a514d9..f722a9ec92 100644 --- a/tools/i18n_cfg.py +++ b/tools/i18n_cfg.py @@ -92,6 +92,4 @@ msg_format_checkers = [ ] -file_black_list = ["./neutron/tests/unit", - "./neutron/openstack", - "./neutron/plugins/bigswitch/tests"] +file_black_list = ["./vmware_nsx/neutron/tests/unit"] diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh index f27ba30f5e..0fc360530d 100755 --- a/tools/pretty_tox.sh +++ b/tools/pretty_tox.sh @@ -3,4 +3,4 @@ TESTRARGS=$1 exec 3>&1 -status=$(exec 4>&1 >&3; ( python -m neutron.openstack.common.lockutils python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | subunit-trace -f) && exit $status +status=$(exec 4>&1 >&3; ( python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | subunit-trace -f) && exit $status diff --git a/tox.ini b/tox.ini index 858999c09b..a21f192512 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,py33,py34,pep8 +envlist = py27,pep8 minversion = 1.6 skipsdist = True @@ -13,10 +13,7 @@ install_command = pip install -U {opts} {packages} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh -commands = - sh tools/pretty_tox.sh '{posargs}' -# there is also secret magic in pretty_tox.sh which lets you run in a fail only -# mode. To do this define the TRACE_FAILONLY environmental variable. +commands = python setup.py testr --slowest --testr-args='{posargs}' [testenv:hashtest] # This is the same as default environment, but with a random PYTHONHASHSEED. @@ -25,13 +22,10 @@ commands = setenv = VIRTUAL_ENV={envdir} [testenv:functional] -setenv = OS_TEST_PATH=./neutron/tests/functional - OS_TEST_TIMEOUT=90 +setenv = OS_TEST_TIMEOUT=90 [testenv:dsvm-functional] -setenv = OS_TEST_PATH=./neutron/tests/functional - OS_SUDO_TESTING=1 - OS_ROOTWRAP_CMD=sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf +setenv = OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_TEST_TIMEOUT=90 sitepackages=True @@ -44,16 +38,14 @@ downloadcache = ~/cache/pip commands = sh ./tools/check_bash.sh flake8 - neutron-db-manage check_migration - sh -c "find neutron -type f -regex '.*\.pot?' 
-print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" whitelist_externals = sh [testenv:i18n] -commands = python ./tools/check_i18n.py ./neutron ./tools/i18n_cfg.py +commands = python ./tools/check_i18n.py ./vmware_nsx/neutron [testenv:cover] commands = - python -m neutron.openstack.common.lockutils python setup.py testr --coverage --testr-args='{posargs}' + python setup.py testr --coverage --testr-args='{posargs}' [testenv:venv] commands = {posargs} @@ -76,16 +68,14 @@ commands = python setup.py build_sphinx ignore = E125,E126,E128,E129,E265,H305,H307,H402,H404,H405,H904 show-source = true builtins = _ -# TODO(dougw) neutron/tests/unit/vmware exclusion is a temporary services split hack -exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios,neutron/tests/unit/vmware* +exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject [testenv:pylint] deps = {[testenv]deps} pylint commands = - pylint --rcfile=.pylintrc --output-format=colorized {posargs:neutron} + pylint --rcfile=.pylintrc --output-format=colorized {posargs:vmware_nsx/neutron} [hacking] import_exceptions = neutron.i18n -local-check-factory = neutron.hacking.checks.factory diff --git a/vmware-nsx/neutron/plugins/vmware/plugins/service.py b/vmware-nsx/neutron/plugins/vmware/plugins/service.py deleted file mode 100644 index 923e218b78..0000000000 --- a/vmware-nsx/neutron/plugins/vmware/plugins/service.py +++ /dev/null @@ -1,1833 +0,0 @@ -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import netaddr -from oslo.config import cfg -from oslo.utils import excutils - -from neutron.common import constants -from neutron.common import exceptions as n_exc -try: - from neutron_fwaas.db.firewall import firewall_db -except Exception: - print("WARNING: missing neutron-fwaas package") -from neutron.db import l3_db -try: - from neutron_lbaas.db.loadbalancer import loadbalancer_db -except Exception: - print("WARNING: missing neutron-lbaas package") -from neutron.db import routedserviceinsertion_db as rsi_db -try: - from neutron_vpnaas.db.vpn import vpn_db -except Exception: - print("WARNING: missing neutron-vpnaas package") -from neutron.extensions import firewall as fw_ext -from neutron.extensions import l3 -from neutron.extensions import routedserviceinsertion as rsi -from neutron.extensions import vpnaas as vpn_ext -from neutron.i18n import _LE, _LW -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as service_constants -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import config # noqa -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware.dbexts import servicerouter as sr_db -from neutron.plugins.vmware.dbexts import vcns_db -from neutron.plugins.vmware.dbexts import vcns_models -from neutron.plugins.vmware.extensions import servicerouter as sr -from neutron.plugins.vmware.nsxlib import router as routerlib -from neutron.plugins.vmware.nsxlib import switch as switchlib -from neutron.plugins.vmware.plugins import base -from neutron.plugins.vmware.vshield.common import constants as vcns_const -from neutron.plugins.vmware.vshield.common import exceptions -from neutron.plugins.vmware.vshield.tasks import constants as tasks_const -from neutron.plugins.vmware.vshield import vcns_driver -from sqlalchemy.orm import exc as sa_exc - -LOG = logging.getLogger(__name__) - -ROUTER_TYPE_BASIC = 1 -ROUTER_TYPE_ADVANCED = 2 - -ROUTER_STATUS = [ - service_constants.ACTIVE, - service_constants.DOWN, - service_constants.PENDING_CREATE, - service_constants.PENDING_DELETE, - service_constants.ERROR -] - -ROUTER_STATUS_LEVEL = { - service_constants.ACTIVE: vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, - service_constants.DOWN: vcns_const.RouterStatus.ROUTER_STATUS_DOWN, - service_constants.PENDING_CREATE: ( - vcns_const.RouterStatus.ROUTER_STATUS_PENDING_CREATE - ), - service_constants.PENDING_DELETE: ( - vcns_const.RouterStatus.ROUTER_STATUS_PENDING_DELETE - ), - service_constants.ERROR: vcns_const.RouterStatus.ROUTER_STATUS_ERROR -} - - -class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin, - base.NsxPluginV2, - rsi_db.RoutedServiceInsertionDbMixin, - firewall_db.Firewall_db_mixin, - loadbalancer_db.LoadBalancerPluginDb, - vpn_db.VPNPluginDb - ): - - supported_extension_aliases = ( - base.NsxPluginV2.supported_extension_aliases + [ - "service-router", - "routed-service-insertion", - "fwaas", - "lbaas", - "vpnaas" - ]) - # The service plugin cannot currently support pagination - __native_pagination_support = False - __native_sorting_support = False - - def __init__(self): - super(NsxAdvancedPlugin, self).__init__() - - self._super_create_ext_gw_port = ( - self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW]) - self._super_delete_ext_gw_port = ( - self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW]) - - self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( - self._vcns_create_ext_gw_port) - 
self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( - self._vcns_delete_ext_gw_port) - - # cache router type based on router id - self._router_type = {} - self.callbacks = VcnsCallbacks(self.safe_reference) - - # load the vCNS driver - self._load_vcns_drivers() - - # switchlib's create_lswitch needs to be replaced in order to proxy - # logical switch create requests to vcns - self._set_create_lswitch_proxy() - - def _set_create_lswitch_proxy(self): - base.switchlib.create_lswitch = self._proxy_create_lswitch - - def _proxy_create_lswitch(self, *args, **kwargs): - name, tz_config, tags = ( - _process_base_create_lswitch_args(*args, **kwargs) - ) - return self.vcns_driver.create_lswitch( - name, tz_config, tags=tags, - port_isolation=None, replication_mode=None) - - def _load_vcns_drivers(self): - self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks) - - def _set_router_type(self, router_id, router_type): - self._router_type[router_id] = router_type - - def _get_router_type(self, context=None, router_id=None, router=None): - if not router: - if router_id in self._router_type: - return self._router_type[router_id] - router = self._get_router(context, router_id) - - LOG.debug("EDGE: router = %s", router) - if router['extra_attributes']['service_router']: - router_type = ROUTER_TYPE_ADVANCED - else: - router_type = ROUTER_TYPE_BASIC - self._set_router_type(router['id'], router_type) - return router_type - - def _find_router_type(self, router): - is_service_router = router.get(sr.SERVICE_ROUTER, False) - if is_service_router: - return ROUTER_TYPE_ADVANCED - else: - return ROUTER_TYPE_BASIC - - def _is_advanced_service_router(self, context=None, router_id=None, - router=None): - if router: - router_type = self._get_router_type(router=router) - else: - router_type = self._get_router_type(context, router_id) - return (router_type == ROUTER_TYPE_ADVANCED) - - def _vcns_create_ext_gw_port(self, context, port_data): - router_id = port_data['device_id'] - if not self._is_advanced_service_router(context, router_id): - self._super_create_ext_gw_port(context, port_data) - return - - # NOP for Edge because currently the port will be created internally - # by VSM - LOG.debug("EDGE: _vcns_create_ext_gw_port") - - def _vcns_delete_ext_gw_port(self, context, port_data): - router_id = port_data['device_id'] - if not self._is_advanced_service_router(context, router_id): - self._super_delete_ext_gw_port(context, port_data) - return - - # NOP for Edge - LOG.debug("EDGE: _vcns_delete_ext_gw_port") - - def _get_external_attachment_info(self, context, router): - gw_port = router.gw_port - ipaddress = None - netmask = None - nexthop = None - - if gw_port: - # gw_port may have multiple IPs, only configure the first one - if gw_port.get('fixed_ips'): - ipaddress = gw_port['fixed_ips'][0]['ip_address'] - - network_id = gw_port.get('network_id') - if network_id: - ext_net = self._get_network(context, network_id) - if not ext_net.external: - msg = (_("Network '%s' is not a valid external " - "network") % network_id) - raise n_exc.BadRequest(resource='router', msg=msg) - if ext_net.subnets: - ext_subnet = ext_net.subnets[0] - netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask) - nexthop = ext_subnet.gateway_ip - - return (ipaddress, netmask, nexthop) - - def _get_external_gateway_address(self, context, router): - ipaddress, netmask, nexthop = self._get_external_attachment_info( - context, router) - return nexthop - - def _vcns_update_static_routes(self, context, **kwargs): - router = kwargs.get('router')
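- # 'router', 'edge_id', 'nexthop' and 'subnets' may be passed in kwargs; - # anything omitted is looked up or recomputed from the database below.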
- if router is None: - router = self._get_router(context, kwargs['router_id']) - - edge_id = kwargs.get('edge_id') - if edge_id is None: - binding = vcns_db.get_vcns_router_binding(context.session, - router['id']) - edge_id = binding['edge_id'] - - skippable = True - if 'nexthop' in kwargs: - nexthop = kwargs['nexthop'] - # The default gateway and vnic config has dependencies, if we - # explicitly specify nexthop to change, tell the driver not to - # skip this route update - skippable = False - else: - nexthop = self._get_external_gateway_address(context, - router) - - if 'subnets' in kwargs: - subnets = kwargs['subnets'] - else: - subnets = self._find_router_subnets_cidrs(context.elevated(), - router['id']) - - routes = [] - for subnet in subnets: - routes.append({ - 'cidr': subnet, - 'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0] - }) - self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes, - skippable) - - def _get_nat_rules(self, context, router): - fip_qry = context.session.query(l3_db.FloatingIP) - fip_db = fip_qry.filter_by(router_id=router['id']).all() - - dnat = [] - snat = [] - for fip in fip_db: - if fip.fixed_port_id: - dnat.append({ - 'dst': fip.floating_ip_address, - 'translated': fip.fixed_ip_address - }) - - gw_port = router.gw_port - if gw_port and router.enable_snat: - if gw_port.get('fixed_ips'): - snat_ip = gw_port['fixed_ips'][0]['ip_address'] - subnets = self._find_router_subnets_cidrs(context.elevated(), - router['id']) - for subnet in subnets: - snat.append({ - 'src': subnet, - 'translated': snat_ip - }) - - return (snat, dnat) - - def _update_nat_rules(self, context, router): - snat, dnat = self._get_nat_rules(context, router) - binding = vcns_db.get_vcns_router_binding(context.session, - router['id']) - self.vcns_driver.update_nat_rules(router['id'], - binding['edge_id'], - snat, dnat) - - def _update_interface(self, context, router, sync=False): - addr, mask, nexthop = self._get_external_attachment_info( - context, router) - - secondary = [] - fip_qry = context.session.query(l3_db.FloatingIP) - fip_db = fip_qry.filter_by(router_id=router['id']).all() - for fip in fip_db: - if fip.fixed_port_id: - secondary.append(fip.floating_ip_address) - #Add all vip addresses bound on the router - vip_addrs = self._get_all_vip_addrs_by_router_id(context, - router['id']) - secondary.extend(vip_addrs) - - binding = vcns_db.get_vcns_router_binding(context.session, - router['id']) - task = self.vcns_driver.update_interface( - router['id'], binding['edge_id'], - vcns_const.EXTERNAL_VNIC_INDEX, - self.vcns_driver.external_network, - addr, mask, secondary=secondary) - if sync: - task.wait(tasks_const.TaskState.RESULT) - - def _update_router_gw_info(self, context, router_id, info): - if not self._is_advanced_service_router(context, router_id): - super(NsxAdvancedPlugin, self)._update_router_gw_info( - context, router_id, info) - return - - # get original gw_port config - router = self._get_router(context, router_id) - org_ext_net_id = router.gw_port_id and router.gw_port.network_id - org_enable_snat = router.enable_snat - orgaddr, orgmask, orgnexthop = self._get_external_attachment_info( - context, router) - - super(base.NsxPluginV2, self)._update_router_gw_info( - context, router_id, info, router=router) - - new_ext_net_id = router.gw_port_id and router.gw_port.network_id - new_enable_snat = router.enable_snat - newaddr, newmask, newnexthop = self._get_external_attachment_info( - context, router) - - binding = 
vcns_db.get_vcns_router_binding(context.session, router_id) - - if new_ext_net_id != org_ext_net_id and orgnexthop: - # network changed, need to remove default gateway before vnic - # can be configured - LOG.debug("VCNS: delete default gateway %s", orgnexthop) - self._vcns_update_static_routes(context, - router=router, - edge_id=binding['edge_id'], - nexthop=None) - - if orgaddr != newaddr or orgmask != newmask: - self.vcns_driver.update_interface( - router_id, binding['edge_id'], - vcns_const.EXTERNAL_VNIC_INDEX, - self.vcns_driver.external_network, - newaddr, newmask) - - if orgnexthop != newnexthop: - self._vcns_update_static_routes(context, - router=router, - edge_id=binding['edge_id'], - nexthop=newnexthop) - - if (new_ext_net_id == org_ext_net_id and - org_enable_snat == new_enable_snat): - return - - self._update_nat_rules(context, router) - - def _add_subnet_snat_rule(self, context, router, subnet): - # NOP for service router - if not self._is_advanced_service_router(router=router): - super(NsxAdvancedPlugin, self)._add_subnet_snat_rule( - context, router, subnet) - - def _delete_subnet_snat_rule(self, context, router, subnet): - # NOP for service router - if not self._is_advanced_service_router(router=router): - super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule( - context, router, subnet) - - def _remove_floatingip_address(self, context, fip_db): - # NOP for service router - router_id = fip_db.router_id - if not self._is_advanced_service_router(context, router_id): - super(NsxAdvancedPlugin, self)._remove_floatingip_address( - context, fip_db) - - def _create_advanced_service_router(self, context, neutron_router_id, - name, lrouter, lswitch): - - # store binding - binding = vcns_db.add_vcns_router_binding( - context.session, neutron_router_id, None, lswitch['uuid'], - service_constants.PENDING_CREATE) - - # deploy edge - jobdata = { - 'neutron_router_id': neutron_router_id, - 'lrouter': lrouter, - 'lswitch': lswitch, - 'context': context - } - - # deploy and wait until the deploy request has been requested - # so we will have edge_id ready.
The wait here should be fine - as we're not in a database transaction now - self.vcns_driver.deploy_edge( - lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata, - wait_for_exec=True) - - return binding - - def _create_integration_lswitch(self, tenant_id, name): - # use default transport zone - transport_zone_config = [{ - "zone_uuid": self.cluster.default_tz_uuid, - "transport_type": cfg.CONF.NSX.default_transport_type - }] - return self.vcns_driver.create_lswitch(name, transport_zone_config) - - def _add_router_integration_interface(self, tenant_id, name, - lrouter, lswitch): - # create logical switch port - try: - ls_port = switchlib.create_lport( - self.cluster, lswitch['uuid'], tenant_id, - '', '', lrouter['uuid'], True) - except api_exc.NsxApiException: - msg = (_("An exception occurred while creating a port " - "on lswitch %s") % lswitch['uuid']) - LOG.exception(msg) - raise n_exc.NeutronException(message=msg) - - # create logical router port - try: - neutron_port_id = '' - pname = name[:36] + '-lp' - admin_status_enabled = True - lr_port = routerlib.create_router_lport( - self.cluster, lrouter['uuid'], tenant_id, - neutron_port_id, pname, admin_status_enabled, - [vcns_const.INTEGRATION_LR_IPADDRESS]) - except api_exc.NsxApiException: - msg = (_("Unable to create port on NSX logical router %s") % name) - LOG.exception(msg) - switchlib.delete_port( - self.cluster, lswitch['uuid'], ls_port['uuid']) - raise n_exc.NeutronException(message=msg) - - # attach logical router port to switch port - try: - self._update_router_port_attachment( - self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'], - 'PatchAttachment', ls_port['uuid'], None) - except api_exc.NsxApiException as e: - # lr_port should have been deleted - switchlib.delete_port( - self.cluster, lswitch['uuid'], ls_port['uuid']) - raise e - - def _create_lrouter(self, context, router, nexthop): - lrouter = super(NsxAdvancedPlugin, self)._create_lrouter( - context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS) - - router_type = self._find_router_type(router) - self._set_router_type(lrouter['uuid'], router_type) - if router_type == ROUTER_TYPE_BASIC: - return lrouter - - tenant_id = self._get_tenant_id_for_create(context, router) - name = router['name'] - try: - lsname = name[:36] + '-ls' - lswitch = self._create_integration_lswitch( - tenant_id, lsname) - except Exception: - msg = _("Unable to create integration logical switch " - "for router %s") % name - LOG.exception(msg) - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - raise n_exc.NeutronException(message=msg) - - try: - self._add_router_integration_interface(tenant_id, name, - lrouter, lswitch) - except Exception: - msg = _("Unable to add router interface to integration lswitch " - "for router %s") % name - LOG.exception(msg) - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - raise n_exc.NeutronException(message=msg) - - try: - self._create_advanced_service_router( - context, router['id'], name, lrouter, lswitch) - except Exception: - msg = (_("Unable to create advanced service router for %s") % name) - LOG.exception(msg) - self.vcns_driver.delete_lswitch(lswitch['uuid']) - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - raise n_exc.NeutronException(message=msg) - - lrouter['status'] = service_constants.PENDING_CREATE - return lrouter - - def check_router_in_use(self, context, router_id): - router_filter = {'router_id': [router_id]} - vpnservices = self.get_vpnservices( - context, filters={'router_id': [router_id]}) - if vpnservices: - raise 
vpn_ext.RouterInUseByVPNService( - router_id=router_id, - vpnservice_id=vpnservices[0]['id']) - vips = self.get_vips( - context, filters=router_filter) - if vips: - raise nsx_exc.RouterInUseByLBService( - router_id=router_id, - vip_id=vips[0]['id']) - firewalls = self.get_firewalls( - context, filters=router_filter) - if firewalls: - raise nsx_exc.RouterInUseByFWService( - router_id=router_id, - firewall_id=firewalls[0]['id']) - - def _delete_lrouter(self, context, router_id, nsx_router_id): - binding = vcns_db.get_vcns_router_binding(context.session, router_id) - if not binding: - super(NsxAdvancedPlugin, self)._delete_lrouter( - context, router_id, nsx_router_id) - else: - #Check whether router has an advanced service inserted. - self.check_router_in_use(context, router_id) - vcns_db.update_vcns_router_binding( - context.session, router_id, - status=service_constants.PENDING_DELETE) - - lswitch_id = binding['lswitch_id'] - edge_id = binding['edge_id'] - - # delete lswitch - try: - self.vcns_driver.delete_lswitch(lswitch_id) - except exceptions.ResourceNotFound: - LOG.warning(_LW("Did not find lswitch %s in NSX"), lswitch_id) - - # delete edge - jobdata = { - 'context': context - } - self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata) - - # delete NSX logical router - routerlib.delete_lrouter(self.cluster, nsx_router_id) - - if router_id in self._router_type: - del self._router_type[router_id] - - def _update_lrouter(self, context, router_id, name, nexthop, routes=None): - if not self._is_advanced_service_router(context, router_id): - return super(NsxAdvancedPlugin, self)._update_lrouter( - context, router_id, name, nexthop, routes=routes) - - previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter( - context, router_id, name, - vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes) - - # TODO(fank): Theoretically users can specify extra routes for - # physical network, and routes for the physical network need to be - # configured on Edge. This can be done by checking if nexthop is in - # external network. But for now we only handle routes for logical - # space and leave it for future enhancement.
- - # Let _update_router_gw_info handle nexthop change - #self._vcns_update_static_routes(context, router_id=router_id) - - return previous_routes - - def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, - internal_ip, router_id, - min_num_rules_expected=0): - # NOP for advanced service router - if not self._is_advanced_service_router(context, router_id): - super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules( - context, floating_ip_address, internal_ip, router_id, - min_num_rules_expected=min_num_rules_expected) - - def _update_fip_assoc(self, context, fip, floatingip_db, external_port): - # Update DB model only for advanced service router - router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2] - if (router_id and - not self._is_advanced_service_router(context, router_id)): - super(NsxAdvancedPlugin, self)._update_fip_assoc( - context, fip, floatingip_db, external_port) - else: - super(base.NsxPluginV2, self)._update_fip_assoc( - context, fip, floatingip_db, external_port) - - def _get_nsx_lrouter_status(self, id): - try: - lrouter = routerlib.get_lrouter(self.cluster, id) - lr_status = lrouter["_relations"]["LogicalRouterStatus"] - if lr_status["fabric_status"]: - nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE - else: - nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_DOWN - except n_exc.NotFound: - nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ERROR - - return nsx_status - - def _get_vse_status(self, context, id): - binding = vcns_db.get_vcns_router_binding(context.session, id) - edge_status_level = self.vcns_driver.get_edge_status( - binding['edge_id']) - edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status] - - if edge_status_level > edge_db_status_level: - return edge_status_level - else: - return edge_db_status_level - - def _get_all_nsx_lrouters_statuses(self, tenant_id, fields): - # get nsx lrouters status - nsx_lrouters = routerlib.get_lrouters(self.cluster, - tenant_id, - fields) - - nsx_status = {} - for nsx_lrouter in nsx_lrouters: - if (nsx_lrouter["_relations"]["LogicalRouterStatus"] - ["fabric_status"]): - nsx_status[nsx_lrouter['uuid']] = ( - vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE - ) - else: - nsx_status[nsx_lrouter['uuid']] = ( - vcns_const.RouterStatus.ROUTER_STATUS_DOWN - ) - - return nsx_status - - def _get_all_vse_statuses(self, context): - bindings = self._model_query( - context, vcns_models.VcnsRouterBinding) - - vse_db_status_level = {} - edge_id_to_router_id = {} - router_ids = [] - for binding in bindings: - if not binding['edge_id']: - continue - router_id = binding['router_id'] - router_ids.append(router_id) - edge_id_to_router_id[binding['edge_id']] = router_id - vse_db_status_level[router_id] = ( - ROUTER_STATUS_LEVEL[binding['status']]) - - if not vse_db_status_level: - # no advanced service router, no need to query - return {} - - vse_status_level = {} - edges_status_level = self.vcns_driver.get_edges_statuses() - for edge_id, status_level in edges_status_level.iteritems(): - if edge_id in edge_id_to_router_id: - router_id = edge_id_to_router_id[edge_id] - db_status_level = vse_db_status_level[router_id] - if status_level > db_status_level: - vse_status_level[router_id] = status_level - else: - vse_status_level[router_id] = db_status_level - - return vse_status_level - - def get_router(self, context, id, fields=None): - if fields and 'status' not in fields: - return super(NsxAdvancedPlugin, self).get_router( - context, id, fields=fields) - - router = super(NsxAdvancedPlugin, 
self).get_router(context, id) - - router_type = self._find_router_type(router) - if router_type == ROUTER_TYPE_ADVANCED: - vse_status_level = self._get_vse_status(context, id) - if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: - router['status'] = ROUTER_STATUS[vse_status_level] - - return self._fields(router, fields) - - def get_routers(self, context, filters=None, fields=None, **kwargs): - routers = super(NsxAdvancedPlugin, self).get_routers( - context, filters=filters, **kwargs) - - if fields and 'status' not in fields: - # no status checking, just return regular get_routers - return [self._fields(router, fields) for router in routers] - - for router in routers: - router_type = self._find_router_type(router) - if router_type == ROUTER_TYPE_ADVANCED: - break - else: - # no advanced service router, return here - return [self._fields(router, fields) for router in routers] - - vse_status_all = self._get_all_vse_statuses(context) - for router in routers: - router_type = self._find_router_type(router) - if router_type == ROUTER_TYPE_ADVANCED: - vse_status_level = vse_status_all.get(router['id']) - if vse_status_level is None: - vse_status_level = ( - vcns_const.RouterStatus.ROUTER_STATUS_ERROR) - if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: - router['status'] = ROUTER_STATUS[vse_status_level] - - return [self._fields(router, fields) for router in routers] - - def add_router_interface(self, context, router_id, interface_info): - info = super(NsxAdvancedPlugin, self).add_router_interface( - context, router_id, interface_info) - if self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - if router.enable_snat: - self._update_nat_rules(context, router) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._vcns_update_static_routes(context, router=router) - return info - - def remove_router_interface(self, context, router_id, interface_info): - info = super(NsxAdvancedPlugin, self).remove_router_interface( - context, router_id, interface_info) - if self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - if router.enable_snat: - self._update_nat_rules(context, router) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._vcns_update_static_routes(context, router=router) - return info - - def create_floatingip(self, context, floatingip): - fip = super(NsxAdvancedPlugin, self).create_floatingip( - context, floatingip) - router_id = fip.get('router_id') - if router_id and self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_nat_rules(context, router) - self._update_interface(context, router) - return fip - - def update_floatingip(self, context, id, floatingip): - fip = super(NsxAdvancedPlugin, self).update_floatingip( - context, id, floatingip) - router_id = fip.get('router_id') - if router_id and self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_nat_rules(context, router) - self._update_interface(context, router) - elif not router_id: - # 
The floating IP has been disassociated and should be set to DOWN - self.update_floatingip_status(context, fip['id'], - constants.FLOATINGIP_STATUS_DOWN) - return fip - - def delete_floatingip(self, context, id): - fip_db = self._get_floatingip(context, id) - router_id = None - if fip_db.fixed_port_id: - router_id = fip_db.router_id - super(NsxAdvancedPlugin, self).delete_floatingip(context, id) - if router_id and self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_interface(context, router) - self._update_nat_rules(context, router) - - def disassociate_floatingips(self, context, port_id): - routers = set() - - try: - fip_qry = context.session.query(l3_db.FloatingIP) - fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) - for fip_db in fip_dbs: - routers.add(fip_db.router_id) - except sa_exc.NoResultFound: - pass - super(NsxAdvancedPlugin, self).disassociate_floatingips(context, - port_id) - - for router_id in routers: - if self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_interface(context, router) - self._update_nat_rules(context, router) - - # - # FWaaS plugin implementation - # - def _firewall_set_status( - self, context, firewall_id, status, firewall=None): - with context.session.begin(subtransactions=True): - fw_db = self._get_firewall(context, firewall_id) - if status == service_constants.PENDING_UPDATE and ( - fw_db.status == service_constants.PENDING_DELETE): - raise fw_ext.FirewallInPendingState( - firewall_id=firewall_id, pending_state=status) - else: - fw_db.status = status - if firewall: - firewall['status'] = status - - def _ensure_firewall_update_allowed(self, context, firewall_id): - fwall = self.get_firewall(context, firewall_id) - if fwall['status'] in [service_constants.PENDING_CREATE, - service_constants.PENDING_UPDATE, - service_constants.PENDING_DELETE]: - raise fw_ext.FirewallInPendingState(firewall_id=firewall_id, - pending_state=fwall['status']) - - def _ensure_firewall_policy_update_allowed( - self, context, firewall_policy_id): - firewall_policy = self.get_firewall_policy(context, firewall_policy_id) - for firewall_id in firewall_policy.get('firewall_list', []): - self._ensure_firewall_update_allowed(context, firewall_id) - - def _ensure_update_or_delete_firewall_rule( - self, context, firewall_rule_id): - fw_rule = self.get_firewall_rule(context, firewall_rule_id) - if fw_rule.get('firewall_policy_id'): - self._ensure_firewall_policy_update_allowed( - context, fw_rule['firewall_policy_id']) - - def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id): - if not fw_policy_id: - return [] - firewall_policy_db = self._get_firewall_policy(context, fw_policy_id) - return [ - self._make_firewall_rule_dict(fw_rule_db) - for fw_rule_db in firewall_policy_db['firewall_rules'] - ] - - def _get_edge_id_by_vcns_edge_binding(self, context, - router_id): - #Get vcns_router_binding mapping between router and edge - router_binding = vcns_db.get_vcns_router_binding( - context.session, router_id) - return router_binding.edge_id - - def _get_firewall_list_from_firewall_policy(self, context, policy_id): - firewall_policy_db = self._get_firewall_policy(context, policy_id) - return [ - 
-    def _get_firewall_list_from_firewall_policy(self, context, policy_id):
-        firewall_policy_db = self._get_firewall_policy(context, policy_id)
-        return [
-            self._make_firewall_dict(fw_db)
-            for fw_db in firewall_policy_db['firewalls']
-        ]
-
-    def _get_firewall_list_from_firewall_rule(self, context, rule_id):
-        rule = self._get_firewall_rule(context, rule_id)
-        if not rule.firewall_policy_id:
-            # The firewall rule is not associated with a firewall policy yet
-            return None
-
-        return self._get_firewall_list_from_firewall_policy(
-            context, rule.firewall_policy_id)
-
-    def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs):
-        edge_id = kwargs.get('edge_id')
-        if not edge_id:
-            edge_id = self._get_edge_id_by_vcns_edge_binding(
-                context, router_id)
-        firewall_rule_list = kwargs.get('firewall_rule_list')
-        if not firewall_rule_list:
-            firewall_rule_list = self._make_firewall_rule_list_by_policy_id(
-                context, fw['firewall_policy_id'])
-        fw_with_rules = fw
-        fw_with_rules['firewall_rule_list'] = firewall_rule_list
-        try:
-            self.vcns_driver.update_firewall(context, edge_id, fw_with_rules)
-        except exceptions.VcnsApiException as e:
-            self._firewall_set_status(
-                context, fw['id'], service_constants.ERROR)
-            LOG.exception(_LE("Failed to update firewall on vShield Edge "
-                              "bound on router %s"), router_id)
-            raise e
-        except exceptions.VcnsBadRequest as e:
-            self._firewall_set_status(
-                context, fw['id'], service_constants.ERROR)
-            LOG.exception(_LE("Bad firewall request input"))
-            raise e
-
-    def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
-        edge_id = kwargs.get('edge_id')
-        if not edge_id:
-            edge_id = self._get_edge_id_by_vcns_edge_binding(
-                context, router_id)
-        # TODO(linb): do rollback on error
-        self.vcns_driver.delete_firewall(context, edge_id)
-
-    def create_firewall(self, context, firewall):
-        LOG.debug("create_firewall() called")
-        router_id = firewall['firewall'].get(vcns_const.ROUTER_ID)
-        if not router_id:
-            msg = _("router_id is not provided!")
-            LOG.error(msg)
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        if not self._is_advanced_service_router(context, router_id):
-            msg = _("router_id: %s is not an advanced router!") % router_id
-            LOG.error(msg)
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        if self._get_resource_router_id_binding(
-                context, firewall_db.Firewall, router_id=router_id):
-            msg = _("A firewall is already associated with the router")
-            LOG.error(msg)
-            raise nsx_exc.ServiceOverQuota(
-                overs='firewall', err_msg=msg)
-
-        fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall)
-        # Add a router service insertion binding for the firewall object
-        res = {
-            'id': fw['id'],
-            'router_id': router_id
-        }
-        self._process_create_resource_router_id(
-            context, res, firewall_db.Firewall)
-        # Since there is only one firewall per edge, this is a bulk
-        # configuration operation on the firewall
-        self._vcns_update_firewall(context, fw, router_id)
-        self._firewall_set_status(
-            context, fw['id'], service_constants.ACTIVE, fw)
-        fw[rsi.ROUTER_ID] = router_id
-        return fw
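[Editorial aside, not part of the patch: create_firewall() above enforces a one-firewall-per-service-router invariant through the service-insertion binding table. A dict-based model of that check, with hypothetical names, just to make the invariant explicit:]

    class FirewallBindings(object):
        """Toy stand-in for the resource/router service-insertion table."""

        def __init__(self):
            self._router_by_fw = {}  # firewall_id -> router_id

        def bind(self, firewall_id, router_id):
            # Reject a second firewall on the same service router.
            if router_id in self._router_by_fw.values():
                raise ValueError("A firewall is already associated with "
                                 "the router")
            self._router_by_fw[firewall_id] = router_id
            return {'id': firewall_id, 'router_id': router_id}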
-    def update_firewall(self, context, id, firewall):
-        LOG.debug("update_firewall() called")
-        self._ensure_firewall_update_allowed(context, id)
-        service_router_binding = self._get_resource_router_id_binding(
-            context, firewall_db.Firewall, resource_id=id)
-        rule_list_pre = self._make_firewall_rule_list_by_policy_id(
-            context,
-            self.get_firewall(context, id)['firewall_policy_id'])
-        firewall['firewall']['status'] = service_constants.PENDING_UPDATE
-        fw = super(NsxAdvancedPlugin, self).update_firewall(
-            context, id, firewall)
-        fw[rsi.ROUTER_ID] = service_router_binding['router_id']
-        rule_list_new = self._make_firewall_rule_list_by_policy_id(
-            context, fw['firewall_policy_id'])
-        if rule_list_pre == rule_list_new:
-            self._firewall_set_status(
-                context, fw['id'], service_constants.ACTIVE, fw)
-            return fw
-        else:
-            self._vcns_update_firewall(
-                context, fw, service_router_binding.router_id,
-                firewall_rule_list=rule_list_new)
-            self._firewall_set_status(
-                context, fw['id'], service_constants.ACTIVE, fw)
-            return fw
-
-    def delete_firewall(self, context, id):
-        LOG.debug("delete_firewall() called")
-        self._firewall_set_status(
-            context, id, service_constants.PENDING_DELETE)
-        service_router_binding = self._get_resource_router_id_binding(
-            context, firewall_db.Firewall, resource_id=id)
-        self._vcns_delete_firewall(context, service_router_binding.router_id)
-        super(NsxAdvancedPlugin, self).delete_firewall(context, id)
-        self._delete_resource_router_id_binding(
-            context, id, firewall_db.Firewall)
-
-    def get_firewall(self, context, id, fields=None):
-        fw = super(NsxAdvancedPlugin, self).get_firewall(
-            context, id, fields)
-        if fields and rsi.ROUTER_ID not in fields:
-            return fw
-
-        service_router_binding = self._get_resource_router_id_binding(
-            context, firewall_db.Firewall, resource_id=fw['id'])
-        fw[rsi.ROUTER_ID] = service_router_binding['router_id']
-        return fw
-
-    def get_firewalls(self, context, filters=None, fields=None):
-        fws = super(NsxAdvancedPlugin, self).get_firewalls(
-            context, filters, fields)
-        if fields and rsi.ROUTER_ID not in fields:
-            return fws
-        service_router_bindings = self._get_resource_router_id_bindings(
-            context, firewall_db.Firewall,
-            resource_ids=[fw['id'] for fw in fws])
-        mapping = dict([(binding['resource_id'], binding['router_id'])
-                        for binding in service_router_bindings])
-        for fw in fws:
-            fw[rsi.ROUTER_ID] = mapping[fw['id']]
-        return fws
-
-    def update_firewall_rule(self, context, id, firewall_rule):
-        LOG.debug("update_firewall_rule() called")
-        self._ensure_update_or_delete_firewall_rule(context, id)
-        fwr_pre = self.get_firewall_rule(context, id)
-        fwr = super(NsxAdvancedPlugin, self).update_firewall_rule(
-            context, id, firewall_rule)
-        if fwr_pre == fwr:
-            return fwr
-
-        # check if this rule is associated with a firewall
-        fw_list = self._get_firewall_list_from_firewall_rule(context, id)
-        if not fw_list:
-            return fwr
-
-        for fw in fw_list:
-            # get the router service insertion binding with the firewall id
-            service_router_binding = self._get_resource_router_id_binding(
-                context, firewall_db.Firewall, resource_id=fw['id'])
-            edge_id = self._get_edge_id_by_vcns_edge_binding(
-                context, service_router_binding.router_id)
-            # TODO(linb): do rollback on error
-            self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr)
-
-        return fwr
-
-    def update_firewall_policy(self, context, id, firewall_policy):
-        LOG.debug("update_firewall_policy() called")
-        self._ensure_firewall_policy_update_allowed(context, id)
-        firewall_rules_pre = self._make_firewall_rule_list_by_policy_id(
-            context, id)
-        fwp = super(NsxAdvancedPlugin, self).update_firewall_policy(
-            context, id, firewall_policy)
-        firewall_rules = self._make_firewall_rule_list_by_policy_id(
-            context, id)
-        if firewall_rules_pre == firewall_rules:
-            return fwp
-
-        # check if this policy is associated with a firewall
-        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
-        if not fw_list:
-            return fwp
-
-        for fw in fw_list:
-            # Get the router service insertion binding with the firewall id
-            # TODO(fank): optimize by using _get_resource_router_id_bindings
-            service_router_binding = self._get_resource_router_id_binding(
-                context, firewall_db.Firewall, resource_id=fw['id'])
-            self._vcns_update_firewall(
-                context, fw, service_router_binding.router_id,
-                firewall_rule_list=firewall_rules)
-        return fwp
-
-    def insert_rule(self, context, id, rule_info):
-        LOG.debug("insert_rule() called")
-        self._ensure_firewall_policy_update_allowed(context, id)
-        fwp = super(NsxAdvancedPlugin, self).insert_rule(
-            context, id, rule_info)
-        fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
-            context, rule_info['firewall_rule_id'])
-
-        # check if this policy is associated with a firewall
-        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
-        if not fw_list:
-            return fwp
-        for fw in fw_list:
-            # TODO(fank): optimize by using _get_resource_router_id_bindings
-            service_router_binding = self._get_resource_router_id_binding(
-                context, firewall_db.Firewall, resource_id=fw['id'])
-            edge_id = self._get_edge_id_by_vcns_edge_binding(
-                context, service_router_binding.router_id)
-
-            if rule_info.get('insert_before') or rule_info.get('insert_after'):
-                # If insert_before or insert_after is set, call the
-                # VCNS insert_rule API
-                # TODO(linb): do rollback on error
-                self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr)
-            else:
-                # Otherwise fall back to bulk configuration on the firewall
-                self._vcns_update_firewall(context, fw, edge_id=edge_id)
-        return fwp
-
-    def remove_rule(self, context, id, rule_info):
-        LOG.debug("remove_rule() called")
-        self._ensure_firewall_policy_update_allowed(context, id)
-        fwp = super(NsxAdvancedPlugin, self).remove_rule(
-            context, id, rule_info)
-        fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
-            context, rule_info['firewall_rule_id'])
-
-        # check if this policy is associated with a firewall
-        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
-        if not fw_list:
-            return fwp
-        for fw in fw_list:
-            # TODO(fank): optimize by using _get_resource_router_id_bindings
-            service_router_binding = self._get_resource_router_id_binding(
-                context, firewall_db.Firewall, resource_id=fw['id'])
-            edge_id = self._get_edge_id_by_vcns_edge_binding(
-                context, service_router_binding.router_id)
-            # TODO(linb): do rollback on error
-            self.vcns_driver.delete_firewall_rule(
-                context, fwr['id'], edge_id)
-        return fwp
-
-    #
-    # LBAAS service plugin implementation
-    #
-    def _get_edge_id_by_vip_id(self, context, vip_id):
-        try:
-            service_router_binding = self._get_resource_router_id_binding(
-                context, loadbalancer_db.Vip, resource_id=vip_id)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to find the edge with "
-                                  "vip_id: %s"), vip_id)
-        return self._get_edge_id_by_vcns_edge_binding(
-            context, service_router_binding.router_id)
-
-    def _get_all_vip_addrs_by_router_id(
-            self, context, router_id):
-        vip_bindings = self._get_resource_router_id_bindings(
-            context, loadbalancer_db.Vip, router_ids=[router_id])
-        vip_addrs = []
-        for vip_binding in vip_bindings:
-            vip = self.get_vip(context, vip_binding.resource_id)
-            vip_addrs.append(vip.get('address'))
-        return vip_addrs
-
-    def _add_router_service_insertion_binding(self, context, resource_id,
-                                              router_id,
-                                              model):
-        res = {
-            'id': resource_id,
-            'router_id': router_id
-        }
-        self._process_create_resource_router_id(context, res,
-                                                model)
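[Editorial aside, not part of the patch: _get_edge_id_by_vip_id() above chains two lookups, VIP -> service router (service-insertion binding) -> vShield Edge (vcns router binding). A minimal sketch with plain dicts standing in for the two DB tables; the sample ids are hypothetical:]

    vip_to_router = {'vip-1': 'router-1'}    # loadbalancer_db.Vip binding
    router_to_edge = {'router-1': 'edge-1'}  # vcns_router_binding row

    def edge_id_for_vip(vip_id):
        router_id = vip_to_router[vip_id]    # resource -> service router
        return router_to_edge[router_id]     # service router -> edge

    assert edge_id_for_vip('vip-1') == 'edge-1'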
-    def _resource_set_status(self, context, model, id, status, obj=None,
-                             pool_id=None):
-        with context.session.begin(subtransactions=True):
-            try:
-                qry = context.session.query(model)
-                if issubclass(model,
-                              loadbalancer_db.PoolMonitorAssociation):
-                    res = qry.filter_by(monitor_id=id,
-                                        pool_id=pool_id).one()
-                else:
-                    res = qry.filter_by(id=id).one()
-                if status == service_constants.PENDING_UPDATE and (
-                        res.get('status') == service_constants.PENDING_DELETE):
-                    msg = (_("Operation cannot be performed since resource "
-                             "%(model)s : %(id)s is being deleted!") %
-                           {'model': model,
-                            'id': id})
-                    LOG.error(msg)
-                    raise nsx_exc.NsxPluginException(err_msg=msg)
-                else:
-                    res.status = status
-            except sa_exc.NoResultFound:
-                msg = (_("Resource %(model)s : %(id)s not found!") %
-                       {'model': model,
-                        'id': id})
-                LOG.exception(msg)
-                raise nsx_exc.NsxPluginException(err_msg=msg)
-            if obj:
-                obj['status'] = status
-
-    def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs):
-        pool = self.get_pool(context, pool_id)
-        edge_id = kwargs.get('edge_id')
-        if not edge_id:
-            edge_id = self._get_edge_id_by_vip_id(
-                context, pool['vip_id'])
-        # Check whether the pool is already created on the router,
-        # in case of a future M:N relation between Pool and Vip
-
-        # Check associated HealthMonitors and then create them
-        for monitor_id in pool.get('health_monitors'):
-            hm = self.get_health_monitor(context, monitor_id)
-            try:
-                self.vcns_driver.create_health_monitor(
-                    context, edge_id, hm)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("Failed to create health monitor "
-                                      "associated with pool id: %s!"), pool_id)
-                    for monitor_ide in pool.get('health_monitors'):
-                        if monitor_ide == monitor_id:
-                            break
-                        self.vcns_driver.delete_health_monitor(
-                            context, monitor_ide, edge_id)
-        # Create the pool on the edge
-        members = [
-            super(NsxAdvancedPlugin, self).get_member(
-                context, member_id)
-            for member_id in pool.get('members')
-        ]
-        try:
-            self.vcns_driver.create_pool(context, edge_id, pool, members)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to create pool on vShield Edge"))
-                self.vcns_driver.delete_pool(
-                    context, pool_id, edge_id)
-                for monitor_id in pool.get('health_monitors'):
-                    self.vcns_driver.delete_health_monitor(
-                        context, monitor_id, edge_id)
-
-    def _vcns_update_pool(self, context, pool, **kwargs):
-        edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
-        members = kwargs.get('members')
-        if not members:
-            members = [
-                super(NsxAdvancedPlugin, self).get_member(
-                    context, member_id)
-                for member_id in pool.get('members')
-            ]
-        self.vcns_driver.update_pool(context, edge_id, pool, members)
-
-    def create_vip(self, context, vip):
-        LOG.debug("create_vip() called")
-        router_id = vip['vip'].get(vcns_const.ROUTER_ID)
-        if not router_id:
-            msg = _("router_id is not provided!")
-            LOG.error(msg)
-            raise n_exc.BadRequest(resource='router', msg=msg)
-
-        if not self._is_advanced_service_router(context, router_id):
-            msg = _("router_id: %s is not an advanced router!") % router_id
-            LOG.error(msg)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-
-        # Check whether the vip port is an external port
-        subnet_id = vip['vip']['subnet_id']
-        network_id = self.get_subnet(context, subnet_id)['network_id']
-        ext_net = self._get_network(context, network_id)
-        if not ext_net.external:
-            msg = (_("Network '%s' is not a valid external "
-                     "network") % network_id)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-
-        v = super(NsxAdvancedPlugin, self).create_vip(context, vip)
-        # Get the edge_id for the resource
-        router_binding = vcns_db.get_vcns_router_binding(
-            context.session,
-            router_id)
-        edge_id = router_binding.edge_id
-        # Add the vip_router binding
-        self._add_router_service_insertion_binding(context, v['id'],
-                                                   router_id,
-                                                   loadbalancer_db.Vip)
-        # Create the vip port on the vShield Edge
-        router = self._get_router(context, router_id)
-        self._update_interface(context, router, sync=True)
-        # Create the vip and associated pool/monitor on the corresponding edge
-        try:
-            self._vcns_create_pool_and_monitors(
-                context, v['pool_id'], edge_id=edge_id)
-            self.vcns_driver.create_vip(context, edge_id, v)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to create vip!"))
-                self._delete_resource_router_id_binding(
-                    context, v['id'], loadbalancer_db.Vip)
-                super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
-        self._resource_set_status(context, loadbalancer_db.Vip,
-                                  v['id'], service_constants.ACTIVE, v)
-        v[rsi.ROUTER_ID] = router_id
-
-        return v
-
-    def update_vip(self, context, id, vip):
-        edge_id = self._get_edge_id_by_vip_id(context, id)
-        old_vip = self.get_vip(context, id)
-        session_persistence_update = bool(
-            vip['vip'].get('session_persistence'))
-        vip['vip']['status'] = service_constants.PENDING_UPDATE
-        v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip)
-        v[rsi.ROUTER_ID] = self._get_resource_router_id_binding(
-            context, loadbalancer_db.Vip, resource_id=id)['router_id']
-        if old_vip['pool_id'] != v['pool_id']:
-            self.vcns_driver.delete_vip(context, id)
-            # Delete the old pool/monitor on the edge
-            # TODO(linb): factor out the pool and health monitor removal
-            # into a separate method
-            old_pool = self.get_pool(context, old_vip['pool_id'])
-            self.vcns_driver.delete_pool(
-                context, old_vip['pool_id'], edge_id)
-            for monitor_id in old_pool.get('health_monitors'):
-                self.vcns_driver.delete_health_monitor(
-                    context, monitor_id, edge_id)
-            # Create the new pool/monitor objects on the edge
-            # TODO(linb): add exception handling on error
-            self._vcns_create_pool_and_monitors(
-                context, v['pool_id'], edge_id=edge_id)
-            self.vcns_driver.create_vip(context, edge_id, v)
-            return v
-        try:
-            self.vcns_driver.update_vip(context, v, session_persistence_update)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update vip with id: %s!"), id)
-                self._resource_set_status(context, loadbalancer_db.Vip,
-                                          id, service_constants.ERROR, v)
-
-        self._resource_set_status(context, loadbalancer_db.Vip,
-                                  v['id'], service_constants.ACTIVE, v)
-        return v
-
-    def delete_vip(self, context, id):
-        v = self.get_vip(context, id)
-        self._resource_set_status(
-            context, loadbalancer_db.Vip,
-            id, service_constants.PENDING_DELETE)
-        try:
-            self.vcns_driver.delete_vip(context, id)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to delete vip with id: %s!"), id)
-                self._resource_set_status(context, loadbalancer_db.Vip,
-                                          id, service_constants.ERROR)
-        edge_id = self._get_edge_id_by_vip_id(context, id)
-        # Check associated HealthMonitors and then delete them
-        pool = self.get_pool(context, v['pool_id'])
-        self.vcns_driver.delete_pool(context, v['pool_id'], edge_id)
-        for monitor_id in pool.get('health_monitors'):
-            # TODO(linb): add exception handling on error
-            self.vcns_driver.delete_health_monitor(
-                context, monitor_id, edge_id)
-
-        router_binding = self._get_resource_router_id_binding(
-            context, loadbalancer_db.Vip, resource_id=id)
-        router = self._get_router(context, router_binding.router_id)
-        self._delete_resource_router_id_binding(
-            context, id, loadbalancer_db.Vip)
-        super(NsxAdvancedPlugin,
-              self).delete_vip(context, id)
-        self._update_interface(context, router, sync=True)
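[Editorial aside, not part of the patch: create_vip(), update_vip() and friends above all use the same rollback idiom, undo partial work inside save_and_reraise_exception() so the original traceback is preserved. A minimal sketch; the import path is an assumption, since the tree in this patch uses the oslo-incubator copy of excutils rather than oslo.utils:]

    from oslo_utils import excutils  # assumed modern home of this helper

    def create_with_rollback(create, rollback):
        """Run create(); on failure run rollback() and re-raise the error."""
        try:
            return create()
        except Exception:
            with excutils.save_and_reraise_exception():
                rollback()  # best-effort undo; exception re-raised after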
-    def get_vip(self, context, id, fields=None):
-        vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields)
-        if fields and rsi.ROUTER_ID not in fields:
-            return vip
-
-        service_router_binding = self._get_resource_router_id_binding(
-            context, loadbalancer_db.Vip, resource_id=vip['id'])
-        vip[rsi.ROUTER_ID] = service_router_binding['router_id']
-        return vip
-
-    def get_vips(self, context, filters=None, fields=None):
-        vips = super(NsxAdvancedPlugin, self).get_vips(
-            context, filters, fields)
-        if fields and rsi.ROUTER_ID not in fields:
-            return vips
-        service_router_bindings = self._get_resource_router_id_bindings(
-            context, loadbalancer_db.Vip,
-            resource_ids=[vip['id'] for vip in vips])
-        mapping = dict([(binding['resource_id'], binding['router_id'])
-                        for binding in service_router_bindings])
-        for vip in vips:
-            vip[rsi.ROUTER_ID] = mapping[vip['id']]
-        return vips
-
-    def update_pool(self, context, id, pool):
-        pool['pool']['status'] = service_constants.PENDING_UPDATE
-        p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool)
-        # Check whether the pool is already associated with the vip
-        if not p.get('vip_id'):
-            self._resource_set_status(context, loadbalancer_db.Pool,
-                                      p['id'], service_constants.ACTIVE, p)
-            return p
-        try:
-            self._vcns_update_pool(context, p)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update pool with id: %s!"), id)
-                self._resource_set_status(context, loadbalancer_db.Pool,
-                                          p['id'], service_constants.ERROR, p)
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  p['id'], service_constants.ACTIVE, p)
-        return p
-
-    def create_member(self, context, member):
-        m = super(NsxAdvancedPlugin, self).create_member(context, member)
-        pool_id = m.get('pool_id')
-        pool = self.get_pool(context, pool_id)
-        if not pool.get('vip_id'):
-            self._resource_set_status(context, loadbalancer_db.Member,
-                                      m['id'], service_constants.ACTIVE, m)
-            return m
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  pool_id,
-                                  service_constants.PENDING_UPDATE)
-        try:
-            self._vcns_update_pool(context, pool)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update pool with the member"))
-                super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
-
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  pool_id, service_constants.ACTIVE)
-        self._resource_set_status(context, loadbalancer_db.Member,
-                                  m['id'], service_constants.ACTIVE, m)
-        return m
-
-    def update_member(self, context, id, member):
-        member['member']['status'] = service_constants.PENDING_UPDATE
-        old_member = self.get_member(context, id)
-        m = super(NsxAdvancedPlugin, self).update_member(
-            context, id, member)
-
-        if m['pool_id'] != old_member['pool_id']:
-            old_pool_id = old_member['pool_id']
-            old_pool = self.get_pool(context, old_pool_id)
-            if old_pool.get('vip_id'):
-                self._resource_set_status(
-                    context, loadbalancer_db.Pool,
-                    old_pool_id, service_constants.PENDING_UPDATE)
-                try:
-                    self._vcns_update_pool(context, old_pool)
-                except Exception:
-                    with excutils.save_and_reraise_exception():
-                        LOG.exception(_LE("Failed to update old pool "
-                                          "with the member"))
-                        super(NsxAdvancedPlugin, self).delete_member(
-                            context, m['id'])
-                self._resource_set_status(
-                    context, loadbalancer_db.Pool,
-                    old_pool_id, service_constants.ACTIVE)
-
-        pool_id = m['pool_id']
-        pool = self.get_pool(context, pool_id)
-        if not pool.get('vip_id'):
-            self._resource_set_status(context, loadbalancer_db.Member,
-                                      m['id'], service_constants.ACTIVE, m)
-            return m
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  pool_id,
-                                  service_constants.PENDING_UPDATE)
-        try:
-            self._vcns_update_pool(context, pool)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update pool with the member"))
-                super(NsxAdvancedPlugin, self).delete_member(
-                    context, m['id'])
-
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  pool_id, service_constants.ACTIVE)
-        self._resource_set_status(context, loadbalancer_db.Member,
-                                  m['id'], service_constants.ACTIVE, m)
-        return m
-
-    def delete_member(self, context, id):
-        m = self.get_member(context, id)
-        super(NsxAdvancedPlugin, self).delete_member(context, id)
-        pool_id = m['pool_id']
-        pool = self.get_pool(context, pool_id)
-        if not pool.get('vip_id'):
-            return
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  pool_id, service_constants.PENDING_UPDATE)
-        try:
-            self._vcns_update_pool(context, pool)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update pool with the member"))
-        self._resource_set_status(context, loadbalancer_db.Pool,
-                                  pool_id, service_constants.ACTIVE)
-
-    def update_health_monitor(self, context, id, health_monitor):
-        old_hm = super(NsxAdvancedPlugin, self).get_health_monitor(
-            context, id)
-        hm = super(NsxAdvancedPlugin, self).update_health_monitor(
-            context, id, health_monitor)
-        for hm_pool in hm.get('pools'):
-            pool_id = hm_pool['pool_id']
-            pool = self.get_pool(context, pool_id)
-            if pool.get('vip_id'):
-                edge_id = self._get_edge_id_by_vip_id(
-                    context, pool['vip_id'])
-                try:
-                    self.vcns_driver.update_health_monitor(
-                        context, edge_id, old_hm, hm)
-                except Exception:
-                    with excutils.save_and_reraise_exception():
-                        LOG.exception(_LE("Failed to update monitor "
-                                          "with id: %s!"), id)
-        return hm
-
-    def create_pool_health_monitor(self, context,
-                                   health_monitor, pool_id):
-        monitor_id = health_monitor['health_monitor']['id']
-        pool = self.get_pool(context, pool_id)
-        monitors = pool.get('health_monitors')
-        if len(monitors) > 0:
-            msg = _("vCNS currently supports only "
-                    "one monitor per pool")
-            LOG.error(msg)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-        # Check whether the pool is already associated with the vip
-        if not pool.get('vip_id'):
-            res = super(NsxAdvancedPlugin,
-                        self).create_pool_health_monitor(context,
-                                                         health_monitor,
-                                                         pool_id)
-            return res
-        # Get the edge_id
-        edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
-        res = super(NsxAdvancedPlugin,
-                    self).create_pool_health_monitor(context,
-                                                     health_monitor,
-                                                     pool_id)
-        monitor = self.get_health_monitor(context, monitor_id)
-        # TODO(linb): add exception handling on error
-        self.vcns_driver.create_health_monitor(context, edge_id, monitor)
-        # Get the updated pool
-        pool['health_monitors'].append(monitor['id'])
-        self._resource_set_status(
-            context, loadbalancer_db.Pool,
-            pool_id, service_constants.PENDING_UPDATE)
-        try:
-            self._vcns_update_pool(context, pool)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to associate monitor with pool!"))
-                self._resource_set_status(
-                    context, loadbalancer_db.Pool,
-                    pool_id, service_constants.ERROR)
-                super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
-                    context, monitor_id, pool_id)
-        self._resource_set_status(
-            context, loadbalancer_db.Pool,
-            pool_id, service_constants.ACTIVE)
-        self._resource_set_status(
-            context, loadbalancer_db.PoolMonitorAssociation,
-            monitor_id, service_constants.ACTIVE, res,
-            pool_id=pool_id)
-        return res
-
-    def delete_pool_health_monitor(self, context, id, pool_id):
-        super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
-            context, id, pool_id)
-        pool = self.get_pool(context, pool_id)
-        # Check whether the pool is already associated with the vip
-        if pool.get('vip_id'):
-            # Delete the monitor on the vShield Edge
-            edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
-            self._resource_set_status(
-                context, loadbalancer_db.Pool,
-                pool_id, service_constants.PENDING_UPDATE)
-            try:
-                self._vcns_update_pool(context, pool)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception(
-                        _LE("Failed to update pool with pool_monitor!"))
-                    self._resource_set_status(
-                        context, loadbalancer_db.Pool,
-                        pool_id, service_constants.ERROR)
-            # TODO(linb): add exception handling on error
-            self.vcns_driver.delete_health_monitor(context, id, edge_id)
-            self._resource_set_status(
-                context, loadbalancer_db.Pool,
-                pool_id, service_constants.ACTIVE)
-
-    def _vcns_update_ipsec_config(
-            self, context, vpnservice_id, removed_ipsec_conn_id=None):
-        sites = []
-        vpn_service = self._get_vpnservice(context, vpnservice_id)
-        edge_id = self._get_edge_id_by_vcns_edge_binding(
-            context, vpn_service.router_id)
-        if not vpn_service.router.gw_port:
-            msg = _("Failed to update IPsec VPN configuration on edge: "
-                    "router %s does not have a gateway yet!"
-                    ) % vpn_service.router_id
-            LOG.error(msg)
-            raise exceptions.VcnsBadRequest(resource='router', msg=msg)
-
-        external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address']
-        subnet = self._make_subnet_dict(vpn_service.subnet)
-        for ipsec_site_conn in vpn_service.ipsec_site_connections:
-            if ipsec_site_conn.id != removed_ipsec_conn_id:
-                site = self._make_ipsec_site_connection_dict(ipsec_site_conn)
-                ikepolicy = self._make_ikepolicy_dict(
-                    ipsec_site_conn.ikepolicy)
-                ipsecpolicy = self._make_ipsecpolicy_dict(
-                    ipsec_site_conn.ipsecpolicy)
-                sites.append({'site': site,
-                              'ikepolicy': ikepolicy,
-                              'ipsecpolicy': ipsecpolicy,
-                              'subnet': subnet,
-                              'external_ip': external_ip})
-        try:
-            self.vcns_driver.update_ipsec_config(
-                edge_id, sites, enabled=vpn_service.admin_state_up)
-        except exceptions.VcnsBadRequest:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Bad or unsupported input request!"))
-        except exceptions.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update IPsec VPN configuration "
-                                  "with vpnservice: %(vpnservice_id)s on "
-                                  "vShield Edge: %(edge_id)s"),
-                              {'vpnservice_id': vpnservice_id,
-                               'edge_id': edge_id})
-
-    def create_vpnservice(self, context, vpnservice):
-        LOG.debug("create_vpnservice() called")
-        router_id = vpnservice['vpnservice'].get('router_id')
-        if not self._is_advanced_service_router(context, router_id):
-            msg = _("router_id: %s is not an advanced router!") % router_id
-            LOG.warning(msg)
-            raise exceptions.VcnsBadRequest(resource='router', msg=msg)
-
-        if self.get_vpnservices(context, filters={'router_id': [router_id]}):
-            msg = _("A vpnservice is already associated with the router: %s"
-                    ) % router_id
-            LOG.warning(msg)
-            raise nsx_exc.ServiceOverQuota(
-                overs='vpnservice', err_msg=msg)
-
-        service = super(NsxAdvancedPlugin, self).create_vpnservice(
-            context, vpnservice)
-        self._resource_set_status(
-            context, vpn_db.VPNService,
-            service['id'], service_constants.ACTIVE, service)
-        return service
-
-    def update_vpnservice(self, context, vpnservice_id, vpnservice):
-        vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE
-        service = super(NsxAdvancedPlugin, self).update_vpnservice(
-            context, vpnservice_id, vpnservice)
-        # Only the admin_state_up attribute is configurable on the Edge.
-        if vpnservice['vpnservice'].get('admin_state_up') is None:
-            self._resource_set_status(
-                context, vpn_db.VPNService,
-                service['id'], service_constants.ACTIVE, service)
-            return service
-        # Test whether at least one ipsec site connection is attached to
-        # the vpnservice. If not, just return without updating the ipsec
-        # config on the edge side.
-        vpn_service_db = self._get_vpnservice(context, vpnservice_id)
-        if not vpn_service_db.ipsec_site_connections:
-            self._resource_set_status(
-                context, vpn_db.VPNService,
-                service['id'], service_constants.ACTIVE, service)
-            return service
-        try:
-            self._vcns_update_ipsec_config(context, service['id'])
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._resource_set_status(
-                    context, vpn_db.VPNService,
-                    service['id'], service_constants.ERROR, service)
-        self._resource_set_status(
-            context, vpn_db.VPNService,
-            service['id'], service_constants.ACTIVE, service)
-        return service
-
-    def create_ipsec_site_connection(self, context, ipsec_site_connection):
-        ipsec_site_conn = super(
-            NsxAdvancedPlugin, self).create_ipsec_site_connection(
-                context, ipsec_site_connection)
-        try:
-            self._vcns_update_ipsec_config(
-                context, ipsec_site_conn['vpnservice_id'])
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
-                    context, ipsec_site_conn['id'])
-        self._resource_set_status(
-            context, vpn_db.IPsecSiteConnection,
-            ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
-        return ipsec_site_conn
-
-    def update_ipsec_site_connection(self, context, ipsec_site_connection_id,
-                                     ipsec_site_connection):
-        ipsec_site_connection['ipsec_site_connection']['status'] = (
-            service_constants.PENDING_UPDATE)
-        ipsec_site_conn = super(
-            NsxAdvancedPlugin, self).update_ipsec_site_connection(
-                context, ipsec_site_connection_id, ipsec_site_connection)
-        try:
-            self._vcns_update_ipsec_config(
-                context, ipsec_site_conn['vpnservice_id'])
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._resource_set_status(
-                    context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'],
-                    service_constants.ERROR, ipsec_site_conn)
-        self._resource_set_status(
-            context, vpn_db.IPsecSiteConnection,
-            ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
-        return ipsec_site_conn
-
-    def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
-        self._resource_set_status(
-            context, vpn_db.IPsecSiteConnection,
-            ipsec_site_conn_id, service_constants.PENDING_DELETE)
-        vpnservice_id = self.get_ipsec_site_connection(
-            context, ipsec_site_conn_id)['vpnservice_id']
-        try:
-            self._vcns_update_ipsec_config(
-                context, vpnservice_id, ipsec_site_conn_id)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._resource_set_status(
-                    context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id,
-                    service_constants.ERROR)
-        super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
-            context, ipsec_site_conn_id)
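[Editorial aside, not part of the patch: _vcns_update_ipsec_config() above always rebuilds the full site list and pushes it to the edge in one call, so deleting a connection is simply "rebuild the list without that connection's id". A minimal sketch of that rebuild step:]

    def build_site_list(connections, removed_ipsec_conn_id=None):
        """Return the site payloads, skipping the connection being removed."""
        return [conn for conn in connections
                if conn['id'] != removed_ipsec_conn_id]

    # e.g. build_site_list([{'id': 'a'}, {'id': 'b'}], 'a') == [{'id': 'b'}]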
- """ - def __init__(self, plugin): - self.plugin = plugin - - def edge_deploy_started(self, task): - """callback when deployment task started.""" - jobdata = task.userdata['jobdata'] - context = jobdata['context'] - edge_id = task.userdata.get('edge_id') - neutron_router_id = jobdata['neutron_router_id'] - name = task.userdata['router_name'] - if edge_id: - LOG.debug("Start deploying %(edge_id)s for router %(name)s", { - 'edge_id': edge_id, - 'name': name}) - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, edge_id=edge_id) - else: - LOG.debug("Failed to deploy Edge for router %s", name) - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, - status=service_constants.ERROR) - - def edge_deploy_result(self, task): - """callback when deployment task finished.""" - jobdata = task.userdata['jobdata'] - lrouter = jobdata['lrouter'] - context = jobdata['context'] - name = task.userdata['router_name'] - neutron_router_id = jobdata['neutron_router_id'] - router_db = None - try: - router_db = self.plugin._get_router( - context, neutron_router_id) - except l3.RouterNotFound: - # Router might have been deleted before deploy finished - LOG.exception(_LE("Router %s not found"), lrouter['uuid']) - - if task.status == tasks_const.TaskStatus.COMPLETED: - LOG.debug("Successfully deployed %(edge_id)s for " - "router %(name)s", { - 'edge_id': task.userdata['edge_id'], - 'name': name}) - if (router_db and - router_db['status'] == service_constants.PENDING_CREATE): - router_db['status'] = service_constants.ACTIVE - - binding = vcns_db.get_vcns_router_binding( - context.session, neutron_router_id) - # only update status to active if its status is pending create - if binding['status'] == service_constants.PENDING_CREATE: - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, - status=service_constants.ACTIVE) - else: - LOG.debug("Failed to deploy Edge for router %s", name) - if router_db: - router_db['status'] = service_constants.ERROR - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, - status=service_constants.ERROR) - - def edge_delete_result(self, task): - jobdata = task.userdata['jobdata'] - router_id = task.userdata['router_id'] - context = jobdata['context'] - if task.status == tasks_const.TaskStatus.COMPLETED: - vcns_db.delete_vcns_router_binding(context.session, - router_id) - - def interface_update_result(self, task): - LOG.debug("interface_update_result %d", task.status) - - def snat_create_result(self, task): - LOG.debug("snat_create_result %d", task.status) - - def snat_delete_result(self, task): - LOG.debug("snat_delete_result %d", task.status) - - def dnat_create_result(self, task): - LOG.debug("dnat_create_result %d", task.status) - - def dnat_delete_result(self, task): - LOG.debug("dnat_delete_result %d", task.status) - - def routes_update_result(self, task): - LOG.debug("routes_update_result %d", task.status) - - def nat_update_result(self, task): - LOG.debug("nat_update_result %d", task.status) - - -def _process_base_create_lswitch_args(*args, **kwargs): - tags = utils.get_tags() - tags.append({"tag": args[1], - "scope": "quantum_net_id"}) - if args[2]: - tags.append({"tag": args[2], "scope": "os_tid"}) - switch_name = args[3] - tz_config = args[4] - if kwargs.get("shared", False) or len(args) >= 6: - tags.append({"tag": "true", "scope": "shared"}) - if kwargs.get("tags"): - tags.extend(kwargs["tags"]) - return switch_name, tz_config, tags diff --git 
diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/vcns_driver.py b/vmware-nsx/neutron/plugins/vmware/vshield/vcns_driver.py
deleted file mode 100644
index 148c575c37..0000000000
--- a/vmware-nsx/neutron/plugins/vmware/vshield/vcns_driver.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import config  # noqa
-from neutron.plugins.vmware.vshield import edge_appliance_driver
-from neutron.plugins.vmware.vshield import edge_firewall_driver
-from neutron.plugins.vmware.vshield import edge_ipsecvpn_driver
-from neutron.plugins.vmware.vshield import edge_loadbalancer_driver
-from neutron.plugins.vmware.vshield.tasks import tasks
-from neutron.plugins.vmware.vshield import vcns
-
-LOG = logging.getLogger(__name__)
-
-
-class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
-                 edge_firewall_driver.EdgeFirewallDriver,
-                 edge_loadbalancer_driver.EdgeLbDriver,
-                 edge_ipsecvpn_driver.EdgeIPsecVpnDriver):
-
-    def __init__(self, callbacks):
-        super(VcnsDriver, self).__init__()
-
-        self.callbacks = callbacks
-        self.vcns_uri = cfg.CONF.vcns.manager_uri
-        self.vcns_user = cfg.CONF.vcns.user
-        self.vcns_passwd = cfg.CONF.vcns.password
-        self.datacenter_moid = cfg.CONF.vcns.datacenter_moid
-        self.deployment_container_id = cfg.CONF.vcns.deployment_container_id
-        self.resource_pool_id = cfg.CONF.vcns.resource_pool_id
-        self.datastore_id = cfg.CONF.vcns.datastore_id
-        self.external_network = cfg.CONF.vcns.external_network
-        interval = cfg.CONF.vcns.task_status_check_interval
-        self.task_manager = tasks.TaskManager(interval)
-        self.task_manager.start()
-        self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)
diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/__init__.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
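[Editorial aside, not part of the patch: the removed VcnsDriver above is assembled purely by mixin composition; each aspect (appliance, firewall, load balancer, IPsec VPN) contributes methods and shares the state set up in __init__ (self.vcns, self.task_manager). A toy sketch of the pattern, with made-up class names:]

    class ApplianceMixin(object):
        def deploy_edge(self):
            return "deploy via %s" % self.api

    class FirewallMixin(object):
        def update_firewall(self):
            return "firewall via %s" % self.api

    class ToyDriver(ApplianceMixin, FirewallMixin):
        def __init__(self, api):
            self.api = api  # shared by every mixin, like self.vcns above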
diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_edge_router.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/test_edge_router.py
deleted file mode 100644
index 4daddf70b3..0000000000
--- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_edge_router.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-
-from eventlet import greenthread
-import mock
-from oslo.config import cfg
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron import context
-from neutron.extensions import l3
-from neutron import manager as n_manager
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.plugins import service as nsp
-from neutron.tests import base
-from neutron.tests.unit import test_l3_plugin
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware import test_nsx_plugin
-from neutron.tests.unit.vmware.vshield import fake_vcns
-
-_uuid = uuidutils.generate_uuid
-
-
-class ServiceRouterTestExtensionManager(object):
-
-    def get_resources(self):
-        # If l3 resources have been loaded and updated by the main API
-        # router, update the map in the l3 extension so it will load
-        # the same attributes as the API router
-        l3_attr_map = copy.deepcopy(l3.RESOURCE_ATTRIBUTE_MAP)
-        for res in l3.RESOURCE_ATTRIBUTE_MAP.keys():
-            attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res)
-            if attr_info:
-                l3.RESOURCE_ATTRIBUTE_MAP[res] = attr_info
-        resources = l3.L3.get_resources()
-        # restore the original resources once the controllers are created
-        l3.RESOURCE_ATTRIBUTE_MAP = l3_attr_map
-
-        return resources
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class ServiceRouterTest(test_nsx_plugin.L3NatTest,
-                        test_l3_plugin.L3NatTestCaseMixin):
-
-    def vcns_patch(self):
-        instance = self.mock_vcns.start()
-        self.vcns_instance = instance
-        instance.return_value.deploy_edge.side_effect = self.fc2.deploy_edge
-        instance.return_value.get_edge_id.side_effect = self.fc2.get_edge_id
-        instance.return_value.get_edge_deploy_status.side_effect = (
-            self.fc2.get_edge_deploy_status)
-        instance.return_value.delete_edge.side_effect = self.fc2.delete_edge
-        instance.return_value.update_interface.side_effect = (
-            self.fc2.update_interface)
-        instance.return_value.get_nat_config.side_effect = (
-            self.fc2.get_nat_config)
-        instance.return_value.update_nat_config.side_effect = (
-            self.fc2.update_nat_config)
-        instance.return_value.delete_nat_rule.side_effect = (
-            self.fc2.delete_nat_rule)
-        instance.return_value.get_edge_status.side_effect = (
-            self.fc2.get_edge_status)
-        instance.return_value.get_edges.side_effect = self.fc2.get_edges
-        instance.return_value.update_routes.side_effect = (
-            self.fc2.update_routes)
-        instance.return_value.create_lswitch.side_effect = (
-            self.fc2.create_lswitch)
-        instance.return_value.delete_lswitch.side_effect = (
-            self.fc2.delete_lswitch)
-        instance.return_value.get_loadbalancer_config.side_effect = (
-            self.fc2.get_loadbalancer_config)
-        instance.return_value.enable_service_loadbalancer.side_effect = (
-            self.fc2.enable_service_loadbalancer)
-
-    def setUp(self, ext_mgr=None, service_plugins=None):
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        cfg.CONF.set_override('task_status_check_interval', 200, group="vcns")
-
-        # vcns does not support duplicated router names, so ignore router
-        # name validation for unit-test cases
-        self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
-        self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
-        self.vcns_patch()
-        mock_proxy = mock.patch(
-            "%s.%s" % (vmware.SERVICE_PLUGIN_NAME,
                       '_set_create_lswitch_proxy'))
-        mock_proxy.start()
-
-        ext_mgr = ext_mgr or ServiceRouterTestExtensionManager()
-        super(ServiceRouterTest, self).setUp(
-            plugin=vmware.SERVICE_PLUGIN_NAME,
-            service_plugins=service_plugins,
-            ext_mgr=ext_mgr)
-
-        self.fc2.set_fake_nsx_api(self.fc)
-        self.addCleanup(self.fc2.reset_all)
-
-    def tearDown(self):
-        plugin = n_manager.NeutronManager.get_plugin()
-        manager = plugin.vcns_driver.task_manager
-        # wait max ~10 seconds for all tasks to be finished
-        for i in range(100):
-            if not manager.has_pending_task():
-                break
-            greenthread.sleep(0.1)
-        if manager.has_pending_task():
-            manager.show_pending_tasks()
-            raise Exception(_("Tasks not completed"))
-        manager.stop()
-        # Ensure the manager thread has been stopped
-        self.assertIsNone(manager._thread)
-        super(ServiceRouterTest, self).tearDown()
-
-    def _create_router(self, fmt, tenant_id, name=None,
-                       admin_state_up=None, set_context=False,
-                       arg_list=None, **kwargs):
-        data = {'router': {'tenant_id': tenant_id}}
-        if name:
-            data['router']['name'] = name
-        if admin_state_up:
-            data['router']['admin_state_up'] = admin_state_up
-        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
-            # Arg must be present and not empty
-            if arg in kwargs and kwargs[arg]:
-                data['router'][arg] = kwargs[arg]
-        data['router']['service_router'] = True
-        router_req = self.new_create_request('routers', data, fmt)
-        if set_context and tenant_id:
-            # create a specific auth context for this request
-            router_req.environ['neutron.context'] = context.Context(
-                '', tenant_id)
-
-        return router_req.get_response(self.ext_api)
-
-
-class ServiceRouterTestCase(ServiceRouterTest,
-                            test_nsx_plugin.TestL3NatTestCase):
-
-    def test_router_create(self):
-        name = 'router1'
-        tenant_id = _uuid()
-        expected_value = [('name', name), ('tenant_id', tenant_id),
-                          ('admin_state_up', True),
-                          ('external_gateway_info', None),
-                          ('service_router', True)]
-        with self.router(name=name, admin_state_up=True,
-                         tenant_id=tenant_id) as router:
-            expected_value_1 = expected_value + [('status', 'PENDING_CREATE')]
-            for k, v in expected_value_1:
-                self.assertEqual(router['router'][k], v)
-
-            # wait max ~10 seconds for the router status update
-            for i in range(20):
-                greenthread.sleep(0.5)
-                res = self._show('routers', router['router']['id'])
-                if res['router']['status'] == 'ACTIVE':
-                    break
-            expected_value_2 = expected_value + [('status', 'ACTIVE')]
-            for k, v in expected_value_2:
-                self.assertEqual(res['router'][k], v)
-
-            # check an integration lswitch is created
-            lswitch_name = "%s-ls" % name
-            for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
-                if lswitch['display_name'] == lswitch_name:
-                    break
-            else:
-                self.fail("Integration lswitch not found")
-
-        # check the integration lswitch is deleted
-        lswitch_name = "%s-ls" % name
-        for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
-            if lswitch['display_name'] == lswitch_name:
-                self.fail("Integration switch is not deleted")
-
-    def test_router_delete_after_plugin_restart(self):
-        name = 'router1'
-        tenant_id = _uuid()
-        with self.router(name=name, admin_state_up=True,
-                         tenant_id=tenant_id):
-            # clear the router type cache to mimic a plugin restart
-            plugin = n_manager.NeutronManager.get_plugin()
-            plugin._router_type = {}
-
-        # check the integration lswitch is deleted
-        lswitch_name = "%s-ls" % name
-        for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
-            if lswitch['display_name'] == lswitch_name:
-                self.fail("Integration switch is not deleted")
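[Editorial aside, not part of the patch: the tests above repeatedly poll for an async status change with greenthread.sleep(). A small reusable helper capturing that loop, eventlet-based like the tests; the helper name is hypothetical:]

    import eventlet

    def wait_for(predicate, attempts=20, delay=0.5):
        """Poll predicate() up to `attempts` times; True once it holds."""
        for _ in range(attempts):
            if predicate():
                return True
            eventlet.sleep(delay)
        return False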
-    def test_router_show(self):
-        name = 'router1'
-        tenant_id = _uuid()
-        expected_value = [('name', name), ('tenant_id', tenant_id),
-                          ('admin_state_up', True),
-                          ('status', 'PENDING_CREATE'),
-                          ('external_gateway_info', None),
-                          ('service_router', True)]
-        with self.router(name='router1', admin_state_up=True,
-                         tenant_id=tenant_id) as router:
-            res = self._show('routers', router['router']['id'])
-            for k, v in expected_value:
-                self.assertEqual(res['router'][k], v)
-
-    def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None):
-        super(ServiceRouterTestCase,
-              self)._test_router_create_with_gwinfo_and_l3_ext_net(
-                  vlan_id, validate_ext_gw=False)
-
-    def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None):
-        super(ServiceRouterTestCase,
-              self)._test_router_update_gateway_on_l3_ext_net(
-                  vlan_id, validate_ext_gw=False)
-
-    def test_floatingip_update(self):
-        self._test_floatingip_update(constants.FLOATINGIP_STATUS_ACTIVE)
-
-
-class TestProxyCreateLswitch(base.BaseTestCase):
-    def setUp(self):
-        super(TestProxyCreateLswitch, self).setUp()
-        self.tenant_id = "foo_tenant"
-        self.display_name = "foo_network"
-        self.tz_config = [
-            {'zone_uuid': 'foo_zone',
-             'transport_type': 'stt'}
-        ]
-        self.tags = utils.get_tags(quantum_net_id='foo_id',
-                                   os_tid=self.tenant_id)
-        self.cluster = None
-
-    def test_create_lswitch_with_basic_args(self):
-        result = nsp._process_base_create_lswitch_args(self.cluster,
-                                                       'foo_id',
-                                                       self.tenant_id,
-                                                       self.display_name,
-                                                       self.tz_config)
-        self.assertEqual(self.display_name, result[0])
-        self.assertEqual(self.tz_config, result[1])
-        self.assertEqual(sorted(self.tags), sorted(result[2]))
-
-    def test_create_lswitch_with_shared_as_kwarg(self):
-        result = nsp._process_base_create_lswitch_args(self.cluster,
-                                                       'foo_id',
-                                                       self.tenant_id,
-                                                       self.display_name,
-                                                       self.tz_config,
-                                                       shared=True)
-        expected = self.tags + [{'scope': 'shared', 'tag': 'true'}]
-        self.assertEqual(sorted(expected), sorted(result[2]))
-
-    def test_create_lswitch_with_shared_as_arg(self):
-        result = nsp._process_base_create_lswitch_args(self.cluster,
-                                                       'foo_id',
-                                                       self.tenant_id,
-                                                       self.display_name,
-                                                       self.tz_config,
-                                                       True)
-        additional_tags = [{'scope': 'shared', 'tag': 'true'}]
-        expected = self.tags + additional_tags
-        self.assertEqual(sorted(expected), sorted(result[2]))
-
-    def test_create_lswitch_with_additional_tags(self):
-        more_tags = [{'scope': 'foo_scope', 'tag': 'foo_tag'}]
-        result = nsp._process_base_create_lswitch_args(self.cluster,
-                                                       'foo_id',
-                                                       self.tenant_id,
-                                                       self.display_name,
-                                                       self.tz_config,
-                                                       tags=more_tags)
-        expected = self.tags + more_tags
-        self.assertEqual(sorted(expected), sorted(result[2]))
diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_firewall_driver.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/test_firewall_driver.py
deleted file mode 100644
index 91cffc3bad..0000000000
--- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_firewall_driver.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import contextlib
-import mock
-import webob.exc
-
-from neutron import context
-from neutron.db.firewall import firewall_db
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc
-from neutron.plugins.vmware.vshield import vcns_driver
-from neutron.tests.unit.db.firewall import test_db_firewall
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.vshield import fake_vcns
-
-
-_uuid = uuidutils.generate_uuid
-
-VSE_ID = 'edge-1'
-ROUTER_ID = '42f95450-5cc9-44e4-a744-1320e592a9d5'
-
-VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test")
-
-
-class VcnsDriverTestCase(test_db_firewall.FirewallPluginDbTestCase,
-                         firewall_db.Firewall_db_mixin):
-
-    def vcns_firewall_patch(self):
-        instance = self.mock_vcns.start()
-        instance.return_value.update_firewall.side_effect = (
-            self.fc2.update_firewall)
-        instance.return_value.delete_firewall.side_effect = (
-            self.fc2.delete_firewall)
-        instance.return_value.update_firewall_rule.side_effect = (
-            self.fc2.update_firewall_rule)
-        instance.return_value.delete_firewall_rule.side_effect = (
-            self.fc2.delete_firewall_rule)
-        instance.return_value.add_firewall_rule_above.side_effect = (
-            self.fc2.add_firewall_rule_above)
-        instance.return_value.add_firewall_rule.side_effect = (
-            self.fc2.add_firewall_rule)
-        instance.return_value.get_firewall.side_effect = (
-            self.fc2.get_firewall)
-        instance.return_value.get_firewall_rule.side_effect = (
-            self.fc2.get_firewall_rule)
-
-    def setUp(self):
-
-        self.config_parse(args=['--config-file', VCNS_CONFIG_FILE])
-        # mock vcns
-        self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
-        self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
-        self.vcns_firewall_patch()
-
-        self.driver = vcns_driver.VcnsDriver(mock.Mock())
-
-        super(VcnsDriverTestCase, self).setUp()
-        self.addCleanup(self.fc2.reset_all)
-        self.addCleanup(self.mock_vcns.stop)
-
-        self.tenant_id = _uuid()
-        self.subnet_id = _uuid()
-
-
-class TestEdgeFwDriver(VcnsDriverTestCase):
-
-    def _make_firewall_dict_with_rules(self, context, firewall_id):
-        fw = self.get_firewall(context, firewall_id)
-        fw_policy_id = fw['firewall_policy_id']
-        if fw_policy_id:
-            firewall_policy_db = self._get_firewall_policy(
-                context, fw_policy_id)
-            fw['firewall_rule_list'] = [
-                self._make_firewall_rule_dict(fw_rule_db)
-                for fw_rule_db in firewall_policy_db['firewall_rules']
-            ]
-
-        return fw
-
-    def _compare_firewall_rule_lists(self, firewall_policy_id,
-                                     list1, list2):
-        for r1, r2 in zip(list1, list2):
-            rule = r1['firewall_rule']
-            rule['firewall_policy_id'] = firewall_policy_id
-            for k in rule:
-                self.assertEqual(rule[k], r2[k])
-
-    def test_create_and_get_firewall(self):
-        ctx = context.get_admin_context()
-        name = 'firewall'
-        with contextlib.nested(self.firewall_rule(name='fwr1',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr2',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr3',
-                                                  do_delete=False)) as fr:
-            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-            with self.firewall_policy(firewall_rules=fw_rule_ids,
-                                      do_delete=False) as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                with self.firewall(name=name,
-                                   firewall_policy_id=fwp_id) as firewall:
-                    fw_create = firewall['firewall']
-                    fw_expect = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_expect)
-                    fw_get = self.driver.get_firewall(ctx, VSE_ID)
-                    self._compare_firewall_rule_lists(
-                        fwp_id, fw_get['firewall_rule_list'],
-                        fw_expect['firewall_rule_list'])
-
-    def test_update_firewall_with_rules(self):
-        ctx = context.get_admin_context()
-        name = 'new_firewall'
-        with contextlib.nested(self.firewall_rule(name='fwr1',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr2',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr3',
-                                                  do_delete=False)) as fr:
-            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-            with self.firewall_policy(firewall_rules=fw_rule_ids,
-                                      do_delete=False) as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                with self.firewall(name=name,
-                                   firewall_policy_id=fwp_id) as firewall:
-                    fw_create = firewall['firewall']
-                    fw_create = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_create)
-
-                    data = {'firewall_rule': {'name': name,
-                                              'source_port': '10:20',
-                                              'destination_port': '30:40'}}
-                    self.new_update_request('firewall_rules', data,
-                                            fr[0]['firewall_rule']['id'])
-                    fw_expect = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_expect)
-
-                    fw_get = self.driver.get_firewall(
-                        ctx, VSE_ID)
-                    self._compare_firewall_rule_lists(
-                        fwp_id, fw_get['firewall_rule_list'],
-                        fw_expect['firewall_rule_list'])
-
-    def test_delete_firewall(self):
-        ctx = context.get_admin_context()
-        name = 'firewall'
-        with contextlib.nested(self.firewall_rule(name='fwr1',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr2',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr3',
-                                                  do_delete=False)) as fr:
-            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-            with self.firewall_policy(firewall_rules=fw_rule_ids,
-                                      do_delete=False) as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                with self.firewall(name=name,
-                                   firewall_policy_id=fwp_id) as firewall:
-                    fw_create = firewall['firewall']
-                    fw_expect = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_expect)
-                    self.driver.delete_firewall(ctx, VSE_ID)
-                    fw_get = self.driver.get_firewall(
-                        ctx, VSE_ID)
-                    self.assertFalse(fw_get['firewall_rule_list'])
-
-    def test_update_firewall_rule(self):
-        ctx = context.get_admin_context()
-        name = 'new_firewall'
-        with contextlib.nested(self.firewall_rule(name='fwr1',
-                                                  do_delete=False)) as fr:
-            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-            with self.firewall_policy(firewall_rules=fw_rule_ids,
-                                      do_delete=False) as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                with self.firewall(name=name,
-                                   firewall_policy_id=fwp_id) as firewall:
-                    fw_create = firewall['firewall']
-                    fw_create = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_create)
-
-                    data = {'firewall_rule': {'name': name,
-                                              'source_port': '10:20',
-                                              'destination_port': '30:40'}}
-                    req = self.new_update_request(
-                        'firewall_rules', data,
-                        fr[0]['firewall_rule']['id'])
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.ext_api))
-                    rule_expect = res['firewall_rule']
-                    rule_expect['edge_id'] = VSE_ID
-                    self.driver.update_firewall_rule(
-                        ctx, rule_expect['id'], VSE_ID, rule_expect)
-                    rule_get = self.driver.get_firewall_rule(
-                        ctx, rule_expect['id'], VSE_ID)
-                    for k, v in rule_get['firewall_rule'].items():
-                        self.assertEqual(rule_expect[k], v)
-
-    def test_delete_firewall_rule(self):
-        ctx = context.get_admin_context()
-        name = 'new_firewall'
-        with contextlib.nested(self.firewall_rule(name='fwr1',
-                                                  do_delete=False),
-                               self.firewall_rule(name='fwr2',
-                                                  do_delete=False)) as fr:
-            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-            with self.firewall_policy(firewall_rules=fw_rule_ids,
-                                      do_delete=False) as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                with self.firewall(name=name,
-                                   firewall_policy_id=fwp_id) as firewall:
-                    fw_create = firewall['firewall']
-                    fw_create = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_create)
-
-                    fr[0]['firewall_rule']['edge_id'] = VSE_ID
-                    self.driver.delete_firewall_rule(
-                        ctx, fr[0]['firewall_rule']['id'],
-                        VSE_ID)
-                    self.assertRaises(vcns_exc.VcnsNotFound,
-                                      self.driver.get_firewall_rule,
-                                      ctx, fr[0]['firewall_rule']['id'],
-                                      VSE_ID)
-
-    def test_insert_rule(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(firewall_policy_id=fwp_id) as firewall:
-                fw_create = firewall['firewall']
-                fw_create = self._make_firewall_dict_with_rules(
-                    ctx, fw_create['id'])
-                self.driver.update_firewall(ctx, VSE_ID, fw_create)
-                with contextlib.nested(self.firewall_rule(name='fwr0',
-                                                          do_delete=False),
-                                       self.firewall_rule(name='fwr1',
-                                                          do_delete=False),
-                                       self.firewall_rule(name='fwr2',
-                                                          do_delete=False),
-                                       self.firewall_rule(name='fwr3',
-                                                          do_delete=False),
-                                       self.firewall_rule(name='fwr4',
-                                                          do_delete=False),
-                                       self.firewall_rule(name='fwr5',
                                                          do_delete=False),
-                                       self.firewall_rule(
-                                           name='fwr6',
-                                           do_delete=False)) as fwr:
-                    # test insert when the rule list is empty
-                    fwr0_id = fwr[0]['firewall_rule']['id']
-                    self._rule_action('insert', fwp_id, fwr0_id,
-                                      insert_before=None,
-                                      insert_after=None,
-                                      expected_code=webob.exc.HTTPOk.code)
-                    fw_update = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-                    self.driver.update_firewall(ctx, VSE_ID, fw_update)
-                    # test insert at the top of the list, above an existing
-                    # rule
-                    fwr1_id = fwr[1]['firewall_rule']['id']
-                    self._rule_action('insert', fwp_id, fwr1_id,
-                                      insert_before=fwr0_id,
-                                      insert_after=None,
-                                      expected_code=webob.exc.HTTPOk.code)
-
-                    fw_expect = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-
-                    rule_info = {'firewall_rule_id': fwr1_id,
-                                 'insert_before': fwr0_id,
-                                 'insert_after': None}
-                    rule = fwr[1]['firewall_rule']
-                    self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
-                    fw_get = self.driver.get_firewall(
-                        ctx, VSE_ID)
-                    self._compare_firewall_rule_lists(
-                        fwp_id, fw_get['firewall_rule_list'],
-                        fw_expect['firewall_rule_list'])
-                    # test insert at the bottom of the list
-                    fwr2_id = fwr[2]['firewall_rule']['id']
-                    self._rule_action('insert', fwp_id, fwr2_id,
-                                      insert_before=None,
-                                      insert_after=fwr0_id,
-                                      expected_code=webob.exc.HTTPOk.code)
-                    fw_expect = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-
-                    rule_info = {'firewall_rule_id': fwr2_id,
-                                 'insert_before': None,
-                                 'insert_after': fwr0_id}
-                    rule = fwr[2]['firewall_rule']
-                    self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
-                    fw_get = self.driver.get_firewall(
-                        ctx, VSE_ID)
-                    self._compare_firewall_rule_lists(
-                        fwp_id, fw_get['firewall_rule_list'],
-                        fw_expect['firewall_rule_list'])
-                    # test insert in the middle of the list using
-                    # insert_before
-                    fwr3_id = fwr[3]['firewall_rule']['id']
-                    self._rule_action('insert', fwp_id, fwr3_id,
-                                      insert_before=fwr2_id,
-                                      insert_after=None,
-                                      expected_code=webob.exc.HTTPOk.code)
-                    fw_expect = self._make_firewall_dict_with_rules(
-                        ctx, fw_create['id'])
-
-                    rule_info = {'firewall_rule_id': fwr3_id,
-                                 'insert_before': fwr2_id,
-                                 'insert_after': None}
-                    rule = fwr[3]['firewall_rule']
-                    self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
self.driver.get_firewall( - ctx, VSE_ID) - self._compare_firewall_rule_lists( - fwp_id, fw_get['firewall_rule_list'], - fw_expect['firewall_rule_list']) - # test insert in the middle of the list using - # insert_after - fwr4_id = fwr[4]['firewall_rule']['id'] - self._rule_action('insert', fwp_id, fwr4_id, - insert_before=None, - insert_after=fwr3_id, - expected_code=webob.exc.HTTPOk.code) - fw_expect = self._make_firewall_dict_with_rules( - ctx, fw_create['id']) - - rule_info = {'firewall_rule_id': fwr4_id, - 'insert_before': None, - 'insert_after': fwr3_id} - rule = fwr[4]['firewall_rule'] - self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) - fw_get = self.driver.get_firewall( - ctx, VSE_ID) - self._compare_firewall_rule_lists( - fwp_id, fw_get['firewall_rule_list'], - fw_expect['firewall_rule_list']) - # test insert when both insert_before and - # insert_after are set - fwr5_id = fwr[5]['firewall_rule']['id'] - self._rule_action('insert', fwp_id, fwr5_id, - insert_before=fwr4_id, - insert_after=fwr4_id, - expected_code=webob.exc.HTTPOk.code) - fw_expect = self._make_firewall_dict_with_rules( - ctx, fw_create['id']) - - rule_info = {'firewall_rule_id': fwr5_id, - 'insert_before': fwr4_id, - 'insert_after': fwr4_id} - rule = fwr[5]['firewall_rule'] - self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) - fw_get = self.driver.get_firewall( - ctx, VSE_ID) - self._compare_firewall_rule_lists( - fwp_id, fw_get['firewall_rule_list'], - fw_expect['firewall_rule_list']) diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py deleted file mode 100644 index a954a32b7d..0000000000 --- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py +++ /dev/null @@ -1,682 +0,0 @@ -# Copyright 2013 VMware, Inc -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
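The vcns firewall driver tests above all follow one round trip: build the expected firewall dict from the Neutron DB with `_make_firewall_dict_with_rules`, push it to the edge with `update_firewall`, read it back with `get_firewall`, and compare the rule lists. A minimal sketch of that pattern, with an illustrative `InMemoryEdgeDriver` standing in for the mocked VCNS driver (the class and its storage are assumptions, not the real `vcns_driver` API):

```python
# Condensed round-trip pattern from the driver tests above. The in-memory
# driver is a hypothetical stand-in for the mocked VCNS backend; VSE_ID
# mirrors the 'edge-1' constant these tests use.
VSE_ID = 'edge-1'

class InMemoryEdgeDriver(object):
    def __init__(self):
        self._edges = {}

    def update_firewall(self, edge_id, firewall):
        # Push the desired firewall config for one edge appliance.
        self._edges[edge_id] = firewall

    def get_firewall(self, edge_id):
        # Read back what the edge currently holds.
        return self._edges[edge_id]

driver = InMemoryEdgeDriver()
expected = {'firewall_rule_list': [{'id': 'fwr1'}, {'id': 'fwr2'}]}
driver.update_firewall(VSE_ID, expected)
fw_get = driver.get_firewall(VSE_ID)
assert fw_get['firewall_rule_list'] == expected['firewall_rule_list']
```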
-# - -import contextlib -import copy -import webob.exc - -from neutron.api.v2 import attributes -from neutron import context -from neutron.extensions import firewall -from neutron import manager -from neutron.openstack.common import uuidutils -from neutron.plugins.common import constants as const -from neutron.tests.unit.db.firewall import test_db_firewall -from neutron.tests.unit.vmware.vshield import test_edge_router - -_uuid = uuidutils.generate_uuid - -FW_PLUGIN_CLASS = "neutron.plugins.vmware.plugin.NsxServicePlugin" - - -class FirewallTestExtensionManager( - test_edge_router.ServiceRouterTestExtensionManager): - - def get_resources(self): - # If l3 resources have been loaded and updated by main API - # router, update the map in the l3 extension so it will load - # the same attributes as the API router - resources = super(FirewallTestExtensionManager, self).get_resources() - firewall_attr_map = copy.deepcopy(firewall.RESOURCE_ATTRIBUTE_MAP) - for res in firewall.RESOURCE_ATTRIBUTE_MAP.keys(): - attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) - if attr_info: - firewall.RESOURCE_ATTRIBUTE_MAP[res] = attr_info - fw_resources = firewall.Firewall.get_resources() - # restore the original resources once the controllers are created - firewall.RESOURCE_ATTRIBUTE_MAP = firewall_attr_map - - resources.extend(fw_resources) - - return resources - - def get_actions(self): - return [] - - def get_request_extensions(self): - return [] - - -class FirewallPluginTestCase(test_db_firewall.FirewallPluginDbTestCase, - test_edge_router.ServiceRouterTest): - - def vcns_firewall_patch(self): - self.vcns_instance.return_value.update_firewall.side_effect = ( - self.fc2.update_firewall) - self.vcns_instance.return_value.delete_firewall.side_effect = ( - self.fc2.delete_firewall) - self.vcns_instance.return_value.update_firewall_rule.side_effect = ( - self.fc2.update_firewall_rule) - self.vcns_instance.return_value.delete_firewall_rule.side_effect = ( - self.fc2.delete_firewall_rule) - self.vcns_instance.return_value.add_firewall_rule_above.side_effect = ( - self.fc2.add_firewall_rule_above) - self.vcns_instance.return_value.add_firewall_rule.side_effect = ( - self.fc2.add_firewall_rule) - self.vcns_instance.return_value.get_firewall.side_effect = ( - self.fc2.get_firewall) - self.vcns_instance.return_value.get_firewall_rule.side_effect = ( - self.fc2.get_firewall_rule) - - def setUp(self): - # Save the global RESOURCE_ATTRIBUTE_MAP - self.saved_attr_map = {} - for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): - self.saved_attr_map[resource] = attrs.copy() - - super(FirewallPluginTestCase, self).setUp( - ext_mgr=FirewallTestExtensionManager(), - fw_plugin=FW_PLUGIN_CLASS) - self.vcns_firewall_patch() - self.plugin = manager.NeutronManager.get_plugin() - - def tearDown(self): - super(FirewallPluginTestCase, self).tearDown() - # Restore the global RESOURCE_ATTRIBUTE_MAP - attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map - self.ext_api = None - self.plugin = None - - def _create_and_get_router(self): - req = self._create_router(self.fmt, self._tenant_id) - res = self.deserialize(self.fmt, req) - return res['router']['id'] - - def _create_firewall(self, fmt, name, description, firewall_policy_id, - admin_state_up=True, expected_res_status=None, - **kwargs): - data = {'firewall': {'name': name, - 'description': description, - 'firewall_policy_id': firewall_policy_id, - 'router_id': kwargs.get('router_id'), - 'admin_state_up': admin_state_up, - 'tenant_id': self._tenant_id}} - - 
firewall_req = self.new_create_request('firewalls', data, fmt) - firewall_res = firewall_req.get_response(self.ext_api) - if expected_res_status: - self.assertEqual(firewall_res.status_int, expected_res_status) - - return firewall_res - - def test_create_firewall(self): - name = "new_fw" - attrs = self._get_test_firewall_attrs(name) - - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['firewall_policy_id'] = fwp_id - attrs['router_id'] = self._create_and_get_router() - with self.firewall( - name=name, - firewall_policy_id=fwp_id, - router_id=attrs['router_id'], - admin_state_up=test_db_firewall.ADMIN_STATE_UP, - expected_res_status=201 - ) as fw: - attrs = self._replace_firewall_status( - attrs, const.PENDING_CREATE, const.ACTIVE) - for k, v in attrs.iteritems(): - self.assertEqual(fw['firewall'][k], v) - - def test_create_firewall_without_policy(self): - name = "new_fw" - attrs = self._get_test_firewall_attrs(name) - attrs['router_id'] = self._create_and_get_router() - - with self.firewall( - name=name, - router_id=attrs['router_id'], - admin_state_up=test_db_firewall.ADMIN_STATE_UP, - expected_res_status=201 - ) as fw: - attrs = self._replace_firewall_status( - attrs, const.PENDING_CREATE, const.ACTIVE) - for k, v in attrs.iteritems(): - self.assertEqual(fw['firewall'][k], v) - - def test_update_firewall(self): - name = "new_fw" - attrs = self._get_test_firewall_attrs(name) - attrs['router_id'] = self._create_and_get_router() - - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['firewall_policy_id'] = fwp_id - with self.firewall( - firewall_policy_id=fwp_id, router_id=attrs['router_id'], - admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw: - fw_id = fw['firewall']['id'] - new_data = {'firewall': {'name': name}} - req = self.new_update_request('firewalls', new_data, fw_id) - res = req.get_response(self.ext_api) - self.assertEqual(res.status_int, 200) - res_json = self.deserialize( - self.fmt, res) - attrs = self._replace_firewall_status( - attrs, const.PENDING_CREATE, const.ACTIVE) - for k, v in attrs.iteritems(): - self.assertEqual(res_json['firewall'][k], v) - - def test_delete_firewall(self): - ctx = context.get_admin_context() - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - with self.firewall( - firewall_policy_id=fwp_id, - router_id=self._create_and_get_router(), - admin_state_up=test_db_firewall.ADMIN_STATE_UP, - do_delete=False) as fw: - fw_id = fw['firewall']['id'] - with ctx.session.begin(subtransactions=True): - req = self.new_delete_request('firewalls', fw_id) - res = req.get_response(self.ext_api) - self.assertEqual(res.status_int, 204) - self.assertRaises( - firewall.FirewallNotFound, - self.plugin.get_firewall, ctx, fw_id) - - def test_delete_router_in_use_by_fwservice(self): - router_id = self._create_and_get_router() - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - with self.firewall( - name='fw', - firewall_policy_id=fwp_id, - router_id=router_id, - admin_state_up=test_db_firewall.ADMIN_STATE_UP, - expected_res_status=201 - ): - self._delete('routers', router_id, - expected_code=webob.exc.HTTPConflict.code) - - def test_show_firewall(self): - name = "firewall1" - attrs = self._get_test_firewall_attrs(name) - attrs['router_id'] = self._create_and_get_router() - - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['firewall_policy_id'] = fwp_id - with self.firewall( - name=name, - firewall_policy_id=fwp_id, 
router_id=attrs['router_id'], - admin_state_up=test_db_firewall.ADMIN_STATE_UP) as firewall: - - req = self.new_show_request('firewalls', - firewall['firewall']['id'], - fmt=self.fmt) - res = self.deserialize(self.fmt, - req.get_response(self.ext_api)) - attrs = self._replace_firewall_status( - attrs, const.PENDING_CREATE, const.ACTIVE) - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall'][k], v) - - def test_list_firewalls(self): - keys_list = [] - for i in range(3): - keys_list.append({'name': "fw" + str(i), - 'router_id': self._create_and_get_router(), - 'admin_state_up': True, - 'status': "ACTIVE"}) - - with contextlib.nested( - self.firewall( - name='fw0', router_id=keys_list[0]['router_id'], - admin_state_up=True, description='fw'), - self.firewall( - name='fw1', router_id=keys_list[1]['router_id'], - admin_state_up=True, description='fw'), - self.firewall( - name='fw2', router_id=keys_list[2]['router_id'], - admin_state_up=True, description='fw'), - ) as (fw1, fw2, fw3): - self._test_list_resources( - 'firewall', (fw1, fw2, fw3), - query_params='description=fw') - - req = self.new_list_request('firewalls') - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - self.assertEqual(len(res['firewalls']), 3) - for index in range(len(res['firewalls'])): - for k, v in keys_list[index].items(): - self.assertEqual(res['firewalls'][index][k], v) - - def test_create_firewall_with_rules(self): - ctx = context.get_admin_context() - with contextlib.nested(self.firewall_rule(name='fwr1'), - self.firewall_rule(name='fwr2'), - self.firewall_rule(name='fwr3')) as fr: - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - fw_rule_ids = [r['firewall_rule']['id'] for r in fr] - data = {'firewall_policy': - {'firewall_rules': fw_rule_ids}} - req = self.new_update_request( - 'firewall_policies', data, fwp_id) - req.get_response(self.ext_api) - attrs = self._get_test_firewall_attrs() - attrs['firewall_policy_id'] = fwp_id - with self.firewall( - firewall_policy_id=fwp_id, - router_id=self._create_and_get_router(), - admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw: - rule_list = ( - self.plugin._make_firewall_rule_list_by_policy_id( - ctx, fw['firewall']['firewall_policy_id'])) - self._compare_firewall_rule_lists( - fwp_id, fr, rule_list) - - def test_update_firewall_policy_with_no_firewall(self): - name = "new_firewall_policy1" - attrs = self._get_test_firewall_policy_attrs(name, audited=False) - - with self.firewall_policy(shared=test_db_firewall.SHARED, - firewall_rules=None, - audited=test_db_firewall.AUDITED) as fwp: - data = {'firewall_policy': {'name': name}} - req = self.new_update_request('firewall_policies', data, - fwp['firewall_policy']['id']) - res = self.deserialize(self.fmt, req.get_response(self.ext_api)) - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall_policy'][k], v) - - def test_update_firewall_policy_with_firewall(self): - name = "new_firewall_policy1" - attrs = self._get_test_firewall_policy_attrs(name, audited=False) - - with self.firewall_policy(shared=test_db_firewall.SHARED, - firewall_rules=None, - audited=test_db_firewall.AUDITED) as fwp: - fwp_id = fwp['firewall_policy']['id'] - with self.firewall( - firewall_policy_id=fwp_id, - router_id=self._create_and_get_router(), - admin_state_up=test_db_firewall.ADMIN_STATE_UP - ): - data = {'firewall_policy': {'name': name}} - req = self.new_update_request( - 'firewall_policies', data, fwp['firewall_policy']['id']) - res = self.deserialize( - self.fmt, 
req.get_response(self.ext_api)) - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall_policy'][k], v) - - def test_update_firewall_rule_with_no_firewall(self): - name = "new_firewall_rule1" - attrs = self._get_test_firewall_rule_attrs(name) - - attrs['source_port'] = '10:20' - attrs['destination_port'] = '30:40' - with self.firewall_rule() as fwr: - data = {'firewall_rule': {'name': name, - 'source_port': '10:20', - 'destination_port': '30:40'}} - req = self.new_update_request( - 'firewall_rules', data, fwr['firewall_rule']['id']) - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall_rule'][k], v) - - attrs['source_port'] = '10000' - attrs['destination_port'] = '80' - with self.firewall_rule() as fwr: - data = {'firewall_rule': {'name': name, - 'source_port': 10000, - 'destination_port': 80}} - req = self.new_update_request('firewall_rules', data, - fwr['firewall_rule']['id']) - res = self.deserialize(self.fmt, req.get_response(self.ext_api)) - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall_rule'][k], v) - - attrs['source_port'] = None - attrs['destination_port'] = None - with self.firewall_rule() as fwr: - data = {'firewall_rule': {'name': name, - 'source_port': None, - 'destination_port': None}} - req = self.new_update_request( - 'firewall_rules', data, fwr['firewall_rule']['id']) - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall_rule'][k], v) - - def test_update_firewall_rule_with_firewall(self): - name = "new_firewall_rule1" - attrs = self._get_test_firewall_rule_attrs(name) - with self.firewall_rule() as fwr: - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['firewall_policy_id'] = fwp_id - with self.firewall( - firewall_policy_id=fwp_id, - router_id=self._create_and_get_router(), - admin_state_up=test_db_firewall.ADMIN_STATE_UP - ): - fwr_id = fwr['firewall_rule']['id'] - data = {'firewall_policy': {'firewall_rules': [fwr_id]}} - req = self.new_update_request( - 'firewall_policies', data, - fwp['firewall_policy']['id']) - req.get_response(self.ext_api) - data = {'firewall_rule': {'name': name}} - req = self.new_update_request( - 'firewall_rules', data, - fwr['firewall_rule']['id']) - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - attrs['firewall_policy_id'] = fwp_id - for k, v in attrs.iteritems(): - self.assertEqual(res['firewall_rule'][k], v) - - def test_insert_rule_with_no_firewall(self): - attrs = self._get_test_firewall_policy_attrs() - attrs['audited'] = False - attrs['firewall_list'] = [] - with contextlib.nested(self.firewall_rule(name='fwr0'), - self.firewall_rule(name='fwr1'), - self.firewall_rule(name='fwr2'), - self.firewall_rule(name='fwr3'), - self.firewall_rule(name='fwr4'), - self.firewall_rule(name='fwr5'), - self.firewall_rule(name='fwr6')) as fwr: - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['id'] = fwp_id - # test insert when rule list is empty - fwr0_id = fwr[0]['firewall_rule']['id'] - attrs['firewall_rules'].insert(0, fwr0_id) - self._rule_action('insert', fwp_id, fwr0_id, - insert_before=None, - insert_after=None, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert at top of rule list, insert_before and - # insert_after not provided - fwr1_id = fwr[1]['firewall_rule']['id'] - attrs['firewall_rules'].insert(0, fwr1_id) - insert_data = 
{'firewall_rule_id': fwr1_id} - self._rule_action('insert', fwp_id, fwr0_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs, body_data=insert_data) - # test insert at top of list above existing rule - fwr2_id = fwr[2]['firewall_rule']['id'] - attrs['firewall_rules'].insert(0, fwr2_id) - self._rule_action('insert', fwp_id, fwr2_id, - insert_before=fwr1_id, - insert_after=None, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert at bottom of list - fwr3_id = fwr[3]['firewall_rule']['id'] - attrs['firewall_rules'].append(fwr3_id) - self._rule_action('insert', fwp_id, fwr3_id, - insert_before=None, - insert_after=fwr0_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert in the middle of the list using - # insert_before - fwr4_id = fwr[4]['firewall_rule']['id'] - attrs['firewall_rules'].insert(1, fwr4_id) - self._rule_action('insert', fwp_id, fwr4_id, - insert_before=fwr1_id, - insert_after=None, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert in the middle of the list using - # insert_after - fwr5_id = fwr[5]['firewall_rule']['id'] - attrs['firewall_rules'].insert(1, fwr5_id) - self._rule_action('insert', fwp_id, fwr5_id, - insert_before=None, - insert_after=fwr2_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert when both insert_before and - # insert_after are set - fwr6_id = fwr[6]['firewall_rule']['id'] - attrs['firewall_rules'].insert(1, fwr6_id) - self._rule_action('insert', fwp_id, fwr6_id, - insert_before=fwr5_id, - insert_after=fwr5_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - - def test_insert_rule_with_firewall(self): - attrs = self._get_test_firewall_policy_attrs() - attrs['audited'] = False - attrs['firewall_list'] = [] - with contextlib.nested(self.firewall_rule(name='fwr0'), - self.firewall_rule(name='fwr1'), - self.firewall_rule(name='fwr2'), - self.firewall_rule(name='fwr3'), - self.firewall_rule(name='fwr4'), - self.firewall_rule(name='fwr5'), - self.firewall_rule(name='fwr6')) as fwr: - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['id'] = fwp_id - with self.firewall(router_id=self._create_and_get_router(), - firewall_policy_id=fwp_id) as fw: - # test insert when rule list is empty - fwr0_id = fwr[0]['firewall_rule']['id'] - attrs['firewall_rules'].insert(0, fwr0_id) - attrs['firewall_list'].insert(0, fw['firewall']['id']) - self._rule_action('insert', fwp_id, fwr0_id, - insert_before=None, - insert_after=None, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert at top of rule list, insert_before and - # insert_after not provided - fwr1_id = fwr[1]['firewall_rule']['id'] - attrs['firewall_rules'].insert(0, fwr1_id) - insert_data = {'firewall_rule_id': fwr1_id} - self._rule_action( - 'insert', fwp_id, fwr0_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs, body_data=insert_data) - # test insert at top of list above existing rule - fwr2_id = fwr[2]['firewall_rule']['id'] - attrs['firewall_rules'].insert(0, fwr2_id) - self._rule_action('insert', fwp_id, fwr2_id, - insert_before=fwr1_id, - insert_after=None, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert at bottom of list - fwr3_id = fwr[3]['firewall_rule']['id'] - attrs['firewall_rules'].append(fwr3_id) - self._rule_action('insert', fwp_id, fwr3_id, - insert_before=None, - insert_after=fwr0_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert 
in the middle of the list using - # insert_before - fwr4_id = fwr[4]['firewall_rule']['id'] - attrs['firewall_rules'].insert(1, fwr4_id) - self._rule_action('insert', fwp_id, fwr4_id, - insert_before=fwr1_id, - insert_after=None, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert in the middle of the list using - # insert_after - fwr5_id = fwr[5]['firewall_rule']['id'] - attrs['firewall_rules'].insert(1, fwr5_id) - self._rule_action('insert', fwp_id, fwr5_id, - insert_before=None, - insert_after=fwr2_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - # test insert when both insert_before and - # insert_after are set - fwr6_id = fwr[6]['firewall_rule']['id'] - attrs['firewall_rules'].insert(1, fwr6_id) - self._rule_action('insert', fwp_id, fwr6_id, - insert_before=fwr5_id, - insert_after=fwr5_id, - expected_code=webob.exc.HTTPOk.code, - expected_body=attrs) - - def test_remove_rule_with_no_firewall(self): - attrs = self._get_test_firewall_policy_attrs() - attrs['audited'] = False - attrs['firewall_list'] = [] - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['id'] = fwp_id - with contextlib.nested(self.firewall_rule(name='fwr1'), - self.firewall_rule(name='fwr2'), - self.firewall_rule(name='fwr3')) as fr1: - fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] - attrs['firewall_rules'] = fw_rule_ids[:] - data = {'firewall_policy': - {'firewall_rules': fw_rule_ids}} - req = self.new_update_request('firewall_policies', data, - fwp_id) - req.get_response(self.ext_api) - # test removing a rule from a policy that does not exist - self._rule_action('remove', '123', fw_rule_ids[1], - expected_code=webob.exc.HTTPNotFound.code, - expected_body=None) - # test removing a rule in the middle of the list - attrs['firewall_rules'].remove(fw_rule_ids[1]) - self._rule_action('remove', fwp_id, fw_rule_ids[1], - expected_body=attrs) - # test removing a rule at the top of the list - attrs['firewall_rules'].remove(fw_rule_ids[0]) - self._rule_action('remove', fwp_id, fw_rule_ids[0], - expected_body=attrs) - # test removing remaining rule in the list - attrs['firewall_rules'].remove(fw_rule_ids[2]) - self._rule_action('remove', fwp_id, fw_rule_ids[2], - expected_body=attrs) - # test removing rule that is not associated with the policy - self._rule_action('remove', fwp_id, fw_rule_ids[2], - expected_code=webob.exc.HTTPBadRequest.code, - expected_body=None) - - def test_remove_rule_with_firewall(self): - attrs = self._get_test_firewall_policy_attrs() - attrs['audited'] = False - attrs['firewall_list'] = [] - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['id'] = fwp_id - with self.firewall(router_id=self._create_and_get_router(), - firewall_policy_id=fwp_id) as fw: - attrs['firewall_list'].insert(0, fw['firewall']['id']) - with contextlib.nested(self.firewall_rule(name='fwr1'), - self.firewall_rule(name='fwr2'), - self.firewall_rule(name='fwr3')) as fr1: - fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] - attrs['firewall_rules'] = fw_rule_ids[:] - data = {'firewall_policy': - {'firewall_rules': fw_rule_ids}} - req = self.new_update_request( - 'firewall_policies', data, fwp_id) - req.get_response(self.ext_api) - # test removing a rule from a policy that does not exist - self._rule_action( - 'remove', '123', - fw_rule_ids[1], - expected_code=webob.exc.HTTPNotFound.code, - expected_body=None) - # test removing a rule in the middle of the list - attrs['firewall_rules'].remove(fw_rule_ids[1]) - 
self._rule_action('remove', fwp_id, fw_rule_ids[1], - expected_body=attrs) - # test removing a rule at the top of the list - attrs['firewall_rules'].remove(fw_rule_ids[0]) - self._rule_action('remove', fwp_id, fw_rule_ids[0], - expected_body=attrs) - # test removing remaining rule in the list - attrs['firewall_rules'].remove(fw_rule_ids[2]) - self._rule_action('remove', fwp_id, fw_rule_ids[2], - expected_body=attrs) - # test removing rule that is not - #associated with the policy - self._rule_action( - 'remove', fwp_id, fw_rule_ids[2], - expected_code=webob.exc.HTTPBadRequest.code, - expected_body=None) - - def test_remove_rule_with_firewalls(self): - attrs = self._get_test_firewall_policy_attrs() - attrs['audited'] = False - attrs['firewall_list'] = [] - with self.firewall_policy() as fwp: - fwp_id = fwp['firewall_policy']['id'] - attrs['id'] = fwp_id - with contextlib.nested( - self.firewall(router_id=self._create_and_get_router(), - firewall_policy_id=fwp_id), - self.firewall(router_id=self._create_and_get_router(), - firewall_policy_id=fwp_id)) as (fw1, fw2): - attrs['firewall_list'].insert(0, fw1['firewall']['id']) - attrs['firewall_list'].insert(1, fw2['firewall']['id']) - with contextlib.nested(self.firewall_rule(name='fwr1'), - self.firewall_rule(name='fwr2'), - self.firewall_rule(name='fwr3')) as fr1: - fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] - attrs['firewall_rules'] = fw_rule_ids[:] - data = {'firewall_policy': - {'firewall_rules': fw_rule_ids}} - req = self.new_update_request( - 'firewall_policies', data, fwp_id) - req.get_response(self.ext_api) - # test removing a rule from a policy that does not exist - self._rule_action( - 'remove', '123', - fw_rule_ids[1], - expected_code=webob.exc.HTTPNotFound.code, - expected_body=None) - # test removing a rule in the middle of the list - attrs['firewall_rules'].remove(fw_rule_ids[1]) - self._rule_action('remove', fwp_id, fw_rule_ids[1], - expected_body=attrs) - # test removing a rule at the top of the list - attrs['firewall_rules'].remove(fw_rule_ids[0]) - self._rule_action('remove', fwp_id, fw_rule_ids[0], - expected_body=attrs) - # test removing remaining rule in the list - attrs['firewall_rules'].remove(fw_rule_ids[2]) - self._rule_action('remove', fwp_id, fw_rule_ids[2], - expected_body=attrs) - # test removing rule that is not - #associated with the policy - self._rule_action( - 'remove', fwp_id, fw_rule_ids[2], - expected_code=webob.exc.HTTPBadRequest.code, - expected_body=None) diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py deleted file mode 100644 index 978e159652..0000000000 --- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py +++ /dev/null @@ -1,517 +0,0 @@ -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
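The `_rule_action('insert', ...)` cases above pin down the FWaaS rule-ordering contract: with no placement hint a rule lands at the top of the policy, `insert_after` places it directly below the reference rule, and `insert_before` wins when both hints are supplied. A framework-free sketch of exactly those rules (the `insert_rule` helper and the plain list-of-ids representation are assumptions for illustration):

```python
# Ordering semantics asserted by the insert tests above, reduced to a
# pure function over a list of rule ids.
def insert_rule(rules, rule_id, insert_before=None, insert_after=None):
    rules = list(rules)                      # work on a copy
    if insert_before in rules:               # insert_before wins if both set
        rules.insert(rules.index(insert_before), rule_id)
    elif insert_after in rules:
        rules.insert(rules.index(insert_after) + 1, rule_id)
    else:
        rules.insert(0, rule_id)             # no usable hint: top of the list
    return rules

assert insert_rule([], 'fwr0') == ['fwr0']
assert insert_rule(['fwr0'], 'fwr1', insert_after='fwr0') == ['fwr0', 'fwr1']
assert insert_rule(['fwr0', 'fwr1'], 'fwr2',
                   insert_before='fwr0',
                   insert_after='fwr1') == ['fwr2', 'fwr0', 'fwr1']
```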
-# - -import contextlib - -import testtools -from webob import exc as web_exc - -from neutron.api.v2 import attributes -from neutron import context -from neutron.extensions import loadbalancer as lb -from neutron import manager -from neutron.openstack.common import uuidutils -from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer -from neutron.tests.unit.vmware.vshield import test_edge_router - -_uuid = uuidutils.generate_uuid - -LBAAS_PLUGIN_CLASS = "neutron.plugins.vmware.plugin.NsxServicePlugin" - - -class LoadBalancerTestExtensionManager( - test_edge_router.ServiceRouterTestExtensionManager): - - def get_resources(self): - # If l3 resources have been loaded and updated by main API - # router, update the map in the l3 extension so it will load - # the same attributes as the API router - resources = super(LoadBalancerTestExtensionManager, - self).get_resources() - lb_attr_map = lb.RESOURCE_ATTRIBUTE_MAP.copy() - for res in lb.RESOURCE_ATTRIBUTE_MAP.keys(): - attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) - if attr_info: - lb.RESOURCE_ATTRIBUTE_MAP[res] = attr_info - lb_resources = lb.Loadbalancer.get_resources() - # restore the original resources once the controllers are created - lb.RESOURCE_ATTRIBUTE_MAP = lb_attr_map - resources.extend(lb_resources) - return resources - - -class TestLoadbalancerPlugin( - test_db_loadbalancer.LoadBalancerPluginDbTestCase, - test_edge_router.ServiceRouterTest): - - def vcns_loadbalancer_patch(self): - instance = self.vcns_instance - instance.return_value.create_vip.side_effect = ( - self.fc2.create_vip) - instance.return_value.get_vip.side_effect = ( - self.fc2.get_vip) - instance.return_value.update_vip.side_effect = ( - self.fc2.update_vip) - instance.return_value.delete_vip.side_effect = ( - self.fc2.delete_vip) - instance.return_value.create_pool.side_effect = ( - self.fc2.create_pool) - instance.return_value.get_pool.side_effect = ( - self.fc2.get_pool) - instance.return_value.update_pool.side_effect = ( - self.fc2.update_pool) - instance.return_value.delete_pool.side_effect = ( - self.fc2.delete_pool) - instance.return_value.create_health_monitor.side_effect = ( - self.fc2.create_health_monitor) - instance.return_value.get_health_monitor.side_effect = ( - self.fc2.get_health_monitor) - instance.return_value.update_health_monitor.side_effect = ( - self.fc2.update_health_monitor) - instance.return_value.delete_health_monitor.side_effect = ( - self.fc2.delete_health_monitor) - instance.return_value.create_app_profile.side_effect = ( - self.fc2.create_app_profile) - instance.return_value.update_app_profile.side_effect = ( - self.fc2.update_app_profile) - instance.return_value.delete_app_profile.side_effect = ( - self.fc2.delete_app_profile) - - def setUp(self): - # Save the global RESOURCE_ATTRIBUTE_MAP - self.saved_attr_map = {} - for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): - self.saved_attr_map[resource] = attrs.copy() - - super(TestLoadbalancerPlugin, self).setUp( - ext_mgr=LoadBalancerTestExtensionManager(), - lb_plugin=LBAAS_PLUGIN_CLASS) - self.vcns_loadbalancer_patch() - self.plugin = manager.NeutronManager.get_plugin() - - def tearDown(self): - super(TestLoadbalancerPlugin, self).tearDown() - # Restore the global RESOURCE_ATTRIBUTE_MAP - attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map - self.ext_api = None - self.plugin = None - - def _create_and_get_router(self): - req = self._create_router(self.fmt, self._tenant_id) - res = self.deserialize(self.fmt, req) - return res['router']['id'] - - 
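`setUp()` above snapshots every entry of the global `attributes.RESOURCE_ATTRIBUTE_MAP` and `tearDown()` puts the snapshot back, since the extension managers in these test files mutate that shared module-level map. The same save/restore idea in isolation (`ATTR_MAP` is a toy placeholder for the real global; the per-resource copies are shallow, matching what the tests do):

```python
# Snapshot/restore of mutable module-global state, as in setUp()/tearDown().
ATTR_MAP = {'routers': {'name': {}}, 'firewalls': {'shared': {}}}

# setUp(): shallow-copy each resource's attribute dict.
saved = dict((resource, attrs.copy()) for resource, attrs in ATTR_MAP.items())

ATTR_MAP['firewalls']['router_id'] = {}   # a test extends the map...
assert 'router_id' in ATTR_MAP['firewalls']

ATTR_MAP.clear()                          # tearDown(): restore the snapshot
ATTR_MAP.update(saved)
assert 'router_id' not in ATTR_MAP['firewalls']
```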
def _get_vip_optional_args(self): - args = super(TestLoadbalancerPlugin, self)._get_vip_optional_args() - return args + ('router_id',) - - def test_update_healthmonitor(self): - keys = [('type', "TCP"), - ('tenant_id', self._tenant_id), - ('delay', 20), - ('timeout', 20), - ('max_retries', 2), - ('admin_state_up', False)] - - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, health_mon, pool): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - with self.vip( - router_id=self._create_and_get_router(), - pool=pool, subnet=subnet): - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - health_mon, pool['pool']['id'] - ) - data = {'health_monitor': {'delay': 20, - 'timeout': 20, - 'max_retries': 2, - 'admin_state_up': False}} - req = self.new_update_request( - "health_monitors", - data, - health_mon['health_monitor']['id']) - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - for k, v in keys: - self.assertEqual(res['health_monitor'][k], v) - - def test_create_vip(self, **extras): - expected = { - 'name': 'vip1', - 'description': '', - 'protocol_port': 80, - 'protocol': 'HTTP', - 'connection_limit': -1, - 'admin_state_up': True, - 'status': 'ACTIVE', - 'router_id': self._create_and_get_router(), - 'tenant_id': self._tenant_id, - } - - expected.update(extras) - - name = expected['name'] - - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - expected['pool_id'] = pool['pool']['id'] - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=expected['router_id'], name=name, - pool=pool, subnet=subnet, **extras) as vip: - for k in ('id', 'address', 'port_id', 'pool_id'): - self.assertTrue(vip['vip'].get(k, None)) - self.assertEqual( - dict((k, v) - for k, v in vip['vip'].items() if k in expected), - expected - ) - - def test_create_vip_with_session_persistence(self): - self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'}) - - def test_create_vip_with_invalid_persistence_method(self): - with testtools.ExpectedException(web_exc.HTTPClientError): - self.test_create_vip( - protocol='TCP', - session_persistence={'type': 'HTTP_COOKIE'}) - - def test_create_vips_with_same_names(self): - new_router_id = self._create_and_get_router() - with self.subnet() as subnet: - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - with contextlib.nested( - self.vip( - name='vip', - router_id=new_router_id, - subnet=subnet, protocol_port=80), - self.vip( - name='vip', - router_id=new_router_id, - subnet=subnet, protocol_port=81), - self.vip( - name='vip', - router_id=new_router_id, - subnet=subnet, protocol_port=82), - ) as (vip1, vip2, vip3): - req = self.new_list_request('vips') - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - for index in range(len(res['vips'])): - self.assertEqual(res['vips'][index]['name'], 'vip') - - def test_update_vip(self): - name = 'new_vip' - router_id = self._create_and_get_router() - keys = [('router_id', router_id), - ('name', name), - ('address', "10.0.0.2"), - ('protocol_port', 80), - ('connection_limit', 100), - ('admin_state_up', False), - ('status', 'ACTIVE')] - - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - net_id = 
subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=router_id, name=name, - pool=pool, subnet=subnet) as vip: - keys.append(('subnet_id', vip['vip']['subnet_id'])) - data = {'vip': {'name': name, - 'connection_limit': 100, - 'session_persistence': - {'type': "APP_COOKIE", - 'cookie_name': "jesssionId"}, - 'admin_state_up': False}} - req = self.new_update_request( - 'vips', data, vip['vip']['id']) - res = self.deserialize(self.fmt, - req.get_response(self.ext_api)) - for k, v in keys: - self.assertEqual(res['vip'][k], v) - - def test_delete_vip(self): - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=self._create_and_get_router(), - pool=pool, subnet=subnet, do_delete=False) as vip: - req = self.new_delete_request('vips', vip['vip']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(res.status_int, 204) - - def test_delete_router_in_use_by_lbservice(self): - router_id = self._create_and_get_router() - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=router_id, - pool=pool, subnet=subnet): - self._delete('routers', router_id, - expected_code=web_exc.HTTPConflict.code) - - def test_show_vip(self): - router_id = self._create_and_get_router() - name = "vip_show" - keys = [('name', name), - ('protocol_port', 80), - ('protocol', 'HTTP'), - ('connection_limit', -1), - ('admin_state_up', True), - ('status', 'ACTIVE'), - ('router_id', router_id)] - - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=router_id, name=name, - pool=pool, subnet=subnet) as vip: - req = self.new_show_request('vips', - vip['vip']['id']) - res = self.deserialize( - self.fmt, req.get_response(self.ext_api)) - for k, v in keys: - self.assertEqual(res['vip'][k], v) - - def test_list_vips(self): - keys_list = [] - for i in range(3): - keys_list.append({'name': "vip" + str(i), - 'router_id': self._create_and_get_router(), - 'protocol_port': 80 + i, - 'protocol': "HTTP", - 'status': "ACTIVE", - 'admin_state_up': True}) - - with self.subnet() as subnet: - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - with contextlib.nested( - self.vip( - router_id=keys_list[0]['router_id'], name='vip0', - subnet=subnet, protocol_port=80), - self.vip( - router_id=keys_list[1]['router_id'], name='vip1', - subnet=subnet, protocol_port=81), - self.vip( - router_id=keys_list[2]['router_id'], name='vip2', - subnet=subnet, protocol_port=82), - ) as (vip1, vip2, vip3): - self._test_list_with_sort( - 'vip', - (vip1, vip2, vip3), - [('protocol_port', 'asc'), ('name', 'desc')] - ) - req = self.new_list_request('vips') - res = self.deserialize( - self.fmt, 
req.get_response(self.ext_api)) - self.assertEqual(len(res['vips']), 3) - for index in range(len(res['vips'])): - for k, v in keys_list[index].items(): - self.assertEqual(res['vips'][index][k], v) - - def test_update_pool(self): - data = {'pool': {'name': "new_pool", - 'admin_state_up': False}} - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=self._create_and_get_router(), - pool=pool, subnet=subnet): - req = self.new_update_request( - 'pools', data, pool['pool']['id']) - res = self.deserialize(self.fmt, - req.get_response(self.ext_api)) - for k, v in data['pool'].items(): - self.assertEqual(res['pool'][k], v) - - def test_create_member(self): - router_id = self._create_and_get_router() - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - pool_id = pool['pool']['id'] - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=router_id, - pool=pool, subnet=subnet): - with contextlib.nested( - self.member(address='192.168.1.100', - protocol_port=80, - pool_id=pool_id), - self.member(router_id=router_id, - address='192.168.1.101', - protocol_port=80, - pool_id=pool_id)) as (member1, member2): - req = self.new_show_request('pools', - pool_id, - fmt=self.fmt) - pool_update = self.deserialize( - self.fmt, - req.get_response(self.ext_api) - ) - self.assertIn(member1['member']['id'], - pool_update['pool']['members']) - self.assertIn(member2['member']['id'], - pool_update['pool']['members']) - - def _show_pool(self, pool_id): - req = self.new_show_request('pools', pool_id, fmt=self.fmt) - res = req.get_response(self.ext_api) - self.assertEqual(web_exc.HTTPOk.code, res.status_int) - return self.deserialize(self.fmt, res) - - def test_update_member(self): - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool(name="pool1"), - self.pool(name="pool2") - ) as (subnet, monitor, pool1, pool2): - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool1['pool']['id'] - ) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool2['pool']['id'] - ) - with self.vip( - router_id=self._create_and_get_router(), - pool=pool1, subnet=subnet): - keys = [('address', "192.168.1.100"), - ('tenant_id', self._tenant_id), - ('protocol_port', 80), - ('weight', 10), - ('pool_id', pool2['pool']['id']), - ('admin_state_up', False), - ('status', 'ACTIVE')] - with self.member( - pool_id=pool1['pool']['id']) as member: - - pool1_update = self._show_pool(pool1['pool']['id']) - self.assertEqual(len(pool1_update['pool']['members']), 1) - pool2_update = self._show_pool(pool2['pool']['id']) - self.assertEqual(len(pool1_update['pool']['members']), 1) - self.assertFalse(pool2_update['pool']['members']) - - data = {'member': {'pool_id': pool2['pool']['id'], - 'weight': 10, - 'admin_state_up': False}} - req = self.new_update_request('members', - data, - member['member']['id']) - raw_res = req.get_response(self.ext_api) - self.assertEqual(web_exc.HTTPOk.code, raw_res.status_int) - res = 
self.deserialize(self.fmt, raw_res) - for k, v in keys: - self.assertEqual(res['member'][k], v) - pool1_update = self._show_pool(pool1['pool']['id']) - pool2_update = self._show_pool(pool2['pool']['id']) - self.assertEqual(len(pool2_update['pool']['members']), 1) - self.assertFalse(pool1_update['pool']['members']) - - def test_delete_member(self): - with contextlib.nested( - self.subnet(), - self.health_monitor(), - self.pool() - ) as (subnet, monitor, pool): - pool_id = pool['pool']['id'] - net_id = subnet['subnet']['network_id'] - self._set_net_external(net_id) - self.plugin.create_pool_health_monitor( - context.get_admin_context(), - monitor, pool['pool']['id'] - ) - with self.vip( - router_id=self._create_and_get_router(), - pool=pool, subnet=subnet): - with self.member(pool_id=pool_id, - do_delete=False) as member: - req = self.new_delete_request('members', - member['member']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(res.status_int, 204) - pool_update = self._show_pool(pool['pool']['id']) - self.assertFalse(pool_update['pool']['members']) diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py deleted file mode 100644 index e8f0076645..0000000000 --- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
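The LBaaS tests above nest their fixtures with `contextlib.nested`, which exists only on Python 2; it was removed in Python 3 in favour of the multi-manager `with` statement and `contextlib.ExitStack`. For reference, the ExitStack equivalent, with a toy `resource` manager standing in for fixtures like `self.subnet()` and `self.pool()`:

```python
import contextlib

@contextlib.contextmanager
def resource(name):
    # Placeholder for fixtures such as self.subnet() or self.pool().
    yield {'name': name}

# Python 2:
#   with contextlib.nested(resource('subnet'), resource('pool')) as (s, p):
# Python 3 equivalent, which also handles a variable number of managers:
with contextlib.ExitStack() as stack:
    subnet, pool = [stack.enter_context(resource(n))
                    for n in ('subnet', 'pool')]
    assert subnet['name'] == 'subnet' and pool['name'] == 'pool'
```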
- -import mock - -from neutron import context -from neutron.openstack.common import uuidutils -from neutron.plugins.vmware.dbexts import vcns_db -from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc -from neutron.plugins.vmware.vshield import vcns_driver -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.vshield import fake_vcns -from neutron_lbaas.services.loadbalancer import constants as lb_constants -from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer - -_uuid = uuidutils.generate_uuid - -VSE_ID = 'edge-1' -POOL_MAP_INFO = { - 'pool_id': None, - 'edge_id': VSE_ID, - 'pool_vseid': 'pool-1'} - -VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") - - -class VcnsDriverTestCase(test_db_loadbalancer.LoadBalancerPluginDbTestCase): - - def vcns_loadbalancer_patch(self): - instance = self.mock_vcns.start() - instance.return_value.create_vip.side_effect = ( - self.fc2.create_vip) - instance.return_value.get_vip.side_effect = ( - self.fc2.get_vip) - instance.return_value.update_vip.side_effect = ( - self.fc2.update_vip) - instance.return_value.delete_vip.side_effect = ( - self.fc2.delete_vip) - instance.return_value.create_pool.side_effect = ( - self.fc2.create_pool) - instance.return_value.get_pool.side_effect = ( - self.fc2.get_pool) - instance.return_value.update_pool.side_effect = ( - self.fc2.update_pool) - instance.return_value.delete_pool.side_effect = ( - self.fc2.delete_pool) - instance.return_value.create_health_monitor.side_effect = ( - self.fc2.create_health_monitor) - instance.return_value.get_health_monitor.side_effect = ( - self.fc2.get_health_monitor) - instance.return_value.update_health_monitor.side_effect = ( - self.fc2.update_health_monitor) - instance.return_value.delete_health_monitor.side_effect = ( - self.fc2.delete_health_monitor) - instance.return_value.create_app_profile.side_effect = ( - self.fc2.create_app_profile) - instance.return_value.update_app_profile.side_effect = ( - self.fc2.update_app_profile) - instance.return_value.delete_app_profile.side_effect = ( - self.fc2.delete_app_profile) - self.pool_id = None - self.vip_id = None - - def setUp(self): - - self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) - # mock vcns - self.fc2 = fake_vcns.FakeVcns(unique_router_name=False) - self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) - self.vcns_loadbalancer_patch() - - self.driver = vcns_driver.VcnsDriver(mock.Mock()) - - super(VcnsDriverTestCase, self).setUp() - self.addCleanup(self.fc2.reset_all) - self.addCleanup(self.mock_vcns.stop) - - def tearDown(self): - super(VcnsDriverTestCase, self).tearDown() - - -class TestEdgeLbDriver(VcnsDriverTestCase): - - def test_create_and_get_vip(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as pool: - self.pool_id = pool['pool']['id'] - POOL_MAP_INFO['pool_id'] = pool['pool']['id'] - vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) - with self.vip(pool=pool) as res: - vip_create = res['vip'] - self.driver.create_vip(ctx, VSE_ID, vip_create) - vip_get = self.driver.get_vip(ctx, vip_create['id']) - for k, v in vip_get.iteritems(): - self.assertEqual(vip_create[k], v) - - def test_create_two_vips_with_same_name(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as pool: - self.pool_id = pool['pool']['id'] - POOL_MAP_INFO['pool_id'] = pool['pool']['id'] - vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) - with self.vip(pool=pool) as res: - vip_create = res['vip'] - 
self.driver.create_vip(ctx, VSE_ID, vip_create) - self.assertRaises(vcns_exc.Forbidden, - self.driver.create_vip, - ctx, VSE_ID, vip_create) - - def test_convert_app_profile(self): - app_profile_name = 'app_profile_name' - sess_persist1 = {'type': "SOURCE_IP"} - sess_persist2 = {'type': "HTTP_COOKIE"} - sess_persist3 = {'type': "APP_COOKIE", - 'cookie_name': "app_cookie_name"} - # protocol is HTTP and type is SOURCE_IP - expect_vcns_app_profile1 = { - 'insertXForwardedFor': False, - 'name': app_profile_name, - 'serverSslEnabled': False, - 'sslPassthrough': False, - 'template': lb_constants.PROTOCOL_HTTP, - 'persistence': {'method': 'sourceip'}} - vcns_app_profile = self.driver._convert_app_profile( - app_profile_name, sess_persist1, lb_constants.PROTOCOL_HTTP) - for k, v in expect_vcns_app_profile1.iteritems(): - self.assertEqual(vcns_app_profile[k], v) - # protocol is HTTP and type is HTTP_COOKIE and APP_COOKIE - expect_vcns_app_profile2 = { - 'insertXForwardedFor': False, - 'name': app_profile_name, - 'serverSslEnabled': False, - 'sslPassthrough': False, - 'template': lb_constants.PROTOCOL_HTTP, - 'persistence': {'method': 'cookie', - 'cookieName': 'default_cookie_name', - 'cookieMode': 'insert'}} - vcns_app_profile = self.driver._convert_app_profile( - app_profile_name, sess_persist2, lb_constants.PROTOCOL_HTTP) - for k, v in expect_vcns_app_profile2.iteritems(): - self.assertEqual(vcns_app_profile[k], v) - expect_vcns_app_profile3 = { - 'insertXForwardedFor': False, - 'name': app_profile_name, - 'serverSslEnabled': False, - 'sslPassthrough': False, - 'template': lb_constants.PROTOCOL_HTTP, - 'persistence': {'method': 'cookie', - 'cookieName': sess_persist3['cookie_name'], - 'cookieMode': 'app'}} - vcns_app_profile = self.driver._convert_app_profile( - app_profile_name, sess_persist3, lb_constants.PROTOCOL_HTTP) - for k, v in expect_vcns_app_profile3.iteritems(): - self.assertEqual(vcns_app_profile[k], v) - # protocol is HTTPS and type is SOURCE_IP - expect_vcns_app_profile1 = { - 'insertXForwardedFor': False, - 'name': app_profile_name, - 'serverSslEnabled': False, - 'sslPassthrough': True, - 'template': lb_constants.PROTOCOL_HTTPS, - 'persistence': {'method': 'sourceip'}} - vcns_app_profile = self.driver._convert_app_profile( - app_profile_name, sess_persist1, lb_constants.PROTOCOL_HTTPS) - for k, v in expect_vcns_app_profile1.iteritems(): - self.assertEqual(vcns_app_profile[k], v) - # protocol is HTTPS, and type isn't SOURCE_IP - self.assertRaises(vcns_exc.VcnsBadRequest, - self.driver._convert_app_profile, - app_profile_name, - sess_persist2, lb_constants.PROTOCOL_HTTPS) - self.assertRaises(vcns_exc.VcnsBadRequest, - self.driver._convert_app_profile, - app_profile_name, - sess_persist3, lb_constants.PROTOCOL_HTTPS) - # protocol is TCP and type is SOURCE_IP - expect_vcns_app_profile1 = { - 'insertXForwardedFor': False, - 'name': app_profile_name, - 'serverSslEnabled': False, - 'sslPassthrough': False, - 'template': lb_constants.PROTOCOL_TCP, - 'persistence': {'method': 'sourceip'}} - vcns_app_profile = self.driver._convert_app_profile( - app_profile_name, sess_persist1, lb_constants.PROTOCOL_TCP) - for k, v in expect_vcns_app_profile1.iteritems(): - self.assertEqual(vcns_app_profile[k], v) - # protocol is TCP, and type isn't SOURCE_IP - self.assertRaises(vcns_exc.VcnsBadRequest, - self.driver._convert_app_profile, - app_profile_name, - sess_persist2, lb_constants.PROTOCOL_TCP) - self.assertRaises(vcns_exc.VcnsBadRequest, - self.driver._convert_app_profile, - app_profile_name, - 
sess_persist3, lb_constants.PROTOCOL_TCP) - - def test_update_vip(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as pool: - self.pool_id = pool['pool']['id'] - POOL_MAP_INFO['pool_id'] = pool['pool']['id'] - vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) - with self.vip(pool=pool) as res: - vip_create = res['vip'] - self.driver.create_vip(ctx, VSE_ID, vip_create) - vip_update = {'id': vip_create['id'], - 'pool_id': pool['pool']['id'], - 'name': 'update_name', - 'description': 'description', - 'address': 'update_address', - 'port_id': 'update_port_id', - 'protocol_port': 'protocol_port', - 'protocol': 'update_protocol'} - self.driver.update_vip(ctx, vip_update) - vip_get = self.driver.get_vip(ctx, vip_create['id']) - for k, v in vip_get.iteritems(): - if k in vip_update: - self.assertEqual(vip_update[k], v) - - def test_delete_vip(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as pool: - self.pool_id = pool['pool']['id'] - POOL_MAP_INFO['pool_id'] = pool['pool']['id'] - vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) - with self.vip(pool=pool) as res: - vip_create = res['vip'] - self.driver.create_vip(ctx, VSE_ID, vip_create) - self.driver.delete_vip(ctx, vip_create['id']) - self.assertRaises(vcns_exc.VcnsNotFound, - self.driver.get_vip, - ctx, - vip_create['id']) - - #Test Pool Operation - def test_create_and_get_pool(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as p: - self.pool_id = p['pool']['id'] - pool_create = p['pool'] - self.driver.create_pool(ctx, VSE_ID, pool_create, []) - pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID) - for k, v in pool_get.iteritems(): - self.assertEqual(pool_create[k], v) - - def test_create_two_pools_with_same_name(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as p: - self.pool_id = p['pool']['id'] - pool_create = p['pool'] - self.driver.create_pool(ctx, VSE_ID, pool_create, []) - self.assertRaises(vcns_exc.Forbidden, - self.driver.create_pool, - ctx, VSE_ID, pool_create, []) - - def test_update_pool(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as p: - self.pool_id = p['pool']['id'] - pool_create = p['pool'] - self.driver.create_pool(ctx, VSE_ID, pool_create, []) - pool_update = {'id': pool_create['id'], - 'lb_method': 'lb_method', - 'name': 'update_name', - 'members': [], - 'health_monitors': []} - self.driver.update_pool(ctx, VSE_ID, pool_update, []) - pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID) - for k, v in pool_get.iteritems(): - if k in pool_update: - self.assertEqual(pool_update[k], v) - - def test_delete_pool(self): - ctx = context.get_admin_context() - with self.pool(do_delete=False) as p: - self.pool_id = p['pool']['id'] - pool_create = p['pool'] - self.driver.create_pool(ctx, VSE_ID, pool_create, []) - self.driver.delete_pool(ctx, pool_create['id'], VSE_ID) - self.assertRaises(vcns_exc.VcnsNotFound, - self.driver.get_pool, - ctx, - pool_create['id'], - VSE_ID) - - def test_create_and_get_monitor(self): - ctx = context.get_admin_context() - with self.health_monitor(do_delete=False) as m: - monitor_create = m['health_monitor'] - self.driver.create_health_monitor(ctx, VSE_ID, monitor_create) - monitor_get = self.driver.get_health_monitor( - ctx, monitor_create['id'], VSE_ID) - for k, v in monitor_get.iteritems(): - self.assertEqual(monitor_create[k], v) - - def test_update_health_monitor(self): - ctx = context.get_admin_context() - 
with self.health_monitor(do_delete=False) as m: - monitor_create = m['health_monitor'] - self.driver.create_health_monitor( - ctx, VSE_ID, monitor_create) - monitor_update = {'id': monitor_create['id'], - 'delay': 'new_delay', - 'timeout': "new_timeout", - 'type': 'type', - 'max_retries': "max_retries"} - self.driver.update_health_monitor( - ctx, VSE_ID, monitor_create, monitor_update) - monitor_get = self.driver.get_health_monitor( - ctx, monitor_create['id'], VSE_ID) - for k, v in monitor_get.iteritems(): - if k in monitor_update: - self.assertEqual(monitor_update[k], v) - - def test_delete_health_monitor(self): - ctx = context.get_admin_context() - with self.health_monitor(do_delete=False) as m: - monitor_create = m['health_monitor'] - self.driver.create_health_monitor(ctx, VSE_ID, monitor_create) - self.driver.delete_health_monitor( - ctx, monitor_create['id'], VSE_ID) - self.assertRaises(vcns_exc.VcnsNotFound, - self.driver.get_health_monitor, - ctx, - monitor_create['id'], - VSE_ID) diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py b/vmware-nsx/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py deleted file mode 100644 index b6e1fa20c8..0000000000 --- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2014 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
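`test_convert_app_profile` above fixes the mapping from Neutron session-persistence settings to VCNS application profiles: `SOURCE_IP` is accepted for every protocol, the cookie-based types only for HTTP, and HTTPS is the only protocol that enables SSL passthrough. A condensed sketch derived from those expectations (protocol constants are inlined, and `ValueError` stands in for the driver's `VcnsBadRequest`):

```python
# Mapping pinned down by test_convert_app_profile above, as a plain function.
def convert_app_profile(name, persistence, protocol):
    profile = {'name': name,
               'insertXForwardedFor': False,
               'serverSslEnabled': False,
               'sslPassthrough': protocol == 'HTTPS',
               'template': protocol}
    ptype = persistence['type']
    if ptype == 'SOURCE_IP':                      # allowed for any protocol
        profile['persistence'] = {'method': 'sourceip'}
    elif protocol == 'HTTP' and ptype == 'HTTP_COOKIE':
        profile['persistence'] = {'method': 'cookie',
                                  'cookieName': 'default_cookie_name',
                                  'cookieMode': 'insert'}
    elif protocol == 'HTTP' and ptype == 'APP_COOKIE':
        profile['persistence'] = {'method': 'cookie',
                                  'cookieName': persistence['cookie_name'],
                                  'cookieMode': 'app'}
    else:                                         # cookie types on HTTPS/TCP
        raise ValueError('unsupported persistence for %s' % protocol)
    return profile

p = convert_app_profile('ap', {'type': 'SOURCE_IP'}, 'HTTPS')
assert p['sslPassthrough'] and p['persistence'] == {'method': 'sourceip'}
```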
- -import contextlib -import copy - -import webob.exc - -from neutron.api.v2 import attributes -from neutron.db.vpn import vpn_db -from neutron.extensions import vpnaas -from neutron import manager -from neutron.openstack.common import uuidutils -from neutron.tests.unit.db.vpn import test_db_vpnaas -from neutron.tests.unit.vmware.vshield import test_edge_router - -_uuid = uuidutils.generate_uuid - - -class VPNTestExtensionManager( - test_edge_router.ServiceRouterTestExtensionManager): - - def get_resources(self): - # If l3 resources have been loaded and updated by main API - # router, update the map in the l3 extension so it will load - # the same attributes as the API router - resources = super(VPNTestExtensionManager, self).get_resources() - vpn_attr_map = copy.deepcopy(vpnaas.RESOURCE_ATTRIBUTE_MAP) - for res in vpnaas.RESOURCE_ATTRIBUTE_MAP.keys(): - attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) - if attr_info: - vpnaas.RESOURCE_ATTRIBUTE_MAP[res] = attr_info - vpn_resources = vpnaas.Vpnaas.get_resources() - # restore the original resources once the controllers are created - vpnaas.RESOURCE_ATTRIBUTE_MAP = vpn_attr_map - resources.extend(vpn_resources) - return resources - - -class TestVpnPlugin(test_db_vpnaas.VPNTestMixin, - test_edge_router.ServiceRouterTest): - - def vcns_vpn_patch(self): - instance = self.vcns_instance - instance.return_value.update_ipsec_config.side_effect = ( - self.fc2.update_ipsec_config) - instance.return_value.get_ipsec_config.side_effect = ( - self.fc2.get_ipsec_config) - instance.return_value.delete_ipsec_config.side_effect = ( - self.fc2.delete_ipsec_config) - - def setUp(self): - # Save the global RESOURCE_ATTRIBUTE_MAP - self.saved_attr_map = {} - for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items(): - self.saved_attr_map[resource] = attrs.copy() - - super(TestVpnPlugin, self).setUp(ext_mgr=VPNTestExtensionManager()) - self.vcns_vpn_patch() - self.plugin = manager.NeutronManager.get_plugin() - self.router_id = None - - def tearDown(self): - super(TestVpnPlugin, self).tearDown() - # Restore the global RESOURCE_ATTRIBUTE_MAP - attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map - self.ext_api = None - self.plugin = None - - @contextlib.contextmanager - def router(self, vlan_id=None): - with self._create_l3_ext_network(vlan_id) as net: - with self.subnet(cidr='100.0.0.0/24', network=net) as s: - data = {'router': {'tenant_id': self._tenant_id}} - data['router']['service_router'] = True - router_req = self.new_create_request('routers', data, self.fmt) - - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - self._add_external_gateway_to_router( - router['router']['id'], - s['subnet']['network_id']) - router = self._show('routers', router['router']['id']) - yield router - - self._delete('routers', router['router']['id']) - - def test_create_vpnservice(self, **extras): - """Test case to create a vpnservice.""" - description = 'my-vpn-service' - expected = {'name': 'vpnservice1', - 'description': 'my-vpn-service', - 'admin_state_up': True, - 'status': 'ACTIVE', - 'tenant_id': self._tenant_id, } - - expected.update(extras) - with self.subnet(cidr='10.2.0.0/24') as subnet: - with self.router() as router: - expected['router_id'] = router['router']['id'] - expected['subnet_id'] = subnet['subnet']['id'] - name = expected['name'] - with self.vpnservice(name=name, - subnet=subnet, - router=router, - description=description, - **extras) as vpnservice: - self.assertEqual(dict((k, v) for k, v in - 
vpnservice['vpnservice'].items() - if k in expected), - expected) - - def test_create_vpnservices_with_same_router(self, **extras): - """Test case to create two vpnservices with same router.""" - with self.subnet(cidr='10.2.0.0/24') as subnet: - with self.router() as router: - with self.vpnservice(name='vpnservice1', - subnet=subnet, - router=router): - res = self._create_vpnservice( - 'json', 'vpnservice2', True, - router_id=(router['router']['id']), - subnet_id=(subnet['subnet']['id'])) - self.assertEqual( - res.status_int, webob.exc.HTTPConflict.code) - - def test_update_vpnservice(self): - """Test case to update a vpnservice.""" - name = 'new_vpnservice1' - expected = [('name', name)] - with contextlib.nested( - self.subnet(cidr='10.2.0.0/24'), - self.router()) as (subnet, router): - with self.vpnservice(name=name, - subnet=subnet, - router=router) as vpnservice: - expected.append(('subnet_id', - vpnservice['vpnservice']['subnet_id'])) - expected.append(('router_id', - vpnservice['vpnservice']['router_id'])) - data = {'vpnservice': {'name': name, - 'admin_state_up': False}} - expected.append(('admin_state_up', False)) - self._set_active(vpn_db.VPNService, - vpnservice['vpnservice']['id']) - req = self.new_update_request( - 'vpnservices', - data, - vpnservice['vpnservice']['id']) - res = self.deserialize(self.fmt, - req.get_response(self.ext_api)) - for k, v in expected: - self.assertEqual(res['vpnservice'][k], v) - - def test_delete_vpnservice(self): - """Test case to delete a vpnservice.""" - with self.subnet(cidr='10.2.0.0/24') as subnet: - with self.router() as router: - with self.vpnservice(name='vpnservice', - subnet=subnet, - router=router, - do_delete=False) as vpnservice: - req = self.new_delete_request( - 'vpnservices', vpnservice['vpnservice']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(res.status_int, 204) - - def test_delete_router_in_use_by_vpnservice(self): - """Test delete router in use by vpn service.""" - with self.subnet(cidr='10.2.0.0/24') as subnet: - with self.router() as router: - with self.vpnservice(subnet=subnet, - router=router): - self._delete('routers', router['router']['id'], - expected_code=webob.exc.HTTPConflict.code) - - def _test_create_ipsec_site_connection(self, key_overrides=None, - ike_key_overrides=None, - ipsec_key_overrides=None, - setup_overrides=None, - expected_status_int=200): - """Create ipsec_site_connection and check results.""" - params = {'ikename': 'ikepolicy1', - 'ipsecname': 'ipsecpolicy1', - 'vpnsname': 'vpnservice1', - 'subnet_cidr': '10.2.0.0/24', - 'subnet_version': 4} - if setup_overrides: - params.update(setup_overrides) - expected = {'name': 'connection1', - 'description': 'my-ipsec-connection', - 'peer_address': '192.168.1.10', - 'peer_id': '192.168.1.10', - 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], - 'initiator': 'bi-directional', - 'mtu': 1500, - 'tenant_id': self._tenant_id, - 'psk': 'abcd', - 'status': 'ACTIVE', - 'admin_state_up': True} - if key_overrides: - expected.update(key_overrides) - - ike_expected = {'name': params['ikename'], - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'ike_version': 'v1', - 'pfs': 'group5'} - if ike_key_overrides: - ike_expected.update(ike_key_overrides) - - ipsec_expected = {'name': params['ipsecname'], - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group5'} - if ipsec_key_overrides: - ipsec_expected.update(ipsec_key_overrides) - - dpd = {'action': 'hold', - 'interval': 40, - 'timeout': 120} - with 
contextlib.nested( - self.ikepolicy(self.fmt, ike_expected['name'], - ike_expected['auth_algorithm'], - ike_expected['encryption_algorithm'], - ike_version=ike_expected['ike_version'], - pfs=ike_expected['pfs']), - self.ipsecpolicy(self.fmt, ipsec_expected['name'], - ipsec_expected['auth_algorithm'], - ipsec_expected['encryption_algorithm'], - pfs=ipsec_expected['pfs']), - self.subnet(cidr=params['subnet_cidr'], - ip_version=params['subnet_version']), - self.router()) as ( - ikepolicy, ipsecpolicy, subnet, router): - with self.vpnservice(name=params['vpnsname'], subnet=subnet, - router=router) as vpnservice1: - expected['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] - expected['ipsecpolicy_id'] = ( - ipsecpolicy['ipsecpolicy']['id'] - ) - expected['vpnservice_id'] = ( - vpnservice1['vpnservice']['id'] - ) - try: - with self.ipsec_site_connection( - self.fmt, - expected['name'], - expected['peer_address'], - expected['peer_id'], - expected['peer_cidrs'], - expected['mtu'], - expected['psk'], - expected['initiator'], - dpd['action'], - dpd['interval'], - dpd['timeout'], - vpnservice1, - ikepolicy, - ipsecpolicy, - expected['admin_state_up'], - description=expected['description'] - ) as ipsec_site_connection: - if expected_status_int != 200: - self.fail("Expected failure on create") - self._check_ipsec_site_connection( - ipsec_site_connection['ipsec_site_connection'], - expected, - dpd) - except webob.exc.HTTPClientError as ce: - self.assertEqual(ce.code, expected_status_int) - - def test_create_ipsec_site_connection(self, **extras): - """Test case to create an ipsec_site_connection.""" - self._test_create_ipsec_site_connection(key_overrides=extras) - - def test_create_ipsec_site_connection_invalid_ikepolicy(self): - self._test_create_ipsec_site_connection( - ike_key_overrides={'ike_version': 'v2'}, - expected_status_int=400) - - def test_create_ipsec_site_connection_invalid_ipsecpolicy(self): - self._test_create_ipsec_site_connection( - ipsec_key_overrides={'encryption_algorithm': 'aes-192'}, - expected_status_int=400) - self._test_create_ipsec_site_connection( - ipsec_key_overrides={'pfs': 'group14'}, - expected_status_int=400) - - def _test_update_ipsec_site_connection(self, - update={'name': 'new name'}, - overrides=None, - expected_status_int=200): - """Creates and then updates ipsec_site_connection.""" - expected = {'name': 'new_ipsec_site_connection', - 'ikename': 'ikepolicy1', - 'ipsecname': 'ipsecpolicy1', - 'vpnsname': 'vpnservice1', - 'description': 'my-ipsec-connection', - 'peer_address': '192.168.1.10', - 'peer_id': '192.168.1.10', - 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], - 'initiator': 'bi-directional', - 'mtu': 1500, - 'tenant_id': self._tenant_id, - 'psk': 'abcd', - 'status': 'ACTIVE', - 'admin_state_up': True, - 'action': 'hold', - 'interval': 40, - 'timeout': 120, - 'subnet_cidr': '10.2.0.0/24', - 'subnet_version': 4, - 'make_active': True} - if overrides: - expected.update(overrides) - - with contextlib.nested( - self.ikepolicy(name=expected['ikename']), - self.ipsecpolicy(name=expected['ipsecname']), - self.subnet(cidr=expected['subnet_cidr'], - ip_version=expected['subnet_version']), - self.router() - ) as (ikepolicy, ipsecpolicy, subnet, router): - with self.vpnservice(name=expected['vpnsname'], subnet=subnet, - router=router) as vpnservice1: - expected['vpnservice_id'] = vpnservice1['vpnservice']['id'] - expected['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] - expected['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id'] - with self.ipsec_site_connection( 
- self.fmt, - expected['name'], - expected['peer_address'], - expected['peer_id'], - expected['peer_cidrs'], - expected['mtu'], - expected['psk'], - expected['initiator'], - expected['action'], - expected['interval'], - expected['timeout'], - vpnservice1, - ikepolicy, - ipsecpolicy, - expected['admin_state_up'], - description=expected['description'] - ) as ipsec_site_connection: - data = {'ipsec_site_connection': update} - if expected.get('make_active'): - self._set_active( - vpn_db.IPsecSiteConnection, - (ipsec_site_connection['ipsec_site_connection'] - ['id'])) - req = self.new_update_request( - 'ipsec-site-connections', - data, - ipsec_site_connection['ipsec_site_connection']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(expected_status_int, res.status_int) - if expected_status_int == 200: - res_dict = self.deserialize(self.fmt, res) - for k, v in update.items(): - self.assertEqual( - res_dict['ipsec_site_connection'][k], v) - - def test_update_ipsec_site_connection(self): - """Test case for valid updates to IPSec site connection.""" - dpd = {'action': 'hold', - 'interval': 40, - 'timeout': 120} - self._test_update_ipsec_site_connection(update={'dpd': dpd}) - self._test_update_ipsec_site_connection(update={'mtu': 2000}) - - def test_delete_ipsec_site_connection(self): - """Test case to delete a ipsec_site_connection.""" - with self.ipsec_site_connection( - do_delete=False) as ipsec_site_connection: - req = self.new_delete_request( - 'ipsec-site-connections', - ipsec_site_connection['ipsec_site_connection']['id'] - ) - res = req.get_response(self.ext_api) - self.assertEqual(res.status_int, 204) diff --git a/vmware_nsx/__init__.py b/vmware_nsx/__init__.py new file mode 100644 index 0000000000..de40ea7ca0 --- /dev/null +++ b/vmware_nsx/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/vmware-nsx/__init__.py b/vmware_nsx/neutron/__init__.py similarity index 100% rename from vmware-nsx/__init__.py rename to vmware_nsx/neutron/__init__.py diff --git a/vmware-nsx/neutron/__init__.py b/vmware_nsx/neutron/plugins/__init__.py similarity index 100% rename from vmware-nsx/neutron/__init__.py rename to vmware_nsx/neutron/plugins/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/__init__.py b/vmware_nsx/neutron/plugins/vmware/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/__init__.py rename to vmware_nsx/neutron/plugins/vmware/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/__init__.py b/vmware_nsx/neutron/plugins/vmware/api_client/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/__init__.py rename to vmware_nsx/neutron/plugins/vmware/api_client/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/base.py b/vmware_nsx/neutron/plugins/vmware/api_client/base.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/base.py rename to vmware_nsx/neutron/plugins/vmware/api_client/base.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/client.py b/vmware_nsx/neutron/plugins/vmware/api_client/client.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/client.py rename to vmware_nsx/neutron/plugins/vmware/api_client/client.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/eventlet_client.py b/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/eventlet_client.py 
rename to vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/eventlet_request.py b/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/eventlet_request.py rename to vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/exception.py b/vmware_nsx/neutron/plugins/vmware/api_client/exception.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/exception.py rename to vmware_nsx/neutron/plugins/vmware/api_client/exception.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/request.py b/vmware_nsx/neutron/plugins/vmware/api_client/request.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/request.py rename to vmware_nsx/neutron/plugins/vmware/api_client/request.py diff --git a/vmware-nsx/neutron/plugins/vmware/api_client/version.py b/vmware_nsx/neutron/plugins/vmware/api_client/version.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/api_client/version.py rename to vmware_nsx/neutron/plugins/vmware/api_client/version.py diff --git a/vmware-nsx/neutron/plugins/vmware/check_nsx_config.py b/vmware_nsx/neutron/plugins/vmware/check_nsx_config.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/check_nsx_config.py rename to vmware_nsx/neutron/plugins/vmware/check_nsx_config.py diff --git a/vmware-nsx/neutron/plugins/__init__.py b/vmware_nsx/neutron/plugins/vmware/common/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/__init__.py rename to vmware_nsx/neutron/plugins/vmware/common/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/config.py b/vmware_nsx/neutron/plugins/vmware/common/config.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/common/config.py rename to vmware_nsx/neutron/plugins/vmware/common/config.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/exceptions.py b/vmware_nsx/neutron/plugins/vmware/common/exceptions.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/common/exceptions.py rename to vmware_nsx/neutron/plugins/vmware/common/exceptions.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/nsx_utils.py b/vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/common/nsx_utils.py rename to vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/securitygroups.py b/vmware_nsx/neutron/plugins/vmware/common/securitygroups.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/common/securitygroups.py rename to vmware_nsx/neutron/plugins/vmware/common/securitygroups.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/sync.py b/vmware_nsx/neutron/plugins/vmware/common/sync.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/common/sync.py rename to vmware_nsx/neutron/plugins/vmware/common/sync.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/utils.py b/vmware_nsx/neutron/plugins/vmware/common/utils.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/common/utils.py rename to vmware_nsx/neutron/plugins/vmware/common/utils.py diff --git a/vmware-nsx/neutron/plugins/vmware/common/__init__.py b/vmware_nsx/neutron/plugins/vmware/dbexts/__init__.py similarity index 100% rename from 
vmware-nsx/neutron/plugins/vmware/common/__init__.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/db.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/db.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/db.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/lsn_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/lsn_db.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/maclearning.py b/vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/maclearning.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/models.py b/vmware_nsx/neutron/plugins/vmware/dbexts/models.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/models.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/models.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/networkgw_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/networkgw_db.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/qos_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/qos_db.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/servicerouter.py b/vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py similarity index 93% rename from vmware-nsx/neutron/plugins/vmware/dbexts/servicerouter.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py index 81ffb714f7..cdbc4dce61 100644 --- a/vmware-nsx/neutron/plugins/vmware/dbexts/servicerouter.py +++ b/vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py @@ -14,7 +14,7 @@ # from neutron.db import l3_dvr_db -from neutron.plugins.vmware.extensions import servicerouter +from vmware_nsx.neutron.plugins.vmware.extensions import servicerouter class ServiceRouter_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin): diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/vcns_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/vcns_db.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/vcns_models.py b/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/vcns_models.py rename to vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py diff --git a/vmware-nsx/neutron/plugins/vmware/dbexts/__init__.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dbexts/__init__.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/combined.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/combined.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py diff --git 
a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/constants.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/constants.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/constants.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/constants.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/migration.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/migration.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/nsx.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/nsx.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/rpc.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/rpc.py rename to vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcpmeta_modes.py b/vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcpmeta_modes.py rename to vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py diff --git a/vmware-nsx/neutron/plugins/vmware/dhcp_meta/__init__.py b/vmware_nsx/neutron/plugins/vmware/extensions/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/dhcp_meta/__init__.py rename to vmware_nsx/neutron/plugins/vmware/extensions/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/lsn.py b/vmware_nsx/neutron/plugins/vmware/extensions/lsn.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/extensions/lsn.py rename to vmware_nsx/neutron/plugins/vmware/extensions/lsn.py diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/maclearning.py b/vmware_nsx/neutron/plugins/vmware/extensions/maclearning.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/extensions/maclearning.py rename to vmware_nsx/neutron/plugins/vmware/extensions/maclearning.py diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/networkgw.py b/vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/extensions/networkgw.py rename to vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/nvp_qos.py b/vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/extensions/nvp_qos.py rename to vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/qos.py b/vmware_nsx/neutron/plugins/vmware/extensions/qos.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/extensions/qos.py rename to vmware_nsx/neutron/plugins/vmware/extensions/qos.py diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/servicerouter.py b/vmware_nsx/neutron/plugins/vmware/extensions/servicerouter.py similarity index 100% rename from 
vmware-nsx/neutron/plugins/vmware/extensions/servicerouter.py rename to vmware_nsx/neutron/plugins/vmware/extensions/servicerouter.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsx_cluster.py b/vmware_nsx/neutron/plugins/vmware/nsx_cluster.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsx_cluster.py rename to vmware_nsx/neutron/plugins/vmware/nsx_cluster.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/__init__.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/__init__.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/l2gateway.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/l2gateway.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/lsn.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/lsn.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/queue.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/queue.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/router.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/router.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/router.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/router.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/secgroup.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/secgroup.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/switch.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/switch.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/switch.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/switch.py diff --git a/vmware-nsx/neutron/plugins/vmware/nsxlib/versioning.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/versioning.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/nsxlib/versioning.py rename to vmware_nsx/neutron/plugins/vmware/nsxlib/versioning.py diff --git a/vmware-nsx/neutron/plugins/vmware/plugin.py b/vmware_nsx/neutron/plugins/vmware/plugin.py similarity index 88% rename from vmware-nsx/neutron/plugins/vmware/plugin.py rename to vmware_nsx/neutron/plugins/vmware/plugin.py index f5ea3dba18..abe346876c 100644 --- a/vmware-nsx/neutron/plugins/vmware/plugin.py +++ b/vmware_nsx/neutron/plugins/vmware/plugin.py @@ -16,7 +16,5 @@ # from neutron.plugins.vmware.plugins import base -from neutron.plugins.vmware.plugins import service NsxPlugin = base.NsxPluginV2 -NsxServicePlugin = service.NsxAdvancedPlugin diff --git a/vmware-nsx/neutron/plugins/vmware/extensions/__init__.py b/vmware_nsx/neutron/plugins/vmware/plugins/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/extensions/__init__.py rename to vmware_nsx/neutron/plugins/vmware/plugins/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/plugins/base.py b/vmware_nsx/neutron/plugins/vmware/plugins/base.py similarity index 100% rename from 
vmware-nsx/neutron/plugins/vmware/plugins/base.py rename to vmware_nsx/neutron/plugins/vmware/plugins/base.py diff --git a/vmware-nsx/neutron/plugins/vmware/shell/__init__.py b/vmware_nsx/neutron/plugins/vmware/shell/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/shell/__init__.py rename to vmware_nsx/neutron/plugins/vmware/shell/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/shell/commands.py b/vmware_nsx/neutron/plugins/vmware/shell/commands.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/shell/commands.py rename to vmware_nsx/neutron/plugins/vmware/shell/commands.py diff --git a/vmware-nsx/neutron/plugins/vmware/plugins/__init__.py b/vmware_nsx/neutron/plugins/vmware/vshield/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/plugins/__init__.py rename to vmware_nsx/neutron/plugins/vmware/vshield/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py rename to vmware_nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/__init__.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/__init__.py rename to vmware_nsx/neutron/plugins/vmware/vshield/common/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/common/constants.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/constants.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/common/constants.py rename to vmware_nsx/neutron/plugins/vmware/vshield/common/constants.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/common/exceptions.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/exceptions.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/common/exceptions.py rename to vmware_nsx/neutron/plugins/vmware/vshield/common/exceptions.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py rename to vmware_nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py rename to vmware_nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py rename to vmware_nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py rename to vmware_nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/common/__init__.py 
b/vmware_nsx/neutron/plugins/vmware/vshield/tasks/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/common/__init__.py rename to vmware_nsx/neutron/plugins/vmware/vshield/tasks/__init__.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/tasks/constants.py b/vmware_nsx/neutron/plugins/vmware/vshield/tasks/constants.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/tasks/constants.py rename to vmware_nsx/neutron/plugins/vmware/vshield/tasks/constants.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/tasks/tasks.py b/vmware_nsx/neutron/plugins/vmware/vshield/tasks/tasks.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/tasks/tasks.py rename to vmware_nsx/neutron/plugins/vmware/vshield/tasks/tasks.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/vcns.py b/vmware_nsx/neutron/plugins/vmware/vshield/vcns.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/vcns.py rename to vmware_nsx/neutron/plugins/vmware/vshield/vcns.py diff --git a/vmware-nsx/neutron/plugins/vmware/vshield/tasks/__init__.py b/vmware_nsx/neutron/tests/__init__.py similarity index 100% rename from vmware-nsx/neutron/plugins/vmware/vshield/tasks/__init__.py rename to vmware_nsx/neutron/tests/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/__init__.py b/vmware_nsx/neutron/tests/unit/__init__.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/__init__.py rename to vmware_nsx/neutron/tests/unit/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/skip_this_dir__init__.py b/vmware_nsx/neutron/tests/unit/vmware/__init__.py similarity index 92% rename from vmware-nsx/neutron/tests/unit/vmware/skip_this_dir__init__.py rename to vmware_nsx/neutron/tests/unit/vmware/__init__.py index df96678603..e0c4b0f9e7 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/skip_this_dir__init__.py +++ b/vmware_nsx/neutron/tests/unit/vmware/__init__.py @@ -26,7 +26,6 @@ import neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver plugin = neutron_plugin.NsxPlugin -service_plugin = neutron_plugin.NsxServicePlugin api_client = nsx_client.NsxApiClient evt_client = eventlet_client.EventletApiClient vcns_class = vcns.Vcns @@ -37,8 +36,6 @@ STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc') NSXEXT_PATH = os.path.dirname(extensions.__file__) NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__) PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__) -SERVICE_PLUGIN_NAME = '%s.%s' % (service_plugin.__module__, - service_plugin.__name__) CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__) VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__) VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__) diff --git a/vmware-nsx/neutron/tests/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/apiclient/__init__.py similarity index 100% rename from vmware-nsx/neutron/tests/__init__.py rename to vmware_nsx/neutron/tests/unit/vmware/apiclient/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/apiclient/fake.py b/vmware_nsx/neutron/tests/unit/vmware/apiclient/fake.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/apiclient/fake.py rename to vmware_nsx/neutron/tests/unit/vmware/apiclient/fake.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py similarity index 100% rename from 
vmware-nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py rename to vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py rename to vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py index ddd0bfd06c..bce6666681 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py +++ b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py @@ -24,7 +24,7 @@ from neutron.openstack.common import log as logging from neutron.plugins.vmware.api_client import eventlet_client as client from neutron.plugins.vmware.api_client import eventlet_request as request from neutron.tests import base -from neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit import vmware LOG = logging.getLogger(__name__) diff --git a/vmware-nsx/neutron/tests/unit/vmware/apiclient/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/db/__init__.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/apiclient/__init__.py rename to vmware_nsx/neutron/tests/unit/vmware/db/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/db/test_lsn_db.py b/vmware_nsx/neutron/tests/unit/vmware/db/test_lsn_db.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/db/test_lsn_db.py rename to vmware_nsx/neutron/tests/unit/vmware/db/test_lsn_db.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/db/test_nsx_db.py b/vmware_nsx/neutron/tests/unit/vmware/db/test_nsx_db.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/db/test_nsx_db.py rename to vmware_nsx/neutron/tests/unit/vmware/db/test_nsx_db.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_gwservice.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_gwservice.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_gwservice.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_gwservice.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lqueue.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lqueue.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lqueue.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lqueue.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json diff --git 
a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_security_profile.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_security_profile.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_get_security_profile.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_get_security_profile.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_gwservice.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_gwservice.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_gwservice.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_gwservice.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lqueue.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lqueue.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lqueue.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lqueue.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json similarity index 100% rename from 
vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_security_profile.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_security_profile.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_post_security_profile.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_post_security_profile.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json b/vmware_nsx/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json rename to vmware_nsx/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/neutron.conf.test b/vmware_nsx/neutron/tests/unit/vmware/etc/neutron.conf.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/neutron.conf.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/neutron.conf.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.basic.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.basic.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.basic.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.basic.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.combined.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.combined.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.combined.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.combined.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.full.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.full.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.full.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.full.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.test similarity index 100% rename from 
vmware-nsx/neutron/tests/unit/vmware/etc/nsx.ini.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/nvp.ini.full.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nvp.ini.full.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/nvp.ini.full.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/nvp.ini.full.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/etc/vcns.ini.test b/vmware_nsx/neutron/tests/unit/vmware/etc/vcns.ini.test similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/etc/vcns.ini.test rename to vmware_nsx/neutron/tests/unit/vmware/etc/vcns.ini.test diff --git a/vmware-nsx/neutron/tests/unit/vmware/db/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/__init__.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/db/__init__.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_addresspairs.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_addresspairs.py similarity index 95% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/test_addresspairs.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/test_addresspairs.py index 11d84fea75..f550f717ff 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_addresspairs.py +++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_addresspairs.py @@ -15,7 +15,7 @@ from neutron.extensions import allowedaddresspairs as addr_pair from neutron.tests.unit import test_extension_allowedaddresspairs as ext_pairs -from neutron.tests.unit.vmware import test_nsx_plugin +from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin class TestAllowedAddressPairs(test_nsx_plugin.NsxPluginV2TestCase, diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py similarity index 98% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py index 7b93432199..89b0ae45fc 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py +++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py @@ -25,8 +25,8 @@ from neutron.extensions import agent from neutron.plugins.vmware.api_client import version from neutron.plugins.vmware.common import sync from neutron.tests.unit import test_db_plugin -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.apiclient import fake +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake class MacLearningExtensionManager(object): diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py index 58ec9ce8c4..6f8bd09486 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py +++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py @@ -37,8 +37,8 @@ from neutron.tests.unit import test_api_v2 from neutron.tests.unit import test_db_plugin from neutron.tests.unit import test_extensions from neutron.tests.unit import testlib_plugin -from neutron.tests.unit import vmware 
-from neutron.tests.unit.vmware import test_nsx_plugin +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin _uuid = test_api_v2._uuid _get_path = test_api_v2._get_path diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py similarity index 93% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py index 6b07b39c64..6be1248f6e 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py +++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py @@ -18,8 +18,8 @@ import mock from neutron.common import test_lib from neutron.plugins.vmware.common import sync from neutron.tests.unit import test_extension_portsecurity as psec -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.apiclient import fake +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake class PortSecurityTestCase(psec.PortSecurityDBTestCase): diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_providernet.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_providernet.py similarity index 98% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/test_providernet.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/test_providernet.py index aaaf1d176b..f759262289 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_providernet.py +++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_providernet.py @@ -18,8 +18,8 @@ import webob.exc from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import providernet as pnet -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware import test_nsx_plugin +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase): diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py rename to vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py index 729fbb9e76..12225a25f6 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py +++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py @@ -24,8 +24,8 @@ from neutron.plugins.vmware.dbexts import qos_db from neutron.plugins.vmware.extensions import qos as ext_qos from neutron.plugins.vmware import nsxlib from neutron.tests.unit import test_extensions -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware import test_nsx_plugin +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin class QoSTestExtensionManager(object): diff --git a/vmware-nsx/neutron/tests/unit/vmware/extensions/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/__init__.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/extensions/__init__.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/base.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py similarity index 96% rename from 
vmware-nsx/neutron/tests/unit/vmware/nsxlib/base.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py index 679db7b73b..1882f6d0b0 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/base.py +++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py @@ -23,8 +23,8 @@ from neutron.plugins.vmware.common import config # noqa from neutron.plugins.vmware import nsx_cluster as cluster from neutron.tests import base from neutron.tests.unit import test_api_v2 -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.apiclient import fake +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake _uuid = test_api_v2._uuid diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py index 2a2311c010..ede517fc3a 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py +++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py @@ -23,7 +23,7 @@ from neutron.plugins.vmware import nsxlib from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib from neutron.plugins.vmware.nsxlib import switch as switchlib from neutron.tests.unit import test_api_v2 -from neutron.tests.unit.vmware.nsxlib import base +from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base _uuid = test_api_v2._uuid diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py similarity index 97% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py index 1d7e2ea562..d62cb0c607 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py +++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py @@ -20,7 +20,7 @@ from neutron.common import exceptions from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware import nsxlib from neutron.plugins.vmware.nsxlib import queue as queuelib -from neutron.tests.unit.vmware.nsxlib import base +from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base class TestLogicalQueueLib(base.NsxlibTestCase): diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_router.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_router.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py index 39aa2e4a29..217d0fa362 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_router.py +++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py @@ -28,7 +28,7 @@ from neutron.plugins.vmware import nsxlib from neutron.plugins.vmware.nsxlib import router as routerlib from neutron.plugins.vmware.nsxlib import switch as switchlib from neutron.tests.unit import test_api_v2 -from neutron.tests.unit.vmware.nsxlib import base +from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base _uuid = test_api_v2._uuid diff --git 
a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py index fb2574ff4c..a9fbe2c275 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py +++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py @@ -18,7 +18,7 @@ from neutron.common import exceptions from neutron.plugins.vmware import nsxlib from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib from neutron.tests.unit import test_api_v2 -from neutron.tests.unit.vmware.nsxlib import base +from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base _uuid = test_api_v2._uuid diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py index db8c5af984..31de145c37 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py +++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py @@ -22,7 +22,7 @@ from neutron.common import exceptions from neutron.plugins.vmware.common import utils from neutron.plugins.vmware.nsxlib import switch as switchlib from neutron.tests.unit import test_api_v2 -from neutron.tests.unit.vmware.nsxlib import base +from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base _uuid = test_api_v2._uuid diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py rename to vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/test_agent_scheduler.py b/vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py similarity index 95% rename from vmware-nsx/neutron/tests/unit/vmware/test_agent_scheduler.py rename to vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py index 6d1193454f..6cb0c9ce8b 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/test_agent_scheduler.py +++ b/vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py @@ -21,8 +21,8 @@ from neutron.common import test_lib from neutron.plugins.vmware.common import sync from neutron.plugins.vmware.dhcp_meta import rpc from neutron.tests.unit.openvswitch import test_agent_scheduler as test_base -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.apiclient import fake +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake class DhcpAgentNotifierTestCase(test_base.OvsDhcpAgentNotifierTestCase): diff --git a/vmware-nsx/neutron/tests/unit/vmware/test_dhcpmeta.py b/vmware_nsx/neutron/tests/unit/vmware/test_dhcpmeta.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/test_dhcpmeta.py rename to vmware_nsx/neutron/tests/unit/vmware/test_dhcpmeta.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_opts.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/test_nsx_opts.py rename to vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py index 2f0e65d4ad..300d5a29a1 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_opts.py +++ 
b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py @@ -28,7 +28,7 @@ from neutron.plugins.vmware.common import sync from neutron.plugins.vmware import nsx_cluster from neutron.plugins.vmware.nsxlib import lsn as lsnlib from neutron.tests import base -from neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit import vmware BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test') NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.basic.test') diff --git a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_plugin.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/test_nsx_plugin.py rename to vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py index bd668d6719..01645ddf36 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_plugin.py +++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py @@ -51,8 +51,8 @@ import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode import neutron.tests.unit.test_extension_security_group as ext_sg import neutron.tests.unit.test_l3_plugin as test_l3_plugin from neutron.tests.unit import testlib_api -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.apiclient import fake +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake LOG = log.getLogger(__name__) diff --git a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_sync.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/test_nsx_sync.py rename to vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py index 93a9a5e125..799b60243f 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_sync.py +++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py @@ -38,8 +38,8 @@ from neutron.plugins.vmware import plugin from neutron.tests import base from neutron.tests.unit import test_api_v2 from neutron.tests.unit import testlib_api -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.apiclient import fake +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake LOG = log.getLogger(__name__) diff --git a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_utils.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py similarity index 99% rename from vmware-nsx/neutron/tests/unit/vmware/test_nsx_utils.py rename to vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py index 44b72bfa01..74e65adfde 100644 --- a/vmware-nsx/neutron/tests/unit/vmware/test_nsx_utils.py +++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py @@ -26,8 +26,8 @@ from neutron.plugins.vmware.common import utils from neutron.plugins.vmware.dbexts import models from neutron.plugins.vmware import nsxlib from neutron.tests import base -from neutron.tests.unit import vmware -from neutron.tests.unit.vmware.nsxlib import base as nsx_base +from vmware_nsx.neutron.tests.unit import vmware +from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base as nsx_base class NsxUtilsTestCase(base.BaseTestCase): diff --git a/vmware-nsx/neutron/tests/unit/vmware/nsxlib/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/vshield/__init__.py similarity index 100% rename from vmware-nsx/neutron/tests/unit/vmware/nsxlib/__init__.py rename to vmware_nsx/neutron/tests/unit/vmware/vshield/__init__.py diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py b/vmware_nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py 
similarity index 100%
rename from vmware-nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py
rename to vmware_nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py
diff --git a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py b/vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
similarity index 99%
rename from vmware-nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
rename to vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
index c0451715f7..a30c5a7b5e 100644
--- a/vmware-nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
@@ -21,8 +21,8 @@
 from neutron.plugins.vmware.vshield.tasks import constants as ts_const
 from neutron.plugins.vmware.vshield.tasks import tasks as ts
 from neutron.plugins.vmware.vshield import vcns_driver
 from neutron.tests import base
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.vshield import fake_vcns
+from vmware_nsx.neutron.tests.unit import vmware
+from vmware_nsx.neutron.tests.unit.vmware.vshield import fake_vcns
 
 VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test")
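
A note on the packaging mechanics behind this patch: the one-line `vmware_nsx/__init__.py` added above declares a `pkg_resources`-style namespace package, which is what lets the relocated tree resolve as `vmware_nsx.neutron.plugins.vmware.*` while `neutron` itself remains a separate distribution. (This codebase predates reliance on PEP 420 implicit namespace packages, hence the explicit declaration.) Below is a minimal sketch of how the pieces fit together once the package is installed; the usage lines are illustrative only and are not part of the patch.

    # Sketch: pkg_resources namespace packaging as used by this change.
    #
    # 1) vmware_nsx/__init__.py contains exactly the line added above; every
    #    distribution that wants to contribute modules under the 'vmware_nsx'
    #    namespace ships the same declaration in its own __init__.py:
    #
    #        __import__('pkg_resources').declare_namespace(__name__)
    #
    # 2) With the package installed, callers import the moved modules from
    #    the new path, as the updated hunks above do. Illustrative usage:
    from vmware_nsx.neutron.plugins.vmware.extensions import servicerouter

    # The module now lives under the vmware_nsx namespace rather than the
    # in-tree neutron.plugins.vmware path:
    print(servicerouter.__name__)
    # -> vmware_nsx.neutron.plugins.vmware.extensions.servicerouter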