Retiring neutron-lbaas

Depends-On: https://review.opendev.org/#/c/658493/
Change-Id: Ia4f4b335295c0e6add79fe0db5dd31b4327fdb54
Adam Harwell 2019-05-10 15:09:19 -07:00
parent a486af8f70
commit f8d023fbfe
312 changed files with 15 additions and 42594 deletions

@@ -1,7 +0,0 @@
[run]
branch = True
source = neutron_lbaas
omit = neutron_lbaas/tests*
[report]
ignore_errors = True

.gitignore

@@ -1,35 +0,0 @@
AUTHORS
build/*
build-stamp
ChangeLog
cover/
covhtml/
dist/
doc/build
etc/*.sample
*.DS_Store
*.pyc
neutron.egg-info/
*.egg-info/
neutron/vcsversion.py
neutron/versioninfo
pbr*.egg/
run_tests.err.log
run_tests.log
setuptools*.egg/
subunit.log
tempest.log
.testrepository
*.mo
*.sw?
*~
/.*
!/.coveragerc
!/.gitignore
!/.gitreview
!/.mailmap
!/.pylintrc
!/.testr.conf
# Files created by releasenotes build
releasenotes/build

@@ -1,11 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
lawrancejing <lawrancejing@gmail.com> <liuqing@windawn.com>
Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
Kun Huang <gareth@unitedstack.com> <academicgareth@gmail.com>
Zhenguo Niu <zhenguo@unitedstack.com> <Niu.ZGlinux@gmail.com>
Isaku Yamahata <isaku.yamahata@intel.com> <isaku.yamahata@gmail.com>
Isaku Yamahata <isaku.yamahata@intel.com> <yamahata@private.email.ne.jp>
Morgan Fainberg <morgan.fainberg@gmail.com> <m@metacloud.com>

@@ -1,93 +0,0 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
#
ignore=.git,tests
[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "E" Error for important programming issues (likely bugs)
no-member,
too-many-function-args,
# "W" Warnings for stylistic problems or minor programming issues
abstract-method,
broad-except,
expression-not-assigned,
fixme,
global-statement,
no-init,
protected-access,
redefined-builtin,
star-args,
unused-argument,
# "C" Coding convention violations
bad-continuation,
invalid-name,
missing-docstring,
# "R" Refactor recommendations
abstract-class-little-used,
abstract-class-not-used,
duplicate-code,
interface-not-implemented,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
# _ is used by our localization
additional-builtins=_
[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use oslo_serialization.jsonutils
json
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems
[REPORTS]
# Tells whether to display a full report or only the messages
reports=no

@@ -1,3 +0,0 @@
[DEFAULT]
test_path=${OS_TEST_PATH:-./neutron_lbaas/tests/unit}
top_dir=./

@@ -1,8 +0,0 @@
.. warning::
Neutron-lbaas is now deprecated. Please see the FAQ: https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation
New features will not be accepted on this project.
Please see the Neutron CONTRIBUTING.rst file for how to contribute to
neutron-lbaas:
`Neutron CONTRIBUTING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/CONTRIBUTING.rst>`_

@@ -1,7 +0,0 @@
Neutron LBaaS Style Commandments
================================
Please see the Neutron HACKING.rst file for style commandments for
neutron-lbaas:
`Neutron HACKING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/HACKING.rst>`_

LICENSE

@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

README

@@ -0,0 +1,15 @@
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
The new official OpenStack LBaaS project is Octavia. See the following
resources for more details:
https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation
https://governance.openstack.org/tc/reference/projects/octavia.html
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
Freenode.
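As a quick reference, one way to inspect the pre-retirement tree is the
following sketch (the repository URL is the git.openstack.org one used
elsewhere in this tree; adjust it if you use a mirror):
git clone https://git.openstack.org/openstack/neutron-lbaas
cd neutron-lbaas
# HEAD is the retirement commit; HEAD^1 is the last commit with the full code
git checkout HEAD^1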

@@ -1,33 +0,0 @@
========================
Team and repository tags
========================
.. image:: https://governance.openstack.org/tc/badges/neutron-lbaas.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
.. warning::
Neutron-lbaas is now deprecated. Please see the FAQ: https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation
Welcome!
========
This package contains the code for the Neutron Load Balancer as a
Service (LBaaS) service. This includes third-party drivers. This package
requires Neutron to run.
External Resources:
===================
The homepage for Octavia LBaaS is https://launchpad.net/octavia. Use this
site to ask for help and to file bugs.
Code is available on git.openstack.org at:
<http://git.openstack.org/cgit/openstack/neutron-lbaas>.
Please refer to Neutron documentation for more information:
`Neutron README.rst <http://git.openstack.org/cgit/openstack/neutron/tree/README.rst>`_
Release notes for the project can be found at:
<https://docs.openstack.org/releasenotes/neutron-lbaas/index.html>.

@@ -1,8 +0,0 @@
Testing Neutron LBaaS
=====================
Please see the TESTING.rst file for the Neutron project itself. It has
the latest instructions for how to test Neutron, and these also apply
to neutron-lbaas:
`Neutron TESTING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/TESTING.rst>`_

@@ -1,2 +0,0 @@
[python: **.py]

@@ -1,88 +0,0 @@
This directory contains the neutron-lbaas devstack plugin. To
configure the neutron load balancer, you will need to enable the
neutron-lbaas devstack plugin and enable the LBaaS service by editing
the [[local|localrc]] section of your local.conf file.
Octavia is the LBaaS V2 reference service provider and is used in the
examples below. Enabling another service provider, such as the agent
Haproxy driver, can be done by enabling its driver plugin, if
applicable, and setting the appropriate service provider value for
NEUTRON_LBAAS_SERVICE_PROVIDERV2, like the following:
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
In addition, you can enable multiple
service providers by enabling the applicable driver plugins and
space-delimiting the service provider values in
NEUTRON_LBAAS_SERVICE_PROVIDERV2.
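For example, a hypothetical two-provider setup that keeps Octavia as the
default and also enables the agent Haproxy driver (both driver plugins must
be enabled, and only one provider may carry the ":default" suffix) could
look like:
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver"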
1) Enable the plugins
To enable the plugins, add lines of the form:
enable_plugin neutron-lbaas <neutron-lbaas GITURL> [GITREF]
enable_plugin octavia <octavia GITURL> [GITREF]
where
<neutron-lbaas GITURL> is the URL of a neutron-lbaas repository
<octavia GITURL> is the URL of an octavia repository
[GITREF] is an optional git ref (branch/ref/tag). The default is
master.
For example
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas stable/liberty
enable_plugin octavia https://git.openstack.org/openstack/octavia stable/liberty
"q-lbaasv2" is the default service enabled with neutron-lbaas plugin.
2) Enable the LBaaS services
To enable the LBaaS services, add lines of the following form to the
[[local|localrc]] section of local.conf:
ENABLED_SERVICES+=<LBAAS-FLAG>
ENABLED_SERVICES+=<OCTAVIA-FLAGS>
where
<LBAAS-FLAG> is "q-lbaasv2" for LBaaS Version 2.
<OCTAVIA-FLAGS> are "octavia" for the Octavia driver,
"o-cw" for the Octavia Controller Worker,
"o-hk" for the Octavia housekeeping manager,
"o-hm" for the Octavia Health Manager,
and "o-api" for the Octavia API service.
For example
# For LBaaS V2
ENABLED_SERVICES+=,q-lbaasv2
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
3) Enable the dashboard of LBaaS V2
If you are using LBaaS V2 and want to add horizon support, add lines of the form:
enable_plugin neutron-lbaas-dashboard <neutron-lbaas-dashboard GITURL> [GITREF]
where
<neutron-lbaas-dashboard GITURL> is the URL of a neutron-lbaas-dashboard repository
[GITREF] is an optional git ref (branch/ref/tag). The default is
master.
For example
enable_plugin neutron-lbaas-dashboard https://git.openstack.org/openstack/neutron-lbaas-dashboard stable/liberty
Once you enable the neutron-lbaas-dashboard plugin in your local.conf, ensure the ``horizon`` and
``q-lbaasv2`` services are enabled. If both of them are enabled,
neutron-lbaas-dashboard will be enabled automatically.
For more information, see the "Plugin Interface" section of
https://docs.openstack.org/devstack/latest/plugins.html
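Taken together, the steps above correspond to a minimal [[local|localrc]]
fragment along these lines (Git URLs and branches are illustrative; adjust
them to your environment):
[[local|localrc]]
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
enable_plugin octavia https://git.openstack.org/openstack/octavia
ENABLED_SERVICES+=,q-lbaasv2
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api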

@@ -1,36 +0,0 @@
global
daemon
log /dev/log local0
log /dev/log local1 notice
defaults
log global
retries 3
option redispatch
timeout connect 5000
timeout client 50000
timeout server 50000
frontend neutron-frontend-api
option httplog
bind 0.0.0.0:NEUTRON_ALTERNATE_API_PORT
mode http
acl url_lbaas path_beg /v2.0/lbaas
use_backend octavia-backend-api if url_lbaas
default_backend neutron-backend-api
backend octavia-backend-api
mode http
balance roundrobin
option forwardfor
reqrep ^([^\ :]*)\ /v2.0/lbaas/(.*) \1\ /load-balancer/v2.0/\2
server octavia-1 127.0.0.1:80 weight 1
backend neutron-backend-api
mode http
balance roundrobin
option forwardfor
# the devstack plugin will add an entry here looking like:
# server neutron-1 <IP>:<PORT> weight 1

@@ -1 +0,0 @@
software-properties-common

@@ -1,180 +0,0 @@
# function definitions for neutron-lbaas devstack plugin
function neutron_lbaas_install {
setup_develop $NEUTRON_LBAAS_DIR
neutron_agent_lbaas_install_agent_packages
}
function neutron_agent_lbaas_install_agent_packages {
if is_ubuntu; then
if [[ ${OFFLINE} == False && ${os_CODENAME} =~ (trusty|precise) ]]; then
# Check for specific version of Ubuntu that requires backports repository for haproxy 1.5.14 or greater
BACKPORT="deb http://archive.ubuntu.com/ubuntu ${os_CODENAME}-backports main restricted universe multiverse"
BACKPORT_EXISTS=$(grep ^ /etc/apt/sources.list /etc/apt/sources.list.d/* | grep "${BACKPORT}") || true
if [[ -z "${BACKPORT_EXISTS}" ]]; then
sudo add-apt-repository "${BACKPORT}" -y
fi
sudo apt-get update
sudo apt-get install haproxy -t ${os_CODENAME}-backports
elif [[ ${OFFLINE} == False ]]; then
install_package haproxy
fi
fi
if is_fedora || is_suse; then
install_package haproxy
fi
}
function neutron_lbaas_configure_common {
cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_LBAAS_CONF
cp $NEUTRON_LBAAS_DIR/etc/services_lbaas.conf.sample $SERVICES_LBAAS_CONF
inicomment $NEUTRON_LBAAS_CONF service_providers service_provider
iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $NEUTRON_LBAAS_SERVICE_PROVIDERV2
neutron_server_config_add $NEUTRON_LBAAS_CONF
neutron_service_plugin_class_add $LBAASV2_PLUGIN
# Ensure config is set up properly for authentication neutron-lbaas
iniset $NEUTRON_LBAAS_CONF service_auth auth_url $OS_AUTH_URL$AUTH_ENDPOINT
iniset $NEUTRON_LBAAS_CONF service_auth admin_tenant_name $ADMIN_TENANT_NAME
iniset $NEUTRON_LBAAS_CONF service_auth admin_user $ADMIN_USER
iniset $NEUTRON_LBAAS_CONF service_auth admin_password $ADMIN_PASSWORD
iniset $NEUTRON_LBAAS_CONF service_auth auth_version $AUTH_VERSION
# Ensure config is set up properly for authentication neutron
iniset $NEUTRON_CONF service_auth auth_url $OS_AUTH_URL$AUTH_ENDPOINT
iniset $NEUTRON_CONF service_auth admin_tenant_name $ADMIN_TENANT_NAME
iniset $NEUTRON_CONF service_auth admin_user $ADMIN_USER
iniset $NEUTRON_CONF service_auth admin_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF service_auth auth_version $AUTH_VERSION
neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
# If user enable the Neutron service_name like "q-*",
# the "Q_PLUGIN_CONF_FILE" would be the ml2 config path
# But if user enable the Neutron service name like "neutron-*",
# the same value will be stored into "NEUTRON_CORE_PLUGIN_CONF"
COMPATIBLE_NEUTRON_CORE_PLUGIN_CONF=`[ -n "$Q_PLUGIN_CONF_FILE" ] && echo $Q_PLUGIN_CONF_FILE || echo $NEUTRON_CORE_PLUGIN_CONF`
$NEUTRON_BIN_DIR/neutron-db-manage --subproject neutron-lbaas --config-file $NEUTRON_CONF --config-file /$COMPATIBLE_NEUTRON_CORE_PLUGIN_CONF upgrade head
}
function neutron_lbaas_configure_agent {
if [ -z "$1" ]; then
mkdir -p $LBAAS_AGENT_CONF_PATH
fi
conf=${1:-$LBAAS_AGENT_CONF_FILENAME}
cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $conf
if is_neutron_legacy_enabled; then
# ovs_use_veth needs to be set before the plugin configuration
# occurs to allow plugins to override the setting.
iniset $conf DEFAULT ovs_use_veth $Q_OVS_USE_VETH
fi
neutron_plugin_setup_interface_driver $conf
if is_fedora; then
iniset $conf DEFAULT user_group "nobody"
iniset $conf haproxy user_group "nobody"
fi
}
function configure_neutron_api_haproxy {
echo "Configuring neutron API haproxy for l7"
install_package haproxy
cp ${NEUTRON_LBAAS_DIR}/devstack/etc/neutron/haproxy.cfg ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
sed -i.bak "s/NEUTRON_ALTERNATE_API_PORT/${NEUTRON_ALTERNATE_API_PORT}/" ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
NEUTRON_API_PORT=9696
echo " server neutron-1 ${HOST_IP}:${NEUTRON_API_PORT} weight 1" >> ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
/usr/sbin/haproxy -c -f ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
run_process $NEUTRON_API_HAPROXY "/usr/sbin/haproxy -db -V -f ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg"
# Fix the endpoint
NEUTRON_ENDPOINT_ID=$(openstack endpoint list --service neutron -f value -c ID)
openstack endpoint set --url 'http://127.0.0.1:9695/' $NEUTRON_ENDPOINT_ID
}
function neutron_lbaas_generate_config_files {
# Uses oslo config generator to generate LBaaS sample configuration files
(cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh)
}
function neutron_lbaas_start {
local is_run_process=True
if is_neutron_legacy_enabled; then
LBAAS_VERSION="q-lbaasv2"
else
LBAAS_VERSION="neutron-lbaasv2"
fi
AGENT_LBAAS_BINARY=${AGENT_LBAASV2_BINARY}
# Octavia doesn't need the LBaaS V2 service running. If Octavia is the
# only provider then don't run the process.
if [[ "$NEUTRON_LBAAS_SERVICE_PROVIDERV2" == "$NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA" ]]; then
is_run_process=False
fi
if [[ "$is_run_process" == "True" ]] ; then
run_process $LBAAS_VERSION "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_LBAAS_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
fi
}
function neutron_lbaas_stop {
# bracket the first character so the awk process does not match itself
pids=$(ps aux | awk '/[h]aproxy/ { print $2 }')
[ ! -z "$pids" ] && sudo kill $pids
}
function neutron_lbaas_cleanup {
# delete all namespaces created by neutron-lbaas
for ns in $(sudo ip netns list | grep -o -E '(qlbaas|nlbaas)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
}
# check for service enabled
if is_service_enabled $LBAAS_ANY; then
if ! is_service_enabled q-svc neutron-api; then
die "The neutron-api/q-svc service must be enabled to use $LBAAS_ANY"
fi
if [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing neutron-lbaas"
neutron_lbaas_install
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring neutron-lbaas"
if [[ "$PROXY_OCTAVIA" == "True" ]]; then
configure_neutron_api_haproxy
else
neutron_lbaas_generate_config_files
neutron_lbaas_configure_common
neutron_lbaas_configure_agent
fi
elif [[ "$1" == "stack" && "$2" == "extra" && "$PROXY_OCTAVIA" != "True" ]]; then
# Initialize and start the LBaaS service
echo_summary "Initializing neutron-lbaas"
neutron_lbaas_start
fi
fi
if [[ "$1" == "unstack" ]]; then
# Shut down LBaaS services
neutron_lbaas_stop
fi
if [[ "$1" == "clean" ]]; then
# Remove state and transient data
# Remember clean.sh first calls unstack.sh
neutron_lbaas_cleanup
fi

@@ -1,37 +0,0 @@
This file describes how to use Vagrant (http://www.vagrantup.com) to
create a devstack virtual machine that contains two nova instances
running a simple web server and a working Neutron LBaaS Version 2 load
balancer.
1) Install vagrant on your host machine. Vagrant is available for
Windows, Mac OS, and most Linux distributions. Download and install
the package appropriate for your system. On Ubuntu, simply type:
sudo apt-get install vagrant
2) Copy 'Vagrantfile' from this directory to any appropriate directory
and run 'vagrant up':
mkdir $HOME/lbaas-vagrant # or any other appropriate directory
cp Vagrantfile $HOME/lbaas-vagrant
cd $HOME/lbaas-vagrant
vagrant up
3) Wait for the vagrant VM to boot and install, typically 20-30 minutes
4) SSH into the vagrant box
vagrant ssh
5) Determine the loadbalancer IP:
source openrc admin admin
neutron lbaas-loadbalancer-show lb1 | grep vip_address
6) Make HTTP requests to test your load balancer:
curl <LB_IP>
where <LB_IP> is the VIP address for lb1. Subsequent invocations of
"curl <LB_IP>" should demonstrate that the load balancer is alternating
between two member nodes.

@@ -1,31 +0,0 @@
This directory contains sample files for configuring neutron LBaaS using
devstack. By copying these files into the main devstack directory (not the
neutron-lbaas/devstack directory directly above this one), and running
stack.sh, you will create a fully functioning OpenStack installation running
a neutron-lbaas load balancer.
1) Copy the files into place:
cp local.conf local.sh webserver.sh <DEVSTACK_DIR>
where
<DEVSTACK_DIR> is the main devstack directory. Note: this is not
neutron-lbaas/devstack.
2) Build your devstack:
cd <DEVSTACK_DIR>
./stack.sh
3) Test your loadbalancer:
a) Determine the loadbalancer IP:
source openrc admin admin
neutron lbaas-loadbalancer-show lb1 | grep vip_address
b) Make HTTP requests to test your load balancer:
curl <LB_IP>
where <LB_IP> is the VIP address for lb1. Subsequent invocations of
"curl <LB_IP>" should demonstrate that the load balancer is alternating
between two member nodes.

@@ -1,84 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
config.vm.box = "ubuntu/trusty64"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
#config.vm.network "forwarded_port", guest: 80, host: 8080
# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"
# Create a public network, which is generally matched to a bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
config.vm.provider "virtualbox" do |vb|
# Display the VirtualBox GUI when booting the machine
vb.gui = true
# Customize the amount of memory on the VM:
vb.memory = "8192"
end
#
# View the documentation for the provider you are using for more
# information on available options
# Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
# such as FTP and Heroku are also available. See the documentation at
# https://docs.vagrantup.com/v2/push/atlas.html for more information.
# config.push.define "atlas" do |push|
# push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
# end
# Enable provisioning with a shell script. Additional provisioners such as
# Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
# documentation for more information about their specific syntax and use.
config.vm.provision "shell", privileged: false, inline: <<-SHELL
#!/usr/bin/env bash
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install git
git clone https://git.openstack.org/openstack-dev/devstack
git clone https://git.openstack.org/openstack/neutron-lbaas
git clone https://github.com/openstack/octavia
cd neutron-lbaas/devstack/samples
cp local.* webserver.sh ~/devstack
cd ~/devstack
./stack.sh
SHELL
end

@@ -1,86 +0,0 @@
# Sample ``local.conf`` that builds a devstack with neutron LBaaS Version 2
# NOTE: Copy this file to the root DevStack directory for it to work properly.
# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``.
# This gives it the ability to override any variables set in ``stackrc``.
# Also, most of the settings in ``stack.sh`` are written to only be set if no
# value has already been set; this lets ``local.conf`` effectively override the
# default values.
# The ``localrc`` section replaces the old ``localrc`` configuration file.
# Note that if ``localrc`` is present it will be used in favor of this section.
[[local|localrc]]
# The name of the RECLONE environment variable is a bit misleading. It doesn't actually
# reclone repositories, rather it uses git fetch to make sure the repos are current.
RECLONE=True
# Load Barbican (optional)
# enable_plugin barbican https://review.openstack.org/openstack/barbican
# Load the external LBaaS plugin.
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
enable_plugin octavia https://github.com/openstack/octavia
LIBS_FROM_GIT+=python-neutronclient
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
RABBIT_PASSWORD=password
# Enable Logging
LOGFILE=$DEST/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
# Pre-requisite
ENABLED_SERVICES=rabbit,mysql,key
# Horizon
ENABLED_SERVICES+=,horizon
# Nova
ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
ENABLED_SERVICES+=,g-api,g-reg
# Neutron
ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta
# Enable LBaaS V2
ENABLED_SERVICES+=,q-lbaasv2
# Cinder
ENABLED_SERVICES+=,c-api,c-vol,c-sch
# Octavia
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
# enable DVR
Q_PLUGIN=ml2
Q_ML2_TENANT_NETWORK_TYPE=vxlan
Q_DVR_MODE=dvr_snat
LOGFILE=$DEST/logs/stack.sh.log
# Old log files are automatically removed after 7 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2
# Swift
# -----
# Swift is now used as the back-end for the S3-like object store. If Nova's
# objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT
# run if Swift is enabled. Setting the hash value is required and you will
# be prompted for it if Swift is enabled so just set it to something already:
SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
# For development purposes the default of 3 replicas is usually not required.
# Set this to 1 to save some resources:
SWIFT_REPLICAS=1
# The data for Swift is stored by default in (``$DEST/data/swift``),
# or (``$DATA_DIR/swift``) if ``DATA_DIR`` has been set, and can be
# moved by setting ``SWIFT_DATA_DIR``. The directory will be created
# if it does not exist.
SWIFT_DATA_DIR=$DEST/data

@@ -1,101 +0,0 @@
#!/usr/bin/env bash
# Sample ``local.sh`` that configures two simple webserver instances and sets
# up a Neutron LBaaS Version 2 loadbalancer.
# Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
BOOT_DELAY=60
# Import common functions
source ${TOP_DIR}/functions
# Use openrc + stackrc for settings
source ${TOP_DIR}/stackrc
# Destination path for installation ``DEST``
DEST=${DEST:-/opt/stack}
# Additional Variables
IMAGE_NAME="cirros"
SUBNET_NAME="private-subnet"
if is_service_enabled nova; then
# Get OpenStack demo user auth
source ${TOP_DIR}/openrc demo demo
# Create an SSH key to use for the instances
HOST=$(echo $HOSTNAME | cut -d"." -f1)
DEVSTACK_LBAAS_SSH_KEY_NAME=${HOST}_DEVSTACK_LBAAS_SSH_KEY_RSA
DEVSTACK_LBAAS_SSH_KEY_DIR=${TOP_DIR}
DEVSTACK_LBAAS_SSH_KEY=${DEVSTACK_LBAAS_SSH_KEY_DIR}/${DEVSTACK_LBAAS_SSH_KEY_NAME}
rm -f ${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY}
ssh-keygen -b 2048 -t rsa -f ${DEVSTACK_LBAAS_SSH_KEY} -N ""
nova keypair-add --pub-key=${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY_NAME}
# Add tcp/22,80 and icmp to default security group
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
nova secgroup-add-rule default tcp 80 80 0.0.0.0/0
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
# Get Image id
IMAGE_ID=$(glance image-list | awk -v image=${IMAGE_NAME} '$0 ~ image {print $2}' | head -1)
# Get Network id
NET_ID=$(neutron subnet-show ${SUBNET_NAME} | awk '/network_id/ {print $4}')
# Boot some instances
NOVA_BOOT_ARGS="--key-name ${DEVSTACK_LBAAS_SSH_KEY_NAME} --image ${IMAGE_ID} --flavor 1 --nic net-id=$NET_ID"
nova boot ${NOVA_BOOT_ARGS} node1
nova boot ${NOVA_BOOT_ARGS} node2
echo "Waiting ${BOOT_DELAY} seconds for instances to boot"
sleep ${BOOT_DELAY}
# Get Instances IP Addresses
SUBNET_ID=$(neutron subnet-show ${SUBNET_NAME} | awk '/ id / {print $4}')
IP1=$(neutron port-list --device_owner compute:None -c fixed_ips | grep ${SUBNET_ID} | cut -d'"' -f8 | sed -n 1p)
IP2=$(neutron port-list --device_owner compute:None -c fixed_ips | grep ${SUBNET_ID} | cut -d'"' -f8 | sed -n 2p)
ssh-keygen -R ${IP1}
ssh-keygen -R ${IP2}
# Run a simple web server on the instances
scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP1}:webserver.sh
scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP2}:webserver.sh
screen_process node1 "ssh -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no cirros@${IP1} ./webserver.sh"
screen_process node2 "ssh -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no cirros@${IP2} ./webserver.sh"
fi
function wait_for_lb_active {
echo "Waiting for $1 to become ACTIVE..."
status=$(neutron lbaas-loadbalancer-show $1 | awk '/provisioning_status/ {print $4}')
while [ "$status" != "ACTIVE" ]
do
sleep 2
status=$(neutron lbaas-loadbalancer-show $1 | awk '/provisioning_status/ {print $4}')
if [ "$status" == "ERROR" ]
then
echo "$1 ERRORED. Exiting."
exit 1;
fi
done
}
if is_service_enabled q-lbaasv2; then
neutron lbaas-loadbalancer-create --name lb1 ${SUBNET_NAME}
wait_for_lb_active "lb1"
neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1
sleep 10
neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
sleep 10
neutron lbaas-member-create --subnet ${SUBNET_NAME} --address ${IP1} --protocol-port 80 pool1
sleep 10
neutron lbaas-member-create --subnet ${SUBNET_NAME} --address ${IP2} --protocol-port 80 pool1
fi

@@ -1,9 +0,0 @@
#!/bin/sh
MYIP=$(/sbin/ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}');
OUTPUT_STR="Welcome to $MYIP\r"
OUTPUT_LEN=${#OUTPUT_STR}
while true; do
echo -e "HTTP/1.0 200 OK\r\nContent-Length: ${OUTPUT_LEN}\r\n\r\n${OUTPUT_STR}" | sudo nc -l -p 80
done

@@ -1,40 +0,0 @@
# settings for LBaaS devstack plugin
# lib/neutron_plugins/services/loadbalancer
AGENT_LBAASV2_BINARY="$NEUTRON_BIN_DIR/neutron-lbaasv2-agent"
LBAAS_ANY="q-lbaasv2 neutron-lbaasv2"
# turn on lbaasv2 service by default
if is_neutron_legacy_enabled; then
enable_service q-lbaasv2
else
enable_service neutron-lbaasv2
fi
BARBICAN="barbican-svc"
ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"}
ADMIN_USER=${ADMIN_USER:-"admin"}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"password"}
AUTH_VERSION=${AUTH_VERSION:-"3"}
AUTH_ENDPOINT=${AUTH_ENDPOINT:-"/v3"}
LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
LBAAS_AGENT_CONF_FILENAME=$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini
LBAASV2_PLUGIN=${LBAASV2_PLUGIN:-"lbaasv2"}
NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
NEUTRON_LBAAS_CONF_FILE=neutron_lbaas.conf
SERVICES_LBAAS_CONF_FILE=services_lbaas.conf
NEUTRON_LBAAS_CONF=$NEUTRON_CONF_DIR/$NEUTRON_LBAAS_CONF_FILE
SERVICES_LBAAS_CONF=$NEUTRON_CONF_DIR/$SERVICES_LBAAS_CONF_FILE
NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA=${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA:-"LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default"}
NEUTRON_LBAAS_SERVICE_PROVIDERV2=${NEUTRON_LBAAS_SERVICE_PROVIDERV2:-${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA}}
NEUTRON_ALTERNATE_API_PORT=9695
NEUTRON_API_HAPROXY="q-api-ha"

@@ -1,3 +0,0 @@
sphinx>=1.6.2 # BSD
openstackdocstheme>=1.17.0 # Apache-2.0
reno>=2.5.0 # Apache-2.0

@@ -1,242 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Keystone documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
import warnings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT_DIR)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'openstackdocstheme']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neutron LBaaS'
copyright = u'2011-present, OpenStack Foundation.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
from neutron_lbaas.version import version_info as neutron_lbaas_version
release = neutron_lbaas_version.release_string()
# The short X.Y version.
version = neutron_lbaas_version.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['neutron_lbaas.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
#man_pages = [
# ('man/neutron-server', 'neutron-server', u'Neutron Server',
# [u'OpenStack'], 1)
#]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8')
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
#htmlhelp_basename = 'neutrondoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
#latex_documents = [
# ('index', 'Neutron.tex', u'Neutron Documentation',
# u'Neutron development team', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True

@@ -1,17 +0,0 @@
Neutron LBaaS Check Pipeline Thumbnails
=======================================
Click to see full size figure.
.. raw:: html
<table border="1">
<tr>
<td align="center" width=50%>
Failure Percentage - Last 10 Days - V2 API and Scenario Jobs <br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - V2 API and Scenario Jobs&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-loadbalancer.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-loadbalancer.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-loadbalancer'),'green')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-listener.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-listener.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-listener'),'pink')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-pool.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-pool.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-pool'),'brown')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-member.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-member.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-member'),'blue')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-healthmonitor.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-healthmonitor.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-healthmonitor'),'orange')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-minimal'),'yellow')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-scenario.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-scenario.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-scenario'),'red')&drawNullAsZero=true">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-loadbalancer.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-loadbalancer.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-loadbalancer'),'green')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-listener.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-listener.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-listener'),'pink')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-pool.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-pool.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-pool'),'brown')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-member.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-member.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-member'),'blue')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-healthmonitor.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-healthmonitor.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-healthmonitor'),'orange')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-minimal'),'yellow')&target=color(alias(movingAverage(asPercent(transformNull(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-scenario.FAILURE),transformNull(sum(stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-scenario.{SUCCESS,FAILURE}))),'36hours'),%20'gate-neutron-lbaasv2-dsvm-scenario'),'red')&drawNullAsZero=true" width="400"></a>
</td>
</tr>
</table>

@@ -1,7 +0,0 @@
Neutron LBaaS Graphite Pages
============================
.. toctree::
:maxdepth: 1
check.dashboard

@@ -1,19 +0,0 @@
.. documentation master file
Neutron LBaaS Documentation
===========================
.. warning::
Neutron-lbaas is now deprecated. Please see the FAQ: https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation
Dashboards
==========
A collection of dashboards to help developers and reviewers is
located here.
.. toctree::
:maxdepth: 2
dashboards/index

@@ -1,9 +0,0 @@
To generate the sample neutron LBaaS configuration files, run the following
command from the top level of the neutron LBaaS directory:
tox -e genconfig
If a 'tox' environment is unavailable, then you can run the following script
instead to generate the configuration files:
./tools/generate_config_file_samples.sh
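Either way, the generated samples are written under etc/, matching the
output_file settings in the oslo-config-generator configuration shown later
in this change; for example:
ls etc/*.sample
# etc/lbaas_agent.ini.sample  etc/neutron_lbaas.conf.sample  etc/services_lbaas.conf.sample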

@@ -1,25 +0,0 @@
{
"context_is_admin": "role:admin",
"context_is_advsvc": "role:advsvc",
"default": "rule:admin_or_owner",
"create_loadbalancer": "",
"update_loadbalancer": "",
"get_loadbalancer": "",
"delete_loadbalancer": "",
"create_listener": "",
"get_listener": "",
"delete_listener": "",
"update_listener": "",
"create_pool": "",
"get_pool": "",
"delete_pool": "",
"update_pool": "",
"create_healthmonitor": "",
"get_healthmonitor": "",
"update_healthmonitor": "",
"delete_healthmonitor": "",
"create_pool_member": "",
"get_pool_member": "",
"update_pool_member": "",
"delete_pool_member": ""
}

@@ -1,26 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# haproxy
haproxy: CommandFilter, haproxy, root
# lbaas-agent uses kill as well, that's handled by the generic KillFilter
kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
mm-ctl: CommandFilter, mm-ctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
route: CommandFilter, route, root
# arping
arping: CommandFilter, arping, root

@@ -1,6 +0,0 @@
[DEFAULT]
output_file = etc/lbaas_agent.ini.sample
wrap_width = 79
namespace = neutron.lbaas.agent
namespace = oslo.log

@@ -1,5 +0,0 @@
[DEFAULT]
output_file = etc/neutron_lbaas.conf.sample
wrap_width = 79
namespace = neutron.lbaas

@@ -1,5 +0,0 @@
[DEFAULT]
output_file = etc/services_lbaas.conf.sample
wrap_width = 79
namespace = neutron.lbaas.service

@@ -1,154 +0,0 @@
alabaster==0.7.10
alembic==0.8.10
amqp==2.1.1
appdirs==1.3.0
asn1crypto==0.23.0
Babel==2.3.4
beautifulsoup4==4.6.0
cachetools==2.0.0
cffi==1.7.0
cliff==2.8.0
cmd2==0.8.0
contextlib2==0.4.0
coverage==4.0
cryptography==2.1
debtcollector==1.2.0
decorator==3.4.0
deprecation==1.0
docutils==0.11
dogpile.cache==0.6.2
dulwich==0.15.0
eventlet==0.18.2
extras==1.0.0
fasteners==0.7.0
fixtures==3.0.0
flake8-import-order==0.12
flake8==2.5.5
future==0.16.0
futurist==1.2.0
greenlet==0.4.10
hacking==0.12.0
httplib2==0.9.1
idna==2.6
imagesize==0.7.1
iso8601==0.1.11
Jinja2==2.10
jmespath==0.9.0
jsonpatch==1.16
jsonpointer==1.13
jsonschema==2.6.0
keystoneauth1==3.4.0
keystonemiddleware==4.17.0
kombu==4.0.0
linecache2==1.0.0
logutils==0.3.5
Mako==0.4.0
MarkupSafe==1.0
mccabe==0.2.1
mock==2.0.0
monotonic==0.6
mox3==0.20.0
msgpack-python==0.4.0
munch==2.1.0
netaddr==0.7.18
netifaces==0.10.4
neutron-lib==1.25.0
neutron==13.0.0
openstackdocstheme==1.18.1
openstacksdk==0.11.2
os-client-config==1.28.0
os-service-types==1.2.0
os-testr==1.0.0
os-xenapi==0.3.1
osc-lib==1.8.0
oslo.cache==1.26.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
oslo.context==2.19.2
oslo.db==4.37.0
oslo.i18n==3.15.3
oslo.log==3.36.0
oslo.messaging==5.29.0
oslo.middleware==3.31.0
oslo.policy==1.30.0
oslo.privsep==1.23.0
oslo.reports==1.18.0
oslo.rootwrap==5.8.0
oslo.serialization==2.18.0
oslo.service==1.24.0
oslo.utils==3.33.0
oslo.versionedobjects==1.31.2
oslotest==3.2.0
osprofiler==1.4.0
ovs==2.8.0
ovsdbapp==0.10.0
paramiko==2.0.0
Paste==2.0.2
PasteDeploy==1.5.0
pbr==2.0.0
pecan==1.0.0
pep8==1.5.7
pika-pool==0.1.3
pika==0.10.0
positional==1.2.1
prettytable==0.7.2
psutil==3.2.2
pyasn1-modules==0.0.6
pyasn1==0.1.8
pycadf==1.1.0
pycodestyle==2.3.1
pycparser==2.18
pyflakes==0.8.1
Pygments==2.2.0
pyinotify==0.9.6
PyMySQL==0.7.6
pyOpenSSL==17.1.0
pyparsing==2.1.0
pyperclip==1.5.27
pyroute2==0.4.21
python-barbicanclient==4.5.2
python-dateutil==2.5.3
python-designateclient==2.7.0
python-editor==1.0.3
python-keystoneclient==3.8.0
python-mimeparse==1.6.0
python-neutronclient==6.7.0
python-novaclient==9.1.0
python-subunit==1.0.0
pytz==2013.6
PyYAML==3.12
reno==2.5.0
repoze.lru==0.7
requests-mock==1.2.0
requests==2.14.2
requestsexceptions==1.2.0
rfc3986==0.3.1
Routes==2.3.1
ryu==4.14
simplejson==3.5.1
six==1.10.0
snowballstemmer==1.2.1
Sphinx==1.6.2
sphinxcontrib-websupport==1.0.1
sqlalchemy-migrate==0.11.0
SQLAlchemy==1.2.0
sqlparse==0.2.2
statsd==3.2.1
stestr==1.0.0
stevedore==1.20.0
tempest==17.1.0
Tempita==0.5.2
tenacity==3.2.1
testrepository==0.0.18
testresources==2.0.0
testscenarios==0.4
testtools==2.2.0
tinyrpc==0.6
traceback2==1.4.0
unittest2==1.1.0
urllib3==1.21.1
vine==1.1.4
waitress==1.1.0
WebOb==1.7.1
WebTest==2.0.27
wrapt==1.7.0

View File

@ -1,32 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
DOMAIN = "neutron_lbaas"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
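For reference, these helpers are consumed elsewhere in this tree roughly as follows (a minimal sketch; the message text mirrors one of the exception classes removed later in this change):
from neutron_lbaas._i18n import _

# Wrapping user-facing strings with _() makes them translatable via oslo_i18n.
msg = _('Unknown device with loadbalancer_id %(loadbalancer_id)s')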

View File

@ -1,79 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron.common import config as common_config
from neutron.conf.agent import common as config
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from neutron_lbaas._i18n import _
from neutron_lbaas.agent import agent_manager as manager
from neutron_lbaas.services.loadbalancer import constants
LOG = logging.getLogger(__name__)
OPTS = [
cfg.IntOpt(
'periodic_interval',
default=10,
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
help=_('Seconds between periodic task runs')
)
]
class LbaasAgentService(n_rpc.Service):
def start(self):
super(LbaasAgentService, self).start()
self.tg.add_timer(
cfg.CONF.periodic_interval,
self.manager.run_periodic_tasks,
None,
None
)
def main():
cfg.CONF.register_opts(OPTS)
cfg.CONF.register_opts(manager.OPTS)
# import interface options just in case the driver uses namespaces
config.register_interface_opts(cfg.CONF)
config.register_external_process_opts(cfg.CONF)
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
common_config.init(sys.argv[1:])
config.setup_logging()
config.setup_privsep()
LOG.warning('neutron-lbaas is now deprecated. See: '
'https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation')
mgr = manager.LbaasAgentManager(cfg.CONF)
svc = LbaasAgentService(
host=cfg.CONF.host,
topic=constants.LOADBALANCER_AGENTV2,
manager=mgr
)
service.launch(cfg.CONF, svc).wait()

View File

@ -1,72 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import rpc as n_rpc
import oslo_messaging
class LbaasAgentApi(object):
"""Agent side of the Agent to Plugin RPC API."""
# history
# 1.0 Initial version
def __init__(self, topic, context, host):
self.context = context
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_ready_devices(self):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'get_ready_devices', host=self.host)
def get_loadbalancer(self, loadbalancer_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'get_loadbalancer',
loadbalancer_id=loadbalancer_id)
def loadbalancer_deployed(self, loadbalancer_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'loadbalancer_deployed',
loadbalancer_id=loadbalancer_id)
def update_status(self, obj_type, obj_id, provisioning_status=None,
operating_status=None):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'update_status', obj_type=obj_type,
obj_id=obj_id,
provisioning_status=provisioning_status,
operating_status=operating_status)
def loadbalancer_destroyed(self, loadbalancer_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'loadbalancer_destroyed',
loadbalancer_id=loadbalancer_id)
def plug_vip_port(self, port_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'plug_vip_port', port_id=port_id,
host=self.host)
def unplug_vip_port(self, port_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'unplug_vip_port', port_id=port_id,
host=self.host)
def update_loadbalancer_stats(self, loadbalancer_id, stats):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'update_loadbalancer_stats',
loadbalancer_id=loadbalancer_id, stats=stats)
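A hypothetical usage sketch of the RPC API above (not part of the original module; the agent manager removed later in this change builds the client with the same arguments):
from neutron_lib import context as ncontext

from neutron_lbaas.services.loadbalancer import constants as lb_const

rpc_api = LbaasAgentApi(lb_const.LOADBALANCER_PLUGINV2,
                        ncontext.get_admin_context_without_session(),
                        host='lbaas-agent-host')  # illustrative host name
for lb_id in rpc_api.get_ready_devices():
    lb_dict = rpc_api.get_loadbalancer(lb_id)  # full loadbalancer representation
    # ... a device driver would deploy lb_dict here ...
    rpc_api.loadbalancer_deployed(lb_id)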

View File

@ -1,109 +0,0 @@
# Copyright 2013 OpenStack Foundation. All rights reserved
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AgentDeviceDriver(object):
"""Abstract device driver that defines the API required by LBaaS agent."""
def __init__(self, conf, plugin_rpc, process_monitor=None):
self.conf = conf
self.plugin_rpc = plugin_rpc
self.process_monitor = process_monitor
@abc.abstractproperty
def loadbalancer(self):
pass
@abc.abstractproperty
def listener(self):
pass
@abc.abstractproperty
def pool(self):
pass
@abc.abstractproperty
def member(self):
pass
@abc.abstractproperty
def healthmonitor(self):
pass
@abc.abstractmethod
def get_name(self):
"""Returns unique name across all LBaaS device drivers."""
pass
@abc.abstractmethod
def deploy_instance(self, loadbalancer):
"""Fully deploys a loadbalancer instance from a given loadbalancer."""
pass
@abc.abstractmethod
def undeploy_instance(self, loadbalancer_id, **kwargs):
"""Fully undeploys the loadbalancer instance."""
pass
def remove_orphans(self, known_loadbalancer_ids):
# Not all drivers will support this
raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class BaseManager(object):
def __init__(self, driver):
self.driver = driver
@abc.abstractmethod
def create(self, obj):
pass
@abc.abstractmethod
def update(self, old_obj, obj):
pass
@abc.abstractmethod
def delete(self, obj):
pass
class BaseLoadBalancerManager(BaseManager):
@abc.abstractmethod
def get_stats(self, loadbalancer_id):
pass
class BaseListenerManager(BaseManager):
pass
class BasePoolManager(BaseManager):
pass
class BaseMemberManager(BaseManager):
pass
class BaseHealthMonitorManager(BaseManager):
pass
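To make the contract above concrete, here is a hypothetical no-op driver sketch (not part of the original tree) showing the minimum a device driver must implement; a real driver such as the haproxy namespace driver would subclass the specific Base*Manager classes instead of a single generic manager:
class NoopLoadBalancerManager(BaseLoadBalancerManager):
    def create(self, obj):
        pass

    def update(self, old_obj, obj):
        pass

    def delete(self, obj):
        pass

    def get_stats(self, loadbalancer_id):
        return {}


class NoopManager(BaseManager):
    def create(self, obj):
        pass

    def update(self, old_obj, obj):
        pass

    def delete(self, obj):
        pass


class NoopDeviceDriver(AgentDeviceDriver):
    """Hypothetical driver satisfying every abstract member above."""

    def __init__(self, conf, plugin_rpc, process_monitor=None):
        super(NoopDeviceDriver, self).__init__(conf, plugin_rpc,
                                               process_monitor)
        self._lb_manager = NoopLoadBalancerManager(self)
        self._generic_manager = NoopManager(self)

    def get_name(self):
        return 'noop'

    def deploy_instance(self, loadbalancer):
        pass

    def undeploy_instance(self, loadbalancer_id, **kwargs):
        pass

    @property
    def loadbalancer(self):
        return self._lb_manager

    @property
    def listener(self):
        return self._generic_manager

    @property
    def pool(self):
        return self._generic_manager

    @property
    def member(self):
        return self._generic_manager

    @property
    def healthmonitor(self):
        return self._generic_manager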

View File

@ -1,416 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.linux import external_process
from neutron.agent import rpc as agent_rpc
from neutron.services import provider_configuration as provconfig
from neutron_lib import constants
from neutron_lib import context as ncontext
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import importutils
from neutron_lbaas._i18n import _
from neutron_lbaas.agent import agent_api
from neutron_lbaas.drivers.common import agent_driver_base
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
DEVICE_DRIVERS = 'device_drivers'
OPTS = [
cfg.MultiStrOpt(
'device_driver',
default=['neutron_lbaas.drivers.haproxy.'
'namespace_driver.HaproxyNSDriver'],
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
help=_('Drivers used to manage loadbalancing devices'),
),
]
class DeviceNotFoundOnAgent(n_exc.NotFound):
message = _('Unknown device with loadbalancer_id %(loadbalancer_id)s')
class LbaasAgentManager(periodic_task.PeriodicTasks):
# history
# 1.0 Initial version
target = oslo_messaging.Target(version='1.0')
def __init__(self, conf):
super(LbaasAgentManager, self).__init__(conf)
self.conf = conf
self.context = ncontext.get_admin_context_without_session()
self.serializer = agent_driver_base.DataModelSerializer()
self.plugin_rpc = agent_api.LbaasAgentApi(
lb_const.LOADBALANCER_PLUGINV2,
self.context,
self.conf.host
)
self._process_monitor = external_process.ProcessMonitor(
config=self.conf, resource_type='loadbalancer')
self._load_drivers()
self.agent_state = {
'binary': 'neutron-lbaasv2-agent',
'host': conf.host,
'topic': lb_const.LOADBALANCER_AGENTV2,
'configurations': {'device_drivers': self.device_drivers.keys()},
'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2,
'start_flag': True}
self.admin_state_up = True
self._setup_state_rpc()
self.needs_resync = False
# loadbalancer_id->device_driver_name mapping used to store known instances
self.instance_mapping = {}
def _load_drivers(self):
self.device_drivers = {}
for driver in self.conf.device_driver:
driver = provconfig.get_provider_driver_class(driver,
DEVICE_DRIVERS)
try:
driver_inst = importutils.import_object(
driver,
self.conf,
self.plugin_rpc,
self._process_monitor
)
except ImportError:
msg = _('Error importing loadbalancer device driver: %s')
raise SystemExit(msg % driver)
driver_name = driver_inst.get_name()
if driver_name not in self.device_drivers:
self.device_drivers[driver_name] = driver_inst
else:
msg = _('Multiple device drivers with the same name found: %s')
raise SystemExit(msg % driver_name)
def _setup_state_rpc(self):
self.state_rpc = agent_rpc.PluginReportStateAPI(
lb_const.LOADBALANCER_PLUGINV2)
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
instance_count = len(self.instance_mapping)
self.agent_state['configurations']['instances'] = instance_count
self.state_rpc.report_state(self.context, self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception("Failed reporting state!")
def initialize_service_hook(self, started_by):
self.sync_state()
@periodic_task.periodic_task
def periodic_resync(self, context):
if self.needs_resync:
self.needs_resync = False
self.sync_state()
@periodic_task.periodic_task(spacing=6)
def collect_stats(self, context):
for loadbalancer_id, driver_name in self.instance_mapping.items():
driver = self.device_drivers[driver_name]
try:
stats = driver.loadbalancer.get_stats(loadbalancer_id)
if stats:
self.plugin_rpc.update_loadbalancer_stats(
loadbalancer_id, stats)
except Exception:
LOG.exception('Error updating statistics on loadbalancer %s',
loadbalancer_id)
self.needs_resync = True
def sync_state(self):
known_instances = set(self.instance_mapping.keys())
try:
ready_instances = set(self.plugin_rpc.get_ready_devices())
for deleted_id in known_instances - ready_instances:
self._destroy_loadbalancer(deleted_id, resync=True)
for loadbalancer_id in ready_instances:
self._reload_loadbalancer(loadbalancer_id)
except Exception:
LOG.exception('Unable to retrieve ready devices')
self.needs_resync = True
self.remove_orphans(resync=True)
def _get_driver(self, loadbalancer_id):
if loadbalancer_id not in self.instance_mapping:
raise DeviceNotFoundOnAgent(loadbalancer_id=loadbalancer_id)
driver_name = self.instance_mapping[loadbalancer_id]
return self.device_drivers[driver_name]
def _reload_loadbalancer(self, loadbalancer_id):
try:
loadbalancer_dict = self.plugin_rpc.get_loadbalancer(
loadbalancer_id)
loadbalancer = data_models.LoadBalancer.from_dict(
loadbalancer_dict)
driver_name = loadbalancer.provider.device_driver
if driver_name not in self.device_drivers:
LOG.error('No device driver on agent: %s.', driver_name)
self.plugin_rpc.update_status(
'loadbalancer', loadbalancer_id, constants.ERROR)
return
self.device_drivers[driver_name].deploy_instance(loadbalancer)
self.instance_mapping[loadbalancer_id] = driver_name
self.plugin_rpc.loadbalancer_deployed(loadbalancer_id)
except Exception:
LOG.exception('Unable to deploy instance for loadbalancer: %s',
loadbalancer_id)
self.needs_resync = True
def _destroy_loadbalancer(self, lb_id, resync=False):
driver = self._get_driver(lb_id)
try:
driver.undeploy_instance(lb_id, delete_namespace=True,
resync=resync)
del self.instance_mapping[lb_id]
self.plugin_rpc.loadbalancer_destroyed(lb_id)
except Exception:
LOG.exception('Unable to destroy device for loadbalancer: %s',
lb_id)
self.needs_resync = True
def remove_orphans(self, resync=False):
for driver_name in self.device_drivers:
lb_ids = [lb_id for lb_id in self.instance_mapping
if self.instance_mapping[lb_id] == driver_name]
try:
self.device_drivers[driver_name].remove_orphans(lb_ids,
resync=resync)
except NotImplementedError:
pass # Not all drivers will support this
def _handle_failed_driver_call(self, operation, obj, driver):
obj_type = obj.__class__.__name__.lower()
LOG.exception('%(operation)s %(obj)s %(id)s failed on device '
'driver %(driver)s',
{'operation': operation.capitalize(), 'obj': obj_type,
'id': obj.id, 'driver': driver})
self._update_statuses(obj, error=True)
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
if payload['admin_state_up'] != self.admin_state_up:
self.admin_state_up = payload['admin_state_up']
if self.admin_state_up:
self.needs_resync = True
else:
# Copy keys since the dictionary is modified in the loop body
for loadbalancer_id in list(self.instance_mapping.keys()):
LOG.info("Destroying loadbalancer %s due to agent "
"disabling", loadbalancer_id)
self._destroy_loadbalancer(loadbalancer_id)
LOG.info("Agent_updated by server side %s!", payload)
def _update_statuses(self, obj, error=False):
lb_p_status = constants.ACTIVE
lb_o_status = None
obj_type = obj.__class__.__name__.lower()
obj_p_status = constants.ACTIVE
obj_o_status = lb_const.ONLINE
if error:
obj_p_status = constants.ERROR
obj_o_status = lb_const.OFFLINE
if isinstance(obj, data_models.HealthMonitor):
obj_o_status = None
if isinstance(obj, data_models.LoadBalancer):
lb_o_status = lb_const.ONLINE
if error:
lb_p_status = constants.ERROR
lb_o_status = lb_const.OFFLINE
lb = obj
else:
lb = obj.root_loadbalancer
self.plugin_rpc.update_status(obj_type, obj.id,
provisioning_status=obj_p_status,
operating_status=obj_o_status)
self.plugin_rpc.update_status('loadbalancer', lb.id,
provisioning_status=lb_p_status,
operating_status=lb_o_status)
def create_loadbalancer(self, context, loadbalancer, driver_name):
loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer)
if driver_name not in self.device_drivers:
LOG.error('No device driver on agent: %s.', driver_name)
self.plugin_rpc.update_status('loadbalancer', loadbalancer.id,
provisioning_status=constants.ERROR)
return
driver = self.device_drivers[driver_name]
try:
driver.loadbalancer.create(loadbalancer)
except Exception:
self._handle_failed_driver_call('create', loadbalancer,
driver.get_name())
else:
self.instance_mapping[loadbalancer.id] = driver_name
self._update_statuses(loadbalancer)
def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):
loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer)
old_loadbalancer = data_models.LoadBalancer.from_dict(old_loadbalancer)
driver = self._get_driver(loadbalancer.id)
try:
driver.loadbalancer.update(old_loadbalancer, loadbalancer)
except Exception:
self._handle_failed_driver_call('update', loadbalancer,
driver.get_name())
else:
self._update_statuses(loadbalancer)
def delete_loadbalancer(self, context, loadbalancer):
loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer)
driver = self._get_driver(loadbalancer.id)
driver.loadbalancer.delete(loadbalancer)
del self.instance_mapping[loadbalancer.id]
def create_listener(self, context, listener):
listener = data_models.Listener.from_dict(listener)
driver = self._get_driver(listener.loadbalancer.id)
try:
driver.listener.create(listener)
except Exception:
self._handle_failed_driver_call('create', listener,
driver.get_name())
else:
self._update_statuses(listener)
def update_listener(self, context, old_listener, listener):
listener = data_models.Listener.from_dict(listener)
old_listener = data_models.Listener.from_dict(old_listener)
driver = self._get_driver(listener.loadbalancer.id)
try:
driver.listener.update(old_listener, listener)
except Exception:
self._handle_failed_driver_call('update', listener,
driver.get_name())
else:
self._update_statuses(listener)
def delete_listener(self, context, listener):
listener = data_models.Listener.from_dict(listener)
driver = self._get_driver(listener.loadbalancer.id)
driver.listener.delete(listener)
def create_pool(self, context, pool):
pool = data_models.Pool.from_dict(pool)
driver = self._get_driver(pool.loadbalancer.id)
try:
driver.pool.create(pool)
except Exception:
self._handle_failed_driver_call('create', pool, driver.get_name())
else:
self._update_statuses(pool)
def update_pool(self, context, old_pool, pool):
pool = data_models.Pool.from_dict(pool)
old_pool = data_models.Pool.from_dict(old_pool)
driver = self._get_driver(pool.loadbalancer.id)
try:
driver.pool.update(old_pool, pool)
except Exception:
self._handle_failed_driver_call('update', pool, driver.get_name())
else:
self._update_statuses(pool)
def delete_pool(self, context, pool):
pool = data_models.Pool.from_dict(pool)
driver = self._get_driver(pool.loadbalancer.id)
driver.pool.delete(pool)
def create_member(self, context, member):
member = data_models.Member.from_dict(member)
driver = self._get_driver(member.pool.loadbalancer.id)
try:
driver.member.create(member)
except Exception:
self._handle_failed_driver_call('create', member,
driver.get_name())
else:
self._update_statuses(member)
def update_member(self, context, old_member, member):
member = data_models.Member.from_dict(member)
old_member = data_models.Member.from_dict(old_member)
driver = self._get_driver(member.pool.loadbalancer.id)
try:
driver.member.update(old_member, member)
except Exception:
self._handle_failed_driver_call('update', member,
driver.get_name())
else:
self._update_statuses(member)
def delete_member(self, context, member):
member = data_models.Member.from_dict(member)
driver = self._get_driver(member.pool.loadbalancer.id)
driver.member.delete(member)
def create_healthmonitor(self, context, healthmonitor):
healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor)
driver = self._get_driver(healthmonitor.pool.loadbalancer.id)
try:
driver.healthmonitor.create(healthmonitor)
except Exception:
self._handle_failed_driver_call('create', healthmonitor,
driver.get_name())
else:
self._update_statuses(healthmonitor)
def update_healthmonitor(self, context, old_healthmonitor,
healthmonitor):
healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor)
old_healthmonitor = data_models.HealthMonitor.from_dict(
old_healthmonitor)
driver = self._get_driver(healthmonitor.pool.loadbalancer.id)
try:
driver.healthmonitor.update(old_healthmonitor, healthmonitor)
except Exception:
self._handle_failed_driver_call('update', healthmonitor,
driver.get_name())
else:
self._update_statuses(healthmonitor)
def delete_healthmonitor(self, context, healthmonitor):
healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor)
driver = self._get_driver(healthmonitor.pool.loadbalancer.id)
driver.healthmonitor.delete(healthmonitor)

View File

@ -1,157 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from neutron.db import agentschedulers_db
from neutron.db.models import agent as agents_db
from neutron_lib.db import model_base
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from neutron_lbaas.extensions import lbaas_agentschedulerv2
from neutron_lbaas.services.loadbalancer import constants as lb_const
LOG = logging.getLogger(__name__)
class LoadbalancerAgentBinding(model_base.BASEV2):
"""Represents binding between neutron loadbalancer and agents."""
__tablename__ = "lbaas_loadbalanceragentbindings"
loadbalancer_id = sa.Column(
sa.String(36),
sa.ForeignKey("lbaas_loadbalancers.id", ondelete='CASCADE'),
primary_key=True)
agent = orm.relation(agents_db.Agent)
agent_id = sa.Column(
sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
nullable=False)
class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin,
lbaas_agentschedulerv2
.LbaasAgentSchedulerPluginBase):
agent_notifiers = {}
def get_agent_hosting_loadbalancer(self, context,
loadbalancer_id, active=None):
query = context.session.query(LoadbalancerAgentBinding)
query = query.options(joinedload('agent'))
binding = query.get(loadbalancer_id)
if (binding and self.is_eligible_agent(
active, binding.agent)):
return {'agent': self._make_agent_dict(binding.agent)}
def get_lbaas_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter_by(agent_type=lb_const.AGENT_TYPE_LOADBALANCERV2)
if active is not None:
query = query.filter_by(admin_state_up=active)
if filters:
for key, value in filters.items():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
return [agent
for agent in query
if self.is_eligible_agent(active, agent)]
def list_loadbalancers_on_lbaas_agent(self, context, id):
query = context.session.query(
LoadbalancerAgentBinding.loadbalancer_id)
query = query.filter_by(agent_id=id)
loadbalancer_ids = [item[0] for item in query]
if loadbalancer_ids:
lbs = self.get_loadbalancers(context,
filters={'id': loadbalancer_ids})
return lbs
return []
def get_lbaas_agent_candidates(self, device_driver, active_agents):
candidates = []
for agent in active_agents:
agent_conf = self.get_configuration_dict(agent)
if device_driver in agent_conf['device_drivers']:
candidates.append(agent)
return candidates
def get_down_loadbalancer_bindings(self, context, agent_dead_limit):
cutoff = self.get_cutoff_time(agent_dead_limit)
return (context.session.query(LoadbalancerAgentBinding).join(
agents_db.Agent).filter(
agents_db.Agent.heartbeat_timestamp < cutoff,
agents_db.Agent.admin_state_up))
def _unschedule_loadbalancer(self, context, loadbalancer_id, agent_id):
with context.session.begin(subtransactions=True):
query = context.session.query(LoadbalancerAgentBinding)
query = query.filter(
LoadbalancerAgentBinding.loadbalancer_id == loadbalancer_id,
LoadbalancerAgentBinding.agent_id == agent_id)
query.delete()
class ChanceScheduler(object):
"""Allocate a loadbalancer agent for a vip in a random way."""
def schedule(self, plugin, context, loadbalancer, device_driver):
"""Schedule the load balancer to an active loadbalancer agent if there
is no enabled agent hosting it.
"""
with context.session.begin(subtransactions=True):
lbaas_agent = plugin.db.get_agent_hosting_loadbalancer(
context, loadbalancer.id)
if lbaas_agent:
LOG.debug('Load balancer %(loadbalancer_id)s '
'has already been hosted'
' by lbaas agent %(agent_id)s',
{'loadbalancer_id': loadbalancer.id,
'agent_id': lbaas_agent['agent']['id']})
return
active_agents = plugin.db.get_lbaas_agents(context, active=True)
if not active_agents:
LOG.warning(
'No active lbaas agents for load balancer %s',
loadbalancer.id)
return
candidates = plugin.db.get_lbaas_agent_candidates(device_driver,
active_agents)
if not candidates:
LOG.warning('No lbaas agent supporting device driver %s',
device_driver)
return
chosen_agent = random.choice(candidates)
binding = LoadbalancerAgentBinding()
binding.agent = chosen_agent
binding.loadbalancer_id = loadbalancer.id
context.session.add(binding)
LOG.debug(
'Load balancer %(loadbalancer_id)s is scheduled '
'to lbaas agent %(agent_id)s', {
'loadbalancer_id': loadbalancer.id,
'agent_id': chosen_agent['id']}
)
return chosen_agent

View File

@ -1,30 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging as sys_logging
import eventlet
from oslo_reports import guru_meditation_report as gmr
from neutron_lbaas import version
eventlet.monkey_patch()
# During the call to gmr.TextGuruMeditation.setup_autorun(), Guru Meditation
# Report tries to start logging. Set a handler here to accommodate this.
logger = sys_logging.getLogger(None)
if not logger.handlers:
logger.addHandler(sys_logging.StreamHandler())
_version_string = version.version_info.release_string()
gmr.TextGuruMeditation.setup_autorun(version=_version_string)

View File

@ -1,17 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lbaas.agent import agent
def main():
agent.main()

View File

@ -1,53 +0,0 @@
# Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from stevedore import driver
CONF = cfg.CONF
CERT_MANAGER_DEFAULT = 'barbican'
cert_manager_opts = [
cfg.StrOpt('cert_manager_type',
default=CERT_MANAGER_DEFAULT,
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now '
'deprecated. See: https://wiki.openstack.org/'
'wiki/Neutron/LBaaS/Deprecation',
help='Certificate Manager plugin. '
'Defaults to {0}.'.format(CERT_MANAGER_DEFAULT)),
cfg.StrOpt('barbican_auth',
default='barbican_acl_auth',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now '
'deprecated. See: https://wiki.openstack.org/'
'wiki/Neutron/LBaaS/Deprecation',
help='Name of the Barbican authentication method to use')
]
CONF.register_opts(cert_manager_opts, group='certificates')
_CERT_MANAGER_PLUGIN = None
def get_backend():
global _CERT_MANAGER_PLUGIN
if not _CERT_MANAGER_PLUGIN:
_CERT_MANAGER_PLUGIN = driver.DriverManager(
"neutron_lbaas.cert_manager.backend",
cfg.CONF.certificates.cert_manager_type).driver
return _CERT_MANAGER_PLUGIN
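A hypothetical usage sketch (not part of the original module); the backend resolved here is assumed to be one of the modules removed below, each of which exposes a CertManager class:
backend = get_backend()
manager = backend.CertManager()
container_ref = manager.store_cert(
    project_id='project-1',                            # illustrative values
    certificate='-----BEGIN CERTIFICATE-----\n...',
    private_key='-----BEGIN RSA PRIVATE KEY-----\n...')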

View File

@ -1,47 +0,0 @@
# Copyright (c) 2014-2016 Rackspace US, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Barbican ACL auth class for Barbican certificate handling
"""
from barbicanclient import client as barbican_client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lbaas.common.cert_manager.barbican_auth import common
from neutron_lbaas.common import keystone
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BarbicanACLAuth(common.BarbicanAuth):
_barbican_client = None
@classmethod
def get_barbican_client(cls, project_id=None):
if not cls._barbican_client:
try:
cls._barbican_client = barbican_client.Client(
session=keystone.get_session(),
region_name=CONF.service_auth.region,
interface=CONF.service_auth.endpoint_type
)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Error creating Barbican client")
return cls._barbican_client

View File

@ -1,28 +0,0 @@
# Copyright 2014-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BarbicanAuth(object):
@abc.abstractmethod
def get_barbican_client(self, project_id):
"""Creates a Barbican client object.
:param project_id: Project ID that the request will be used for
:return: a Barbican Client object
:raises Exception: if the client cannot be created
"""

View File

@ -1,212 +0,0 @@
# Copyright 2014, 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from barbicanclient import client as barbican_client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from stevedore import driver as stevedore_driver
from neutron_lbaas._i18n import _
from neutron_lbaas.common.cert_manager import cert_manager
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Cert(cert_manager.Cert):
"""Representation of a Cert based on the Barbican CertificateContainer."""
def __init__(self, cert_container):
if not isinstance(cert_container,
barbican_client.containers.CertificateContainer):
raise TypeError(_(
"Retrieved Barbican Container is not of the correct type "
"(certificate)."))
self._cert_container = cert_container
# Container secrets are accessed upon query and can return as None,
# don't return the payload if the secret is not available.
def get_certificate(self):
if self._cert_container.certificate:
return self._cert_container.certificate.payload
def get_intermediates(self):
if self._cert_container.intermediates:
return self._cert_container.intermediates.payload
def get_private_key(self):
if self._cert_container.private_key:
return self._cert_container.private_key.payload
def get_private_key_passphrase(self):
if self._cert_container.private_key_passphrase:
return self._cert_container.private_key_passphrase.payload
class CertManager(cert_manager.CertManager):
"""Certificate Manager that wraps the Barbican client API."""
def __init__(self):
super(CertManager, self).__init__()
self.auth = stevedore_driver.DriverManager(
namespace='neutron_lbaas.cert_manager.barbican_auth',
name=cfg.CONF.certificates.barbican_auth,
invoke_on_load=True,
).driver
def store_cert(self, project_id, certificate, private_key,
intermediates=None, private_key_passphrase=None,
expiration=None, name='LBaaS TLS Cert'):
"""Stores a certificate in the certificate manager.
:param certificate: PEM encoded TLS certificate
:param private_key: private key for the supplied certificate
:param intermediates: ordered and concatenated intermediate certs
:param private_key_passphrase: optional passphrase for the supplied key
:param expiration: the expiration time of the cert in ISO 8601 format
:param name: a friendly name for the cert
:returns: the container_ref of the stored cert
:raises Exception: if certificate storage fails
"""
connection = self.auth.get_barbican_client(project_id)
LOG.info((
"Storing certificate container '{0}' in Barbican."
).format(name))
certificate_secret = None
private_key_secret = None
intermediates_secret = None
pkp_secret = None
try:
certificate_secret = connection.secrets.create(
payload=certificate,
expiration=expiration,
name="Certificate"
)
private_key_secret = connection.secrets.create(
payload=private_key,
expiration=expiration,
name="Private Key"
)
certificate_container = connection.containers.create_certificate(
name=name,
certificate=certificate_secret,
private_key=private_key_secret
)
if intermediates:
intermediates_secret = connection.secrets.create(
payload=intermediates,
expiration=expiration,
name="Intermediates"
)
certificate_container.intermediates = intermediates_secret
if private_key_passphrase:
pkp_secret = connection.secrets.create(
payload=private_key_passphrase,
expiration=expiration,
name="Private Key Passphrase"
)
certificate_container.private_key_passphrase = pkp_secret
certificate_container.store()
return certificate_container.container_ref
# Barbican (because of Keystone-middleware) sometimes masks
# exceptions strangely -- this will catch anything that it raises and
# reraise the original exception, while also providing useful
# feedback in the logs for debugging
except Exception:
for secret in [certificate_secret, private_key_secret,
intermediates_secret, pkp_secret]:
if secret and secret.secret_ref:
old_ref = secret.secret_ref
try:
secret.delete()
LOG.info((
"Deleted secret {0} ({1}) during rollback."
).format(secret.name, old_ref))
except Exception:
LOG.warning((
"Failed to delete {0} ({1}) during rollback. This "
"is probably not a problem."
).format(secret.name, old_ref))
with excutils.save_and_reraise_exception():
LOG.exception("Error storing certificate data")
def get_cert(self, project_id, cert_ref, resource_ref,
check_only=False, service_name='lbaas'):
"""Retrieves the specified cert and registers as a consumer.
:param cert_ref: the UUID of the cert to retrieve
:param resource_ref: Full HATEOAS reference to the consuming resource
:param check_only: Read Certificate data without registering
:param service_name: Friendly name for the consuming service
:returns: neutron_lbaas.common.cert_manager.cert_manager.Cert
representation of the certificate data
:raises Exception: if certificate retrieval fails
"""
connection = self.auth.get_barbican_client(project_id)
LOG.info((
"Loading certificate container {0} from Barbican."
).format(cert_ref))
try:
if check_only:
cert_container = connection.containers.get(
container_ref=cert_ref
)
else:
cert_container = connection.containers.register_consumer(
container_ref=cert_ref,
name=service_name,
url=resource_ref
)
return Cert(cert_container)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Error getting {0}".format(cert_ref))
def delete_cert(self, project_id, cert_ref, resource_ref,
service_name='lbaas'):
"""Deregister as a consumer for the specified cert.
:param cert_ref: the UUID of the cert to retrieve
:param service_name: Friendly name for the consuming service
:param resource_ref: Full HATEOAS reference to the consuming resource
:raises Exception: if deregistration fails
"""
connection = self.auth.get_barbican_client(project_id)
LOG.info((
"Deregistering as a consumer of {0} in Barbican."
).format(cert_ref))
try:
connection.containers.remove_consumer(
container_ref=cert_ref,
name=service_name,
url=resource_ref
)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception((
"Error deregistering as a consumer of {0}"
).format(cert_ref))

View File

@ -1,100 +0,0 @@
# Copyright 2014, 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Certificate manager API
"""
import abc
from oslo_config import cfg
import six
cfg.CONF.import_group('service_auth', 'neutron_lbaas.common.keystone')
@six.add_metaclass(abc.ABCMeta)
class Cert(object):
"""Base class to represent all certificates."""
@abc.abstractmethod
def get_certificate(self):
"""Returns the certificate."""
pass
@abc.abstractmethod
def get_intermediates(self):
"""Returns the intermediate certificates."""
pass
@abc.abstractmethod
def get_private_key(self):
"""Returns the private key for the certificate."""
pass
@abc.abstractmethod
def get_private_key_passphrase(self):
"""Returns the passphrase for the private key."""
pass
@six.add_metaclass(abc.ABCMeta)
class CertManager(object):
"""Base Cert Manager Interface
A Cert Manager is responsible for managing certificates for TLS.
"""
@abc.abstractmethod
def store_cert(self, project_id, certificate, private_key,
intermediates=None, private_key_passphrase=None,
expiration=None, name=None):
"""Stores (i.e., registers) a cert with the cert manager.
This method stores the specified cert and returns its UUID that
identifies it within the cert manager.
If storage of the certificate data fails, a CertificateStorageException
should be raised.
"""
pass
@abc.abstractmethod
def get_cert(self, project_id, cert_ref, resource_ref,
check_only=False, service_name=None):
"""Retrieves the specified cert.
If check_only is True, don't perform any sort of registration.
If the specified cert does not exist, a CertificateStorageException
should be raised.
"""
pass
@abc.abstractmethod
def delete_cert(self, project_id, cert_ref, resource_ref,
service_name=None):
"""Deletes the specified cert.
If the specified cert does not exist, a CertificateStorageException
should be raised.
"""
pass
@classmethod
def get_service_url(cls, loadbalancer_id):
# Format: <servicename>://<region>/<resource>/<object_id>
return "{0}://{1}/{2}/{3}".format(
cfg.CONF.service_auth.service_name,
cfg.CONF.service_auth.region,
"loadbalancer",
loadbalancer_id
)
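For illustration, with the service_auth defaults registered later in this change (service_name 'lbaas', region 'RegionOne'), the helper above produces:
# 'LB-UUID' is a stand-in for a real loadbalancer id.
CertManager.get_service_url('LB-UUID')
# -> 'lbaas://RegionOne/loadbalancer/LB-UUID'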

View File

@ -1,205 +0,0 @@
# Copyright 2014, 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron_lbaas.common.cert_manager import cert_manager
from neutron_lbaas.common import exceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TLS_STORAGE_DEFAULT = os.environ.get(
'OS_LBAAS_TLS_STORAGE', '/var/lib/neutron-lbaas/certificates/'
)
local_cert_manager_opts = [
cfg.StrOpt('storage_path',
default=TLS_STORAGE_DEFAULT,
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now '
'deprecated. See: https://wiki.openstack.org/'
'wiki/Neutron/LBaaS/Deprecation',
help='Absolute path to the certificate storage directory. '
'Defaults to env[OS_LBAAS_TLS_STORAGE].')
]
CONF.register_opts(local_cert_manager_opts, group='certificates')
class Cert(cert_manager.Cert):
"""Representation of a Cert for local storage."""
def __init__(self, certificate, private_key, intermediates=None,
private_key_passphrase=None):
self.certificate = certificate
self.intermediates = intermediates
self.private_key = private_key
self.private_key_passphrase = private_key_passphrase
def get_certificate(self):
return self.certificate
def get_intermediates(self):
return self.intermediates
def get_private_key(self):
return self.private_key
def get_private_key_passphrase(self):
return self.private_key_passphrase
class CertManager(cert_manager.CertManager):
"""Cert Manager Interface that stores data locally."""
def store_cert(self, project_id, certificate, private_key,
intermediates=None, private_key_passphrase=None, **kwargs):
"""Stores (i.e., registers) a cert with the cert manager.
This method stores the specified cert to the filesystem and returns
a UUID that can be used to retrieve it.
:param project_id: Project ID for the owner of the certificate
:param certificate: PEM encoded TLS certificate
:param private_key: private key for the supplied certificate
:param intermediates: ordered and concatenated intermediate certs
:param private_key_passphrase: optional passphrase for the supplied key
:returns: the UUID of the stored cert
:raises CertificateStorageException: if certificate storage fails
"""
cert_ref = uuidutils.generate_uuid()
filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
LOG.info("Storing certificate data on the local filesystem.")
try:
filename_certificate = "{0}.crt".format(filename_base)
with open(filename_certificate, 'w') as cert_file:
cert_file.write(certificate)
filename_private_key = "{0}.key".format(filename_base)
with open(filename_private_key, 'w') as key_file:
key_file.write(private_key)
if intermediates:
filename_intermediates = "{0}.int".format(filename_base)
with open(filename_intermediates, 'w') as int_file:
int_file.write(intermediates)
if private_key_passphrase:
filename_pkp = "{0}.pass".format(filename_base)
with open(filename_pkp, 'w') as pass_file:
pass_file.write(private_key_passphrase)
except IOError as ioe:
LOG.error("Failed to store certificate.")
raise exceptions.CertificateStorageException(msg=str(ioe))
return cert_ref
def get_cert(self, project_id, cert_ref, resource_ref, **kwargs):
"""Retrieves the specified cert.
:param project_id: Project ID for the owner of the certificate
:param cert_ref: the UUID of the cert to retrieve
:param resource_ref: Full HATEOAS reference to the consuming resource
:returns: neutron_lbaas.common.cert_manager.cert_manager.Cert
representation of the certificate data
:raises CertificateStorageException: if certificate retrieval fails
"""
LOG.info((
"Loading certificate {0} from the local filesystem."
).format(cert_ref))
filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
filename_certificate = "{0}.crt".format(filename_base)
filename_private_key = "{0}.key".format(filename_base)
filename_intermediates = "{0}.int".format(filename_base)
filename_pkp = "{0}.pass".format(filename_base)
cert_data = dict()
try:
with open(filename_certificate, 'r') as cert_file:
cert_data['certificate'] = cert_file.read()
except IOError:
LOG.error((
"Failed to read certificate for {0}."
).format(cert_ref))
raise exceptions.CertificateStorageException(
msg="Certificate could not be read."
)
try:
with open(filename_private_key, 'r') as key_file:
cert_data['private_key'] = key_file.read()
except IOError:
LOG.error((
"Failed to read private key for {0}."
).format(cert_ref))
raise exceptions.CertificateStorageException(
msg="Private Key could not be read."
)
try:
with open(filename_intermediates, 'r') as int_file:
cert_data['intermediates'] = int_file.read()
except IOError:
pass
try:
with open(filename_pkp, 'r') as pass_file:
cert_data['private_key_passphrase'] = pass_file.read()
except IOError:
pass
return Cert(**cert_data)
def delete_cert(self, project_id, cert_ref, resource_ref, **kwargs):
"""Deletes the specified cert.
:param project_id: Project ID for the owner of the certificate
:param cert_ref: the UUID of the cert to delete
:param resource_ref: Full HATEOAS reference to the consuming resource
:raises CertificateStorageException: if certificate deletion fails
"""
LOG.info((
"Deleting certificate {0} from the local filesystem."
).format(cert_ref))
filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
filename_certificate = "{0}.crt".format(filename_base)
filename_private_key = "{0}.key".format(filename_base)
filename_intermediates = "{0}.int".format(filename_base)
filename_pkp = "{0}.pass".format(filename_base)
try:
os.remove(filename_certificate)
os.remove(filename_private_key)
os.remove(filename_intermediates)
os.remove(filename_pkp)
except IOError as ioe:
LOG.error((
"Failed to delete certificate {0}."
).format(cert_ref))
raise exceptions.CertificateStorageException(msg=str(ioe))
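A hypothetical round trip against the local backend above (values are illustrative; CONF.certificates.storage_path must exist and be writable):
mgr = CertManager()
ref = mgr.store_cert(
    'project-1',
    certificate='-----BEGIN CERTIFICATE-----\n...',
    private_key='-----BEGIN RSA PRIVATE KEY-----\n...',
    intermediates='-----BEGIN CERTIFICATE-----\n...',
    private_key_passphrase='secret')                   # all four files written
cert = mgr.get_cert('project-1', ref, resource_ref=None)
assert cert.get_certificate().startswith('-----BEGIN CERTIFICATE-----')
mgr.delete_cert('project-1', ref, resource_ref=None)   # removes all four files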

View File

@ -1,80 +0,0 @@
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Neutron Lbaas base exception handling.
"""
from neutron_lib import exceptions
from neutron_lbaas._i18n import _
class ModelMapException(exceptions.NeutronException):
message = _("Unable to map model class %(target_name)s")
class LbaasException(exceptions.NeutronException):
pass
class TLSException(LbaasException):
pass
class NeedsPassphrase(TLSException):
message = _("Passphrase needed to decrypt key but client "
"did not provide one.")
class UnreadableCert(TLSException):
message = _("Could not read X509 from PEM")
class MisMatchedKey(TLSException):
message = _("Key and x509 certificate do not match")
class CertificateStorageException(TLSException):
message = _('Could not store certificate: %(msg)s')
class LoadbalancerReschedulingFailed(exceptions.Conflict):
message = _("Failed rescheduling loadbalancer %(loadbalancer_id)s: "
"no eligible lbaas agent found.")
class BadRequestException(exceptions.BadRequest):
message = "%(fault_string)s"
class ConflictException(exceptions.Conflict):
message = "%(fault_string)s"
class NotAuthorizedException(exceptions.NotAuthorized):
message = "%(fault_string)s"
class NotFoundException(exceptions.NotFound):
message = "%(fault_string)s"
class ServiceUnavailableException(exceptions.ServiceUnavailable):
message = "%(fault_string)s"
class UnknownException(exceptions.NeutronException):
message = "%(fault_string)s"

View File

@ -1,218 +0,0 @@
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1.identity import v2 as v2_client
from keystoneauth1.identity import v3 as v3_client
from keystoneauth1 import session
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lbaas._i18n import _
LOG = logging.getLogger(__name__)
_SESSION = None
OPTS = [
cfg.StrOpt(
'auth_url',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
help=_('Authentication endpoint'),
),
cfg.StrOpt(
'admin_user',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='admin',
help=_('The service admin user name'),
),
cfg.StrOpt(
'admin_tenant_name',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='admin',
help=_('The service admin tenant name'),
),
cfg.StrOpt(
'admin_password',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
secret=True,
default='password',
help=_('The service admin password'),
),
cfg.StrOpt(
'admin_user_domain',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='Default',
help=_('The admin user domain name'),
),
cfg.StrOpt(
'admin_project_domain',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='Default',
help=_('The admin project domain name'),
),
cfg.StrOpt(
'region',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='RegionOne',
help=_('The deployment region'),
),
cfg.StrOpt(
'service_name',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='lbaas',
help=_('The name of the service'),
),
cfg.StrOpt(
'auth_version',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='3',
help=_('The auth version used to authenticate'),
),
cfg.StrOpt(
'endpoint_type',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default='public',
help=_('The endpoint_type to be used')
),
cfg.BoolOpt(
'insecure',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
default=False,
help=_('Disable server certificate verification')
),
cfg.StrOpt(
'cafile',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
help=_('CA certificate file path')
),
cfg.StrOpt(
'certfile',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
help=_('Client certificate cert file path')
),
cfg.StrOpt(
'keyfile',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now deprecated. '
'See: https://wiki.openstack.org/wiki/Neutron/LBaaS/'
'Deprecation',
help=_('Client certificate key file path')
)
]
cfg.CONF.register_opts(OPTS, 'service_auth')
def get_session():
"""Initializes a Keystone session.
:returns: a Keystone Session object
:raises Exception: if the session cannot be established
"""
global _SESSION
if not _SESSION:
auth_url = cfg.CONF.service_auth.auth_url
insecure = cfg.CONF.service_auth.insecure
cacert = cfg.CONF.service_auth.cafile
cert = cfg.CONF.service_auth.certfile
key = cfg.CONF.service_auth.keyfile
if insecure:
verify = False
else:
verify = cacert or True
if cert and key:
cert = (cert, key)
kwargs = {'auth_url': auth_url,
'username': cfg.CONF.service_auth.admin_user,
'password': cfg.CONF.service_auth.admin_password}
if cfg.CONF.service_auth.auth_version == '2':
client = v2_client
kwargs['tenant_name'] = cfg.CONF.service_auth.admin_tenant_name
elif cfg.CONF.service_auth.auth_version == '3':
client = v3_client
kwargs['project_name'] = cfg.CONF.service_auth.admin_tenant_name
kwargs['user_domain_name'] = (cfg.CONF.service_auth.
admin_user_domain)
kwargs['project_domain_name'] = (cfg.CONF.service_auth.
admin_project_domain)
else:
raise Exception(_('Unknown keystone version!'))
try:
kc = client.Password(**kwargs)
_SESSION = session.Session(auth=kc, verify=verify, cert=cert)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Error creating Keystone session.")
return _SESSION

View File

@ -1,177 +0,0 @@
#
# Copyright 2014 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from oslo_log import log as logging
from oslo_utils import encodeutils
import neutron_lbaas.common.exceptions as exceptions
X509_BEG = "-----BEGIN CERTIFICATE-----"
X509_END = "-----END CERTIFICATE-----"
LOG = logging.getLogger(__name__)
def validate_cert(certificate, private_key=None,
private_key_passphrase=None, intermediates=None):
"""
Validate that the certificate is a valid PEM encoded X509 object
Optionally verify that the private key matches the certificate.
Optionally verify that the intermediates are valid X509 objects.
:param certificate: A PEM encoded certificate
:param private_key: The private key for the certificate
:param private_key_passphrase: Passphrase for accessing the private key
:param intermediates: PEM encoded intermediate certificates
:returns: boolean
"""
cert = _get_x509_from_pem_bytes(certificate)
if intermediates:
for x509Pem in _split_x509s(intermediates):
_get_x509_from_pem_bytes(x509Pem)
if private_key:
pkey = _read_privatekey(private_key, passphrase=private_key_passphrase)
pknum = pkey.public_key().public_numbers()
certnum = cert.public_key().public_numbers()
if pknum != certnum:
raise exceptions.MisMatchedKey
return True
def _read_privatekey(privatekey_pem, passphrase=None):
if passphrase is not None:
passphrase = encodeutils.to_utf8(passphrase)
privatekey_pem = privatekey_pem.encode('ascii')
try:
return serialization.load_pem_private_key(privatekey_pem, passphrase,
backends.default_backend())
except Exception:
raise exceptions.NeedsPassphrase
def _split_x509s(x509Str):
"""
Split the input string into individual x509 text blocks
:param x509Str: A large multi x509 certificate block
:returns: A list of strings where each string represents an
X509 pem block surrounded by BEGIN CERTIFICATE,
END CERTIFICATE block tags
"""
curr_pem_block = []
inside_x509 = False
for line in x509Str.replace("\r", "").split("\n"):
if inside_x509:
curr_pem_block.append(line)
if line == X509_END:
yield "\n".join(curr_pem_block)
curr_pem_block = []
inside_x509 = False
continue
else:
if line == X509_BEG:
curr_pem_block.append(line)
inside_x509 = True
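A small, self-contained illustration of what _split_x509s yields; the certificate bodies here are dummy placeholders rather than real base64 data:
bundle = "\n".join([X509_BEG, "AAAA", X509_END,
                    X509_BEG, "BBBB", X509_END])
print(list(_split_x509s(bundle)))
# ['-----BEGIN CERTIFICATE-----\nAAAA\n-----END CERTIFICATE-----',
#  '-----BEGIN CERTIFICATE-----\nBBBB\n-----END CERTIFICATE-----']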
def _read_pyca_private_key(private_key, private_key_passphrase=None):
kw = {"password": None,
"backend": backends.default_backend()}
if private_key_passphrase is not None:
kw["password"] = encodeutils.to_utf8(private_key_passphrase)
else:
kw["password"] = None
private_key = encodeutils.to_utf8(private_key)
try:
pk = serialization.load_pem_private_key(private_key, **kw)
return pk
except TypeError as ex:
if len(ex.args) > 0 and ex.args[0].startswith("Password"):
raise exceptions.NeedsPassphrase
def dump_private_key(private_key, private_key_passphrase=None):
"""
Parses encrypted key to provide an unencrypted version in PKCS8
:param private_key: private key
:param private_key_passphrase: private key passphrase
:returns: Unencrypted private key in PKCS8
"""
# re encode the key as unencrypted PKCS8
pk = _read_pyca_private_key(private_key,
private_key_passphrase=private_key_passphrase)
key = pk.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
return key
def get_host_names(certificate):
"""Extract the host names from the Pem encoded X509 certificate
:param certificate: A PEM encoded certificate
:returns: A dictionary containing the following keys:
['cn', 'dns_names']
where 'cn' is the CN from the SubjectName of the certificate, and
'dns_names' is a list of dNSNames (possibly empty) from
the SubjectAltNames of the certificate.
"""
try:
certificate = certificate.encode('ascii')
cert = _get_x509_from_pem_bytes(certificate)
cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0]
host_names = {
'cn': cn.value.lower(),
'dns_names': []
}
try:
ext = cert.extensions.get_extension_for_oid(
x509.OID_SUBJECT_ALTERNATIVE_NAME
)
host_names['dns_names'] = ext.value.get_values_for_type(
x509.DNSName)
except x509.ExtensionNotFound:
LOG.debug("%s extension not found",
x509.OID_SUBJECT_ALTERNATIVE_NAME)
return host_names
except Exception:
LOG.exception("Unreadable certificate.")
raise exceptions.UnreadableCert
def _get_x509_from_pem_bytes(certificate_pem):
"""
Parse X509 data from a PEM encoded certificate
:param certificate_pem: Certificate in PEM format
:returns: crypto high-level x509 data from the PEM string
"""
certificate = encodeutils.to_utf8(certificate_pem)
try:
x509cert = x509.load_pem_x509_certificate(certificate,
backends.default_backend())
except Exception:
raise exceptions.UnreadableCert
return x509cert
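To see how the public helpers fit together, here is a hedged end-to-end sketch. It fabricates a throwaway self-signed certificate with the same cryptography library the module already imports; the common name 'www.example.com' is just an illustrative value, not anything from the original code.
import datetime

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                               backend=backends.default_backend())
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'www.example.com')])
now = datetime.datetime.utcnow()
cert = (x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=1))
        .sign(key, hashes.SHA256(), backends.default_backend()))

cert_pem = cert.public_bytes(serialization.Encoding.PEM).decode('ascii')
key_pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption()).decode('ascii')

validate_cert(cert_pem, private_key=key_pem)   # True
get_host_names(cert_pem)   # {'cn': 'www.example.com', 'dns_names': []}
dump_private_key(key_pem)  # unencrypted PKCS8 PEM bytes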

File diff suppressed because it is too large

View File

@ -1,555 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from neutron.db.models import servicetype as st_db
from neutron.db import models_v2
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy.ext import orderinglist
from sqlalchemy import orm
from neutron_lbaas._i18n import _
from neutron_lbaas.services.loadbalancer import constants as lb_const
class SessionPersistenceV2(model_base.BASEV2):
NAME = 'session_persistence'
__tablename__ = "lbaas_sessionpersistences"
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_pools.id"),
primary_key=True,
nullable=False)
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES,
name="lbaas_sesssionpersistences_typev2"),
nullable=False)
cookie_name = sa.Column(sa.String(1024), nullable=True)
class LoadBalancerStatistics(model_base.BASEV2):
"""Represents load balancer statistics."""
NAME = 'loadbalancer_stats'
__tablename__ = "lbaas_loadbalancer_statistics"
loadbalancer_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_loadbalancers.id"),
primary_key=True,
nullable=False)
bytes_in = sa.Column(sa.BigInteger, nullable=False)
bytes_out = sa.Column(sa.BigInteger, nullable=False)
active_connections = sa.Column(sa.BigInteger, nullable=False)
total_connections = sa.Column(sa.BigInteger, nullable=False)
@orm.validates('bytes_in', 'bytes_out',
'active_connections', 'total_connections')
def validate_non_negative_int(self, key, value):
if value < 0:
data = {'key': key, 'value': value}
raise ValueError(_('The %(key)s field can not have '
'negative value. '
'Current value is %(value)d.') % data)
return value
class MemberV2(model_base.BASEV2, model_base.HasId, model_base.HasProject):
"""Represents a v2 neutron load balancer member."""
NAME = 'member'
__tablename__ = "lbaas_members"
__table_args__ = (
sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
name='uniq_pool_address_port_v2'),
)
pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
nullable=False)
address = sa.Column(sa.String(64), nullable=False)
protocol_port = sa.Column(sa.Integer, nullable=False)
weight = sa.Column(sa.Integer, nullable=True)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), nullable=True)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True)
@property
def root_loadbalancer(self):
return self.pool.loadbalancer
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'pool_id', 'address', 'protocol_port', 'weight',
'admin_state_up', 'subnet_id', 'name'])
return ret_dict
class HealthMonitorV2(model_base.BASEV2, model_base.HasId,
model_base.HasProject):
"""Represents a v2 neutron load balancer healthmonitor."""
NAME = 'healthmonitor'
__tablename__ = "lbaas_healthmonitors"
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES,
name="healthmonitors_typev2"),
nullable=False)
delay = sa.Column(sa.Integer, nullable=False)
timeout = sa.Column(sa.Integer, nullable=False)
max_retries = sa.Column(sa.Integer, nullable=False)
http_method = sa.Column(sa.String(16), nullable=True)
url_path = sa.Column(sa.String(255), nullable=True)
expected_codes = sa.Column(sa.String(64), nullable=True)
provisioning_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True)
max_retries_down = sa.Column(sa.Integer, nullable=True)
@property
def root_loadbalancer(self):
return self.pool.loadbalancer
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries',
'http_method', 'url_path', 'expected_codes', 'admin_state_up',
'name', 'max_retries_down'])
ret_dict['pools'] = []
if self.pool:
ret_dict['pools'].append({'id': self.pool.id})
if self.type in [lb_const.HEALTH_MONITOR_TCP,
lb_const.HEALTH_MONITOR_PING]:
ret_dict.pop('http_method')
ret_dict.pop('url_path')
ret_dict.pop('expected_codes')
return ret_dict
class LoadBalancer(model_base.BASEV2, model_base.HasId, model_base.HasProject):
"""Represents a v2 neutron load balancer."""
NAME = 'loadbalancer'
__tablename__ = "lbaas_loadbalancers"
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
vip_subnet_id = sa.Column(sa.String(36), nullable=False)
vip_port_id = sa.Column(sa.String(36), sa.ForeignKey(
'ports.id', name='fk_lbaas_loadbalancers_ports_id'))
vip_address = sa.Column(sa.String(36))
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vip_port = orm.relationship(models_v2.Port)
stats = orm.relationship(
LoadBalancerStatistics,
uselist=False,
backref=orm.backref("loadbalancer", uselist=False),
cascade="all, delete-orphan")
provider = orm.relationship(
st_db.ProviderResourceAssociation,
uselist=False,
primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id",
foreign_keys=[st_db.ProviderResourceAssociation.resource_id],
# NOTE(ihrachys) it's not exactly clear why we would need to have the
# backref created (and not e.g. just back_populates= link) since we
# don't use the reverse property anywhere, but it helps accommodate
# the new neutron code that automatically detects
# obsolete foreign key state and expires affected relationships. The
# code is located in neutron/db/api.py and assumes all relationships
# should have backrefs.
backref='loadbalancer',
# this is only for old API backwards compatibility because when a load
# balancer is deleted the pool ID should be the same as the load
# balancer ID and should not be cleared out in this table
viewonly=True)
flavor_id = sa.Column(sa.String(36), sa.ForeignKey(
'flavors.id', name='fk_lbaas_loadbalancers_flavors_id'))
@property
def root_loadbalancer(self):
return self
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'name', 'description',
'vip_subnet_id', 'vip_port_id', 'vip_address', 'operating_status',
'provisioning_status', 'admin_state_up', 'flavor_id'])
ret_dict['listeners'] = [{'id': listener.id}
for listener in self.listeners]
ret_dict['pools'] = [{'id': pool.id} for pool in self.pools]
if self.provider:
ret_dict['provider'] = self.provider.provider_name
if not self.flavor_id:
del ret_dict['flavor_id']
return ret_dict
class PoolV2(model_base.BASEV2, model_base.HasId, model_base.HasProject):
"""Represents a v2 neutron load balancer pool."""
NAME = 'pool'
__tablename__ = "lbaas_pools"
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
"lbaas_loadbalancers.id"))
healthmonitor_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_healthmonitors.id"),
unique=True,
nullable=True)
protocol = sa.Column(sa.Enum(*lb_const.POOL_SUPPORTED_PROTOCOLS,
name="pool_protocolsv2"),
nullable=False)
lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS,
name="lb_algorithmsv2"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
members = orm.relationship(MemberV2,
backref=orm.backref("pool", uselist=False),
cascade="all, delete-orphan")
healthmonitor = orm.relationship(
HealthMonitorV2,
backref=orm.backref("pool", uselist=False))
session_persistence = orm.relationship(
SessionPersistenceV2,
uselist=False,
backref=orm.backref("pool", uselist=False),
cascade="all, delete-orphan")
loadbalancer = orm.relationship(
LoadBalancer, uselist=False,
backref=orm.backref("pools", uselist=True))
@property
def root_loadbalancer(self):
return self.loadbalancer
# No real relationship here. But we want to fake a pool having a
# 'listener_id' sometimes for API backward compatibility purposes.
@property
def listener(self):
if self.listeners:
return self.listeners[0]
else:
return None
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'name', 'description',
'healthmonitor_id', 'protocol', 'lb_algorithm', 'admin_state_up'])
ret_dict['loadbalancers'] = []
if self.loadbalancer:
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
ret_dict['session_persistence'] = None
if self.session_persistence:
ret_dict['session_persistence'] = (
to_dict(self.session_persistence, [
'type', 'cookie_name']))
ret_dict['members'] = [{'id': member.id} for member in self.members]
ret_dict['listeners'] = [{'id': listener.id}
for listener in self.listeners]
if self.listener:
ret_dict['listener_id'] = self.listener.id
else:
ret_dict['listener_id'] = None
ret_dict['l7_policies'] = [{'id': l7_policy.id}
for l7_policy in self.l7_policies]
return ret_dict
class SNI(model_base.BASEV2):
"""Many-to-many association between Listener and TLS container ids
The list of SNI certificates is ordered using the position column.
"""
NAME = 'sni'
__tablename__ = "lbaas_sni"
listener_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_listeners.id"),
primary_key=True,
nullable=False)
tls_container_id = sa.Column(sa.String(128),
primary_key=True,
nullable=False)
position = sa.Column(sa.Integer)
@property
def root_loadbalancer(self):
return self.listener.loadbalancer
class L7Rule(model_base.BASEV2, model_base.HasId, model_base.HasProject):
"""Represents L7 Rule."""
NAME = 'l7rule'
__tablename__ = "lbaas_l7rules"
l7policy_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_l7policies.id"),
nullable=False)
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_TYPES,
name="l7rule_typesv2"),
nullable=False)
compare_type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_COMPARE_TYPES,
name="l7rule_compare_typev2"),
nullable=False)
invert = sa.Column(sa.Boolean(), nullable=False)
key = sa.Column(sa.String(255), nullable=True)
value = sa.Column(sa.String(255), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
@property
def root_loadbalancer(self):
return self.policy.listener.loadbalancer
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'type', 'compare_type', 'invert', 'key',
'value', 'admin_state_up'])
ret_dict['policies'] = []
if self.policy:
ret_dict['policies'].append({'id': self.policy.id})
return ret_dict
class L7Policy(model_base.BASEV2, model_base.HasId, model_base.HasProject):
"""Represents L7 Policy."""
NAME = 'l7policy'
__tablename__ = "lbaas_l7policies"
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
listener_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_listeners.id"),
nullable=False)
action = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_POLICY_ACTIONS,
name="l7policy_action_typesv2"),
nullable=False)
redirect_pool_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_pools.id"),
nullable=True)
redirect_url = sa.Column(sa.String(255),
nullable=True)
position = sa.Column(sa.Integer, nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
rules = orm.relationship(
L7Rule,
uselist=True,
primaryjoin="L7Policy.id==L7Rule.l7policy_id",
foreign_keys=[L7Rule.l7policy_id],
cascade="all, delete-orphan",
backref=orm.backref("policy")
)
redirect_pool = orm.relationship(
PoolV2, backref=orm.backref("l7_policies", uselist=True))
@property
def root_loadbalancer(self):
return self.listener.loadbalancer
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'name', 'description', 'listener_id', 'action',
'redirect_pool_id', 'redirect_url', 'position', 'admin_state_up'])
ret_dict['listeners'] = [{'id': self.listener_id}]
ret_dict['rules'] = [{'id': rule.id} for rule in self.rules]
if (ret_dict.get('action') ==
lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL):
del ret_dict['redirect_url']
return ret_dict
class Listener(model_base.BASEV2, model_base.HasId, model_base.HasProject):
"""Represents a v2 neutron listener."""
NAME = 'listener'
__tablename__ = "lbaas_listeners"
__table_args__ = (
sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port',
name='uniq_loadbalancer_listener_port'),
)
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
nullable=True)
loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
"lbaas_loadbalancers.id"))
protocol = sa.Column(sa.Enum(*lb_const.LISTENER_SUPPORTED_PROTOCOLS,
name="listener_protocolsv2"),
nullable=False)
default_tls_container_id = sa.Column(sa.String(128),
default=None, nullable=True)
sni_containers = orm.relationship(
SNI,
backref=orm.backref("listener", uselist=False),
uselist=True,
primaryjoin="Listener.id==SNI.listener_id",
order_by='SNI.position',
collection_class=orderinglist.ordering_list(
'position'),
foreign_keys=[SNI.listener_id],
cascade="all, delete-orphan"
)
protocol_port = sa.Column(sa.Integer, nullable=False)
connection_limit = sa.Column(sa.Integer)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
default_pool = orm.relationship(
PoolV2, backref=orm.backref("listeners"))
loadbalancer = orm.relationship(
LoadBalancer,
backref=orm.backref("listeners", uselist=True))
l7_policies = orm.relationship(
L7Policy,
uselist=True,
primaryjoin="Listener.id==L7Policy.listener_id",
order_by="L7Policy.position",
collection_class=orderinglist.ordering_list('position', count_from=1),
foreign_keys=[L7Policy.listener_id],
cascade="all, delete-orphan",
backref=orm.backref("listener"))
@property
def root_loadbalancer(self):
return self.loadbalancer
@property
def to_api_dict(self):
def to_dict(sa_model, attributes):
ret = {}
for attr in attributes:
value = getattr(sa_model, attr)
if six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
return ret
ret_dict = to_dict(self, [
'id', 'tenant_id', 'name', 'description', 'default_pool_id',
'protocol', 'default_tls_container_id', 'protocol_port',
'connection_limit', 'admin_state_up'])
# NOTE(blogan): Returning a list to future proof for M:N objects
# that are not yet implemented.
ret_dict['loadbalancers'] = []
if self.loadbalancer:
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
ret_dict['sni_container_refs'] = [container.tls_container_id
for container in self.sni_containers]
ret_dict['default_tls_container_ref'] = self.default_tls_container_id
ret_dict['l7policies'] = [{'id': l7_policy.id}
for l7_policy in self.l7_policies]
return ret_dict
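Every to_api_dict property above repeats the same nested to_dict helper; the following standalone sketch shows the pattern with a fake object (FakePool and its attribute values are invented for illustration; the real callers pass SQLAlchemy model instances):
class FakePool(object):
    id = 'pool-1'
    tenant_id = 'tenant-1'
    name = 'web-pool'
    lb_algorithm = 'ROUND_ROBIN'

def to_dict(sa_model, attributes):
    # same behaviour as the helper above on Python 3 (no utf-8 re-encoding)
    return {attr: getattr(sa_model, attr) for attr in attributes}

print(to_dict(FakePool(), ['id', 'tenant_id', 'name', 'lb_algorithm']))
# {'id': 'pool-1', 'tenant_id': 'tenant-1', 'name': 'web-pool',
#  'lb_algorithm': 'ROUND_ROBIN'}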

View File

@ -1 +0,0 @@
Generic single-database configuration.

View File

@ -1,99 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from logging import config as logging_config
from alembic import context
from neutron_lib.db import model_base
from oslo_config import cfg
from oslo_db.sqlalchemy import session
import sqlalchemy as sa
from sqlalchemy import event
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration.models import head # noqa
from neutron_lbaas.db.models import head # noqa
MYSQL_ENGINE = None
LBAAS_VERSION_TABLE = 'alembic_version_lbaas'
config = context.config
neutron_config = config.neutron_config
logging_config.fileConfig(config.config_file_name)
target_metadata = model_base.BASEV2.metadata
def set_mysql_engine():
try:
mysql_engine = neutron_config.command.mysql_engine
except cfg.NoSuchOptError:
mysql_engine = None
global MYSQL_ENGINE
MYSQL_ENGINE = (mysql_engine or
model_base.BASEV2.__table_args__['mysql_engine'])
def include_object(object, name, type_, reflected, compare_to):
# external.LBAAS_TABLES is the list of LBaaS v1 tables, now defunct
external_tables = set(external.TABLES) - set(external.LBAAS_TABLES)
if type_ == 'table' and name in external_tables:
return False
else:
return True
def run_migrations_offline():
set_mysql_engine()
kwargs = dict()
if neutron_config.database.connection:
kwargs['url'] = neutron_config.database.connection
else:
kwargs['dialect_name'] = neutron_config.database.engine
kwargs['include_object'] = include_object
kwargs['version_table'] = LBAAS_VERSION_TABLE
context.configure(**kwargs)
with context.begin_transaction():
context.run_migrations()
@event.listens_for(sa.Table, 'after_parent_attach')
def set_storage_engine(target, parent):
if MYSQL_ENGINE:
target.kwargs['mysql_engine'] = MYSQL_ENGINE
def run_migrations_online():
set_mysql_engine()
engine = session.create_engine(neutron_config.database.connection)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,
version_table=LBAAS_VERSION_TABLE
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
engine.dispose()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()

View File

@ -1,36 +0,0 @@
# Copyright ${create_date.year} <PUT YOUR NAME/COMPANY HERE>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
% if branch_labels:
branch_labels = ${repr(branch_labels)}
%endif
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}

View File

@ -1,40 +0,0 @@
# Copyright 2015 Rackspace.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""agentv2
Revision ID: 364f9b6064f0
Revises: 4b6d8d5310b8
Create Date: 2015-02-05 10:17:13.229358
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '364f9b6064f0'
down_revision = '4b6d8d5310b8'
def upgrade():
op.create_table(
'lbaas_loadbalanceragentbindings',
sa.Column('loadbalancer_id', sa.String(length=36), nullable=False),
sa.Column('agent_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['loadbalancer_id'],
['lbaas_loadbalancers.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['agent_id'], ['agents.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('loadbalancer_id'))

View File

@ -1,38 +0,0 @@
# Copyright 2015
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add_index_tenant_id
Revision ID: 4b6d8d5310b8
Revises: 4deef6d81931
Create Date: 2015-02-10 18:28:26.362881
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '4b6d8d5310b8'
down_revision = '4deef6d81931'
TABLES = ['lbaas_members', 'lbaas_healthmonitors', 'lbaas_pools',
'lbaas_loadbalancers', 'lbaas_listeners', 'vips', 'members',
'pools', 'healthmonitors']
def upgrade():
for table in TABLES:
op.create_index(op.f('ix_%s_tenant_id' % table),
table, ['tenant_id'], unique=False)

View File

@ -1,62 +0,0 @@
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""edge_driver
Revision ID: 4ba00375f715
Revises: lbaasv2_tls
Create Date: 2015-02-03 20:35:54.830634
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4ba00375f715'
down_revision = 'lbaasv2_tls'
def upgrade():
op.create_table(
'nsxv_edge_pool_mappings',
sa.Column('pool_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('edge_pool_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pool_id')
)
op.create_table(
'nsxv_edge_vip_mappings',
sa.Column('pool_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('edge_app_profile_id', sa.String(length=36),
nullable=False),
sa.Column('edge_vse_id', sa.String(length=36), nullable=False),
sa.Column('edge_fw_rule_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pool_id')
)
op.create_table(
'nsxv_edge_monitor_mappings',
sa.Column('monitor_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('edge_monitor_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('monitor_id'),
sa.UniqueConstraint('monitor_id', 'edge_id',
name='uniq_nsxv_edge_monitor_mappings')
)

View File

@ -1,77 +0,0 @@
# Copyright 2014-2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add provisioning and operating statuses
Revision ID: 4deef6d81931
Revises: lbaasv2
Create Date: 2015-01-27 20:38:20.796401
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4deef6d81931'
down_revision = 'lbaasv2'
PROVISIONING_STATUS = u'provisioning_status'
OPERATING_STATUS = u'operating_status'
STATUS = u'status'
def upgrade():
op.drop_column(u'lbaas_loadbalancers', STATUS)
op.add_column(
u'lbaas_loadbalancers',
sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False)
)
op.add_column(
u'lbaas_loadbalancers',
sa.Column(OPERATING_STATUS, sa.String(16), nullable=False)
)
op.drop_column(u'lbaas_listeners', STATUS)
op.add_column(
u'lbaas_listeners',
sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False)
)
op.add_column(
u'lbaas_listeners',
sa.Column(OPERATING_STATUS, sa.String(16), nullable=False)
)
op.drop_column(u'lbaas_pools', STATUS)
op.add_column(
u'lbaas_pools',
sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False)
)
op.add_column(
u'lbaas_pools',
sa.Column(OPERATING_STATUS, sa.String(16), nullable=False)
)
op.drop_column(u'lbaas_members', STATUS)
op.add_column(
u'lbaas_members',
sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False)
)
op.add_column(
u'lbaas_members',
sa.Column(OPERATING_STATUS, sa.String(16), nullable=False)
)
op.drop_column(u'lbaas_healthmonitors', STATUS)
op.add_column(
u'lbaas_healthmonitors',
sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False)
)

View File

@ -1,29 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""kilo
Revision ID: kilo
Revises: 4ba00375f715
Create Date: 2015-04-16 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'kilo'
down_revision = '4ba00375f715'
def upgrade():
"""A no-op migration for marking the Kilo release."""
pass

View File

@ -1,153 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""lbaas version 2 api
Revision ID: lbaasv2
Revises: start_neutron_lbaas
Create Date: 2014-06-18 10:50:15.606420
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'lbaasv2'
down_revision = 'start_neutron_lbaas'
listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
name="listener_protocolsv2")
pool_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
name="pool_protocolsv2")
sesssionpersistences_type = sa.Enum("SOURCE_IP", "HTTP_COOKIE", "APP_COOKIE",
name="sesssionpersistences_typev2")
lb_algorithms = sa.Enum("ROUND_ROBIN", "LEAST_CONNECTIONS", "SOURCE_IP",
name="lb_algorithmsv2")
healthmonitors_type = sa.Enum("PING", "TCP", "HTTP", "HTTPS",
name="healthmonitors_typev2")
def upgrade():
op.create_table(
u'lbaas_healthmonitors',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'type', healthmonitors_type, nullable=False),
sa.Column(u'delay', sa.Integer(), nullable=False),
sa.Column(u'timeout', sa.Integer(), nullable=False),
sa.Column(u'max_retries', sa.Integer(), nullable=False),
sa.Column(u'http_method', sa.String(16), nullable=True),
sa.Column(u'url_path', sa.String(255), nullable=True),
sa.Column(u'expected_codes', sa.String(64), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'lbaas_pools',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'protocol', pool_protocols, nullable=False),
sa.Column(u'lb_algorithm', lb_algorithms, nullable=False),
sa.Column(u'healthmonitor_id', sa.String(36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.UniqueConstraint(u'healthmonitor_id'),
sa.ForeignKeyConstraint([u'healthmonitor_id'],
[u'lbaas_healthmonitors.id'])
)
op.create_table(
u'lbaas_sessionpersistences',
sa.Column(u'pool_id', sa.String(36), nullable=False),
sa.Column(u'type', sesssionpersistences_type, nullable=False),
sa.Column(u'cookie_name', sa.String(1024), nullable=True),
sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']),
sa.PrimaryKeyConstraint(u'pool_id')
)
op.create_table(
u'lbaas_members',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'pool_id', sa.String(36), nullable=False),
sa.Column(u'subnet_id', sa.String(36), nullable=True),
sa.Column(u'address', sa.String(64), nullable=False),
sa.Column(u'protocol_port', sa.Integer(), nullable=False),
sa.Column(u'weight', sa.Integer(), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']),
sa.UniqueConstraint(u'pool_id', u'address', u'protocol_port',
name=u'uniq_pool_address_port_v2')
)
op.create_table(
u'lbaas_loadbalancers',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'vip_port_id', sa.String(36), nullable=True),
sa.Column(u'vip_subnet_id', sa.String(36), nullable=False),
sa.Column(u'vip_address', sa.String(36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint([u'vip_port_id'], [u'ports.id'],
name=u'fk_lbaas_loadbalancers_ports_id'),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'lbaas_listeners',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'protocol', listener_protocols, nullable=False),
sa.Column(u'protocol_port', sa.Integer(), nullable=False),
sa.Column(u'connection_limit', sa.Integer(), nullable=True),
sa.Column(u'loadbalancer_id', sa.String(36), nullable=True),
sa.Column(u'default_pool_id', sa.String(36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint([u'loadbalancer_id'],
[u'lbaas_loadbalancers.id']),
sa.ForeignKeyConstraint([u'default_pool_id'],
[u'lbaas_pools.id']),
sa.UniqueConstraint(u'default_pool_id'),
sa.UniqueConstraint(u'loadbalancer_id', u'protocol_port',
name=u'uniq_loadbalancer_listener_port'),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'lbaas_loadbalancer_statistics',
sa.Column(u'loadbalancer_id', sa.String(36), nullable=False),
sa.Column(u'bytes_in', sa.BigInteger(), nullable=False),
sa.Column(u'bytes_out', sa.BigInteger(), nullable=False),
sa.Column(u'active_connections', sa.BigInteger(), nullable=False),
sa.Column(u'total_connections', sa.BigInteger(), nullable=False),
sa.PrimaryKeyConstraint(u'loadbalancer_id'),
sa.ForeignKeyConstraint([u'loadbalancer_id'],
[u'lbaas_loadbalancers.id'])
)

View File

@ -1,54 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""lbaasv2 TLS
Revision ID: lbaasv2_tls
Revises: 364f9b6064f0
Create Date: 2015-01-18 10:00:00
"""
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = 'lbaasv2_tls'
down_revision = '364f9b6064f0'
old_listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
name="listener_protocolsv2")
new_listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP", "TERMINATED_HTTPS",
name="listener_protocolsv2")
def upgrade():
migration.alter_enum('lbaas_listeners', 'protocol', new_listener_protocols,
nullable=False)
op.create_table(
u'lbaas_sni',
sa.Column(u'listener_id', sa.String(36), nullable=False),
sa.Column(u'tls_container_id', sa.String(128), nullable=False),
sa.Column(u'position', sa.Integer),
sa.ForeignKeyConstraint(['listener_id'], [u'lbaas_listeners.id'], ),
sa.PrimaryKeyConstraint(u'listener_id', u'tls_container_id')
)
op.add_column('lbaas_listeners',
sa.Column(u'default_tls_container_id', sa.String(128),
nullable=True))

View File

@ -1,37 +0,0 @@
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial Liberty no-op contract revision.
Revision ID: 130ebfdef43
Revises: kilo
Create Date: 2015-07-18 14:35:22.242794
"""
from neutron.db import migration
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '130ebfdef43'
down_revision = 'kilo'
branch_labels = (cli.CONTRACT_BRANCH,)
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.LIBERTY, migration.MITAKA]
def upgrade():
pass

View File

@ -1,37 +0,0 @@
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial Liberty no-op expand script.
Revision ID: 3345facd0452
Revises: kilo
Create Date: 2015-07-18 14:35:22.234191
"""
from neutron.db import migration
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '3345facd0452'
down_revision = 'kilo'
branch_labels = (cli.EXPAND_BRANCH,)
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.LIBERTY]
def upgrade():
pass

View File

@ -1,39 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add flavor id
Revision ID: 3426acbc12de
Revises: 4a408dd491c2
Create Date: 2015-12-02 15:24:35.775474
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3426acbc12de'
down_revision = '4a408dd491c2'
def upgrade():
op.add_column('lbaas_loadbalancers',
sa.Column(u'flavor_id', sa.String(36), nullable=True))
op.create_foreign_key(u'fk_lbaas_loadbalancers_flavors_id',
u'lbaas_loadbalancers',
u'flavors',
[u'flavor_id'],
[u'id'])

View File

@ -1,76 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add_l7_tables
Revision ID: 3543deab1547
Revises: 6aee0434f911
Create Date: 2015-02-05 10:50:15.606420
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3543deab1547'
down_revision = '6aee0434f911'
l7rule_type = sa.Enum("HOST_NAME", "PATH", "FILE_TYPE", "HEADER", "COOKIE",
name="l7rule_typesv2")
l7rule_compare_type = sa.Enum("REGEX", "STARTS_WITH", "ENDS_WITH", "CONTAINS",
"EQUAL_TO", name="l7rule_compare_typesv2")
l7policy_action_type = sa.Enum("REJECT", "REDIRECT_TO_URL", "REDIRECT_TO_POOL",
name="l7policy_action_typesv2")
def upgrade():
op.create_table(
u'lbaas_l7policies',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'listener_id', sa.String(36), nullable=False),
sa.Column(u'action', l7policy_action_type, nullable=False),
sa.Column(u'redirect_pool_id', sa.String(36), nullable=True),
sa.Column(u'redirect_url', sa.String(255), nullable=True),
sa.Column(u'position', sa.Integer, nullable=False),
sa.Column(u'provisioning_status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.ForeignKeyConstraint([u'listener_id'],
[u'lbaas_listeners.id']),
sa.ForeignKeyConstraint([u'redirect_pool_id'],
[u'lbaas_pools.id'])
)
op.create_table(
u'lbaas_l7rules',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'l7policy_id', sa.String(36), nullable=False),
sa.Column(u'type', l7rule_type, nullable=False),
sa.Column(u'compare_type', l7rule_compare_type, nullable=False),
sa.Column(u'invert', sa.Boolean(), nullable=False),
sa.Column(u'key', sa.String(255), nullable=True),
sa.Column(u'value', sa.String(255), nullable=False),
sa.Column(u'provisioning_status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.ForeignKeyConstraint([u'l7policy_id'],
[u'lbaas_l7policies.id'])
)

View File

@ -1,35 +0,0 @@
# Copyright 2015 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Addition of Name column to lbaas_members and lbaas_healthmonitors table
Revision ID: 4a408dd491c2
Revises: 3345facd0452
Create Date: 2015-11-16 11:47:43.061649
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4a408dd491c2'
down_revision = '3345facd0452'
LB_TAB_NAME = ['lbaas_members', 'lbaas_healthmonitors']
def upgrade():
for table in LB_TAB_NAME:
op.add_column(table, sa.Column('name', sa.String(255), nullable=True))

View File

@ -1,41 +0,0 @@
# Copyright (c) 2016 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add tenant-id index for L7 tables
Revision ID: 62deca5010cd
Revises: 3543deab1547
Create Date: 2016-03-02 08:42:37.737281
"""
from alembic import op
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '62deca5010cd'
down_revision = '3543deab1547'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.MITAKA]
def upgrade():
for table in ['lbaas_l7rules', 'lbaas_l7policies']:
op.create_index(op.f('ix_%s_tenant_id' % table),
table, ['tenant_id'], unique=False)

View File

@ -1,90 +0,0 @@
# Copyright 2015 OpenStack Foundation
# Copyright 2015 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""independent pools
Revision ID: 6aee0434f911
Revises: 3426acbc12de
Create Date: 2015-08-28 03:15:42.533386
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6aee0434f911'
down_revision = '3426acbc12de'
def upgrade():
conn = op.get_bind()
# Minimal examples of the tables we need to manipulate
listeners = sa.sql.table(
'lbaas_listeners',
sa.sql.column('loadbalancer_id', sa.String),
sa.sql.column('default_pool_id', sa.String))
pools = sa.sql.table(
'lbaas_pools',
sa.sql.column('loadbalancer_id', sa.String),
sa.sql.column('id', sa.String))
# This foreign key does not need to be unique anymore. To remove the
# uniqueness but keep the foreign key we have to do some juggling.
#
# Also, because different database engines handle unique constraints
# in incompatible ways, we can't simply call op.drop_constraint and
# expect it to work for all DB engines. This is yet another unfortunate
# case where sqlalchemy isn't able to abstract everything away.
if op.get_context().dialect.name == 'postgresql':
# PostgreSQL path:
op.drop_constraint('lbaas_listeners_default_pool_id_key',
'lbaas_listeners', 'unique')
else:
# MySQL path:
op.drop_constraint('lbaas_listeners_ibfk_2', 'lbaas_listeners',
type_='foreignkey')
op.drop_constraint('default_pool_id', 'lbaas_listeners',
type_='unique')
op.create_foreign_key('lbaas_listeners_ibfk_2', 'lbaas_listeners',
'lbaas_pools', ['default_pool_id'], ['id'])
op.add_column(
u'lbaas_pools',
sa.Column('loadbalancer_id', sa.String(36),
sa.ForeignKey('lbaas_loadbalancers.id'), nullable=True)
)
# Populate this new column appropriately
select_obj = sa.select([listeners.c.loadbalancer_id,
listeners.c.default_pool_id]).where(
listeners.c.default_pool_id is not None)
result = conn.execute(select_obj)
for row in result:
stmt = pools.update().values(loadbalancer_id=row[0]).where(
pools.c.id == row[1])
op.execute(stmt)
# For existing installations, the above ETL should populate the above column
# using the following procedure:
#
# Get the output from this:
#
# SELECT default_pool_id, loadbalancer_id l_id FROM lbaas_listeners WHERE
# default_pool_id IS NOT NULL;
#
# Then for every row returned run:
#
# UPDATE lbaas_pools SET loadbalancer_id = l_id WHERE id = default_pool_id;

View File

@ -1,158 +0,0 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""rename tenant to project
Revision ID: 4b4dc6d5d843
Create Date: 2016-07-13 02:40:51.683659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4b4dc6d5d843'
down_revision = '130ebfdef43'
depends_on = ('62deca5010cd',)
_INSPECTOR = None
def get_inspector():
"""Reuse inspector"""
global _INSPECTOR
if _INSPECTOR:
return _INSPECTOR
else:
bind = op.get_bind()
_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
return _INSPECTOR
def get_tables():
"""
Returns a hardcoded list of tables which have a ``tenant_id`` column.
The DB head can change, so to avoid surprises when the models are updated
this returns a hardcoded list of tables that is current as of this revision.
Output retrieved by using:
>>> metadata = head.get_metadata()
>>> all_tables = metadata.sorted_tables
>>> tenant_tables = []
>>> for table in all_tables:
... for column in table.columns:
... if column.name == 'tenant_id':
... tenant_tables.append((table, column))
"""
tables = [
'vips',
'members',
'pools',
'healthmonitors',
'lbaas_members',
'lbaas_healthmonitors',
'lbaas_loadbalancers',
'lbaas_pools',
'lbaas_l7rules',
'lbaas_l7policies',
'lbaas_listeners',
]
return tables
def get_columns(table):
"""Returns list of columns for given table."""
inspector = get_inspector()
return inspector.get_columns(table)
def get_data():
"""Returns combined list of tuples: [(table, column)].
The list is built from the retrieved tables that contain a column named
``tenant_id``.
"""
output = []
tables = get_tables()
for table in tables:
columns = get_columns(table)
for column in columns:
if column['name'] == 'tenant_id':
output.append((table, column))
return output
def alter_column(table, column):
old_name = 'tenant_id'
new_name = 'project_id'
op.alter_column(
table_name=table,
column_name=old_name,
new_column_name=new_name,
existing_type=column['type'],
existing_nullable=column['nullable']
)
def recreate_index(index, table_name):
old_name = index['name']
new_name = old_name.replace('tenant', 'project')
op.drop_index(op.f(old_name), table_name)
op.create_index(new_name, table_name, ['project_id'])
def upgrade():
"""Code reused from
Change-Id: I87a8ef342ccea004731ba0192b23a8e79bc382dc
"""
inspector = get_inspector()
data = get_data()
for table, column in data:
alter_column(table, column)
indexes = inspector.get_indexes(table)
for index in indexes:
if 'tenant_id' in index['name']:
recreate_index(index, table)
def contract_creation_exceptions():
"""Special migration for the blueprint to support Keystone V3.
We drop all tenant_id columns and create project_id columns instead.
"""
return {
sa.Column: ['.'.join([table, 'project_id']) for table in get_tables()],
sa.Index: get_tables()
}

View File

@ -1,47 +0,0 @@
# Copyright 2016 <PUT YOUR NAME/COMPANY HERE>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Drop v1 tables
Revision ID: e6417a8b114d
Create Date: 2016-08-23 12:48:46.985939
"""
from alembic import op
from neutron.db import migration
revision = 'e6417a8b114d'
down_revision = '4b4dc6d5d843'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.NEWTON]
def upgrade():
op.drop_table('nsxv_edge_pool_mappings')
op.drop_table('nsxv_edge_vip_mappings')
op.drop_table('nsxv_edge_monitor_mappings')
op.drop_table('members')
op.drop_table('poolstatisticss')
op.drop_table('poolloadbalanceragentbindings')
op.drop_table('poolmonitorassociations')
op.drop_table('pools')
op.drop_table('sessionpersistences')
op.drop_table('vips')
op.drop_table('healthmonitors')

View File

@ -1,41 +0,0 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add healthmonitor max retries down
Revision ID: 844352f9fe6f
Revises: 62deca5010cd
Create Date: 2016-04-21 15:32:05.647920
"""
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '844352f9fe6f'
down_revision = '62deca5010cd'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.NEWTON]
def upgrade():
op.add_column('lbaas_healthmonitors', sa.Column(
u'max_retries_down', sa.Integer(), nullable=True))

View File

@ -1,30 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""start neutron-lbaas chain
Revision ID: start_neutron_lbaas
Revises: None
Create Date: 2014-12-09 11:06:18.196062
"""
# revision identifiers, used by Alembic.
revision = 'start_neutron_lbaas'
down_revision = None
def upgrade():
pass

View File

@ -1,22 +0,0 @@
# Copyright 2016 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.db.migration.models import head
import neutron_lbaas.agent_scheduler # noqa
import neutron_lbaas.db.loadbalancer.models # noqa
def get_metadata():
return head.model_base.BASEV2.metadata

View File

@ -1,130 +0,0 @@
# Copyright 2015, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import a10_neutron_lbaas
from oslo_log import log as logging
from neutron_lbaas.drivers import driver_base
VERSION = "2.0.0"
LOG = logging.getLogger(__name__)
class ThunderDriver(driver_base.LoadBalancerBaseDriver):
def __init__(self, plugin):
super(ThunderDriver, self).__init__(plugin)
self.load_balancer = LoadBalancerManager(self)
self.listener = ListenerManager(self)
self.pool = PoolManager(self)
self.member = MemberManager(self)
self.health_monitor = HealthMonitorManager(self)
self.l7policy = L7PolicyManager(self)
self.l7rule = L7RuleManager(self)
LOG.debug("A10Driver: v2 initializing, version=%s, lbaas_manager=%s",
VERSION, a10_neutron_lbaas.VERSION)
self.a10 = a10_neutron_lbaas.A10OpenstackLBV2(self)
class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
def create(self, context, lb):
self.driver.a10.lb.create(context, lb)
def update(self, context, old_lb, lb):
self.driver.a10.lb.update(context, old_lb, lb)
def delete(self, context, lb):
self.driver.a10.lb.delete(context, lb)
def refresh(self, context, lb):
self.driver.a10.lb.refresh(context, lb)
def stats(self, context, lb):
return self.driver.a10.lb.stats(context, lb)
class ListenerManager(driver_base.BaseListenerManager):
def create(self, context, listener):
self.driver.a10.listener.create(context, listener)
def update(self, context, old_listener, listener):
self.driver.a10.listener.update(context, old_listener, listener)
def delete(self, context, listener):
self.driver.a10.listener.delete(context, listener)
class PoolManager(driver_base.BasePoolManager):
def create(self, context, pool):
self.driver.a10.pool.create(context, pool)
def update(self, context, old_pool, pool):
self.driver.a10.pool.update(context, old_pool, pool)
def delete(self, context, pool):
self.driver.a10.pool.delete(context, pool)
class MemberManager(driver_base.BaseMemberManager):
def create(self, context, member):
self.driver.a10.member.create(context, member)
def update(self, context, old_member, member):
self.driver.a10.member.update(context, old_member, member)
def delete(self, context, member):
self.driver.a10.member.delete(context, member)
class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
def create(self, context, hm):
self.driver.a10.hm.create(context, hm)
def update(self, context, old_hm, hm):
self.driver.a10.hm.update(context, old_hm, hm)
def delete(self, context, hm):
self.driver.a10.hm.delete(context, hm)
class L7PolicyManager(driver_base.BaseL7PolicyManager):
def create(self, context, l7policy):
self.driver.a10.l7policy.create(context, l7policy)
def update(self, context, old_l7policy, l7policy):
self.driver.a10.l7policy.update(context, old_l7policy, l7policy)
def delete(self, context, l7policy):
self.driver.a10.l7policy.delete(context, l7policy)
class L7RuleManager(driver_base.BaseL7RuleManager):
def create(self, context, l7rule):
self.driver.a10.l7rule.create(context, l7rule)
def update(self, context, old_l7rule, l7rule):
self.driver.a10.l7rule.update(context, old_l7rule, l7rule)
def delete(self, context, l7rule):
self.driver.a10.l7rule.delete(context, l7rule)

View File

@ -1,12 +0,0 @@
Brocade LBaaS Driver
Installation info:
- Install the Brocade LBaaS Device Driver
- Enable Brocade as the default lbaas service provider in the neutron_lbaas.conf file (see the example entry after this list)
- Restart the Neutron server
Third-party CI info:
Contact info for any problems is: DL-GRP-ENG-brocade-adx-openstack-ci at brocade dot com
Or contact Pattabi Ayyasami directly (IRC: pattabi)
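A hedged sketch of what the provider entry mentioned above typically looked like in neutron_lbaas.conf; the exact module path is an assumption based on the BrocadeLoadBalancerDriver class shown in the next file and may differ between releases:

[service_providers]
service_provider = LOADBALANCERV2:Brocade:neutron_lbaas.drivers.brocade.driver_v2.BrocadeLoadBalancerDriver:default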

View File

@ -1,167 +0,0 @@
#
# Copyright 2014 Brocade Communications Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Pattabi Ayyasami (pattabi), Brocade Communications Systems, Inc.
#
from brocade_neutron_lbaas import adx_device_driver_v2 as device_driver
from neutron_lbaas.drivers import driver_base
class BrocadeLoadBalancerDriver(driver_base.LoadBalancerBaseDriver):
def __init__(self, plugin):
super(BrocadeLoadBalancerDriver, self).__init__(plugin)
self.load_balancer = BrocadeLoadBalancerManager(self)
self.listener = BrocadeListenerManager(self)
self.pool = BrocadePoolManager(self)
self.member = BrocadeMemberManager(self)
self.health_monitor = BrocadeHealthMonitorManager(self)
self.device_driver = device_driver.BrocadeAdxDeviceDriverV2(plugin)
class BrocadeLoadBalancerManager(driver_base.BaseLoadBalancerManager):
def create(self, context, obj):
try:
self.driver.device_driver.create_loadbalancer(obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def update(self, context, old_obj, obj):
try:
self.driver.device_driver.update_loadbalancer(obj, old_obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def delete(self, context, obj):
try:
self.driver.device_driver.delete_loadbalancer(obj)
except Exception:
# Ignore the exception
pass
self.successful_completion(context, obj, delete=True)
def refresh(self, context, lb_obj):
# This is intended to trigger the backend to check and repair
# the state of this load balancer and all of its dependent objects
self.driver.device_driver.refresh(lb_obj)
def stats(self, context, lb_obj):
return self.driver.device_driver.stats(lb_obj)
class BrocadeListenerManager(driver_base.BaseListenerManager):
def create(self, context, obj):
try:
self.driver.device_driver.create_listener(obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def update(self, context, old_obj, obj):
try:
self.driver.device_driver.update_listener(obj, old_obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def delete(self, context, obj):
try:
self.driver.device_driver.delete_listener(obj)
except Exception:
# Ignore the exception
pass
self.successful_completion(context, obj, delete=True)
class BrocadePoolManager(driver_base.BasePoolManager):
def create(self, context, obj):
try:
self.driver.device_driver.create_pool(obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def update(self, context, old_obj, obj):
try:
self.driver.device_driver.update_pool(obj, old_obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def delete(self, context, obj):
try:
self.driver.device_driver.delete_pool(obj)
except Exception:
# Ignore the exception
pass
self.successful_completion(context, obj, delete=True)
class BrocadeMemberManager(driver_base.BaseMemberManager):
def create(self, context, obj):
try:
self.driver.device_driver.create_member(obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def update(self, context, old_obj, obj):
try:
self.driver.device_driver.update_member(obj, old_obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def delete(self, context, obj):
try:
self.driver.device_driver.delete_member(obj)
except Exception:
# Ignore the exception
pass
self.successful_completion(context, obj, delete=True)
class BrocadeHealthMonitorManager(driver_base.BaseHealthMonitorManager):
def create(self, context, obj):
try:
self.driver.device_driver.create_healthmonitor(obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def update(self, context, old_obj, obj):
try:
self.driver.device_driver.update_healthmonitor(obj, old_obj)
self.successful_completion(context, obj)
except Exception:
self.failed_completion(context, obj)
def delete(self, context, obj):
try:
self.driver.device_driver.delete_healthmonitor(obj)
except Exception:
# Ignore the exception
pass
self.successful_completion(context, obj, delete=True)

View File

@ -1,214 +0,0 @@
# Copyright 2015 Rackspace.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from neutron_lbaas._i18n import _
from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2
from neutron_lbaas.db.loadbalancer import models as db_models
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
class LoadBalancerCallbacks(object):
# history
# 1.0 Initial version
target = messaging.Target(version='1.0')
def __init__(self, plugin):
super(LoadBalancerCallbacks, self).__init__()
self.plugin = plugin
def get_ready_devices(self, context, host=None):
with context.session.begin(subtransactions=True):
agents = self.plugin.db.get_lbaas_agents(
context, filters={'host': [host]})
if not agents:
return []
elif len(agents) > 1:
LOG.warning('Multiple lbaas agents found on host %s', host)
loadbalancers = self.plugin.db.list_loadbalancers_on_lbaas_agent(
context, agents[0].id)
loadbalancer_ids = [
l.id for l in loadbalancers]
qry = context.session.query(
loadbalancer_dbv2.models.LoadBalancer.id)
qry = qry.filter(
loadbalancer_dbv2.models.LoadBalancer.id.in_(
loadbalancer_ids))
qry = qry.filter(
loadbalancer_dbv2.models.LoadBalancer.provisioning_status.in_(
constants.ACTIVE_PENDING_STATUSES))
up = True # makes pep8 and sqlalchemy happy
qry = qry.filter(
loadbalancer_dbv2.models.LoadBalancer.admin_state_up == up)
return [id for id, in qry]
def get_loadbalancer(self, context, loadbalancer_id=None):
lb_model = self.plugin.db.get_loadbalancer(context, loadbalancer_id)
if lb_model.vip_port and lb_model.vip_port.fixed_ips:
for fixed_ip in lb_model.vip_port.fixed_ips:
subnet_dict = self.plugin.db._core_plugin.get_subnet(
context, fixed_ip.subnet_id
)
setattr(fixed_ip, 'subnet', data_models.Subnet.from_dict(
subnet_dict))
if lb_model.provider:
device_driver = self.plugin.drivers[
lb_model.provider.provider_name].device_driver
setattr(lb_model.provider, 'device_driver', device_driver)
if lb_model.vip_port:
network_dict = self.plugin.db._core_plugin.get_network(
context, lb_model.vip_port.network_id)
setattr(lb_model.vip_port, 'network',
data_models.Network.from_dict(network_dict))
lb_dict = lb_model.to_dict(stats=False)
return lb_dict
def loadbalancer_deployed(self, context, loadbalancer_id):
with context.session.begin(subtransactions=True):
qry = context.session.query(db_models.LoadBalancer)
qry = qry.filter_by(id=loadbalancer_id)
loadbalancer = qry.one()
# set all resources to active
if (loadbalancer.provisioning_status in
constants.ACTIVE_PENDING_STATUSES):
loadbalancer.provisioning_status = constants.ACTIVE
if loadbalancer.listeners:
for l in loadbalancer.listeners:
if (l.provisioning_status in
constants.ACTIVE_PENDING_STATUSES):
l.provisioning_status = constants.ACTIVE
if (l.default_pool and
l.default_pool.provisioning_status in
constants.ACTIVE_PENDING_STATUSES):
l.default_pool.provisioning_status = constants.ACTIVE
if l.default_pool.members:
for m in l.default_pool.members:
if (m.provisioning_status in
constants.ACTIVE_PENDING_STATUSES):
m.provisioning_status = constants.ACTIVE
if l.default_pool.healthmonitor:
hm = l.default_pool.healthmonitor
ps = hm.provisioning_status
if ps in constants.ACTIVE_PENDING_STATUSES:
(l.default_pool.healthmonitor
.provisioning_status) = constants.ACTIVE
def update_status(self, context, obj_type, obj_id,
provisioning_status=None, operating_status=None):
if not provisioning_status and not operating_status:
LOG.warning('update_status for %(obj_type)s %(obj_id)s called '
            'without specifying provisioning_status or '
            'operating_status', {'obj_type': obj_type,
                                 'obj_id': obj_id})
return
model_mapping = {
'loadbalancer': db_models.LoadBalancer,
'pool': db_models.PoolV2,
'listener': db_models.Listener,
'member': db_models.MemberV2,
'healthmonitor': db_models.HealthMonitorV2
}
if obj_type not in model_mapping:
raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
try:
self.plugin.db.update_status(
context, model_mapping[obj_type], obj_id,
provisioning_status=provisioning_status,
operating_status=operating_status)
except n_exc.NotFound:
# update_status may come from agent on an object which was
# already deleted from db with other request
LOG.warning('Cannot update status: %(obj_type)s %(obj_id)s '
'not found in the DB, it was probably deleted '
'concurrently',
{'obj_type': obj_type, 'obj_id': obj_id})
def loadbalancer_destroyed(self, context, loadbalancer_id=None):
"""Agent confirmation hook that a load balancer has been destroyed.
This method exists for subclasses to change the deletion
behavior.
"""
pass
def plug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin.db._core_plugin.get_port(
context,
port_id
)
except n_exc.PortNotFound:
LOG.debug('Unable to find port %s to plug.', port_id)
return
port['admin_state_up'] = True
port[portbindings.HOST_ID] = host
port['device_owner'] = constants.DEVICE_OWNER_LOADBALANCERV2
self.plugin.db._core_plugin.update_port(
context,
port_id,
{'port': port}
)
def unplug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin.db._core_plugin.get_port(
context,
port_id
)
except n_exc.PortNotFound:
LOG.debug('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.',
port_id)
return
port['admin_state_up'] = False
port['device_owner'] = ''
port['device_id'] = ''
try:
self.plugin.db._core_plugin.update_port(
context,
port_id,
{'port': port}
)
except n_exc.PortNotFound:
LOG.debug('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.',
port_id)
def update_loadbalancer_stats(self, context,
loadbalancer_id=None,
stats=None):
self.plugin.db.update_loadbalancer_stats(context, loadbalancer_id,
stats)

View File

@ -1,416 +0,0 @@
# Copyright 2015 Rackspace.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import agents_db
from neutron.services import provider_configuration as provconf
from neutron_lib import exceptions as n_exc
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import importutils
from neutron_lbaas._i18n import _
from neutron_lbaas import agent_scheduler as agent_scheduler_v2
from neutron_lbaas.common import exceptions
from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 as ldbv2
from neutron_lbaas.drivers.common import agent_callbacks
from neutron_lbaas.drivers import driver_base
from neutron_lbaas.extensions import lbaas_agentschedulerv2
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LB_SCHEDULERS = 'loadbalancer_schedulers'
AGENT_SCHEDULER_OPTS = [
cfg.StrOpt('loadbalancer_scheduler_driver',
default='neutron_lbaas.agent_scheduler.ChanceScheduler',
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now '
'deprecated. See: https://wiki.openstack.org/'
'wiki/Neutron/LBaaS/Deprecation',
help=_('Driver to use for scheduling '
'to a default loadbalancer agent')),
cfg.BoolOpt('allow_automatic_lbaas_agent_failover',
default=False,
deprecated_for_removal=True,
deprecated_since='Queens',
deprecated_reason='The neutron-lbaas project is now '
'deprecated. See: https://wiki.openstack.org'
'/wiki/Neutron/LBaaS/Deprecation',
help=_('Automatically reschedule loadbalancer from offline '
'to online lbaas agents. This is only supported for '
'drivers who use the neutron LBaaSv2 agent')),
]
cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)
class DriverNotSpecified(n_exc.NeutronException):
message = _("Device driver for agent should be specified "
"in plugin driver.")
class DataModelSerializer(object):
def serialize_entity(self, ctx, entity):
if isinstance(entity, data_models.BaseDataModel):
return entity.to_dict(stats=False)
else:
return entity
class LoadBalancerAgentApi(object):
"""Plugin side of plugin to agent RPC API."""
# history
# 1.0 Initial version
#
def __init__(self, topic):
target = messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target,
serializer=DataModelSerializer())
def agent_updated(self, context, admin_state_up, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'agent_updated',
payload={'admin_state_up': admin_state_up})
def create_loadbalancer(self, context, loadbalancer, host, driver_name):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_loadbalancer',
loadbalancer=loadbalancer, driver_name=driver_name)
def update_loadbalancer(self, context, old_loadbalancer,
loadbalancer, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_loadbalancer',
old_loadbalancer=old_loadbalancer,
loadbalancer=loadbalancer)
def delete_loadbalancer(self, context, loadbalancer, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_loadbalancer', loadbalancer=loadbalancer)
def create_listener(self, context, listener, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_listener', listener=listener)
def update_listener(self, context, old_listener, listener,
host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_listener', old_listener=old_listener,
listener=listener)
def delete_listener(self, context, listener, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_listener', listener=listener)
def create_pool(self, context, pool, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_pool', pool=pool)
def update_pool(self, context, old_pool, pool, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_pool', old_pool=old_pool, pool=pool)
def delete_pool(self, context, pool, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_pool', pool=pool)
def create_member(self, context, member, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_member', member=member)
def update_member(self, context, old_member, member, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_member', old_member=old_member,
member=member)
def delete_member(self, context, member, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_member', member=member)
def create_healthmonitor(self, context, healthmonitor, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_healthmonitor',
healthmonitor=healthmonitor)
def update_healthmonitor(self, context, old_healthmonitor,
healthmonitor, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_healthmonitor',
old_healthmonitor=old_healthmonitor,
healthmonitor=healthmonitor)
def delete_healthmonitor(self, context, healthmonitor, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_healthmonitor',
healthmonitor=healthmonitor)
class LoadBalancerManager(driver_base.BaseLoadBalancerManager,
agent_scheduler_v2.LbaasAgentSchedulerDbMixin):
def __init__(self, driver):
super(LoadBalancerManager, self).__init__(driver)
self.db = ldbv2.LoadBalancerPluginDbv2()
def reschedule_lbaas_from_down_agents(self):
"""Reschedule lbaas from down lbaasv2 agents if admin state is up."""
self.reschedule_resources_from_down_agents(
agent_type=lb_const.AGENT_TYPE_LOADBALANCERV2,
get_down_bindings=self.get_down_loadbalancer_bindings,
agent_id_attr='agent_id',
resource_id_attr='loadbalancer_id',
resource_name='loadbalancer',
reschedule_resource=self.reschedule_loadbalancer,
rescheduling_failed=exceptions.LoadbalancerReschedulingFailed)
def reschedule_loadbalancer(self, context, loadbalancer_id):
"""Reschedule loadbalancer to a new lbaas agent
Remove the loadbalancer from the agent currently hosting it and
schedule it again
"""
cur_agent = self.get_agent_hosting_loadbalancer(context,
loadbalancer_id)
agent_data = cur_agent['agent']
with context.session.begin(subtransactions=True):
self._unschedule_loadbalancer(context, loadbalancer_id,
agent_data['id'])
self._schedule_loadbalancer(context, loadbalancer_id)
new_agent = self.get_agent_hosting_loadbalancer(context,
loadbalancer_id)
if not new_agent:
raise exceptions.LoadbalancerReschedulingFailed(
loadbalancer_id=loadbalancer_id)
def _schedule_loadbalancer(self, context, loadbalancer_id):
lb_db = self.db.get_loadbalancer(context, loadbalancer_id)
self.create(context, lb_db)
def update(self, context, old_loadbalancer, loadbalancer):
super(LoadBalancerManager, self).update(context, old_loadbalancer,
loadbalancer)
agent = self.driver.get_loadbalancer_agent(context, loadbalancer.id)
self.driver.agent_rpc.update_loadbalancer(
context, old_loadbalancer, loadbalancer, agent['host'])
def create(self, context, loadbalancer):
super(LoadBalancerManager, self).create(context, loadbalancer)
agent = self.driver.loadbalancer_scheduler.schedule(
self.driver.plugin, context, loadbalancer,
self.driver.device_driver)
if not agent:
raise lbaas_agentschedulerv2.NoEligibleLbaasAgent(
loadbalancer_id=loadbalancer.id)
self.driver.agent_rpc.create_loadbalancer(
context, loadbalancer, agent['host'], self.driver.device_driver)
def delete(self, context, loadbalancer):
super(LoadBalancerManager, self).delete(context, loadbalancer)
agent = self.driver.get_loadbalancer_agent(context, loadbalancer.id)
# TODO(blogan): Rethink deleting from the database here. May want to
# wait until the agent actually deletes it. Doing this now to keep
# what v1 had.
self.driver.plugin.db.delete_loadbalancer(context, loadbalancer.id)
if agent:
self.driver.agent_rpc.delete_loadbalancer(context, loadbalancer,
agent['host'])
def stats(self, context, loadbalancer):
pass
def refresh(self, context, loadbalancer):
pass
class ListenerManager(driver_base.BaseListenerManager):
def update(self, context, old_listener, listener):
super(ListenerManager, self).update(
context, old_listener.to_dict(), listener.to_dict())
agent = self.driver.get_loadbalancer_agent(
context, listener.loadbalancer.id)
self.driver.agent_rpc.update_listener(context, old_listener, listener,
agent['host'])
def create(self, context, listener):
super(ListenerManager, self).create(context, listener)
agent = self.driver.get_loadbalancer_agent(
context, listener.loadbalancer.id)
self.driver.agent_rpc.create_listener(context, listener, agent['host'])
def delete(self, context, listener):
super(ListenerManager, self).delete(context, listener)
agent = self.driver.get_loadbalancer_agent(context,
listener.loadbalancer.id)
# TODO(blogan): Rethink deleting from the database and updating the lb
# status here. May want to wait until the agent actually deletes it.
# Doing this now to keep what v1 had.
self.driver.plugin.db.delete_listener(context, listener.id)
self.driver.plugin.db.update_loadbalancer_provisioning_status(
context, listener.loadbalancer.id)
self.driver.agent_rpc.delete_listener(context, listener, agent['host'])
class PoolManager(driver_base.BasePoolManager):
def update(self, context, old_pool, pool):
super(PoolManager, self).update(context, old_pool, pool)
agent = self.driver.get_loadbalancer_agent(
context, pool.loadbalancer.id)
self.driver.agent_rpc.update_pool(context, old_pool, pool,
agent['host'])
def create(self, context, pool):
super(PoolManager, self).create(context, pool)
agent = self.driver.get_loadbalancer_agent(
context, pool.loadbalancer.id)
self.driver.agent_rpc.create_pool(context, pool, agent['host'])
def delete(self, context, pool):
super(PoolManager, self).delete(context, pool)
agent = self.driver.get_loadbalancer_agent(
context, pool.loadbalancer.id)
# TODO(blogan): Rethink deleting from the database and updating the lb
# status here. May want to wait until the agent actually deletes it.
# Doing this now to keep what v1 had.
self.driver.plugin.db.delete_pool(context, pool.id)
self.driver.plugin.db.update_loadbalancer_provisioning_status(
context, pool.loadbalancer.id)
self.driver.agent_rpc.delete_pool(context, pool, agent['host'])
class MemberManager(driver_base.BaseMemberManager):
def update(self, context, old_member, member):
super(MemberManager, self).update(context, old_member, member)
agent = self.driver.get_loadbalancer_agent(
context, member.pool.loadbalancer.id)
self.driver.agent_rpc.update_member(context, old_member, member,
agent['host'])
def create(self, context, member):
super(MemberManager, self).create(context, member)
agent = self.driver.get_loadbalancer_agent(
context, member.pool.loadbalancer.id)
self.driver.agent_rpc.create_member(context, member, agent['host'])
def delete(self, context, member):
super(MemberManager, self).delete(context, member)
agent = self.driver.get_loadbalancer_agent(
context, member.pool.loadbalancer.id)
# TODO(blogan): Rethink deleting from the database and updating the lb
# status here. May want to wait until the agent actually deletes it.
# Doing this now to keep what v1 had.
self.driver.plugin.db.delete_pool_member(context, member.id)
self.driver.plugin.db.update_loadbalancer_provisioning_status(
context, member.pool.loadbalancer.id)
self.driver.agent_rpc.delete_member(context, member, agent['host'])
class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
def update(self, context, old_healthmonitor, healthmonitor):
super(HealthMonitorManager, self).update(
context, old_healthmonitor, healthmonitor)
agent = self.driver.get_loadbalancer_agent(
context, healthmonitor.pool.loadbalancer.id)
self.driver.agent_rpc.update_healthmonitor(
context, old_healthmonitor, healthmonitor, agent['host'])
def create(self, context, healthmonitor):
super(HealthMonitorManager, self).create(context, healthmonitor)
agent = self.driver.get_loadbalancer_agent(
context, healthmonitor.pool.loadbalancer.id)
self.driver.agent_rpc.create_healthmonitor(
context, healthmonitor, agent['host'])
def delete(self, context, healthmonitor):
super(HealthMonitorManager, self).delete(context, healthmonitor)
agent = self.driver.get_loadbalancer_agent(
context, healthmonitor.pool.loadbalancer.id)
# TODO(blogan): Rethink deleting from the database and updating the lb
# status here. May want to wait until the agent actually deletes it.
# Doing this now to keep what v1 had.
self.driver.plugin.db.delete_healthmonitor(context, healthmonitor.id)
self.driver.plugin.db.update_loadbalancer_provisioning_status(
context, healthmonitor.pool.loadbalancer.id)
self.driver.agent_rpc.delete_healthmonitor(
context, healthmonitor, agent['host'])
class AgentDriverBase(driver_base.LoadBalancerBaseDriver):
# name of device driver that should be used by the agent;
# vendor specific plugin drivers must override it;
device_driver = None
def __init__(self, plugin):
super(AgentDriverBase, self).__init__(plugin)
if not self.device_driver:
raise DriverNotSpecified()
self.load_balancer = LoadBalancerManager(self)
self.listener = ListenerManager(self)
self.pool = PoolManager(self)
self.member = MemberManager(self)
self.health_monitor = HealthMonitorManager(self)
self.agent_rpc = LoadBalancerAgentApi(lb_const.LOADBALANCER_AGENTV2)
self.agent_endpoints = [
agent_callbacks.LoadBalancerCallbacks(self.plugin),
agents_db.AgentExtRpcCallback(self.plugin.db)
]
self.conn = None
# Setting this on the db because the plugin no longer inherits from
# database classes, the db does.
self.plugin.db.agent_notifiers.update(
{lb_const.AGENT_TYPE_LOADBALANCERV2: self.agent_rpc})
lb_sched_driver = provconf.get_provider_driver_class(
cfg.CONF.loadbalancer_scheduler_driver, LB_SCHEDULERS)
self.loadbalancer_scheduler = importutils.import_object(
lb_sched_driver)
def get_periodic_jobs(self):
periodic_jobs = []
if cfg.CONF.allow_automatic_lbaas_agent_failover:
periodic_jobs.append(
self.load_balancer.reschedule_lbaas_from_down_agents)
return periodic_jobs
def start_rpc_listeners(self):
# other agent-based plugin drivers might already set callbacks on the plugin
if hasattr(self.plugin, 'agent_callbacks'):
return
self.conn = n_rpc.Connection()
self.conn.create_consumer(lb_const.LOADBALANCER_PLUGINV2,
self.agent_endpoints,
fanout=False)
return self.conn.consume_in_threads()
def get_loadbalancer_agent(self, context, loadbalancer_id):
agent = self.plugin.db.get_agent_hosting_loadbalancer(
context, loadbalancer_id)
if not agent:
raise lbaas_agentschedulerv2.NoActiveLbaasAgent(
loadbalancer_id=loadbalancer_id)
return agent['agent']
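As the comment on AgentDriverBase above notes, vendor plugin drivers only have to subclass it and override device_driver. A minimal sketch (hypothetical names, not a driver from this tree):

class ExampleAgentPluginDriver(AgentDriverBase):
    # Name under which the matching agent-side device driver registers itself;
    # the value here is purely illustrative.
    device_driver = 'example_device_driver'

With device_driver set, the base class wires up the managers, the agent RPC API and the loadbalancer scheduler, so such a subclass typically needs nothing else.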

View File

@ -1,192 +0,0 @@
# Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
from neutron_lib import context as ncontext
from oslo_utils import excutils
from neutron_lbaas.common import exceptions
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.drivers import driver_mixins
from neutron_lbaas.services.loadbalancer import constants
class NotImplementedManager(object):
"""Helper class to make any subclass of LoadBalancerBaseDriver explode if
it is missing any of the required object managers.
"""
def create(self, context, obj):
raise NotImplementedError()
def update(self, context, old_obj, obj):
raise NotImplementedError()
def delete(self, context, obj):
raise NotImplementedError()
class LoadBalancerBaseDriver(object):
"""LBaaSv2 object model drivers should subclass LoadBalancerBaseDriver,
and initialize the following manager classes to create, update, and delete
the various load balancer objects.
"""
model_map = {constants.LOADBALANCER_EVENT: models.LoadBalancer,
constants.LISTENER_EVENT: models.Listener,
constants.POOL_EVENT: models.PoolV2,
constants.MEMBER_EVENT: models.MemberV2}
load_balancer = NotImplementedManager()
listener = NotImplementedManager()
pool = NotImplementedManager()
member = NotImplementedManager()
health_monitor = NotImplementedManager()
l7policy = NotImplementedManager()
l7rule = NotImplementedManager()
def __init__(self, plugin):
self.plugin = plugin
def handle_streamed_event(self, container):
if container.info_type not in LoadBalancerBaseDriver.model_map:
if container.info_type == constants.LOADBALANCER_STATS_EVENT:
context = ncontext.get_admin_context()
self.plugin.db.update_loadbalancer_stats(
context, container.info_id, container.info_payload)
elif container.info_type == constants.LISTENER_STATS_EVENT:
return
else:
exc = exceptions.ModelMapException(
target_name=container.info_type)
raise exc
else:
model_class = LoadBalancerBaseDriver.model_map[
container.info_type]
context = ncontext.get_admin_context()
self.plugin.db.update_status(context, model_class,
container.info_id,
**container.info_payload)
class BaseLoadBalancerManager(driver_mixins.BaseRefreshMixin,
driver_mixins.BaseStatsMixin,
driver_mixins.BaseManagerMixin):
model_class = models.LoadBalancer
@property
def allows_create_graph(self):
"""
Can this driver create a load balancer graph in one call.
Return True if this driver has the capability to create a load balancer
and any of its children in one driver call. If this returns True and
the user requests the creation of a load balancer graph, then the
create_graph method will be called to create the load balancer.
"""
return False
@property
def allows_healthmonitor_thresholds(self):
"""Does this driver support thresholds for health monitors"""
return False
@property
def allocates_vip(self):
"""Does this driver need to allocate its own virtual IPs"""
return False
def create_and_allocate_vip(self, context, obj):
"""Create the load balancer and allocate a VIP
If this method is implemented AND allocates_vip returns True, then
this method will be called instead of the create method. Any driver
that implements this method is responsible for allocating a virtual IP
and updating at least the vip_address attribute in the loadbalancer
database table.
"""
raise NotImplementedError
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_loadbalancer
class BaseListenerManager(driver_mixins.BaseManagerMixin):
model_class = models.Listener
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_listener
class BasePoolManager(driver_mixins.BaseManagerMixin):
model_class = models.PoolV2
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_pool
class BaseMemberManager(driver_mixins.BaseManagerMixin):
model_class = models.MemberV2
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_member
class BaseHealthMonitorManager(driver_mixins.BaseManagerMixin):
model_class = models.HealthMonitorV2
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_healthmonitor
class BaseL7PolicyManager(driver_mixins.BaseManagerMixin):
model_class = models.L7Policy
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_l7policy
class BaseL7RuleManager(driver_mixins.BaseManagerMixin):
model_class = models.L7Rule
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_l7policy_rule
# A decorator for wrapping driver operations, which will automatically
# set the neutron object's status based on whether it sees an exception
def driver_op(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
d = (func.__name__ == 'delete')
lb_create = ((func.__name__ == 'create') and
isinstance(args[0], BaseLoadBalancerManager))
try:
r = func(*args, **kwargs)
args[0].successful_completion(
args[1], args[2], delete=d, lb_create=lb_create)
return r
except Exception:
with excutils.save_and_reraise_exception():
args[0].failed_completion(args[1], args[2])
return func_wrapper
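The driver_op decorator above is meant to replace the explicit successful_completion / failed_completion bookkeeping seen in the Brocade driver earlier. A minimal sketch (hypothetical no-op manager, not from this tree) of how a driver would apply it:

class NoopLoadBalancerManager(BaseLoadBalancerManager):
    """Hypothetical manager: driver_op flips status on success or failure."""

    @driver_op
    def create(self, context, obj):
        pass  # backend call goes here; a raised exception marks the object ERROR

    @driver_op
    def update(self, context, old_obj, obj):
        pass

    @driver_op
    def delete(self, context, obj):
        pass

    @driver_op
    def refresh(self, context, obj):
        pass

    @driver_op
    def stats(self, context, obj):
        return {}

Because BaseLoadBalancerManager already provides db_delete_method, the decorator's success path can call successful_completion (with delete and lb_create inferred from the method name and manager type) without any extra code in the sketch.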

View File

@ -1,236 +0,0 @@
# Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib import constants
from oslo_log import log as logging
import six
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
def __init__(self, driver):
self.driver = driver
@abc.abstractproperty
def db_delete_method(self):
pass
@abc.abstractmethod
def create(self, context, obj):
pass
@abc.abstractmethod
def update(self, context, obj_old, obj):
pass
@abc.abstractmethod
def delete(self, context, obj):
pass
def _successful_completion_lb_graph(self, context, obj):
listeners = obj.listeners
obj.listeners = []
for listener in listeners:
# need to maintain the link from the child to the load balancer
listener.loadbalancer = obj
pool = listener.default_pool
l7_policies = listener.l7_policies
if pool:
pool.listener = listener
hm = pool.healthmonitor
if hm:
hm.pool = pool
self.successful_completion(context, hm)
for member in pool.members:
member.pool = pool
self.successful_completion(context, member)
self.successful_completion(context, pool)
if l7_policies:
for l7policy in l7_policies:
l7policy.listener = listener
l7rules = l7policy.rules
for l7rule in l7rules:
l7rule.l7policy = l7policy
self.successful_completion(context, l7rule)
redirect_pool = l7policy.redirect_pool
if redirect_pool:
redirect_pool.listener = listener
rhm = redirect_pool.healthmonitor
if rhm:
rhm.pool = redirect_pool
self.successful_completion(context, rhm)
for rmember in redirect_pool.members:
rmember.pool = redirect_pool
self.successful_completion(context, rmember)
self.successful_completion(context, redirect_pool)
self.successful_completion(context, l7policy)
self.successful_completion(context, listener)
self.successful_completion(context, obj)
def successful_completion(self, context, obj, delete=False,
lb_create=False):
"""
Sets the provisioning_status of the load balancer and obj to
ACTIVE. Should be called last in the implementor's BaseManagerMixin
methods for successful runs.
:param context: neutron_lib context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
:param delete: set True if being called from a delete method. Will
most likely result in the obj being deleted from the db.
:param lb_create: set True if this is being called after a successful
load balancer create.
"""
LOG.debug("Starting successful_completion method after a successful "
"driver action.")
if lb_create and obj.listeners:
self._successful_completion_lb_graph(context, obj)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
if delete:
# Check if driver is responsible for vip allocation. If the driver
# is responsible, then it is also responsible for cleaning it up.
# At this point, the VIP should already be cleaned up, so we are
# just doing neutron lbaas db cleanup.
if (obj == obj.root_loadbalancer and
self.driver.load_balancer.allocates_vip):
# NOTE(blogan): this is quite dumb to do but it is necessary
# so that a false negative pep8 error does not get thrown. An
# "unexpected-keyword-argument" pep8 error occurs bc
# self.db_delete_method is a @property method that returns a
# method.
kwargs = {'delete_vip_port': False}
self.db_delete_method(context, obj.id, **kwargs)
else:
self.db_delete_method(context, obj.id)
if obj == obj.root_loadbalancer and delete:
# Load balancer was deleted and no longer exists
return
lb_op_status = None
lb_p_status = constants.ACTIVE
if obj == obj.root_loadbalancer:
# only set the status to online if this an operation on the
# load balancer
lb_op_status = lb_const.ONLINE
# Update the load balancer's vip address and vip port id if the driver
# was responsible for allocating the vip.
if (self.driver.load_balancer.allocates_vip and lb_create and
isinstance(obj, data_models.LoadBalancer)):
self.driver.plugin.db.update_loadbalancer(
context, obj.id, {'vip_address': obj.vip_address,
'vip_port_id': obj.vip_port_id})
if delete:
# We cannot update the status of obj if it was deleted but if the
# obj is not a load balancer, the root load balancer should be
# updated
if not isinstance(obj, data_models.LoadBalancer):
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=lb_p_status,
operating_status=lb_op_status)
return
obj_op_status = lb_const.ONLINE
if isinstance(obj, data_models.HealthMonitor):
# Health Monitor does not have an operating status
obj_op_status = None
LOG.debug("Updating object of type %s with id of %s to "
"provisioning_status = %s, operating_status = %s",
obj.__class__, obj.id, constants.ACTIVE, obj_op_status)
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ACTIVE,
operating_status=obj_op_status)
if not isinstance(obj, data_models.LoadBalancer):
# Only update the status of the root_loadbalancer if the previous
# update was not the root load balancer so we are not updating
# it twice.
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=lb_p_status,
operating_status=lb_op_status)
def failed_completion(self, context, obj):
"""
Sets the provisioning status of obj to ERROR. If obj is a
load balancer it is set to ERROR; otherwise obj is set to ERROR and its
root load balancer is set back to ACTIVE. Should be called whenever
something goes wrong (a raised exception) in an implementor's
BaseManagerMixin methods.
:param context: neutron_lib context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
"""
LOG.debug("Starting failed_completion method after a failed driver "
"action.")
if isinstance(obj, data_models.LoadBalancer):
LOG.debug("Updating load balancer %s to provisioning_status = "
"%s, operating_status = %s.",
obj.root_loadbalancer.id, constants.ERROR,
lb_const.OFFLINE)
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
LOG.debug("Updating object of type %s with id of %s to "
"provisioning_status = %s, operating_status = %s",
obj.__class__, obj.id, constants.ERROR,
lb_const.OFFLINE)
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
LOG.debug("Updating load balancer %s to "
"provisioning_status = %s", obj.root_loadbalancer.id,
constants.ACTIVE)
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ACTIVE)
def update_vip(self, context, loadbalancer_id, vip_address,
vip_port_id=None):
lb_update = {'vip_address': vip_address}
if vip_port_id:
lb_update['vip_port_id'] = vip_port_id
self.driver.plugin.db.update_loadbalancer(context, loadbalancer_id,
lb_update)
@six.add_metaclass(abc.ABCMeta)
class BaseRefreshMixin(object):
@abc.abstractmethod
def refresh(self, context, obj):
pass
@six.add_metaclass(abc.ABCMeta)
class BaseStatsMixin(object):
@abc.abstractmethod
def stats(self, context, obj):
pass

Some files were not shown because too many files have changed in this diff