From 66826e9e5b36d60f66b6c3539c9d468c7312db3f Mon Sep 17 00:00:00 2001
From: Alexandre Levine
Date: Fri, 11 Jul 2014 03:46:52 +0400
Subject: [PATCH] Initial EC2-API service commit.

This code introduces a standalone service which proxies its calls to the
existing nova EC2 API. All the code here, except for ec2api/api/proxy.py,
ec2api/api/ec2client.py and some util functions, is taken from current
nova, with unused functionality cut out of it.

The proxy.py and ec2client.py files implement the new code, which proxies
incoming requests (on port 8788) to the original EC2 API in nova on port
8773. The result is transparently translated back to the user.

Change-Id: I4cb84f833d7d4f0e379672710ed39562811d43e0
---
 .testr.conf | 4 +
 HACKING.rst | 43 +
 LICENSE | 176 ++++
 MANIFEST.in | 20 +
 README.rst | 68 ++
 babel.cfg | 1 +
 bin/ec2api-db-setup | 292 ++++++
 ec2api/.project | 18 +
 ec2api/.pydevproject | 5 +
 ec2api/__init__.py | 27 +
 ec2api/api/__init__.py | 400 ++++++++
 ec2api/api/apirequest.py | 143 +++
 ec2api/api/auth.py | 54 ++
 ec2api/api/clients.py | 141 +++
 ec2api/api/cloud.py | 41 +
 ec2api/api/ec2client.py | 222 +++++
 ec2api/api/ec2utils.py | 186 ++++
 ec2api/api/faults.py | 96 ++
 ec2api/api/proxy.py | 27 +
 ec2api/api/validator.py | 132 +++
 ec2api/cmd/__init__.py | 16 +
 ec2api/cmd/api.py | 42 +
 ec2api/cmd/manage.py | 75 ++
 ec2api/config.py | 30 +
 ec2api/context.py | 150 +++
 ec2api/exception.py | 279 ++++++
 ec2api/openstack/__init__.py | 0
 ec2api/openstack/common/__init__.py | 17 +
 ec2api/openstack/common/context.py | 126 +++
 ec2api/openstack/common/db/__init__.py | 0
 ec2api/openstack/common/db/api.py | 162 ++++
 ec2api/openstack/common/db/exception.py | 56 ++
 ec2api/openstack/common/db/options.py | 171 ++++
 .../common/db/sqlalchemy/__init__.py | 0
 .../common/db/sqlalchemy/migration.py | 278 ++++++
 .../openstack/common/db/sqlalchemy/models.py | 119 +++
 .../common/db/sqlalchemy/provision.py | 157 +++
 .../openstack/common/db/sqlalchemy/session.py | 905 ++++++++++++++++++
 .../common/db/sqlalchemy/test_base.py | 167 ++++
 .../common/db/sqlalchemy/test_migrations.py | 270 ++++++
 .../openstack/common/db/sqlalchemy/utils.py | 655 +++++++++++++
 ec2api/openstack/common/eventlet_backdoor.py | 145 +++
 ec2api/openstack/common/excutils.py | 113 +++
 ec2api/openstack/common/gettextutils.py | 479 +++++++++
 ec2api/openstack/common/importutils.py | 73 ++
 ec2api/openstack/common/jsonutils.py | 190 ++++
 ec2api/openstack/common/local.py | 45 +
 ec2api/openstack/common/log.py | 689 +++++++++++++
 ec2api/openstack/common/loopingcall.py | 147 +++
 ec2api/openstack/common/service.py | 512 ++++++++++
 ec2api/openstack/common/strutils.py | 295 ++++++
 ec2api/openstack/common/systemd.py | 106 ++
 ec2api/openstack/common/threadgroup.py | 147 +++
 ec2api/openstack/common/timeutils.py | 210 ++++
 ec2api/openstack/common/uuidutils.py | 37 +
 ec2api/paths.py | 64 ++
 ec2api/service.py | 163 ++++
 ec2api/tests/__init__.py | 26 +
 ec2api/tests/fakes_request_response.py | 312 ++++++
 ec2api/tests/matchers.py | 451 +++++++++
 ec2api/tests/test_api_init.py | 129 +++
 ec2api/tests/test_tools.py | 45 +
 ec2api/tests/tools.py | 38 +
 ec2api/utils.py | 49 +
 ec2api/version.py | 17 +
 ec2api/wsgi.py | 501 ++++++++++
 etc/ec2api/api-paste.ini | 39 +
 etc/ec2api/ec2api.conf.sample | 717 ++++++++++++++
 install.sh | 247 +++++
 openstack-common.conf | 7 +
 requirements.txt | 26 +
 run_tests.sh | 123 +++
 setup.cfg | 69 ++
 setup.py | 30 +
 test-requirements.txt | 13 +
 tools/config/README | 20 +
 tools/config/analyze_opts.py | 81 ++
 tools/config/check_uptodate.sh | 25 +
tools/config/generate_sample.sh | 119 +++ tools/config/oslo.config.generator.rc | 2 + tools/db/schema_diff.py | 270 ++++++ tools/enable-pre-commit-hook.sh | 42 + tools/install_venv.py | 74 ++ tools/install_venv_common.py | 213 +++++ tools/lintstack.py | 199 ++++ tools/lintstack.sh | 59 ++ tools/patch_tox_venv.py | 50 + tools/regression_tester.py | 109 +++ tools/with_venv.sh | 7 + tox.ini | 56 ++ 90 files changed, 13351 insertions(+) create mode 100644 .testr.conf create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 bin/ec2api-db-setup create mode 100644 ec2api/.project create mode 100644 ec2api/.pydevproject create mode 100644 ec2api/__init__.py create mode 100644 ec2api/api/__init__.py create mode 100644 ec2api/api/apirequest.py create mode 100644 ec2api/api/auth.py create mode 100644 ec2api/api/clients.py create mode 100644 ec2api/api/cloud.py create mode 100644 ec2api/api/ec2client.py create mode 100644 ec2api/api/ec2utils.py create mode 100644 ec2api/api/faults.py create mode 100644 ec2api/api/proxy.py create mode 100644 ec2api/api/validator.py create mode 100644 ec2api/cmd/__init__.py create mode 100644 ec2api/cmd/api.py create mode 100644 ec2api/cmd/manage.py create mode 100644 ec2api/config.py create mode 100644 ec2api/context.py create mode 100644 ec2api/exception.py create mode 100644 ec2api/openstack/__init__.py create mode 100644 ec2api/openstack/common/__init__.py create mode 100644 ec2api/openstack/common/context.py create mode 100644 ec2api/openstack/common/db/__init__.py create mode 100644 ec2api/openstack/common/db/api.py create mode 100644 ec2api/openstack/common/db/exception.py create mode 100644 ec2api/openstack/common/db/options.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/__init__.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/migration.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/models.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/provision.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/session.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/test_base.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/test_migrations.py create mode 100644 ec2api/openstack/common/db/sqlalchemy/utils.py create mode 100644 ec2api/openstack/common/eventlet_backdoor.py create mode 100644 ec2api/openstack/common/excutils.py create mode 100644 ec2api/openstack/common/gettextutils.py create mode 100644 ec2api/openstack/common/importutils.py create mode 100644 ec2api/openstack/common/jsonutils.py create mode 100644 ec2api/openstack/common/local.py create mode 100644 ec2api/openstack/common/log.py create mode 100644 ec2api/openstack/common/loopingcall.py create mode 100644 ec2api/openstack/common/service.py create mode 100644 ec2api/openstack/common/strutils.py create mode 100644 ec2api/openstack/common/systemd.py create mode 100644 ec2api/openstack/common/threadgroup.py create mode 100644 ec2api/openstack/common/timeutils.py create mode 100644 ec2api/openstack/common/uuidutils.py create mode 100644 ec2api/paths.py create mode 100644 ec2api/service.py create mode 100644 ec2api/tests/__init__.py create mode 100644 ec2api/tests/fakes_request_response.py create mode 100644 ec2api/tests/matchers.py create mode 100644 ec2api/tests/test_api_init.py create mode 100644 ec2api/tests/test_tools.py create mode 100644 ec2api/tests/tools.py create mode 100644 ec2api/utils.py create mode 100644 
ec2api/version.py create mode 100644 ec2api/wsgi.py create mode 100644 etc/ec2api/api-paste.ini create mode 100644 etc/ec2api/ec2api.conf.sample create mode 100755 install.sh create mode 100644 openstack-common.conf create mode 100644 requirements.txt create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100644 tools/config/README create mode 100755 tools/config/analyze_opts.py create mode 100755 tools/config/check_uptodate.sh create mode 100755 tools/config/generate_sample.sh create mode 100644 tools/config/oslo.config.generator.rc create mode 100755 tools/db/schema_diff.py create mode 100755 tools/enable-pre-commit-hook.sh create mode 100644 tools/install_venv.py create mode 100644 tools/install_venv_common.py create mode 100755 tools/lintstack.py create mode 100755 tools/lintstack.sh create mode 100644 tools/patch_tox_venv.py create mode 100755 tools/regression_tester.py create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 00000000..d43ba196 --- /dev/null +++ b/.testr.conf @@ -0,0 +1,4 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ec2api/tests $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 00000000..146bc497 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,43 @@ +Ec2api Style Commandments +========================= + +- Step 1: Read the OpenStack Style Commandments + https://github.com/openstack-dev/hacking/blob/master/doc/source/index.rst +- Step 2: Read on + +Ec2api Specific Commandments +---------------------------- + +General +------- +- Do not use locals(). Example:: + + LOG.debug(_("volume %(vol_name)s: creating size %(vol_size)sG") % + locals()) # BAD + + LOG.debug(_("volume %(vol_name)s: creating size %(vol_size)sG") % + {'vol_name': vol_name, + 'vol_size': vol_size}) # OKAY + +- Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised:: + + except Exception as e: + ... + raise e # BAD + + except Exception: + ... + raise # OKAY + + + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. + +For more information on creating unit tests and utilizing the testing +infrastructure in OpenStack Ec2api, please read ec2api/testing/README.rst. diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..68c771a0 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..7f0f5687
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,20 @@
+include run_tests.sh ChangeLog
+include README.rst builddeb.sh
+include MANIFEST.in pylintrc
+include AUTHORS
+include run_tests.py
+include HACKING.rst
+include LICENSE
+include ChangeLog
+include babel.cfg tox.ini
+include openstack-common.conf
+include ec2api/openstack/common/README
+include ec2api/db/sqlalchemy/migrate_repo/README
+include ec2api/db/sqlalchemy/migrate_repo/migrate.cfg
+include ec2api/db/sqlalchemy/migrate_repo/versions/*.sql
+graft doc
+graft etc
+graft ec2api/locale
+graft ec2api/tests
+graft tools
+global-exclude *.pyc
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..ce181484
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,68 @@
+OpenStack EC2 API README
+------------------------
+
+Support of EC2 API for OpenStack.
+This project provides a standalone EC2 API service which pursues two goals:
+
+1. Implement the VPC API, which is currently absent from nova's EC2 API.
+2. Create a standalone service for EC2 API support, which can later
+accommodate not only the VPC API but also the rest of the EC2 API
+currently present in nova.
+
+This service implements VPC API related commands only. For the rest of the
+EC2 API functionality it redirects requests to the original EC2 API in nova.
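+
+The resulting request flow, assuming the default ports, looks like this::
+
+    client (aws cli, ...)
+            |
+            v
+    ec2api service :8788 ------ VPC API calls are handled locally
+            |
+            v
+    nova EC2 API :8773 -------- all other EC2 calls are proxied here and
+                                the response is translated back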
+
+It doesn't replace the existing nova EC2 API service in a deployment; it
+gets installed on a different port (8788 by default).
+
+Installation
+============
+
+Run install.sh
+
+#TODO: The following should be automated later.
+
+Change /etc/ec2api/ec2api.conf:
+
+[database]
+connection_nova = #should be taken from nova.conf
+[DEFAULT]
+external_network = #obtained by neutron net-external-list
+
+The service gets installed on port 8788 by default. This can be changed in
+the install.sh script before installation.
+
+Usage
+=====
+
+Download aws cli from Amazon.
+Create a configuration file for aws cli in your home directory
+~/.aws/config:
+
+[default]
+aws_access_key_id = 1b013f18d5ed47ae8ed0fbb8debc036b
+aws_secret_access_key = 9bbc6f270ffd4dfdbe0e896947f41df3
+region = us-east-1
+
+Change the aws_access_key_id and aws_secret_access_key above to the values
+appropriate for your cloud (they can be obtained with the
+"keystone ec2-credentials-list" command).
+
+Run aws cli commands using the new EC2 API endpoint URL (it can be obtained
+from keystone with the new port 8788) like this:
+
+aws --endpoint-url http://10.0.2.15:8788/services/Cloud ec2 describe-instances
+
+
+Limitations
+===========
+
+This is an alpha version; Tempest tests have not been run yet.
+VPN-related functionality is not supported yet.
+Route-tables functionality is limited.
+Filtering in describe functions can be done by IDs only.
+Security groups are attached to network interfaces only, not to instances yet.
+Rollbacks in case of failure during object creation are not supported yet.
+Other limitations not listed here exist as well.
+
+Supported Features
+==================
+
+The VPC API, except for the Limitations above, is supported.
diff --git a/babel.cfg b/babel.cfg
new file mode 100644
index 00000000..efceab81
--- /dev/null
+++ b/babel.cfg
@@ -0,0 +1 @@
+[python: **.py]
diff --git a/bin/ec2api-db-setup b/bin/ec2api-db-setup
new file mode 100755
index 00000000..1b7cbcd5
--- /dev/null
+++ b/bin/ec2api-db-setup
@@ -0,0 +1,292 @@
+#!/bin/bash -e
+#
+# Copyright 2014 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Print --help output and exit.
+#
+usage() {
+
+cat << EOF
+Set up a local MySQL database for use with ec2api.
+This script will create a 'ec2api' database that is accessible
+only on localhost by user 'ec2api' with password 'ec2api'.
+
+Usage: ec2api-db-setup <rpm|deb> [options]
+Options:
+	select a distro type (rpm or deb)
+
+	--help | -h
+		Print usage information.
+	--password <pw> | -p <pw>
+		Specify the password for the 'ec2api' MySQL user that ec2api
+		will use to connect to the 'ec2api' MySQL database. By default,
+		the password 'ec2api' will be used.
+	--rootpw <pw> | -r <pw>
+		Specify the root MySQL password. If the script installs
+		the MySQL server, it will set the root password to this value
+		instead of prompting for a password.
If the MySQL server is + already installed, this password will be used to connect to the + database instead of having to prompt for it. + --yes | -y + In cases where the script would normally ask for confirmation + before doing something, such as installing mysql-server, + just assume yes. This is useful if you want to run the script + non-interactively. +EOF + + exit 0 +} + +install_mysql_server() { + if [ -z "${ASSUME_YES}" ] ; then + $PACKAGE_INSTALL mysql-server + else + $PACKAGE_INSTALL -y mysql-server + fi +} + +start_mysql_server() { + $SERVICE_START +} + +MYSQL_EC2API_PW_DEFAULT="ec2api" +MYSQL_EC2API_PW=${MYSQL_EC2API_PW_DEFAULT} +EC2API_CONFIG="/etc/ec2api/ec2api.conf" +ASSUME_YES="" +ELEVATE="" + +# Check for root privileges +if [[ $EUID -ne 0 ]] ; then + echo "This operation requires superuser privileges, using sudo:" + if sudo -l > /dev/null ; then + ELEVATE="sudo" + else + exit 1 + fi +fi + +case "$1" in + rpm) + echo "Installing on an RPM system." + PACKAGE_INSTALL="$ELEVATE yum install" + PACKAGE_STATUS="rpm -q" + SERVICE_MYSQLD="mysqld" + SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start" + SERVICE_STATUS="service $SERVICE_MYSQLD status" + SERVICE_ENABLE="$ELEVATE chkconfig" + ;; + deb) + echo "Installing on a Debian system." + PACKAGE_INSTALL="$ELEVATE apt-get install" + PACKAGE_STATUS="dpkg-query -s" + SERVICE_MYSQLD="mysql" + SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start" + SERVICE_STATUS="$ELEVATE service $SERVICE_MYSQLD status" + SERVICE_ENABLE="" + ;; + *) + usage + ;; +esac + +while [ $# -gt 0 ] +do + case "$1" in + -h|--help) + usage + ;; + -p|--password) + shift + MYSQL_EC2API_PW=${1} + ;; + -r|--rootpw) + shift + MYSQL_ROOT_PW=${1} + ;; + -y|--yes) + ASSUME_YES="yes" + ;; + *) + # ignore + ;; + esac + shift +done + + +# Make sure MySQL is installed. + +NEW_MYSQL_INSTALL=0 +if ! $PACKAGE_STATUS mysql-server && ! $PACKAGE_STATUS mariadb-server && ! $PACKAGE_STATUS mariadb-galera-server > /dev/null +then + if [ -z "${ASSUME_YES}" ] ; then + printf "mysql-server is not installed. Would you like to install it now? (y/n): " + read response + case "$response" in + y|Y) + ;; + n|N) + echo "mysql-server must be installed. Please install it before proceeding." + exit 0 + ;; + *) + echo "Invalid response." + exit 1 + esac + fi + + NEW_MYSQL_INSTALL=1 + install_mysql_server +fi + + +# Make sure mysqld is running. + +if ! $SERVICE_STATUS > /dev/null +then + if [ -z "${ASSUME_YES}" ] ; then + printf "$SERVICE_MYSQLD is not running. Would you like to start it now? (y/n): " + read response + case "$response" in + y|Y) + ;; + n|N) + echo "$SERVICE_MYSQLD must be running. Please start it before proceeding." + exit 0 + ;; + *) + echo "Invalid response." + exit 1 + esac + fi + + start_mysql_server + + # If we both installed and started, ensure it starts at boot + [ $NEW_MYSQL_INSTALL -eq 1 ] && $SERVICE_ENABLE $SERVICE_MYSQLD on +fi + + +# Get MySQL root access. + +if [ $NEW_MYSQL_INSTALL -eq 1 ] +then + if [ ! "${MYSQL_ROOT_PW+defined}" ] ; then + echo "Since this is a fresh installation of MySQL, please set a password for the 'root' mysql user." + + PW_MATCH=0 + while [ $PW_MATCH -eq 0 ] + do + printf "Enter new password for 'root' mysql user: " + read -s MYSQL_ROOT_PW + echo + printf "Enter new password again: " + read -s PW2 + echo + if [ "${MYSQL_ROOT_PW}" = "${PW2}" ] ; then + PW_MATCH=1 + else + echo "Passwords did not match." 
+			fi
+		done
+	fi
+
+	echo "UPDATE mysql.user SET password = password('${MYSQL_ROOT_PW}') WHERE user = 'root'; DELETE FROM mysql.user WHERE user = ''; flush privileges;" | mysql -u root
+	if ! [ $? -eq 0 ] ; then
+		echo "Failed to set password for 'root' MySQL user."
+		exit 1
+	fi
+elif [ ! "${MYSQL_ROOT_PW+defined}" ] ; then
+	printf "Please enter the password for the 'root' MySQL user: "
+	read -s MYSQL_ROOT_PW
+	echo
+fi
+
+
+# Sanity check MySQL credentials.
+
+MYSQL_ROOT_PW_ARG=""
+if [ "${MYSQL_ROOT_PW+defined}" ]
+then
+	MYSQL_ROOT_PW_ARG="--password=${MYSQL_ROOT_PW}"
+fi
+echo "SELECT 1;" | mysql -u root ${MYSQL_ROOT_PW_ARG} > /dev/null
+if ! [ $? -eq 0 ]
+then
+	echo "Failed to connect to the MySQL server. Please check your root user credentials."
+	exit 1
+fi
+echo "Verified connectivity to MySQL."
+
+
+# Now create the db.
+
+echo "Creating 'ec2api' database."
+cat << EOF | mysql -u root ${MYSQL_ROOT_PW_ARG}
+DROP DATABASE IF EXISTS ec2api;
+CREATE DATABASE IF NOT EXISTS ec2api DEFAULT CHARACTER SET utf8;
+GRANT ALL ON ec2api.* TO 'ec2api'@'localhost' IDENTIFIED BY '${MYSQL_EC2API_PW}';
+GRANT ALL ON ec2api.* TO 'ec2api'@'%' IDENTIFIED BY '${MYSQL_EC2API_PW}';
+flush privileges;
+EOF
+
+
+# Make sure ec2api configuration has the right MySQL password.
+
+if [ "${MYSQL_EC2API_PW}" != "${MYSQL_EC2API_PW_DEFAULT}" ] ; then
+	echo "Updating 'ec2api' database password in ${EC2API_CONFIG}"
+	sed -i -e "s/mysql:\/\/ec2api:\(.*\)@/mysql:\/\/ec2api:${MYSQL_EC2API_PW}@/" ${EC2API_CONFIG}
+fi
+
+# override the logging config in ec2api.conf
+log_conf=$(mktemp /tmp/ec2api-logging.XXXXXXXXXX.conf)
+cat << EOF > $log_conf
+[loggers]
+keys=root
+
+[handlers]
+keys=consoleHandler
+
+[formatters]
+keys=simpleFormatter
+
+[logger_root]
+level=INFO
+handlers=consoleHandler
+
+[handler_consoleHandler]
+class=StreamHandler
+formatter=simpleFormatter
+args=(sys.stdout,)
+
+[formatter_simpleFormatter]
+format=%(name)s - %(levelname)s - %(message)s
+EOF
+
+ec2-api-manage --log-config=$log_conf db_sync
+rm $log_conf
+
+# Do a final sanity check on the database.
+
+echo "SELECT * FROM migrate_version;" | mysql -u ec2api --password=${MYSQL_EC2API_PW} ec2api > /dev/null
+if ! [ $? -eq 0 ]
+then
+	echo "Final sanity check failed."
+	exit 1
+fi
+
+echo "Complete!"
diff --git a/ec2api/.project b/ec2api/.project
new file mode 100644
index 00000000..27c66e74
--- /dev/null
+++ b/ec2api/.project
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>ec2api</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.python.pydev.PyDevBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>com.aptana.projects.webnature</nature>
+		<nature>org.python.pydev.pythonNature</nature>
+	</natures>
+</projectDescription>
diff --git a/ec2api/.pydevproject b/ec2api/.pydevproject
new file mode 100644
index 00000000..40e9f40a
--- /dev/null
+++ b/ec2api/.pydevproject
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?><pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+</pydev_project>
diff --git a/ec2api/__init__.py b/ec2api/__init__.py
new file mode 100644
index 00000000..62f43c53
--- /dev/null
+++ b/ec2api/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2014 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +""" +:mod:`ec2api` -- Cloud IaaS Platform +=================================== + +.. automodule:: ec2api + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +""" + +import gettext + + +gettext.install('ec2api', unicode=1) diff --git a/ec2api/api/__init__.py b/ec2api/api/__init__.py new file mode 100644 index 00000000..f3aceff8 --- /dev/null +++ b/ec2api/api/__init__.py @@ -0,0 +1,400 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Starting point for routing EC2 requests. +""" + +from eventlet.green import httplib +import netaddr +from oslo.config import cfg +import six +import six.moves.urllib.parse as urlparse +import webob +import webob.dec +import webob.exc + +from ec2api.api import apirequest +from ec2api.api import ec2utils +from ec2api.api import faults +from ec2api.api import validator +from ec2api import context +from ec2api import exception +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import jsonutils +from ec2api.openstack.common import log as logging +from ec2api.openstack.common import timeutils +from ec2api import wsgi + + +LOG = logging.getLogger(__name__) + +ec2_opts = [ + cfg.StrOpt('keystone_url', + default='http://localhost:5000/v2.0', + help='URL to get token from ec2 request.'), + cfg.IntOpt('ec2_timestamp_expiry', + default=300, + help='Time in seconds before ec2 timestamp expires'), +] + +CONF = cfg.CONF +CONF.register_opts(ec2_opts) +CONF.import_opt('use_forwarded_for', 'ec2api.api.auth') + + +# Fault Wrapper around all EC2 requests # +class FaultWrapper(wsgi.Middleware): + """Calls the middleware stack, captures any exceptions into faults.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + return req.get_response(self.application) + except Exception as ex: + LOG.exception(_("FaultWrapper: %s"), unicode(ex)) + return faults.Fault(webob.exc.HTTPInternalServerError()) + + +class RequestLogging(wsgi.Middleware): + """Access-Log akin logging for all EC2 API requests.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + start = timeutils.utcnow() + rv = req.get_response(self.application) + self.log_request_completion(rv, req, start) + return rv + + def log_request_completion(self, response, request, start): + apireq = request.environ.get('ec2.request', None) + if apireq: + action = apireq.action + else: + action = None + ctxt = request.environ.get('ec2api.context', None) + delta = timeutils.utcnow() - start + seconds = delta.seconds + microseconds = delta.microseconds + LOG.info( + "%s.%ss %s %s %s %s %s [%s] %s %s", + seconds, + microseconds, + request.remote_addr, + request.method, + "%s%s" % (request.script_name, request.path_info), + action, + response.status_int, + request.user_agent, + request.content_type, + response.content_type, + context=ctxt) + + +class EC2KeystoneAuth(wsgi.Middleware): + """Authenticate an EC2 request with keystone and convert to context.""" + + 
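+    # NOTE(editor): a rough sketch of the exchange performed below,
+    # assuming keystone's v2.0 EC2 token extension; see the code for the
+    # authoritative version:
+    #
+    #   POST {keystone_url}/ec2tokens
+    #       {"ec2Credentials": {"access": ..., "signature": ...,
+    #                           "host": ..., "verb": ..., "path": ...,
+    #                           "params": {...}}}
+    #
+    # The response carries the token id, user/project ids and names, roles
+    # and the service catalog; together with the EC2 credentials fetched
+    # from /users/{user_id}/credentials/OS-EC2/{access} they are packed
+    # into an ec2api.context.RequestContext.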
@webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + request_id = context.generate_request_id() + signature = req.params.get('Signature') + if not signature: + msg = _("Signature not provided") + return faults.ec2_error_response(request_id, "AuthFailure", msg, + status=400) + access = req.params.get('AWSAccessKeyId') + if not access: + msg = _("Access key not provided") + return faults.ec2_error_response(request_id, "AuthFailure", msg, + status=400) + + # Make a copy of args for authentication and signature verification. + auth_params = dict(req.params) + # Not part of authentication args + auth_params.pop('Signature') + + cred_dict = { + 'access': access, + 'signature': signature, + 'host': req.host, + 'verb': req.method, + 'path': req.path, + 'params': auth_params, + } + token_url = CONF.keystone_url + "/ec2tokens" + if "ec2" in token_url: + creds = {'ec2Credentials': cred_dict} + else: + creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}} + creds_json = jsonutils.dumps(creds) + headers = {'Content-Type': 'application/json'} + + o = urlparse.urlparse(token_url) + if o.scheme == "http": + conn = httplib.HTTPConnection(o.netloc) + else: + conn = httplib.HTTPSConnection(o.netloc) + conn.request('POST', o.path, body=creds_json, headers=headers) + response = conn.getresponse() + data = response.read() + if response.status != 200: + if response.status == 401: + msg = response.reason + else: + msg = _("Failure communicating with keystone") + return faults.ec2_error_response(request_id, "AuthFailure", msg, + status=response.status) + result = jsonutils.loads(data) + conn.close() + + try: + token_id = result['access']['token']['id'] + user_id = result['access']['user']['id'] + project_id = result['access']['token']['tenant']['id'] + user_name = result['access']['user'].get('name') + project_name = result['access']['token']['tenant'].get('name') + roles = [role['name'] for role + in result['access']['user']['roles']] + except (AttributeError, KeyError) as e: + LOG.exception(_("Keystone failure: %s") % e) + msg = _("Failure communicating with keystone") + return faults.ec2_error_response(request_id, "AuthFailure", msg, + status=400) + + remote_address = req.remote_addr + if CONF.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', + remote_address) + + headers["X-Auth-Token"] = token_id + o = urlparse.urlparse(CONF.keystone_url + + ("/users/%s/credentials/OS-EC2/%s" % (user_id, access))) + if o.scheme == "http": + conn = httplib.HTTPConnection(o.netloc) + else: + conn = httplib.HTTPSConnection(o.netloc) + conn.request('GET', o.path, headers=headers) + response = conn.getresponse() + data = response.read() + if response.status != 200: + if response.status == 401: + msg = response.reason + else: + msg = _("Failure communicating with keystone") + return faults.ec2_error_response(request_id, "AuthFailure", msg, + status=response.status) + ec2_creds = jsonutils.loads(data) + conn.close() + + catalog = result['access']['serviceCatalog'] + ctxt = context.RequestContext(user_id, + project_id, + ec2_creds["credential"]["access"], + ec2_creds["credential"]["secret"], + user_name=user_name, + project_name=project_name, + roles=roles, + auth_token=token_id, + remote_address=remote_address, + service_catalog=catalog, + api_version=req.params.get('Version')) + + req.environ['ec2api.context'] = ctxt + + return self.application + + +class Requestify(wsgi.Middleware): + + def __init__(self, app): + super(Requestify, self).__init__(app) + + 
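+    # NOTE(editor): an illustration, with hypothetical values, of what
+    # __call__ below does: for a query such as
+    #
+    #   Action=DescribeInstances&InstanceId.1=i-12345678&Signature=...
+    #       &SignatureVersion=2&SignatureMethod=HmacSHA256
+    #       &Timestamp=...&AWSAccessKeyId=...&Version=2014-06-15
+    #
+    # the auth-related keys listed in non_args are stripped, leaving
+    # {'InstanceId.1': 'i-12345678'}, and the result is wrapped into an
+    # apirequest.APIRequest stored in req.environ['ec2.request'].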
@webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Version', 'Timestamp'] + args = dict(req.params) + try: + expired = ec2utils.is_ec2_timestamp_expired(req.params, + expires=CONF.ec2_timestamp_expiry) + if expired: + msg = _("Timestamp failed validation.") + LOG.exception(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + + # Raise KeyError if omitted + action = req.params['Action'] + # Fix bug lp:720157 for older (version 1) clients + version = req.params['SignatureVersion'] + if int(version) == 1: + non_args.remove('SignatureMethod') + if 'SignatureMethod' in args: + args.pop('SignatureMethod') + for non_arg in non_args: + # Remove, but raise KeyError if omitted + args.pop(non_arg) + except KeyError: + raise webob.exc.HTTPBadRequest() + except exception.InvalidRequest as err: + raise webob.exc.HTTPBadRequest(explanation=unicode(err)) + + LOG.debug('action: %s', action) + for key, value in args.items(): + LOG.debug('arg: %(key)s\t\tval: %(value)s', + {'key': key, 'value': value}) + + # Success! + api_request = apirequest.APIRequest( + action, req.params['Version'], args) + req.environ['ec2.request'] = api_request + return self.application + + +def validate_ec2_id(val): + if not validator.validate_str()(val): + return False + try: + ec2utils.ec2_id_to_id(val) + except exception.InvalidEc2Id: + return False + return True + + +def is_valid_ipv4(address): + """Verify that address represents a valid IPv4 address.""" + try: + return netaddr.valid_ipv4(address) + except Exception: + return False + + +class Validator(wsgi.Middleware): + + validator.validate_ec2_id = validate_ec2_id + + validator.DEFAULT_VALIDATOR = { + 'instance_id': validate_ec2_id, + 'volume_id': validate_ec2_id, + 'image_id': validate_ec2_id, + 'attribute': validator.validate_str(), + 'image_location': validator.validate_image_path, + 'public_ip': is_valid_ipv4, + 'region_name': validator.validate_str(), + 'group_name': validator.validate_str(max_length=255), + 'group_description': validator.validate_str(max_length=255), + 'size': validator.validate_int(), + 'user_data': validator.validate_user_data + } + + def __init__(self, application): + super(Validator, self).__init__(application) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if validator.validate(req.environ['ec2.request'].args, + validator.DEFAULT_VALIDATOR): + return self.application + else: + raise webob.exc.HTTPBadRequest() + + +def exception_to_ec2code(ex): + """Helper to extract EC2 error code from exception. + + For other than EC2 exceptions (those without ec2_code attribute), + use exception name. + """ + if hasattr(ex, 'ec2_code'): + code = ex.ec2_code + else: + code = type(ex).__name__ + return code + + +def ec2_error_ex(ex, req, code=None, message=None): + """Return an EC2 error response based on passed exception and log it.""" + if not code: + code = exception_to_ec2code(ex) + status = getattr(ex, 'code', None) + if not status: + status = 500 + + log_fun = LOG.error + log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s") + + context = req.environ['ec2api.context'] + request_id = context.request_id + log_msg_args = { + 'ex_name': type(ex).__name__, + 'ex_str': unicode(ex) + } + log_fun(log_msg % log_msg_args, context=context) + + if ex.args and not message and status < 500: + message = unicode(ex.args[0]) + # Log filtered environment for unexpected errors. 
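+    # (Values that are not plain strings, such as the wsgi.input stream in
+    # the environ, are dropped so that the jsonutils.dumps call below does
+    # not fail on non-serializable objects.)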
+ env = req.environ.copy() + for k in env.keys(): + if not isinstance(env[k], six.string_types): + env.pop(k) + log_fun(_('Environment: %s') % jsonutils.dumps(env)) + if not message: + message = _('Unknown error occurred.') + return faults.ec2_error_response(request_id, code, message, status=status) + + +class Executor(wsgi.Application): + + """Execute an EC2 API request. + + Executes 'ec2.action', passing 'ec2api.context' and + 'ec2.action_args' (all variables in WSGI environ.) Returns an XML + response, or a 400 upon failure. + """ + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + context = req.environ['ec2api.context'] + api_request = req.environ['ec2.request'] + try: + result = api_request.invoke(context) + except exception.InstanceNotFound as ex: + ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id']) + message = ex.msg_fmt % {'instance_id': ec2_id} + return ec2_error_ex(ex, req, message=message) + except exception.MethodNotFound: + try: + http, response = api_request.proxy(req) + resp = webob.Response() + resp.status = http["status"] + resp.headers["content-type"] = http["content-type"] + resp.body = str(response) + return resp + except Exception as ex: + return ec2_error_ex(ex, req) + except exception.EC2ServerError as ex: + resp = webob.Response() + resp.status = ex.response['status'] + resp.headers['Content-Type'] = ex.response['content-type'] + resp.body = ex.content + return resp + except Exception as ex: + return ec2_error_ex(ex, req) + else: + resp = webob.Response() + resp.status = 200 + resp.headers['Content-Type'] = 'text/xml' + resp.body = str(result) + + return resp diff --git a/ec2api/api/apirequest.py b/ec2api/api/apirequest.py new file mode 100644 index 00000000..75ffd027 --- /dev/null +++ b/ec2api/api/apirequest.py @@ -0,0 +1,143 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
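+
+# NOTE(editor): a sketch of the dispatch chain implemented here:
+# APIRequest.invoke() turns the EC2 action name into an underscored
+# controller method, e.g. 'DescribeInstances' -> 'describe_instances',
+# and looks it up on cloud.CloudController. Since CloudController does
+# not implement any actions yet, the lookup raises
+# exception.MethodNotFound and Executor (in ec2api/api/__init__.py)
+# falls back to APIRequest.proxy(), which forwards the call to nova's
+# EC2 API.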
+
+"""
+APIRequest class
+"""
+
+import datetime
+# TODO(termie): replace minidom with etree
+from xml.dom import minidom
+
+from lxml import etree
+
+from ec2api.api import cloud
+from ec2api.api import ec2utils
+from ec2api.api import proxy
+from ec2api import exception
+from ec2api.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _underscore_to_camelcase(st):
+    return ''.join([x[:1].upper() + x[1:] for x in st.split('_')])
+
+
+def _underscore_to_xmlcase(st):
+    res = _underscore_to_camelcase(st)
+    return res[:1].lower() + res[1:]
+
+
+def _database_to_isoformat(datetimeobj):
+    """Return an xs:dateTime parsable string from a datetime."""
+    return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
+
+
+class APIRequest(object):
+    def __init__(self, action, version, args):
+        self.action = action
+        self.version = version
+        self.args = args
+        self.controller = cloud.CloudController()
+        self.proxyController = proxy.ProxyController()
+
+    def invoke(self, context):
+        try:
+            method = getattr(self.controller,
+                             ec2utils.camelcase_to_underscore(self.action))
+        except AttributeError:
+            raise exception.MethodNotFound(name=self.action)
+
+        args = ec2utils.dict_from_dotted_str(self.args.items())
+
+        for key in args.keys():
+            # NOTE(vish): Turn numeric dict keys into lists
+            # NOTE(Alex): Turn "value"-only dict keys into values
+            if isinstance(args[key], dict):
+                if args[key] == {}:
+                    continue
+                if args[key].keys()[0].isdigit():
+                    s = args[key].items()
+                    s.sort()
+                    args[key] = [v for k, v in s]
+                elif args[key].keys()[0] == 'value' and len(args[key]) == 1:
+                    args[key] = args[key]['value']
+
+        result = method(context, **args)
+        return self._render_response(result, context.request_id)
+
+    def proxy(self, req):
+        return self.proxyController.proxy(req, self.args)
+
+    def _render_response(self, response_data, request_id):
+        xml = minidom.Document()
+
+        response_el = xml.createElement(self.action + 'Response')
+        response_el.setAttribute('xmlns',
+                                 'http://ec2.amazonaws.com/doc/%s/'
+                                 % self.version)
+        request_id_el = xml.createElement('requestId')
+        request_id_el.appendChild(xml.createTextNode(request_id))
+        response_el.appendChild(request_id_el)
+        if response_data is True:
+            self._render_dict(xml, response_el, {'return': 'true'})
+        else:
+            self._render_dict(xml, response_el, response_data)
+
+        xml.appendChild(response_el)
+
+        response = xml.toxml()
+        root = etree.fromstring(response)
+        response = etree.tostring(root, pretty_print=True)
+
+        xml.unlink()
+
+        # Don't write private key to log
+        if self.action != "CreateKeyPair":
+            LOG.debug(response)
+        else:
+            LOG.debug("CreateKeyPair: Return Private Key")
+
+        return response
+
+    def _render_dict(self, xml, el, data):
+        try:
+            for key in data.keys():
+                val = data[key]
+                el.appendChild(self._render_data(xml, key, val))
+        except Exception:
+            LOG.debug(data)
+            raise
+
+    def _render_data(self, xml, el_name, data):
+        el_name = _underscore_to_xmlcase(el_name)
+        data_el = xml.createElement(el_name)
+
+        if isinstance(data, list):
+            for item in data:
+                data_el.appendChild(self._render_data(xml, 'item', item))
+        elif isinstance(data, dict):
+            self._render_dict(xml, data_el, data)
+        elif hasattr(data, '__dict__'):
+            self._render_dict(xml, data_el, data.__dict__)
+        elif isinstance(data, bool):
+            data_el.appendChild(xml.createTextNode(str(data).lower()))
+        elif isinstance(data, datetime.datetime):
+            data_el.appendChild(
+                xml.createTextNode(_database_to_isoformat(data)))
+        elif data is not None:
data_el.appendChild(xml.createTextNode(str(data))) + + return data_el diff --git a/ec2api/api/auth.py b/ec2api/api/auth.py new file mode 100644 index 00000000..4978dd5e --- /dev/null +++ b/ec2api/api/auth.py @@ -0,0 +1,54 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common Auth Middleware. + +""" + +from oslo.config import cfg + +from ec2api.openstack.common import log as logging + + +auth_opts = [ + cfg.BoolOpt('api_rate_limit', + default=False, + help='whether to use per-user rate limiting for the api.'), + cfg.BoolOpt('use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.'), +] + +CONF = cfg.CONF +CONF.register_opts(auth_opts) + +LOG = logging.getLogger(__name__) + + +def pipeline_factory(loader, global_conf, **local_conf): + """A paste pipeline replica that keys off of auth_strategy.""" + auth_strategy = "keystone" + pipeline = local_conf[auth_strategy] + if not CONF.api_rate_limit: + limit_name = auth_strategy + '_nolimit' + pipeline = local_conf.get(limit_name, pipeline) + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for fltr in filters: + app = fltr(app) + return app diff --git a/ec2api/api/clients.py b/ec2api/api/clients.py new file mode 100644 index 00000000..b9078526 --- /dev/null +++ b/ec2api/api/clients.py @@ -0,0 +1,141 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
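+
+# NOTE(editor): the factories below reuse the token from the request
+# context rather than re-authenticating; endpoints are resolved by
+# _url_for(), which returns the first publicURL of the requested
+# service_type from context.service_catalog. A minimal, hypothetical
+# use from request-handling code:
+#
+#   from ec2api.api import clients
+#   servers = clients.nova(context).servers.list()
+#   networks = clients.neutron(context).list_networks()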
+ + +from keystoneclient.v2_0 import client as kc +from novaclient import client as novaclient +from novaclient import shell as novashell +from oslo.config import cfg + +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import log as logging + +logger = logging.getLogger(__name__) + +CONF = cfg.CONF + + +try: + from neutronclient.v2_0 import client as neutronclient +except ImportError: + neutronclient = None + logger.info(_('neutronclient not available')) +try: + from cinderclient import client as cinderclient +except ImportError: + cinderclient = None + logger.info(_('cinderclient not available')) +try: + from glanceclient import client as glanceclient +except ImportError: + glanceclient = None + logger.info(_('glanceclient not available')) + + +def nova(context, service_type='compute'): + computeshell = novashell.OpenStackComputeShell() + extensions = computeshell._discover_extensions("1.1") + + args = { + 'project_id': context.project_id, + 'auth_url': CONF.keystone_url, + 'service_type': service_type, + 'username': None, + 'api_key': None, + 'extensions': extensions, + } + + client = novaclient.Client(1.1, **args) + + management_url = _url_for(context, service_type=service_type) + client.client.auth_token = context.auth_token + client.client.management_url = management_url + + return client + + +def neutron(context): + if neutronclient is None: + return None + + args = { + 'auth_url': CONF.keystone_url, + 'service_type': 'network', + 'token': context.auth_token, + 'endpoint_url': _url_for(context, service_type='network'), + } + + return neutronclient.Client(**args) + + +def glance(context): + if glanceclient is None: + return None + + args = { + 'auth_url': CONF.keystone_url, + 'service_type': 'image', + 'token': context.auth_token, + } + + return glanceclient.Client( + "1", endpoint=_url_for(context, service_type='image'), **args) + + +def cinder(context): + if cinderclient is None: + return nova(context, 'volume') + + args = { + 'service_type': 'volume', + 'auth_url': CONF.keystone_url, + 'username': None, + 'api_key': None, + } + + _cinder = cinderclient.Client('1', **args) + management_url = _url_for(context, service_type='volume') + _cinder.client.auth_token = context.auth_token + _cinder.client.management_url = management_url + + return _cinder + + +def keystone(context): + _keystone = kc.Client( + token=context.auth_token, + tenant_id=context.project_id, + auth_url=CONF.keystone_url) + + return _keystone + + +def _url_for(context, **kwargs): + service_catalog = context.service_catalog + if not service_catalog: + catalog = keystone(context).service_catalog.catalog + service_catalog = catalog["serviceCatalog"] + context.service_catalog = service_catalog + + service_type = kwargs["service_type"] + for service in service_catalog: + if service["type"] != service_type: + continue + for endpoint in service["endpoints"]: + if "publicURL" in endpoint: + return endpoint["publicURL"] + else: + return None + + return None diff --git a/ec2api/api/cloud.py b/ec2api/api/cloud.py new file mode 100644 index 00000000..f43e49c2 --- /dev/null +++ b/ec2api/api/cloud.py @@ -0,0 +1,41 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. +""" + +from oslo.config import cfg + +from ec2api.openstack.common import log as logging + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class CloudController(object): + """Cloud Controller + + Provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. + """ + def __init__(self): + pass + + def __str__(self): + return 'CloudController' \ No newline at end of file diff --git a/ec2api/api/ec2client.py b/ec2api/api/ec2client.py new file mode 100644 index 00000000..d57d12e9 --- /dev/null +++ b/ec2api/api/ec2client.py @@ -0,0 +1,222 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import hashlib +import hmac +import re +import time +import types +import urllib +import urlparse + +import httplib2 +from lxml import etree +from oslo.config import cfg + +from ec2api.api import ec2utils +from ec2api import exception +from ec2api.openstack.common import log as logging + + +ec2_opts = [ + cfg.StrOpt('base_ec2_host', + default="localhost", + help='The IP address of the EC2 API server'), + cfg.IntOpt('base_ec2_port', + default=8773, + help='The port of the EC2 API server'), + cfg.StrOpt('base_ec2_scheme', + default='http', + help='The protocol to use when connecting to the EC2 API ' + 'server (http, https)'), + cfg.StrOpt('base_ec2_path', + default='/services/Cloud', + help='The path prefix used to call the ec2 API server'), +] + +CONF = cfg.CONF +CONF.register_opts(ec2_opts) +LOG = logging.getLogger(__name__) + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + + +def ec2client(context): + return EC2Client(context) + + +class EC2Requester(object): + + def __init__(self, version, http_method): + self.http_obj = httplib2.Http( + disable_ssl_certificate_validation=True) + self.version = version + self.method = http_method + + def request(self, context, action, args): + headers = { + 'content-type': 'application/x-www-form-urlencoded', + 'connection': 'close', + } + params = args + params['Action'] = action + params['Version'] = self.version + self._add_auth(context, params) + params = self._get_query_string(params) + + if self.method == 'POST': + url = self._ec2_url + body = params + else: + url = '?'.join((self._ec2_url, params,)) + body = None + + response, content = self.http_obj.request(url, self.method, + body=body, headers=headers) + return response, content + + _ec2_url = '%s://%s:%s%s' % (CONF.base_ec2_scheme, + 
CONF.base_ec2_host, + CONF.base_ec2_port, + CONF.base_ec2_path) + + @staticmethod + def _get_query_string(params): + pairs = [] + for key in sorted(params): + value = params[key] + pairs.append(urllib.quote(key.encode('utf-8'), safe='') + '=' + + urllib.quote(value.encode('utf-8'), safe='-_~')) + return '&'.join(pairs) + + def _calc_signature(self, context, params): + LOG.debug('Calculating signature using v2 auth.') + split = urlparse.urlsplit(self._ec2_url) + path = split.path + if len(path) == 0: + path = '/' + string_to_sign = '%s\n%s\n%s\n' % (self.method, + split.netloc, + path) + secret = context.secret_key + lhmac = hmac.new(secret.encode('utf-8'), digestmod=hashlib.sha256) + string_to_sign += self._get_query_string(params) + LOG.debug('String to sign: %s', string_to_sign) + lhmac.update(string_to_sign.encode('utf-8')) + b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8') + return b64 + + def _add_auth(self, context, params): + params['AWSAccessKeyId'] = context.access_key + params['SignatureVersion'] = '2' + params['SignatureMethod'] = 'HmacSHA256' + params['Timestamp'] = time.strftime(ISO8601, time.gmtime()) + signature = self._calc_signature(context, params) + params['Signature'] = signature + + +class EC2Client(object): + + def __init__(self, context): + self.context = context + self.requester = EC2Requester(context.api_version, 'POST') + + def __getattr__(self, name): + ec2_name = self._underscore_to_camelcase(name) + + def func(self, **kwargs): + params = self._build_params(**kwargs) + response, content = self.requester.request(self.context, ec2_name, + params) + return self._process_response(response, content) + + func.__name__ = name + setattr(self, name, types.MethodType(func, self, self.__class__)) + setattr(self.__class__, name, + types.MethodType(func, None, self.__class__)) + return getattr(self, name) + + @staticmethod + def _process_response(response, content): + if response.status > 200: + raise exception.EC2ServerError(response, content) + + res = EC2Client._parse_xml(content) + + res = next(res.itervalues()) + if 'return' in res: + return res['return'] + else: + res.pop('requestId') + return res + + @staticmethod + def _build_params(**kwargs): + def add_list_param(params, items, label): + for i in range(1, len(items) + 1): + item = items[i - 1] + item_label = '%s.%d' % (label, i) + if isinstance(item, dict): + add_dict_param(params, item, item_label) + else: + params[item_label] = str(item) + + def add_dict_param(params, items, label=None): + for key, value in items.iteritems(): + ec2_key = EC2Client._underscore_to_camelcase(key) + item_label = '%s.%s' % (label, ec2_key) if label else ec2_key + if isinstance(value, dict): + add_dict_param(params, value, item_label) + elif isinstance(value, list): + add_list_param(params, value, item_label) + else: + params[item_label] = str(value) + + params = {} + add_dict_param(params, kwargs) + return params + + _xml_scheme = re.compile('\sxmlns=".*"') + + @staticmethod + # NOTE(ft): this function is used in unit tests until it be moved to one + # of utils module + def _parse_xml(xml_string): + xml_string = EC2Client._xml_scheme.sub('', xml_string) + xml = etree.fromstring(xml_string) + + def convert_node(node): + children = list(node) + if len(children): + if children[0].tag == 'item': + val = list(convert_node(child)[1] for child in children) + else: + val = dict(convert_node(child) for child in children) + elif node.tag.endswith('Set'): + val = [] + else: + # TODO(ft): do not use private function + val = 
(ec2utils._try_convert(node.text) + if node.text + else node.text) + return node.tag, val + + return dict([convert_node(xml)]) + + @staticmethod + # NOTE(ft): this function is copied from apirequest to avoid circular + # module reference. It should be moved to one of utils module + def _underscore_to_camelcase(st): + return ''.join([x[:1].upper() + x[1:] for x in st.split('_')]) diff --git a/ec2api/api/ec2utils.py b/ec2api/api/ec2utils.py new file mode 100644 index 00000000..07340caf --- /dev/null +++ b/ec2api/api/ec2utils.py @@ -0,0 +1,186 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +from ec2api import exception +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import log as logging +from ec2api.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +def resource_type_from_id(context, resource_id): + """Get resource type by ID + + Returns a string representation of the Amazon resource type, if known. + Returns None on failure. + + :param context: context under which the method is called + :param resource_id: resource_id to evaluate + """ + + known_types = { + 'i': 'instance', + 'r': 'reservation', + 'vol': 'volume', + 'snap': 'snapshot', + 'ami': 'image', + 'aki': 'image', + 'ari': 'image' + } + + type_marker = resource_id.split('-')[0] + + return known_types.get(type_marker) + + +_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') + + +def camelcase_to_underscore(str): + return _c2u.sub(r'_\1', str).lower().strip('_') + + +def _try_convert(value): + """Return a non-string from a string or unicode, if possible. + + ============= ===================================================== + When value is returns + ============= ===================================================== + zero-length '' + 'None' None + 'True' True case insensitive + 'False' False case insensitive + '0', '-0' 0 + 0xN, -0xN int from hex (positive) (N is any number) + 0bN, -0bN int from binary (positive) (N is any number) + * try conversion to int, float, complex, fallback value + + """ + def _negative_zero(value): + epsilon = 1e-7 + return 0 if abs(value) < epsilon else value + + if len(value) == 0: + return '' + if value == 'None': + return None + lowered_value = value.lower() + if lowered_value == 'true': + return True + if lowered_value == 'false': + return False + for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]: + try: + if lowered_value.startswith((prefix, "-" + prefix)): + return int(lowered_value, base) + except ValueError: + pass + try: + return _negative_zero(float(value)) + except ValueError: + return value + + +def dict_from_dotted_str(items): + """parse multi dot-separated argument into dict. 
+ + EBS boot uses multi dot-separated arguments like + BlockDeviceMapping.1.DeviceName=snap-id + Convert the above into + {'block_device_mapping': {'1': {'device_name': snap-id}}} + """ + args = {} + for key, value in items: + parts = key.split(".") + key = str(camelcase_to_underscore(parts[0])) + if isinstance(value, str) or isinstance(value, unicode): + # NOTE(vish): Automatically convert strings back + # into their respective values + value = _try_convert(value) + + if len(parts) > 1: + d = args.get(key, {}) + args[key] = d + for k in parts[1:-1]: + k = camelcase_to_underscore(k) + v = d.get(k, {}) + d[k] = v + d = v + d[camelcase_to_underscore(parts[-1])] = value + else: + args[key] = value + + return args + + +_ms_time_regex = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$') + + +def is_ec2_timestamp_expired(request, expires=None): + """Checks the timestamp or expiry time included in an EC2 request + + and returns true if the request is expired + """ + query_time = None + timestamp = request.get('Timestamp') + expiry_time = request.get('Expires') + + def parse_strtime(strtime): + if _ms_time_regex.match(strtime): + # NOTE(MotoKen): time format for aws-sdk-java contains millisecond + time_format = "%Y-%m-%dT%H:%M:%S.%fZ" + else: + time_format = "%Y-%m-%dT%H:%M:%SZ" + return timeutils.parse_strtime(strtime, time_format) + + try: + if timestamp and expiry_time: + msg = _("Request must include either Timestamp or Expires," + " but cannot contain both") + LOG.error(msg) + raise exception.InvalidRequest(msg) + elif expiry_time: + query_time = parse_strtime(expiry_time) + return timeutils.is_older_than(query_time, -1) + elif timestamp: + query_time = parse_strtime(timestamp) + + # Check if the difference between the timestamp in the request + # and the time on our servers is larger than 5 minutes, the + # request is too old (or too new). + if query_time and expires: + return (timeutils.is_older_than(query_time, expires) or + timeutils.is_newer_than(query_time, expires)) + return False + except ValueError: + LOG.audit(_("Timestamp is invalid.")) + return True + + +# TODO(Alex) This function is copied as is from original cloud.py. It doesn't +# check for the prefix which allows any prefix used for any object. +def ec2_id_to_id(ec2_id): + """Convert an ec2 ID (i-[base 16 number]) to an instance id (int).""" + try: + return int(ec2_id.split('-')[-1], 16) + except ValueError: + raise exception.InvalidEc2Id(ec2_id=ec2_id) + + +def id_to_ec2_id(instance_id, template='i-%08x'): + """Convert an instance ID (int) to an ec2 ID (i-[base 16 number]).""" + return template % int(instance_id) diff --git a/ec2api/api/faults.py b/ec2api/api/faults.py new file mode 100644 index 00000000..4fb9b672 --- /dev/null +++ b/ec2api/api/faults.py @@ -0,0 +1,96 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
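As a quick illustration of the id-conversion helpers in ec2utils.py above, here is a minimal standalone sketch (the values are hypothetical and the functions are simplified mirrors, not part of the patch):

```python
# Sketch of the ec2_id_to_id()/id_to_ec2_id() round trip defined in
# ec2api/api/ec2utils.py above (hypothetical values).
def ec2_id_to_id(ec2_id):
    # 'i-0000000a' -> 10: the suffix after the last '-' is a hex number
    return int(ec2_id.split('-')[-1], 16)


def id_to_ec2_id(instance_id, template='i-%08x'):
    # 10 -> 'i-0000000a'
    return template % int(instance_id)


assert ec2_id_to_id('i-0000000a') == 10
assert id_to_ec2_id(10) == 'i-0000000a'
assert id_to_ec2_id(175, 'vol-%08x') == 'vol-000000af'
```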
+
+from xml.sax import saxutils
+
+from oslo.config import cfg
+import webob.dec
+import webob.exc
+
+import ec2api.api
+from ec2api import context
+from ec2api.openstack.common import gettextutils
+from ec2api.openstack.common import log as logging
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def xhtml_escape(value):
+    """Escapes a string so it is valid within XML or XHTML.
+
+    """
+    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
+
+
+def utf8(value):
+    """Try to turn a string into utf-8 if possible.
+
+    Code is directly from the utf8 function in
+    http://github.com/facebook/tornado/blob/master/tornado/escape.py
+
+    """
+    if isinstance(value, unicode):
+        return value.encode('utf-8')
+    elif isinstance(value, gettextutils.Message):
+        return unicode(value).encode('utf-8')
+    assert isinstance(value, str)
+    return value
+
+
+def ec2_error_response(request_id, code, message, status=500):
+    """Helper to construct an EC2 compatible error response."""
+    LOG.debug('EC2 error response: %(code)s: %(message)s',
+              {'code': code, 'message': message})
+    resp = webob.Response()
+    resp.status = status
+    resp.headers['Content-Type'] = 'text/xml'
+    resp.body = str('<?xml version="1.0"?>\n'
+                    '<Response><Errors><Error><Code>%s</Code>'
+                    '<Message>%s</Message></Error></Errors>'
+                    '<RequestID>%s</RequestID></Response>' %
+                    (xhtml_escape(utf8(code)),
+                     xhtml_escape(utf8(message)),
+                     xhtml_escape(utf8(request_id))))
+    return resp
+
+
+class Fault(webob.exc.HTTPException):
+    """Captures an exception and returns a REST response."""
+
+    def __init__(self, exception):
+        """Create a response for the given webob.exc.exception."""
+        self.wrapped_exc = exception
+
+    @webob.dec.wsgify
+    def __call__(self, req):
+        """Generate a WSGI response based on the exception passed to ctor."""
+        code = ec2api.api.exception_to_ec2code(self.wrapped_exc)
+        status = self.wrapped_exc.status_int
+        message = self.wrapped_exc.explanation
+
+        if status == 501:
+            message = "The requested function is not supported"
+
+        if 'AWSAccessKeyId' not in req.params:
+            raise webob.exc.HTTPBadRequest()
+        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
+        project_id = project_id or user_id
+        remote_address = getattr(req, 'remote_address', '127.0.0.1')
+        if CONF.use_forwarded_for:
+            remote_address = req.headers.get('X-Forwarded-For', remote_address)
+
+        resp = ec2_error_response(context.generate_request_id(), code,
+                                  message=message, status=status)
+        return resp
diff --git a/ec2api/api/proxy.py b/ec2api/api/proxy.py
new file mode 100644
index 00000000..87645e19
--- /dev/null
+++ b/ec2api/api/proxy.py
@@ -0,0 +1,27 @@
+# Copyright 2014 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
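For reference, ec2_error_response() in faults.py above serializes errors into the standard EC2 XML envelope; a minimal sketch of that template with hypothetical values (not part of the patch):

```python
# Standalone mirror of the error envelope built by ec2_error_response()
# above; the code, message and request id below are hypothetical.
def ec2_error_xml(code, message, request_id):
    return ('<?xml version="1.0"?>\n'
            '<Response><Errors><Error><Code>%s</Code>'
            '<Message>%s</Message></Error></Errors>'
            '<RequestID>%s</RequestID></Response>'
            % (code, message, request_id))


print(ec2_error_xml('InvalidInstanceID.NotFound',
                    'Instance i-12345678 could not be found.',
                    'req-9d5bf4a0-0000-0000-0000-000000000000'))
```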
+
+from ec2api.api import ec2client
+
+
+class ProxyController(object):
+
+    def __str__(self):
+        return 'ProxyController'
+
+    def proxy(self, req, args):
+        requester = ec2client.EC2Requester(req.params["Version"],
+                                           req.environ["REQUEST_METHOD"])
+        return requester.request(req.environ['ec2api.context'],
+                                 req.params["Action"], args)
diff --git a/ec2api/api/validator.py b/ec2api/api/validator.py
new file mode 100644
index 00000000..01745faf
--- /dev/null
+++ b/ec2api/api/validator.py
@@ -0,0 +1,132 @@
+# Copyright 2014 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import re
+
+from ec2api.openstack.common.gettextutils import _
+from ec2api.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _get_path_validator_regex():
+    # rfc3986 path validator regex from
+    # http://jmrware.com/articles/2009/uri_regexp/URI_regex.html
+    pchar = "([A-Za-z0-9\-._~!$&'()*+,;=:@]|%[0-9A-Fa-f]{2})"
+    path = "((/{pchar}*)*|"
+    path += "/({pchar}+(/{pchar}*)*)?|"
+    path += "{pchar}+(/{pchar}*)*|"
+    path += "{pchar}+(/{pchar}*)*|)"
+    path = path.format(pchar=pchar)
+    return re.compile(path)
+
+
+VALIDATE_PATH_RE = _get_path_validator_regex()
+
+
+def validate_str(max_length=None):
+
+    def _do(val):
+        if not isinstance(val, basestring):
+            return False
+        if max_length and len(val) > max_length:
+            return False
+        return True
+
+    return _do
+
+
+def validate_int(max_value=None):
+
+    def _do(val):
+        if not isinstance(val, int):
+            return False
+        if max_value and val > max_value:
+            return False
+        return True
+
+    return _do
+
+
+def validate_url_path(val):
+    """True if val is matched by the path component grammar in rfc3986."""
+
+    if not validate_str()(val):
+        return False
+
+    return VALIDATE_PATH_RE.match(val).end() == len(val)
+
+
+def validate_image_path(val):
+    if not validate_str()(val):
+        return False
+
+    bucket_name = val.split('/')[0]
+    manifest_path = val[len(bucket_name) + 1:]
+    if not len(bucket_name) or not len(manifest_path):
+        return False
+
+    if val[0] == '/':
+        return False
+
+    # make sure the image path is rfc3986 compliant
+    # prepend '/' to make input validate
+    if not validate_url_path('/' + val):
+        return False
+
+    return True
+
+
+def validate_user_data(user_data):
+    """Check if the user_data is encoded properly."""
+    try:
+        user_data = base64.b64decode(user_data)
+    except TypeError:
+        return False
+    return True
+
+
+def validate(args, validator):
+    """Validate values of args against validators in validator.
+
+    :param args: Dict of values to be validated.
+    :param validator: A dict where the keys map to keys in args
+                      and the values are validators.
+                      Applies each validator to ``args[key]``
+    :returns: True if validation succeeds. Otherwise False.
+
+    A validator should be a callable which accepts 1 argument and which
+    returns True if the argument passes validation. False otherwise.
+    A validator should not raise an exception to indicate validity of the
+    argument.
+ + Only validates keys which show up in both args and validator. + + """ + + for key in validator: + if key not in args: + continue + + f = validator[key] + assert callable(f) + + if not f(args[key]): + LOG.debug(_("%(key)s with value %(value)s failed" + " validator %(name)s"), + {'key': key, 'value': args[key], 'name': f.__name__}) + return False + return True diff --git a/ec2api/cmd/__init__.py b/ec2api/cmd/__init__.py new file mode 100644 index 00000000..e7cf12cc --- /dev/null +++ b/ec2api/cmd/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from ec2api.openstack.common import gettextutils +gettextutils.install('ec2api') diff --git a/ec2api/cmd/api.py b/ec2api/cmd/api.py new file mode 100644 index 00000000..42212e13 --- /dev/null +++ b/ec2api/cmd/api.py @@ -0,0 +1,42 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +EC2api API Server +""" + +import sys + +from oslo.config import cfg + +from ec2api import config +from ec2api.openstack.common import log as logging +from ec2api import service + +CONF = cfg.CONF +CONF.import_opt('use_ssl', 'ec2api.service') + + +def main(): + config.parse_args(sys.argv) + logging.setup('ec2api') + + server = service.WSGIService( + 'ec2api', use_ssl=CONF.use_ssl, max_url_len=16384) + service.serve(server) + service.wait() + + +if __name__ == '__main__': + main() diff --git a/ec2api/cmd/manage.py b/ec2api/cmd/manage.py new file mode 100644 index 00000000..6cded389 --- /dev/null +++ b/ec2api/cmd/manage.py @@ -0,0 +1,75 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" + CLI interface for EC2 API management. 
+""" + +import sys + +from oslo.config import cfg + +from ec2api.db import migration +from ec2api.openstack.common import log +from ec2api import version + + +CONF = cfg.CONF + + +def do_db_version(): + """Print database's current migration level.""" + print(migration.db_version()) + + +def do_db_sync(): + """Place a database under migration control and upgrade, + + creating if necessary. + """ + migration.db_sync(CONF.command.version) + + +def add_command_parsers(subparsers): + parser = subparsers.add_parser('db_version') + parser.set_defaults(func=do_db_version) + + parser = subparsers.add_parser('db_sync') + parser.set_defaults(func=do_db_sync) + parser.add_argument('version', nargs='?') + parser.add_argument('current_version', nargs='?') + + +command_opt = cfg.SubCommandOpt('command', + title='Commands', + help='Available commands', + handler=add_command_parsers) + + +def main(): + CONF.register_cli_opt(command_opt) + try: + default_config_files = cfg.find_config_files('ec2api') + CONF(sys.argv[1:], project='ec2api', prog='ec2-api-manage', + version=version.version_info.version_string(), + default_config_files=default_config_files) + log.setup("ec2api") + except RuntimeError as e: + sys.exit("ERROR: %s" % e) + + try: + CONF.command.func() + except Exception as e: + sys.exit("ERROR: %s" % e) diff --git a/ec2api/config.py b/ec2api/config.py new file mode 100644 index 00000000..a50c1ca7 --- /dev/null +++ b/ec2api/config.py @@ -0,0 +1,30 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from ec2api.openstack.common.db import options +from ec2api import paths +from ec2api import version + +_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('nova.sqlite') + + +def parse_args(argv, default_config_files=None): + options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, + sqlite_db='nova.sqlite') + cfg.CONF(argv[1:], + project='ec2api', + version=version.version_info.version_string(), + default_config_files=default_config_files) diff --git a/ec2api/context.py b/ec2api/context.py new file mode 100644 index 00000000..a0953dd0 --- /dev/null +++ b/ec2api/context.py @@ -0,0 +1,150 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""RequestContext: context for requests that persist through all of ec2.""" + +import uuid + +import six + +from ec2api import exception +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import local +from ec2api.openstack.common import log as logging +from ec2api.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. + + """ + + def __init__(self, user_id, project_id, access_key, secret_key, + is_admin=None, roles=None, remote_address=None, + auth_token=None, user_name=None, project_name=None, + overwrite=True, service_catalog=None, api_version=None, + **kwargs): + """Parameters + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + if kwargs: + LOG.warn(_('Arguments dropped when creating context: %s') % + str(kwargs)) + + self.user_id = user_id + self.project_id = project_id + self.access_key = access_key + self.secret_key = secret_key + self.roles = roles or [] + self.remote_address = remote_address + timestamp = timeutils.utcnow() + if isinstance(timestamp, six.string_types): + timestamp = timeutils.parse_strtime(timestamp) + self.timestamp = timestamp + self.request_id = generate_request_id() + self.auth_token = auth_token + + self.service_catalog = service_catalog + if self.service_catalog is None: + # if list is empty or none + self.service_catalog = [] + + self.user_name = user_name + self.project_name = project_name + self.is_admin = is_admin + self.api_version = api_version + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'user_id': self.user_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'roles': self.roles, + 'remote_address': self.remote_address, + 'timestamp': timeutils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token, + 'user_name': self.user_name, + 'service_catalog': self.service_catalog, + 'project_name': self.project_name, + 'tenant': self.tenant, + 'user': self.user} + + @classmethod + def from_dict(cls, values): + values.pop('user', None) + values.pop('tenant', None) + return cls(**values) + + # NOTE(sirp): the openstack/common version of RequestContext uses + # tenant/user whereas the ec2 version uses project_id/user_id. We need + # this shim in order to use context-aware code from openstack/common, like + # logging, until we make the switch to using openstack/common's version of + # RequestContext. 
+ @property + def tenant(self): + return self.project_id + + @property + def user(self): + return self.user_id + + +def get_admin_context(read_deleted="no"): + return RequestContext(user_id=None, + project_id=None, + access_key=None, + secret_key=None, + is_admin=True, + read_deleted=read_deleted, + overwrite=False) + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def require_context(ctxt): + """Raise exception.Forbidden() + + if context is not a user or an admin context. + """ + if not ctxt.is_admin and not is_user_context(ctxt): + raise exception.Forbidden() diff --git a/ec2api/exception.py b/ec2api/exception.py new file mode 100644 index 00000000..0db93dfe --- /dev/null +++ b/ec2api/exception.py @@ -0,0 +1,279 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""ec2api base exception handling. + +Includes decorator for re-raising ec2api-type exceptions. + +SHOULD include dedicated exception logging. + +""" + +import sys + +from oslo.config import cfg + +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help='Make exception message format errors fatal'), +] + +CONF = cfg.CONF +CONF.register_opts(exc_log_opts) + + +class EC2ServerError(Exception): + + def __init__(self, response, content): + self.response = response + self.content = content + + +class EC2Exception(Exception): + + """Base EC2 Exception + + To correctly use this class, inherit from it and define + a 'msg_fmt' property. That msg_fmt will get printf'd + with the keyword arguments provided to the constructor. 
+ + """ + msg_fmt = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.msg_fmt % kwargs + + except Exception: + exc_info = sys.exc_info() + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + + if CONF.fatal_exception_format_errors: + raise exc_info[0], exc_info[1], exc_info[2] + else: + # at least get the core message out if something happened + message = self.msg_fmt + + super(EC2Exception, self).__init__(message) + + def format_message(self): + # NOTE(mrodden): use the first argument to the python Exception object + # which should be our full EC2Exception message, (see __init__) + return self.args[0] + + +class Invalid(EC2Exception): + msg_fmt = _("Unacceptable parameters.") + code = 400 + + +class InvalidRequest(Invalid): + msg_fmt = _("The request is invalid.") + + +class InvalidEc2Id(Invalid): + msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.") + + +class InvalidInput(Invalid): + msg_fmt = _("Invalid input received: %(reason)s") + + +class ConfigNotFound(EC2Exception): + msg_fmt = _("Could not find config at %(path)s") + + +class PasteAppNotFound(EC2Exception): + msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") + + +class MethodNotFound(EC2Exception): + msg_fmt = _("Could not find method '%(name)s'") + + +class Forbidden(EC2Exception): + ec2_code = 'AuthFailure' + msg_fmt = _("Not authorized.") + code = 403 + + +class AuthFailure(Invalid): + pass + + +class NotFound(EC2Exception): + msg_fmt = _("Resource could not be found.") + code = 404 + + +class EC2NotFound(NotFound): + code = 400 + + +class InstanceNotFound(EC2NotFound): + ec2_code = 'InvalidInstanceID.NotFound' + msg_fmt = _("Instance %(instance_id)s could not be found.") + + +class InvalidVpcIDNotFound(EC2NotFound): + ec2_code = 'InvalidVpcID.NotFound' + msg_fmt = _("The vpc ID '%(vpc_id)s' does not exist") + + +class InvalidInternetGatewayIDNotFound(EC2NotFound): + ec2_code = 'InvalidInternetGatewayID.NotFound' + msg_fmt = _("The internetGateway ID '%(igw_id)s' does not exist") + + +class InvalidSubnetIDNotFound(EC2NotFound): + ec2_code = 'InvalidSubnetID.NotFound' + msg_fmt = _("The subnet ID '%(subnet_id)s' does not exist") + + +class InvalidNetworkInterfaceIDNotFound(EC2NotFound): + ec2_code = 'InvalidNetworkInterfaceID.NotFound' + msg_fmt = _("Network interface %(eni_id)s could not " + "be found.") + + +class InvalidAttachmentIDNotFound(EC2NotFound): + ec2_code = 'InvalidAttachmentID.NotFound' + msg_fmt = _("Attachment %(eni-attach_id)s could not " + "be found.") + + +class InvalidDhcpOptionsIDNotFound(EC2NotFound): + ec2_code = 'InvalidDhcpOptionsID.NotFound' + msg_fmt = _("The dhcp options ID '%(dopt_id)s' does not exist") + + +class InvalidAllocationIDNotFound(EC2NotFound): + ec2_code = 'InvalidAllocationID.NotFound' + msg_fmt = _("The allocation ID '%(eipalloc_id)s' does not exist") + + +class InvalidAssociationIDNotFound(EC2NotFound): + ec2_code = 'InvalidAssociationID.NotFound' + msg_fmt = _("The association ID '%(assoc_id)s' does not exist") + + +class InvalidRouteTableIDNotFound(EC2NotFound): + ec2_code = 'InvalidRouteTableID.NotFound' + msg_fmt = _("The routeTable ID '%(rtb_id)s' does 
not exist")
+
+
+class InvalidRouteNotFound(EC2NotFound):
+    ec2_code = 'InvalidRoute.NotFound'
+    msg_fmt = _('No route with destination-cidr-block '
+                '%(destination_cidr_block)s in route table %(route_table_id)s')
+
+
+class InvalidGroupNotFound(EC2NotFound):
+    ec2_code = 'InvalidGroup.NotFound'
+    msg_fmt = _("The security group ID '%(sg_id)s' does not exist")
+
+
+class InvalidPermissionNotFound(EC2NotFound):
+    ec2_code = 'InvalidPermission.NotFound'
+    msg_fmt = _("The specified permission does not exist")
+
+
+class IncorrectState(EC2Exception):
+    ec2_code = 'IncorrectState'
+    code = 400
+    msg_fmt = _("The resource is in incorrect state for the request - "
+                "reason: '%(reason)s'")
+
+
+class InvalidVpcRange(Invalid):
+    ec2_code = 'InvalidVpc.Range'
+    msg_fmt = _("The CIDR '%(cidr_block)s' is invalid.")
+
+
+class InvalidSubnetRange(Invalid):
+    ec2_code = 'InvalidSubnet.Range'
+    msg_fmt = _("The CIDR '%(cidr_block)s' is invalid.")
+
+
+class InvalidSubnetConflict(Invalid):
+    ec2_code = 'InvalidSubnet.Conflict'
+    msg_fmt = _("The CIDR '%(cidr_block)s' conflicts with another subnet")
+
+
+class MissingParameter(Invalid):
+    pass
+
+
+class InvalidParameterValue(Invalid):
+    msg_fmt = _("Value (%(value)s) for parameter %(parameter)s is invalid. "
+                "%(reason)s")
+
+
+class InvalidParameterCombination(Invalid):
+    pass
+
+
+class ResourceAlreadyAssociated(Invalid):
+    ec2_code = 'Resource.AlreadyAssociated'
+
+
+class GatewayNotAttached(Invalid):
+    ec2_code = 'Gateway.NotAttached'
+    msg_fmt = _("resource %(igw_id)s is not attached to network %(vpc_id)s")
+
+
+class DependencyViolation(Invalid):
+    ec2_code = 'DependencyViolation'
+    msg_fmt = _('Object %(obj1_id)s has dependent resource %(obj2_id)s')
+
+
+class InvalidNetworkInterfaceInUse(Invalid):
+    ec2_code = 'InvalidNetworkInterface.InUse'
+    msg_fmt = _('Interface: %(interface_ids)s in use.')
+
+
+class InvalidInstanceId(Invalid):
+    ec2_code = 'InvalidInstanceID'
+
+
+class InvalidIPAddressInUse(Invalid):
+    ec2_code = 'InvalidIPAddress.InUse'
+    msg_fmt = _('Address %(ip_address)s is in use.')
+
+
+class RouteAlreadyExists(Invalid):
+    msg_fmt = _('The route identified by %(destination_cidr_block)s '
+                'already exists.')
diff --git a/ec2api/openstack/__init__.py b/ec2api/openstack/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ec2api/openstack/common/__init__.py b/ec2api/openstack/common/__init__.py
new file mode 100644
index 00000000..d1223eaf
--- /dev/null
+++ b/ec2api/openstack/common/__init__.py
@@ -0,0 +1,17 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
diff --git a/ec2api/openstack/common/context.py b/ec2api/openstack/common/context.py
new file mode 100644
index 00000000..b612db71
--- /dev/null
+++ b/ec2api/openstack/common/context.py
@@ -0,0 +1,126 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple class that stores security context information in the web request. + +Projects should subclass this class if they wish to enhance the request +context or provide additional information in their specific WSGI pipeline. +""" + +import itertools +import uuid + + +def generate_request_id(): + return b'req-' + str(uuid.uuid4()).encode('ascii') + + +class RequestContext(object): + + """Helper class to represent useful information about a request context. + + Stores information about the security context under which the user + accesses the system, as well as additional request information. + """ + + user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}' + + def __init__(self, auth_token=None, user=None, tenant=None, domain=None, + user_domain=None, project_domain=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None, + instance_uuid=None): + self.auth_token = auth_token + self.user = user + self.tenant = tenant + self.domain = domain + self.user_domain = user_domain + self.project_domain = project_domain + self.is_admin = is_admin + self.read_only = read_only + self.show_deleted = show_deleted + self.instance_uuid = instance_uuid + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + user_idt = ( + self.user_idt_format.format(user=self.user or '-', + tenant=self.tenant or '-', + domain=self.domain or '-', + user_domain=self.user_domain or '-', + p_domain=self.project_domain or '-')) + + return {'user': self.user, + 'tenant': self.tenant, + 'domain': self.domain, + 'user_domain': self.user_domain, + 'project_domain': self.project_domain, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_token, + 'request_id': self.request_id, + 'instance_uuid': self.instance_uuid, + 'user_identity': user_idt} + + @classmethod + def from_dict(cls, ctx): + return cls( + auth_token=ctx.get("auth_token"), + user=ctx.get("user"), + tenant=ctx.get("tenant"), + domain=ctx.get("domain"), + user_domain=ctx.get("user_domain"), + project_domain=ctx.get("project_domain"), + is_admin=ctx.get("is_admin", False), + read_only=ctx.get("read_only", False), + show_deleted=ctx.get("show_deleted", False), + request_id=ctx.get("request_id"), + instance_uuid=ctx.get("instance_uuid")) + + +def get_admin_context(show_deleted=False): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. 
+ """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True diff --git a/ec2api/openstack/common/db/__init__.py b/ec2api/openstack/common/db/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ec2api/openstack/common/db/api.py b/ec2api/openstack/common/db/api.py new file mode 100644 index 00000000..7c56cccf --- /dev/null +++ b/ec2api/openstack/common/db/api.py @@ -0,0 +1,162 @@ +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Multiple DB API backend support. + +A DB backend module should implement a method named 'get_backend' which +takes no arguments. The method can return any object that implements DB +API methods. +""" + +import functools +import logging +import threading +import time + +from ec2api.openstack.common.db import exception +from ec2api.openstack.common.gettextutils import _LE +from ec2api.openstack.common import importutils + + +LOG = logging.getLogger(__name__) + + +def safe_for_db_retry(f): + """Enable db-retry for decorated function, if config option enabled.""" + f.__dict__['enable_retry'] = True + return f + + +class wrap_db_retry(object): + """Retry db.api methods, if DBConnectionError() raised + + Retry decorated db.api methods. If we enabled `use_db_reconnect` + in config, this decorator will be applied to all db.api functions, + marked with @safe_for_db_retry decorator. + Decorator catchs DBConnectionError() and retries function in a + loop until it succeeds, or until maximum retries count will be reached. + """ + + def __init__(self, retry_interval, max_retries, inc_retry_interval, + max_retry_interval): + super(wrap_db_retry, self).__init__() + + self.retry_interval = retry_interval + self.max_retries = max_retries + self.inc_retry_interval = inc_retry_interval + self.max_retry_interval = max_retry_interval + + def __call__(self, f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + next_interval = self.retry_interval + remaining = self.max_retries + + while True: + try: + return f(*args, **kwargs) + except exception.DBConnectionError as e: + if remaining == 0: + LOG.exception(_LE('DB exceeded retry limit.')) + raise exception.DBError(e) + if remaining != -1: + remaining -= 1 + LOG.exception(_LE('DB connection error.')) + # NOTE(vsergeyev): We are using patched time module, so + # this effectively yields the execution + # context to another green thread. + time.sleep(next_interval) + if self.inc_retry_interval: + next_interval = min( + next_interval * 2, + self.max_retry_interval + ) + return wrapper + + +class DBAPI(object): + def __init__(self, backend_name, backend_mapping=None, lazy=False, + **kwargs): + """Initialize the chosen DB API backend. 
+ + :param backend_name: name of the backend to load + :type backend_name: str + + :param backend_mapping: backend name -> module/class to load mapping + :type backend_mapping: dict + + :param lazy: load the DB backend lazily on the first DB API method call + :type lazy: bool + + Keyword arguments: + + :keyword use_db_reconnect: retry DB transactions on disconnect or not + :type use_db_reconnect: bool + + :keyword retry_interval: seconds between transaction retries + :type retry_interval: int + + :keyword inc_retry_interval: increase retry interval or not + :type inc_retry_interval: bool + + :keyword max_retry_interval: max interval value between retries + :type max_retry_interval: int + + :keyword max_retries: max number of retries before an error is raised + :type max_retries: int + + """ + + self._backend = None + self._backend_name = backend_name + self._backend_mapping = backend_mapping or {} + self._lock = threading.Lock() + + if not lazy: + self._load_backend() + + self.use_db_reconnect = kwargs.get('use_db_reconnect', False) + self.retry_interval = kwargs.get('retry_interval', 1) + self.inc_retry_interval = kwargs.get('inc_retry_interval', True) + self.max_retry_interval = kwargs.get('max_retry_interval', 10) + self.max_retries = kwargs.get('max_retries', 20) + + def _load_backend(self): + with self._lock: + if not self._backend: + # Import the untranslated name if we don't have a mapping + backend_path = self._backend_mapping.get(self._backend_name, + self._backend_name) + backend_mod = importutils.import_module(backend_path) + self._backend = backend_mod.get_backend() + + def __getattr__(self, key): + if not self._backend: + self._load_backend() + + attr = getattr(self._backend, key) + if not hasattr(attr, '__call__'): + return attr + # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry + # DB API methods, decorated with @safe_for_db_retry + # on disconnect. + if self.use_db_reconnect and hasattr(attr, 'enable_retry'): + attr = wrap_db_retry( + retry_interval=self.retry_interval, + max_retries=self.max_retries, + inc_retry_interval=self.inc_retry_interval, + max_retry_interval=self.max_retry_interval)(attr) + + return attr diff --git a/ec2api/openstack/common/db/exception.py b/ec2api/openstack/common/db/exception.py new file mode 100644 index 00000000..28fdb388 --- /dev/null +++ b/ec2api/openstack/common/db/exception.py @@ -0,0 +1,56 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
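To show how the DBAPI loader above is typically consumed, here is a hedged sketch; the backend module path 'ec2api.db.sqlalchemy.api' and the item_get() method are hypothetical and not part of this patch:

```python
# Hypothetical wiring of the DBAPI class defined above; the backend
# module path and method name are illustrative only.
from ec2api.openstack.common.db import api as db_api

_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.db.sqlalchemy.api'}

IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING,
                    lazy=True)


def item_get(context, item_id):
    # Resolved via DBAPI.__getattr__ to the backend's item_get(); when
    # use_db_reconnect is enabled, calls marked @safe_for_db_retry are
    # retried on DBConnectionError.
    return IMPL.item_get(context, item_id)
```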
+ +"""DB related custom exceptions.""" + +import six + +from ec2api.openstack.common.gettextutils import _ + + +class DBError(Exception): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(six.text_type(inner_exception)) + + +class DBDuplicateEntry(DBError): + """Wraps an implementation specific exception.""" + def __init__(self, columns=[], inner_exception=None): + self.columns = columns + super(DBDuplicateEntry, self).__init__(inner_exception) + + +class DBDeadlock(DBError): + def __init__(self, inner_exception=None): + super(DBDeadlock, self).__init__(inner_exception) + + +class DBInvalidUnicodeParameter(Exception): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +class DbMigrationError(DBError): + """Wraps migration specific exception.""" + def __init__(self, message=None): + super(DbMigrationError, self).__init__(message) + + +class DBConnectionError(DBError): + """Wraps connection specific exception.""" + pass diff --git a/ec2api/openstack/common/db/options.py b/ec2api/openstack/common/db/options.py new file mode 100644 index 00000000..df5daa03 --- /dev/null +++ b/ec2api/openstack/common/db/options.py @@ -0,0 +1,171 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo.config import cfg + + +database_opts = [ + cfg.StrOpt('sqlite_db', + deprecated_group='DEFAULT', + default='ec2api.sqlite', + help='The file name to use with SQLite'), + cfg.BoolOpt('sqlite_synchronous', + deprecated_group='DEFAULT', + default=True, + help='If True, SQLite uses synchronous mode'), + cfg.StrOpt('backend', + default='sqlalchemy', + deprecated_name='db_backend', + deprecated_group='DEFAULT', + help='The backend to use for db'), + cfg.StrOpt('connection', + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_connection', + group='DATABASE'), + cfg.DeprecatedOpt('connection', + group='sql'), ]), + cfg.StrOpt('mysql_sql_mode', + default='TRADITIONAL', + help='The SQL mode to be used for MySQL sessions. ' + 'This option, including the default, overrides any ' + 'server-set SQL mode. To use whatever SQL mode ' + 'is set by the server configuration, ' + 'set this to no value. 
Example: mysql_sql_mode='), + cfg.IntOpt('idle_timeout', + default=3600, + deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_idle_timeout', + group='DATABASE'), + cfg.DeprecatedOpt('idle_timeout', + group='sql')], + help='Timeout before idle sql connections are reaped'), + cfg.IntOpt('min_pool_size', + default=1, + deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_min_pool_size', + group='DATABASE')], + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_pool_size', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_pool_size', + group='DATABASE')], + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_retries', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_retries', + group='DATABASE')], + help='Maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('retry_interval', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', + group='DEFAULT'), + cfg.DeprecatedOpt('reconnect_interval', + group='DATABASE')], + help='Interval between retries of opening a sql connection'), + cfg.IntOpt('max_overflow', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', + group='DEFAULT'), + cfg.DeprecatedOpt('sqlalchemy_max_overflow', + group='DATABASE')], + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('connection_debug', + default=0, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', + group='DEFAULT')], + help='Verbosity of SQL debugging information. 0=None, ' + '100=Everything'), + cfg.BoolOpt('connection_trace', + default=False, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', + group='DEFAULT')], + help='Add python stack traces to SQL as comment strings'), + cfg.IntOpt('pool_timeout', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', + group='DATABASE')], + help='If set, use this value for pool_timeout with sqlalchemy'), + cfg.BoolOpt('use_db_reconnect', + default=False, + help='Enable the experimental use of database reconnect ' + 'on connection lost'), + cfg.IntOpt('db_retry_interval', + default=1, + help='seconds between db connection retries'), + cfg.BoolOpt('db_inc_retry_interval', + default=True, + help='Whether to increase interval between db connection ' + 'retries, up to db_max_retry_interval'), + cfg.IntOpt('db_max_retry_interval', + default=10, + help='max seconds between db connection retries, if ' + 'db_inc_retry_interval is enabled'), + cfg.IntOpt('db_max_retries', + default=20, + help='maximum db connection retries before error is raised. 
' + '(setting -1 implies an infinite retry count)'), +] + +CONF = cfg.CONF +CONF.register_opts(database_opts, 'database') + + +def set_defaults(sql_connection, sqlite_db, max_pool_size=None, + max_overflow=None, pool_timeout=None): + """Set defaults for configuration variables.""" + cfg.set_defaults(database_opts, + connection=sql_connection, + sqlite_db=sqlite_db) + # Update the QueuePool defaults + if max_pool_size is not None: + cfg.set_defaults(database_opts, + max_pool_size=max_pool_size) + if max_overflow is not None: + cfg.set_defaults(database_opts, + max_overflow=max_overflow) + if pool_timeout is not None: + cfg.set_defaults(database_opts, + pool_timeout=pool_timeout) + + +def list_opts(): + """Returns a list of oslo.config options available in the library. + + The returned list includes all oslo.config options which may be registered + at runtime by the library. + + Each element of the list is a tuple. The first element is the name of the + group under which the list of elements in the second element will be + registered. A group name of None corresponds to the [DEFAULT] group in + config files. + + The purpose of this is to allow tools like the Oslo sample config file + generator to discover the options exposed to users by this library. + + :returns: a list of (group_name, opts) tuples + """ + return [('database', copy.deepcopy(database_opts))] diff --git a/ec2api/openstack/common/db/sqlalchemy/__init__.py b/ec2api/openstack/common/db/sqlalchemy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ec2api/openstack/common/db/sqlalchemy/migration.py b/ec2api/openstack/common/db/sqlalchemy/migration.py new file mode 100644 index 00000000..4c44fb81 --- /dev/null +++ b/ec2api/openstack/common/db/sqlalchemy/migration.py @@ -0,0 +1,278 @@ +# coding: utf-8 +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Base on code in migrate/changeset/databases/sqlite.py which is under +# the following license: +# +# The MIT License +# +# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +import os +import re + +from migrate.changeset import ansisql +from migrate.changeset.databases import sqlite +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository +import sqlalchemy +from sqlalchemy.schema import UniqueConstraint + +from ec2api.openstack.common.db import exception +from ec2api.openstack.common.gettextutils import _ + + +def _get_unique_constraints(self, table): + """Retrieve information about existing unique constraints of the table + + This feature is needed for _recreate_table() to work properly. + Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x. + + """ + + data = table.metadata.bind.execute( + """SELECT sql + FROM sqlite_master + WHERE + type='table' AND + name=:table_name""", + table_name=table.name + ).fetchone()[0] + + UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" + return [ + UniqueConstraint( + *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")], + name=name + ) + for name, cols in re.findall(UNIQUE_PATTERN, data) + ] + + +def _recreate_table(self, table, column=None, delta=None, omit_uniques=None): + """Recreate the table properly + + Unlike the corresponding original method of sqlalchemy-migrate this one + doesn't drop existing unique constraints when creating a new one. + + """ + + table_name = self.preparer.format_table(table) + + # we remove all indexes so as not to have + # problems during copy and re-create + for index in table.indexes: + index.drop() + + # reflect existing unique constraints + for uc in self._get_unique_constraints(table): + table.append_constraint(uc) + # omit given unique constraints when creating a new table if required + table.constraints = set([ + cons for cons in table.constraints + if omit_uniques is None or cons.name not in omit_uniques + ]) + + self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name) + self.execute() + + insertion_string = self._modify_table(table, column, delta) + + table.create(bind=self.connection) + self.append(insertion_string % {'table_name': table_name}) + self.execute() + self.append('DROP TABLE migration_tmp') + self.execute() + + +def _visit_migrate_unique_constraint(self, *p, **k): + """Drop the given unique constraint + + The corresponding original method of sqlalchemy-migrate just + raises NotImplemented error + + """ + + self.recreate_table(p[0].table, omit_uniques=[p[0].name]) + + +def patch_migrate(): + """A workaround for SQLite's inability to alter things + + SQLite abilities to alter tables are very limited (please read + http://www.sqlite.org/lang_altertable.html for more details). + E. g. one can't drop a column or a constraint in SQLite. The + workaround for this is to recreate the original table omitting + the corresponding constraint (or column). + + sqlalchemy-migrate library has recreate_table() method that + implements this workaround, but it does it wrong: + + - information about unique constraints of a table + is not retrieved. 
So if you have a table with one + unique constraint and a migration adding another one + you will end up with a table that has only the + latter unique constraint, and the former will be lost + + - dropping of unique constraints is not supported at all + + The proper way to fix this is to provide a pull-request to + sqlalchemy-migrate, but the project seems to be dead. So we + can go on with monkey-patching of the lib at least for now. + + """ + + # this patch is needed to ensure that recreate_table() doesn't drop + # existing unique constraints of the table when creating a new one + helper_cls = sqlite.SQLiteHelper + helper_cls.recreate_table = _recreate_table + helper_cls._get_unique_constraints = _get_unique_constraints + + # this patch is needed to be able to drop existing unique constraints + constraint_cls = sqlite.SQLiteConstraintDropper + constraint_cls.visit_migrate_unique_constraint = \ + _visit_migrate_unique_constraint + constraint_cls.__bases__ = (ansisql.ANSIColumnDropper, + sqlite.SQLiteConstraintGenerator) + + +def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True): + """Upgrade or downgrade a database. + + Function runs the upgrade() or downgrade() functions in change scripts. + + :param engine: SQLAlchemy engine instance for a given database + :param abs_path: Absolute path to migrate repository. + :param version: Database will upgrade/downgrade until this version. + If None - database will update to the latest + available version. + :param init_version: Initial database version + :param sanity_check: Require schema sanity checking for all tables + """ + + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.DbMigrationError( + message=_("version should be an integer")) + + current_version = db_version(engine, abs_path, init_version) + repository = _find_migrate_repo(abs_path) + if sanity_check: + _db_schema_sanity_check(engine) + if version is None or version > current_version: + return versioning_api.upgrade(engine, repository, version) + else: + return versioning_api.downgrade(engine, repository, + version) + + +def _db_schema_sanity_check(engine): + """Ensure all database tables were created with required parameters. + + :param engine: SQLAlchemy engine instance for a given database + + """ + + if engine.name == 'mysql': + onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' + 'from information_schema.TABLES ' + 'where TABLE_SCHEMA=%s and ' + 'TABLE_COLLATION NOT LIKE "%%utf8%%"') + + # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic + # versioning tables from the tables we need to verify utf8 status on. + # Non-standard table names are not supported. + EXCLUDED_TABLES = ['migrate_version', 'alembic_version'] + + table_names = [res[0] for res in + engine.execute(onlyutf8_sql, engine.url.database) if + res[0].lower() not in EXCLUDED_TABLES] + + if len(table_names) > 0: + raise ValueError(_('Tables "%s" have non utf8 collation, ' + 'please make sure all tables are CHARSET=utf8' + ) % ','.join(table_names)) + + +def db_version(engine, abs_path, init_version): + """Show the current version of the repository. 
+
+    :param engine: SQLAlchemy engine instance for a given database
+    :param abs_path: Absolute path to migrate repository
+    :param init_version: Initial database version
+    """
+    repository = _find_migrate_repo(abs_path)
+    try:
+        return versioning_api.db_version(engine, repository)
+    except versioning_exceptions.DatabaseNotControlledError:
+        meta = sqlalchemy.MetaData()
+        meta.reflect(bind=engine)
+        tables = meta.tables
+        if len(tables) == 0 or 'alembic_version' in tables:
+            db_version_control(engine, abs_path, version=init_version)
+            return versioning_api.db_version(engine, repository)
+        else:
+            raise exception.DbMigrationError(
+                message=_(
+                    "The database is not under version control, but has "
+                    "tables. Please stamp the current version of the schema "
+                    "manually."))
+
+
+def db_version_control(engine, abs_path, version=None):
+    """Mark a database as under this repository's version control.
+
+    Once a database is under version control, schema changes should
+    only be done via change scripts in this repository.
+
+    :param engine: SQLAlchemy engine instance for a given database
+    :param abs_path: Absolute path to migrate repository
+    :param version: Initial database version
+    """
+    repository = _find_migrate_repo(abs_path)
+    versioning_api.version_control(engine, repository, version)
+    return version
+
+
+def _find_migrate_repo(abs_path):
+    """Get the project's change script repository
+
+    :param abs_path: Absolute path to migrate repository
+    """
+    if not os.path.exists(abs_path):
+        raise exception.DbMigrationError("Path %s not found" % abs_path)
+    return Repository(abs_path)
diff --git a/ec2api/openstack/common/db/sqlalchemy/models.py b/ec2api/openstack/common/db/sqlalchemy/models.py
new file mode 100644
index 00000000..db401b78
--- /dev/null
+++ b/ec2api/openstack/common/db/sqlalchemy/models.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
+# Copyright 2012 Cloudscaling Group, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+SQLAlchemy models.
+"""
+
+import six
+
+from sqlalchemy import Column, Integer
+from sqlalchemy import DateTime
+from sqlalchemy.orm import object_mapper
+
+from ec2api.openstack.common import timeutils
+
+
+class ModelBase(six.Iterator):
+    """Base class for models."""
+    __table_initialized__ = False
+
+    def save(self, session):
+        """Save this object."""
+
+        # NOTE(boris-42): This part of the code should look like:
+        #    session.add(self)
+        #    session.flush()
+        # But there is a bug in sqlalchemy and eventlet that
+        # raises NoneType exception if there is no running
+        # transaction and rollback is called. As long as
+        # sqlalchemy has this bug we have to create transaction
+        # explicitly.
+        with session.begin(subtransactions=True):
+            session.add(self)
+            session.flush()
+
+    def __setitem__(self, key, value):
+        setattr(self, key, value)
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def get(self, key, default=None):
+        return getattr(self, key, default)
+
+    @property
+    def _extra_keys(self):
+        """Specifies custom fields
+
+        Subclasses can override this property to return a list
+        of custom fields that should be included in their dict
+        representation.
+
+        For reference check tests/db/sqlalchemy/test_models.py
+        """
+        return []
+
+    def __iter__(self):
+        columns = list(dict(object_mapper(self).columns).keys())
+        # NOTE(russellb): Allow models to specify other keys that can be looked
+        # up, beyond the actual db columns. An example would be the 'name'
+        # property for an Instance.
+        columns.extend(self._extra_keys)
+        self._i = iter(columns)
+        return self
+
+    # In Python 3, __next__() has replaced next().
+    def __next__(self):
+        n = six.advance_iterator(self._i)
+        return n, getattr(self, n)
+
+    def next(self):
+        return self.__next__()
+
+    def update(self, values):
+        """Make the model object behave like a dict."""
+        for k, v in six.iteritems(values):
+            setattr(self, k, v)
+
+    def iteritems(self):
+        """Make the model object behave like a dict.
+
+        Includes attributes from joins.
+        """
+        local = dict(self)
+        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
+                      if not k[0] == '_'])
+        local.update(joined)
+        return six.iteritems(local)
+
+
+class TimestampMixin(object):
+    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
+    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
+
+
+class SoftDeleteMixin(object):
+    deleted_at = Column(DateTime)
+    deleted = Column(Integer, default=0)
+
+    def soft_delete(self, session):
+        """Mark this object as deleted."""
+        self.deleted = self.id
+        self.deleted_at = timeutils.utcnow()
+        self.save(session=session)
diff --git a/ec2api/openstack/common/db/sqlalchemy/provision.py b/ec2api/openstack/common/db/sqlalchemy/provision.py
new file mode 100644
index 00000000..2412bc6d
--- /dev/null
+++ b/ec2api/openstack/common/db/sqlalchemy/provision.py
@@ -0,0 +1,157 @@
+# Copyright 2013 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provision test environment for specific DB backends"""
+
+import argparse
+import logging
+import os
+import random
+import string
+
+from six import moves
+import sqlalchemy
+
+from ec2api.openstack.common.db import exception as exc
+
+
+LOG = logging.getLogger(__name__)
+
+
+def get_engine(uri):
+    """Engine creation
+
+    Call the function with the admin URI to get an admin connection. An
+    admin connection is required to create a temporary user and database
+    for each particular test. Otherwise, pass the URI of an existing
+    temporary database to recreate a connection to it.
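+
+    A minimal usage sketch (the URIs here are illustrative only):
+
+        admin_engine = get_engine('mysql://root:secret@localhost')
+        temp_uri = create_database(admin_engine)  # temporary database URI
+        engine = get_engine(temp_uri)
+        ...
+        drop_database(admin_engine, temp_uri)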
+ """ + return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool) + + +def _execute_sql(engine, sql, driver): + """Initialize connection, execute sql query and close it.""" + try: + with engine.connect() as conn: + if driver == 'postgresql': + conn.connection.set_isolation_level(0) + for s in sql: + conn.execute(s) + except sqlalchemy.exc.OperationalError: + msg = ('%s does not match database admin ' + 'credentials or database does not exist.') + LOG.exception(msg % engine.url) + raise exc.DBConnectionError(msg % engine.url) + + +def create_database(engine): + """Provide temporary user and database for each particular test.""" + driver = engine.name + + auth = { + 'database': ''.join(random.choice(string.ascii_lowercase) + for i in moves.range(10)), + 'user': engine.url.username, + 'passwd': engine.url.password, + } + + sqls = [ + "drop database if exists %(database)s;", + "create database %(database)s;" + ] + + if driver == 'sqlite': + return 'sqlite:////tmp/%s' % auth['database'] + elif driver in ['mysql', 'postgresql']: + sql_query = map(lambda x: x % auth, sqls) + _execute_sql(engine, sql_query, driver) + else: + raise ValueError('Unsupported RDBMS %s' % driver) + + params = auth.copy() + params['backend'] = driver + return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params + + +def drop_database(admin_engine, current_uri): + """Drop temporary database and user after each particular test.""" + + engine = get_engine(current_uri) + driver = engine.name + auth = {'database': engine.url.database, 'user': engine.url.username} + + if driver == 'sqlite': + try: + os.remove(auth['database']) + except OSError: + pass + elif driver in ['mysql', 'postgresql']: + sql = "drop database if exists %(database)s;" + _execute_sql(admin_engine, [sql % auth], driver) + else: + raise ValueError('Unsupported RDBMS %s' % driver) + + +def main(): + """Controller to handle commands + + ::create: Create test user and database with random names. + ::drop: Drop user and database created by previous command. + """ + parser = argparse.ArgumentParser( + description='Controller to handle database creation and dropping' + ' commands.', + epilog='Under normal circumstances is not used directly.' 
+ ' Used in .testr.conf to automate test database creation' + ' and dropping processes.') + subparsers = parser.add_subparsers( + help='Subcommands to manipulate temporary test databases.') + + create = subparsers.add_parser( + 'create', + help='Create temporary test ' + 'databases and users.') + create.set_defaults(which='create') + create.add_argument( + 'instances_count', + type=int, + help='Number of databases to create.') + + drop = subparsers.add_parser( + 'drop', + help='Drop temporary test databases and users.') + drop.set_defaults(which='drop') + drop.add_argument( + 'instances', + nargs='+', + help='List of databases uri to be dropped.') + + args = parser.parse_args() + + connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', + 'sqlite://') + engine = get_engine(connection_string) + which = args.which + + if which == "create": + for i in range(int(args.instances_count)): + print(create_database(engine)) + elif which == "drop": + for db in args.instances: + drop_database(engine, db) + + +if __name__ == "__main__": + main() diff --git a/ec2api/openstack/common/db/sqlalchemy/session.py b/ec2api/openstack/common/db/sqlalchemy/session.py new file mode 100644 index 00000000..a22f6f26 --- /dev/null +++ b/ec2api/openstack/common/db/sqlalchemy/session.py @@ -0,0 +1,905 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend. + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. + `model_query()` will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + .. note:: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. For an example, look at + the code around quotas and `reservation_rollback()`. + + Examples: + + .. code-block:: python + + def get_foo(context, foo): + return (model_query(context, models.Foo). + filter_by(foo=foo). + first()) + + def update_foo(context, id, newfoo): + (model_query(context, models.Foo). + filter_by(id=id). + update({'foo': newfoo})) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keep all the reads and writes within + the context managed by a single session. In this way, the session's + `__exit__` handler will take care of calling `flush()` and `commit()` for + you. 
If using this approach, you should not explicitly call `flush()` or + `commit()`. Any error within the context of the session will cause the + session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be + raised in `session`'s `__exit__` handler, and any try/except within the + context managed by `session` will not be triggered. And catching other + non-database errors in the session will not trigger the ROLLBACK, so + exception handlers should always be outside the session, unless the + developer wants to do a partial commit on purpose. If the connection is + dropped before this is possible, the database will implicitly roll back the + transaction. + + .. note:: Statements in the session scope will not be automatically retried. + + If you create models within the session, they need to be added, but you + do not need to call `model.save()`: + + .. code-block:: python + + def create_many_foo(context, foos): + session = sessionmaker() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = sessionmaker() + with session.begin(): + foo_ref = (model_query(context, models.Foo, session). + filter_by(id=foo_id). + first()) + (model_query(context, models.Bar, session). + filter_by(id=foo_ref['bar_id']). + update({'bar': newbar})) + + .. note:: `update_bar` is a trivially simple example of using + ``with session.begin``. Whereas `create_many_foo` is a good example of + when a transaction is needed, it is always best to use as few queries as + possible. + + The two queries in `update_bar` can be better expressed using a single query + which avoids the need for an explicit transaction. It can be expressed like + so: + + .. code-block:: python + + def update_bar(context, foo_id, newbar): + subq = (model_query(context, models.Foo.id). + filter_by(id=foo_id). + limit(1). + subquery()) + (model_query(context, models.Bar). + filter_by(id=subq.as_scalar()). + update({'bar': newbar})) + + For reference, this emits approximately the following SQL statement: + + .. code-block:: sql + + UPDATE bar SET bar = ${newbar} + WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + + .. note:: `create_duplicate_foo` is a trivially simple example of catching an + exception while using ``with session.begin``. Here create two duplicate + instances with same primary key, must catch the exception out of context + managed by a single session: + + .. code-block:: python + + def create_duplicate_foo(context): + foo1 = models.Foo() + foo2 = models.Foo() + foo1.id = foo2.id = 1 + session = sessionmaker() + try: + with session.begin(): + session.add(foo1) + session.add(foo2) + except exception.DBDuplicateEntry as e: + handle_error(e) + +* Passing an active session between methods. Sessions should only be passed + to private methods. The private method must use a subtransaction; otherwise + SQLAlchemy will throw an error when you call `session.begin()` on an existing + transaction. Public methods should not accept a session parameter and should + not be involved in sessions within the caller's scope. + + Note that this incurs more overhead in SQLAlchemy than the above means + due to nesting transactions, and it is not possible to implicitly retry + failed database operations when using this approach. + + This also makes code somewhat more difficult to read and debug, because a + single database transaction spans more than one method. Error handling + becomes less clear in this situation. 
When this is needed for code clarity, + it should be clearly documented. + + .. code-block:: python + + def myfunc(foo): + session = sessionmaker() + with session.begin(): + # do some database things + bar = _private_func(foo, session) + return bar + + def _private_func(foo, session=None): + if not session: + session = sessionmaker() + with session.begin(subtransaction=True): + # do some other database things + return bar + + +There are some things which it is best to avoid: + +* Don't keep a transaction open any longer than necessary. + + This means that your ``with session.begin()`` block should be as short + as possible, while still containing all the related calls for that + transaction. + +* Avoid ``with_lockmode('UPDATE')`` when possible. + + In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use + ``INSERT .. ON DUPLICATE KEY UPDATE``. + However, this can not be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + + +Enabling soft deletes: + +* To use/enable soft-deletes, the `SoftDeleteMixin` must be added + to your model class. For example: + + .. code-block:: python + + class NovaBase(models.SoftDeleteMixin, models.ModelBase): + pass + + +Efficient use of soft deletes: + +* There are two possible ways to mark a record as deleted: + `model.soft_delete()` and `query.soft_delete()`. + + The `model.soft_delete()` method works with a single already-fetched entry. + `query.soft_delete()` makes only one db request for all entries that + correspond to the query. + +* In almost all cases you should use `query.soft_delete()`. Some examples: + + .. code-block:: python + + def soft_delete_bar(): + count = model_query(BarModel).find(some_condition).soft_delete() + if count == 0: + raise Exception("0 entries were soft deleted") + + def complex_soft_delete_with_synchronization_bar(session=None): + if session is None: + session = sessionmaker() + with session.begin(subtransactions=True): + count = (model_query(BarModel). + find(some_condition). + soft_delete(synchronize_session=True)) + # Here synchronize_session is required, because we + # don't know what is going on in outer session. + if count == 0: + raise Exception("0 entries were soft deleted") + +* There is only one situation where `model.soft_delete()` is appropriate: when + you fetch a single record, work with it, and mark it as deleted in the same + transaction. + + .. code-block:: python + + def soft_delete_bar_model(): + session = sessionmaker() + with session.begin(): + bar_ref = model_query(BarModel).find(some_condition).first() + # Work with bar_ref + bar_ref.soft_delete(session=session) + + However, if you need to work with all entries that correspond to query and + then soft delete them you should use the `query.soft_delete()` method: + + .. 
code-block:: python
+
+      def soft_delete_multi_models():
+          session = sessionmaker()
+          with session.begin():
+              query = (model_query(BarModel, session=session).
+                       find(some_condition))
+              model_refs = query.all()
+              # Work with model_refs
+              query.soft_delete(synchronize_session=False)
+              # synchronize_session=False should be set if there is no outer
+              # session and these entries are not used after this.
+
+  When working with many rows, it is very important to use query.soft_delete,
+  which issues a single query. Using `model.soft_delete()`, as in the following
+  example, is very inefficient.
+
+  .. code-block:: python
+
+      for bar_ref in bar_refs:
+          bar_ref.soft_delete(session=session)
+      # This will produce count(bar_refs) db requests.
+
+"""
+
+import functools
+import logging
+import re
+import time
+
+import six
+from sqlalchemy import exc as sqla_exc
+from sqlalchemy.interfaces import PoolListener
+import sqlalchemy.orm
+from sqlalchemy.pool import NullPool, StaticPool
+from sqlalchemy.sql.expression import literal_column
+
+from ec2api.openstack.common.db import exception
+from ec2api.openstack.common.gettextutils import _LE, _LW
+from ec2api.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class SqliteForeignKeysListener(PoolListener):
+    """Ensures that the foreign key constraints are enforced in SQLite.
+
+    Foreign key constraints are disabled by default in SQLite, so they
+    are enabled here for every database connection.
+    """
+    def connect(self, dbapi_con, con_record):
+        dbapi_con.execute('pragma foreign_keys=ON')
+
+
+# note(boris-42): In current versions of DB backends unique constraint
+# violation messages follow the structure:
+#
+# sqlite:
+# 1 column - (IntegrityError) column c1 is not unique
+# N columns - (IntegrityError) column c1, c2, ..., N are not unique
+#
+# sqlite since 3.7.16:
+# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
+#
+# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
+#
+# postgres:
+# 1 column - (IntegrityError) duplicate key value violates unique
+#               constraint "users_c1_key"
+# N columns - (IntegrityError) duplicate key value violates unique
+#               constraint "name_of_our_constraint"
+#
+# mysql:
+# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
+#               'c1'")
+# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
+#               with -' for key 'name_of_our_constraint'")
+#
+# ibm_db_sa:
+# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
+#                statement, UPDATE statement, or foreign key update caused by a
+#                DELETE statement are not valid because the primary key, unique
+#                constraint or unique index identified by "2" constrains table
+#                "NOVA.KEY_PAIRS" from having duplicate values for the index
+#                key.
+_DUP_KEY_RE_DB = {
+    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
+               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
+    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
+    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
+    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
+}
+
+
+def _raise_if_duplicate_entry_error(integrity_error, engine_name):
+    """Raise exception if two entries are duplicated.
+
+    A DBDuplicateEntry exception is raised if the integrity error wraps a
+    unique constraint violation.
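+
+    For example (values illustrative only), a MySQL IntegrityError whose
+    message is (1062, "Duplicate entry 'value_of_c1' for key 'c1'") is
+    re-raised as a DBDuplicateEntry whose columns attribute is ['c1'] and
+    which wraps the original error.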
+ """ + + def get_columns_from_uniq_cons_or_name(columns): + # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" + # where `t` it is table name and columns `c1`, `c2` + # are in UniqueConstraint. + uniqbase = "uniq_" + if not columns.startswith(uniqbase): + if engine_name == "postgresql": + return [columns[columns.index("_") + 1:columns.rindex("_")]] + return [columns] + return columns[len(uniqbase):].split("0")[1:] + + if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"): + return + + # FIXME(johannes): The usage of the .message attribute has been + # deprecated since Python 2.6. However, the exceptions raised by + # SQLAlchemy can differ when using unicode() and accessing .message. + # An audit across all three supported engines will be necessary to + # ensure there are no regressions. + for pattern in _DUP_KEY_RE_DB[engine_name]: + match = pattern.match(integrity_error.message) + if match: + break + else: + return + + # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the + # columns so we have to omit that from the DBDuplicateEntry error. + columns = '' + + if engine_name != 'ibm_db_sa': + columns = match.group(1) + + if engine_name == "sqlite": + columns = [c.split('.')[-1] for c in columns.strip().split(", ")] + else: + columns = get_columns_from_uniq_cons_or_name(columns) + raise exception.DBDuplicateEntry(columns, integrity_error) + + +# NOTE(comstud): In current versions of DB backends, Deadlock violation +# messages follow the structure: +# +# mysql: +# (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' +# 'restarting transaction') +_DEADLOCK_RE_DB = { + "mysql": re.compile(r"^.*\(1213, 'Deadlock.*") +} + + +def _raise_if_deadlock_error(operational_error, engine_name): + """Raise exception on deadlock condition. + + Raise DBDeadlock exception if OperationalError contains a Deadlock + condition. + """ + re = _DEADLOCK_RE_DB.get(engine_name) + if re is None: + return + # FIXME(johannes): The usage of the .message attribute has been + # deprecated since Python 2.6. However, the exceptions raised by + # SQLAlchemy can differ when using unicode() and accessing .message. + # An audit across all three supported engines will be necessary to + # ensure there are no regressions. + m = re.match(operational_error.message) + if not m: + return + raise exception.DBDeadlock(operational_error) + + +def _wrap_db_error(f): + @functools.wraps(f) + def _wrap(self, *args, **kwargs): + try: + assert issubclass( + self.__class__, sqlalchemy.orm.session.Session + ), ('_wrap_db_error() can only be applied to methods of ' + 'subclasses of sqlalchemy.orm.session.Session.') + + return f(self, *args, **kwargs) + except UnicodeEncodeError: + raise exception.DBInvalidUnicodeParameter() + except sqla_exc.OperationalError as e: + _raise_if_db_connection_lost(e, self.bind) + _raise_if_deadlock_error(e, self.bind.dialect.name) + # NOTE(comstud): A lot of code is checking for OperationalError + # so let's not wrap it for now. + raise + # note(boris-42): We should catch unique constraint violation and + # wrap it by our own DBDuplicateEntry exception. Unique constraint + # violation is wrapped by IntegrityError. + except sqla_exc.IntegrityError as e: + # note(boris-42): SqlAlchemy doesn't unify errors from different + # DBs so we must do this. Also in some tables (for example + # instance_types) there are more than one unique constraint. 
This + # means we should get names of columns, which values violate + # unique constraint, from error message. + _raise_if_duplicate_entry_error(e, self.bind.dialect.name) + raise exception.DBError(e) + except Exception as e: + LOG.exception(_LE('DB exception wrapped.')) + raise exception.DBError(e) + return _wrap + + +def _synchronous_switch_listener(dbapi_conn, connection_rec): + """Switch sqlite connections to non-synchronous mode.""" + dbapi_conn.execute("PRAGMA synchronous = OFF") + + +def _add_regexp_listener(dbapi_con, con_record): + """Add REGEXP function to sqlite connections.""" + + def regexp(expr, item): + reg = re.compile(expr) + return reg.search(six.text_type(item)) is not None + dbapi_con.create_function('regexp', 2, regexp) + + +def _thread_yield(dbapi_con, con_record): + """Ensure other greenthreads get a chance to be executed. + + If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will + execute instead of time.sleep(0). + Force a context switch. With common database backends (eg MySQLdb and + sqlite), there is no implicit yield caused by network I/O since they are + implemented by C libraries that eventlet cannot monkey patch. + """ + time.sleep(0) + + +def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): + """Ensures that MySQL, PostgreSQL or DB2 connections are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + cursor = dbapi_conn.cursor() + try: + ping_sql = 'select 1' + if engine.name == 'ibm_db_sa': + # DB2 requires a table expression + ping_sql = 'select 1 from (values (1)) AS t1' + cursor.execute(ping_sql) + except Exception as ex: + if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): + msg = _LW('Database server has gone away: %s') % ex + LOG.warning(msg) + + # if the database server has gone away, all connections in the pool + # have become invalid and we can safely close all of them here, + # rather than waste time on checking of every single connection + engine.dispose() + + # this will be handled by SQLAlchemy and will force it to create + # a new connection and retry the original action + raise sqla_exc.DisconnectionError(msg) + else: + raise + + +def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None): + """Set the sql_mode session variable. + + MySQL supports several server modes. The default is None, but sessions + may choose to enable server modes like TRADITIONAL, ANSI, + several STRICT_* modes and others. + + Note: passing in '' (empty string) for sql_mode clears + the SQL mode for the session, overriding a potentially set + server default. + """ + + cursor = dbapi_con.cursor() + cursor.execute("SET SESSION sql_mode = %s", [sql_mode]) + + +def _mysql_get_effective_sql_mode(engine): + """Returns the effective SQL mode for connections from the engine pool. + + Returns ``None`` if the mode isn't available, otherwise returns the mode. + + """ + # Get the real effective SQL mode. Even when unset by + # our own config, the server may still be operating in a specific + # SQL mode as set by the server configuration. + # Also note that the checkout listener will be called on execute to + # set the mode if it's registered. 
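+    # For example (values illustrative only), a server running with common
+    # defaults may return the row ('sql_mode', 'STRICT_TRANS_TABLES'), in
+    # which case this function returns 'STRICT_TRANS_TABLES'.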
+ row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone() + if row is None: + return + return row[1] + + +def _mysql_check_effective_sql_mode(engine): + """Logs a message based on the effective SQL mode for MySQL connections.""" + realmode = _mysql_get_effective_sql_mode(engine) + + if realmode is None: + LOG.warning(_LW('Unable to detect effective SQL mode')) + return + + LOG.debug('MySQL server mode set to %s', realmode) + # 'TRADITIONAL' mode enables several other modes, so + # we need a substring match here + if not ('TRADITIONAL' in realmode.upper() or + 'STRICT_ALL_TABLES' in realmode.upper()): + LOG.warning(_LW("MySQL SQL mode is '%s', " + "consider enabling TRADITIONAL or STRICT_ALL_TABLES"), + realmode) + + +def _mysql_set_mode_callback(engine, sql_mode): + if sql_mode is not None: + mode_callback = functools.partial(_set_session_sql_mode, + sql_mode=sql_mode) + sqlalchemy.event.listen(engine, 'connect', mode_callback) + _mysql_check_effective_sql_mode(engine) + + +def _is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + # For the db2, the error code is -30081 since the db2 is still not ready + conn_err_codes = ('2002', '2003', '2006', '2013', '-30081') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def _raise_if_db_connection_lost(error, engine): + # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor) + # requires connection and cursor in incoming parameters, + # but we have no possibility to create connection if DB + # is not available, so in such case reconnect fails. + # But is_disconnect() ignores these parameters, so it + # makes sense to pass to function None as placeholder + # instead of connection and cursor. 
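+    # For example (illustrative), MySQL's (2006, 'MySQL server has gone
+    # away') error is reported by the dialect as a disconnect and is
+    # re-raised here as DBConnectionError.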
+ if engine.dialect.is_disconnect(error, None, None): + raise exception.DBConnectionError(error) + + +def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, + idle_timeout=3600, + connection_debug=0, max_pool_size=None, max_overflow=None, + pool_timeout=None, sqlite_synchronous=True, + connection_trace=False, max_retries=10, retry_interval=10): + """Return a new SQLAlchemy engine.""" + + connection_dict = sqlalchemy.engine.url.make_url(sql_connection) + + engine_args = { + "pool_recycle": idle_timeout, + 'convert_unicode': True, + } + + logger = logging.getLogger('sqlalchemy.engine') + + # Map SQL debug level to Python log level + if connection_debug >= 100: + logger.setLevel(logging.DEBUG) + elif connection_debug >= 50: + logger.setLevel(logging.INFO) + else: + logger.setLevel(logging.WARNING) + + if "sqlite" in connection_dict.drivername: + if sqlite_fk: + engine_args["listeners"] = [SqliteForeignKeysListener()] + engine_args["poolclass"] = NullPool + + if sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + else: + if max_pool_size is not None: + engine_args['pool_size'] = max_pool_size + if max_overflow is not None: + engine_args['max_overflow'] = max_overflow + if pool_timeout is not None: + engine_args['pool_timeout'] = pool_timeout + + engine = sqlalchemy.create_engine(sql_connection, **engine_args) + + sqlalchemy.event.listen(engine, 'checkin', _thread_yield) + + if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'): + ping_callback = functools.partial(_ping_listener, engine) + sqlalchemy.event.listen(engine, 'checkout', ping_callback) + if engine.name == 'mysql': + if mysql_sql_mode: + _mysql_set_mode_callback(engine, mysql_sql_mode) + elif 'sqlite' in connection_dict.drivername: + if not sqlite_synchronous: + sqlalchemy.event.listen(engine, 'connect', + _synchronous_switch_listener) + sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) + + if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb': + _patch_mysqldb_with_stacktrace_comments() + + try: + engine.connect() + except sqla_exc.OperationalError as e: + if not _is_db_connection_error(e.args[0]): + raise + + remaining = max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _LW('SQL connection failed. 
%s attempts left.')
+            LOG.warning(msg % remaining)
+            if remaining != 'infinite':
+                remaining -= 1
+            time.sleep(retry_interval)
+            try:
+                engine.connect()
+                break
+            except sqla_exc.OperationalError as e:
+                if (remaining != 'infinite' and remaining == 0) or \
+                        not _is_db_connection_error(e.args[0]):
+                    raise
+    return engine
+
+
+class Query(sqlalchemy.orm.query.Query):
+    """Subclass of sqlalchemy.query with soft_delete() method."""
+    def soft_delete(self, synchronize_session='evaluate'):
+        return self.update({'deleted': literal_column('id'),
+                            'updated_at': literal_column('updated_at'),
+                            'deleted_at': timeutils.utcnow()},
+                           synchronize_session=synchronize_session)
+
+
+class Session(sqlalchemy.orm.session.Session):
+    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
+    @_wrap_db_error
+    def query(self, *args, **kwargs):
+        return super(Session, self).query(*args, **kwargs)
+
+    @_wrap_db_error
+    def flush(self, *args, **kwargs):
+        return super(Session, self).flush(*args, **kwargs)
+
+    @_wrap_db_error
+    def execute(self, *args, **kwargs):
+        return super(Session, self).execute(*args, **kwargs)
+
+
+def get_maker(engine, autocommit=True, expire_on_commit=False):
+    """Return a SQLAlchemy sessionmaker using the given engine."""
+    return sqlalchemy.orm.sessionmaker(bind=engine,
+                                       class_=Session,
+                                       autocommit=autocommit,
+                                       expire_on_commit=expire_on_commit,
+                                       query_cls=Query)
+
+
+def _patch_mysqldb_with_stacktrace_comments():
+    """Adds current stack trace as a comment in queries.
+
+    Patches MySQLdb.cursors.BaseCursor._do_query.
+    """
+    import traceback
+
+    import MySQLdb.cursors
+
+    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
+
+    def _do_query(self, q):
+        stack = ''
+        for filename, line, method, function in traceback.extract_stack():
+            # exclude various common things from trace
+            if filename.endswith('session.py') and method == '_do_query':
+                continue
+            if filename.endswith('api.py') and method == 'wrapper':
+                continue
+            if filename.endswith('utils.py') and method == '_inner':
+                continue
+            if filename.endswith('exception.py') and method == '_wrap':
+                continue
+            # db/api is just a wrapper around db/sqlalchemy/api
+            if filename.endswith('db/api.py'):
+                continue
+            # only trace inside ec2api
+            index = filename.rfind('ec2api')
+            if index == -1:
+                continue
+            stack += "File:%s:%s Method:%s() Line:%s | " \
+                     % (filename[index:], line, method, function)
+
+        # strip trailing " | " from stack
+        if stack:
+            stack = stack[:-3]
+            qq = "%s /* %s */" % (q, stack)
+        else:
+            qq = q
+        old_mysql_do_query(self, qq)
+
+    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
+
+
+class EngineFacade(object):
+    """A helper class for removing global engine instances from ec2api.db.
+
+    As a library, ec2api.db can't decide where to store/when to create engine
+    and sessionmaker instances, so this must be left for a target application.
+
+    On the other hand, in order to simplify the adoption of ec2api.db changes,
+    we'll provide a helper class, which creates engine and sessionmaker
+    on its instantiation and provides get_engine()/get_session() methods
+    that are compatible with corresponding utility functions that currently
+    exist in target projects, e.g. in Nova.
+
+    engine/sessionmaker instances will still be global (and they are meant to
+    be global), but they will be stored in the app context, rather than in the
+    ec2api.db context.
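+
+    A minimal usage sketch (the names and URI here are illustrative only):
+
+    .. code-block:: python
+
+        facade = EngineFacade('sqlite://')
+        engine = facade.get_engine()
+        session = facade.get_session(expire_on_commit=True)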
+ + Note: using of this helper is completely optional and you are encouraged to + integrate engine/sessionmaker instances into your apps any way you like + (e.g. one might want to bind a session to a request context). Two important + things to remember: + + 1. An Engine instance is effectively a pool of DB connections, so it's + meant to be shared (and it's thread-safe). + 2. A Session instance is not meant to be shared and represents a DB + transactional context (i.e. it's not thread-safe). sessionmaker is + a factory of sessions. + + """ + + def __init__(self, sql_connection, + sqlite_fk=False, autocommit=True, + expire_on_commit=False, **kwargs): + """Initialize engine and sessionmaker instances. + + :param sqlite_fk: enable foreign keys in SQLite + :type sqlite_fk: bool + + :param autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :param expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + Keyword arguments: + + :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions. + (defaults to TRADITIONAL) + :keyword idle_timeout: timeout before idle sql connections are reaped + (defaults to 3600) + :keyword connection_debug: verbosity of SQL debugging information. + 0=None, 100=Everything (defaults to 0) + :keyword max_pool_size: maximum number of SQL connections to keep open + in a pool (defaults to SQLAlchemy settings) + :keyword max_overflow: if set, use this value for max_overflow with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword pool_timeout: if set, use this value for pool_timeout with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword sqlite_synchronous: if True, SQLite uses synchronous mode + (defaults to True) + :keyword connection_trace: add python stack traces to SQL as comment + strings (defaults to False) + :keyword max_retries: maximum db connection retries during startup. + (setting -1 implies an infinite retry count) + (defaults to 10) + :keyword retry_interval: interval between retries of opening a sql + connection (defaults to 10) + + """ + + super(EngineFacade, self).__init__() + + self._engine = create_engine( + sql_connection=sql_connection, + sqlite_fk=sqlite_fk, + mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'), + idle_timeout=kwargs.get('idle_timeout', 3600), + connection_debug=kwargs.get('connection_debug', 0), + max_pool_size=kwargs.get('max_pool_size'), + max_overflow=kwargs.get('max_overflow'), + pool_timeout=kwargs.get('pool_timeout'), + sqlite_synchronous=kwargs.get('sqlite_synchronous', True), + connection_trace=kwargs.get('connection_trace', False), + max_retries=kwargs.get('max_retries', 10), + retry_interval=kwargs.get('retry_interval', 10)) + self._session_maker = get_maker( + engine=self._engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) + + def get_engine(self): + """Get the engine instance (note, that it's shared).""" + + return self._engine + + def get_session(self, **kwargs): + """Get a Session instance. + + If passed, keyword arguments values override the ones used when the + sessionmaker instance was created. 
+
+        :keyword autocommit: use autocommit mode for created Session instances
+        :type autocommit: bool
+
+        :keyword expire_on_commit: expire session objects on commit
+        :type expire_on_commit: bool
+
+        """
+
+        # NOTE: iterate over a copy of the keys, because unknown keyword
+        # arguments are deleted from kwargs inside the loop
+        for arg in list(kwargs):
+            if arg not in ('autocommit', 'expire_on_commit'):
+                del kwargs[arg]
+
+        return self._session_maker(**kwargs)
+
+    @classmethod
+    def from_config(cls, connection_string, conf,
+                    sqlite_fk=False, autocommit=True, expire_on_commit=False):
+        """Initialize EngineFacade using oslo.config config instance options.
+
+        :param connection_string: SQLAlchemy connection string
+        :type connection_string: string
+
+        :param conf: oslo.config config instance
+        :type conf: oslo.config.cfg.ConfigOpts
+
+        :param sqlite_fk: enable foreign keys in SQLite
+        :type sqlite_fk: bool
+
+        :param autocommit: use autocommit mode for created Session instances
+        :type autocommit: bool
+
+        :param expire_on_commit: expire session objects on commit
+        :type expire_on_commit: bool
+
+        """
+
+        return cls(sql_connection=connection_string,
+                   sqlite_fk=sqlite_fk,
+                   autocommit=autocommit,
+                   expire_on_commit=expire_on_commit,
+                   **dict(conf.database.items()))
diff --git a/ec2api/openstack/common/db/sqlalchemy/test_base.py b/ec2api/openstack/common/db/sqlalchemy/test_base.py
new file mode 100644
index 00000000..07f2dd7c
--- /dev/null
+++ b/ec2api/openstack/common/db/sqlalchemy/test_base.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import functools
+import os
+
+import fixtures
+from oslotest import base as test_base
+import six
+
+from ec2api.openstack.common.db.sqlalchemy import provision
+from ec2api.openstack.common.db.sqlalchemy import session
+from ec2api.openstack.common.db.sqlalchemy import utils
+
+
+class DbFixture(fixtures.Fixture):
+    """Basic database fixture.
+
+    Allows tests to run against various DB backends, such as SQLite, MySQL
+    and PostgreSQL. The SQLite backend is used by default. To override it,
+    set the OS_TEST_DBAPI_CONNECTION environment variable to the database
+    admin credentials for the specific backend.
+    """
+
+    def _get_uri(self):
+        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
+
+    def __init__(self, test):
+        super(DbFixture, self).__init__()
+
+        self.test = test
+
+    def cleanUp(self):
+        self.test.engine.dispose()
+
+    def setUp(self):
+        super(DbFixture, self).setUp()
+
+        self.test.engine = session.create_engine(self._get_uri())
+        self.test.sessionmaker = session.get_maker(self.test.engine)
+
+
+class DbTestCase(test_base.BaseTestCase):
+    """Base class for testing of DB code.
+
+    Uses `DbFixture`. Intended to be the main database test case for running
+    all the tests on a given backend with a user-defined URI. Backend-specific
+    tests should be decorated with the `backend_specific` decorator.
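+
+    A sketch of a typical subclass (names are illustrative only):
+
+    .. code-block:: python
+
+        class FooDbTestCase(DbTestCase):
+            def test_select_one(self):
+                # self.engine and self.sessionmaker are set up by DbFixture
+                self.assertEqual(1, self.engine.execute('SELECT 1').scalar())
+
+            @backend_specific('mysql')
+            def test_mysql_only_behaviour(self):
+                ...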
+    """
+
+    FIXTURE = DbFixture
+
+    def setUp(self):
+        super(DbTestCase, self).setUp()
+        self.useFixture(self.FIXTURE(self))
+
+
+ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
+
+
+def backend_specific(*dialects):
+    """Decorator to skip backend specific tests on inappropriate engines.
+
+    ::dialects: list of dialect names under which the test will be launched.
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def ins_wrap(self):
+            if not set(dialects).issubset(ALLOWED_DIALECTS):
+                raise ValueError(
+                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
+            if self.engine.name not in dialects:
+                msg = ('The test "%s" can be run '
+                       'only on %s. Current engine is %s.')
+                args = (f.__name__, ' '.join(dialects), self.engine.name)
+                self.skip(msg % args)
+            else:
+                return f(self)
+        return ins_wrap
+    return wrap
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpportunisticFixture(DbFixture):
+    """Base fixture to use default CI databases.
+
+    The databases exist in the OpenStack CI infrastructure. For correct
+    functioning in a local environment, the databases must be created
+    manually.
+    """
+
+    DRIVER = abc.abstractproperty(lambda: None)
+    DBNAME = PASSWORD = USERNAME = 'openstack_citest'
+
+    def setUp(self):
+        self._provisioning_engine = provision.get_engine(
+            utils.get_connect_string(backend=self.DRIVER,
+                                     user=self.USERNAME,
+                                     passwd=self.PASSWORD,
+                                     database=self.DBNAME)
+        )
+        self._uri = provision.create_database(self._provisioning_engine)
+
+        super(OpportunisticFixture, self).setUp()
+
+    def cleanUp(self):
+        super(OpportunisticFixture, self).cleanUp()
+
+        provision.drop_database(self._provisioning_engine, self._uri)
+
+    def _get_uri(self):
+        return self._uri
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpportunisticTestCase(DbTestCase):
+    """Base test case to use default CI databases.
+
+    Subclasses of this test case run only when the openstack_citest
+    database is available; otherwise the tests are skipped.
+    """
+
+    FIXTURE = abc.abstractproperty(lambda: None)
+
+    def setUp(self):
+        credentials = {
+            'backend': self.FIXTURE.DRIVER,
+            'user': self.FIXTURE.USERNAME,
+            'passwd': self.FIXTURE.PASSWORD,
+            'database': self.FIXTURE.DBNAME}
+
+        if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
+            msg = '%s backend is not available.' % self.FIXTURE.DRIVER
+            return self.skip(msg)
+
+        super(OpportunisticTestCase, self).setUp()
+
+
+class MySQLOpportunisticFixture(OpportunisticFixture):
+    DRIVER = 'mysql'
+    DBNAME = ''  # connect to MySQL server, but not to the openstack_citest db
+
+
+class PostgreSQLOpportunisticFixture(OpportunisticFixture):
+    DRIVER = 'postgresql'
+    DBNAME = 'postgres'  # PostgreSQL requires the db name here; use the service one
+
+
+class MySQLOpportunisticTestCase(OpportunisticTestCase):
+    FIXTURE = MySQLOpportunisticFixture
+
+
+class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
+    FIXTURE = PostgreSQLOpportunisticFixture
diff --git a/ec2api/openstack/common/db/sqlalchemy/test_migrations.py b/ec2api/openstack/common/db/sqlalchemy/test_migrations.py
new file mode 100644
index 00000000..45bdda58
--- /dev/null
+++ b/ec2api/openstack/common/db/sqlalchemy/test_migrations.py
@@ -0,0 +1,270 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2012-2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import logging
+import os
+import subprocess
+
+import lockfile
+from oslotest import base as test_base
+from six import moves
+from six.moves.urllib import parse
+import sqlalchemy
+import sqlalchemy.exc
+
+from ec2api.openstack.common.db.sqlalchemy import utils
+from ec2api.openstack.common.gettextutils import _LE
+
+LOG = logging.getLogger(__name__)
+
+
+def _have_mysql(user, passwd, database):
+    present = os.environ.get('TEST_MYSQL_PRESENT')
+    if present is None:
+        return utils.is_backend_avail(backend='mysql',
+                                      user=user,
+                                      passwd=passwd,
+                                      database=database)
+    return present.lower() in ('', 'true')
+
+
+def _have_postgresql(user, passwd, database):
+    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
+    if present is None:
+        return utils.is_backend_avail(backend='postgres',
+                                      user=user,
+                                      passwd=passwd,
+                                      database=database)
+    return present.lower() in ('', 'true')
+
+
+def _set_db_lock(lock_path=None, lock_prefix=None):
+    def decorator(f):
+        @functools.wraps(f)
+        def wrapper(*args, **kwargs):
+            try:
+                path = lock_path or os.environ.get("EC2API_LOCK_PATH")
+                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
+                with lock:
+                    LOG.debug('Got lock "%s"' % f.__name__)
+                    return f(*args, **kwargs)
+            finally:
+                LOG.debug('Lock released "%s"' % f.__name__)
+        return wrapper
+    return decorator
+
+
+class BaseMigrationTestCase(test_base.BaseTestCase):
+    """Base class for testing of migration utils."""
+
+    def __init__(self, *args, **kwargs):
+        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
+
+        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+                                                'test_migrations.conf')
+        # Test machines can set the TEST_MIGRATIONS_CONF variable
+        # to override the location of the config file for migration testing
+        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
+                                               self.DEFAULT_CONFIG_FILE)
+        self.test_databases = {}
+        self.migration_api = None
+
+    def setUp(self):
+        super(BaseMigrationTestCase, self).setUp()
+
+        # Load test databases from the config file. Only do this
+        # once. No need to re-run this on each test...
+        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+        if os.path.exists(self.CONFIG_FILE_PATH):
+            cp = moves.configparser.RawConfigParser()
+            try:
+                cp.read(self.CONFIG_FILE_PATH)
+                defaults = cp.defaults()
+                for key, value in defaults.items():
+                    self.test_databases[key] = value
+            except moves.configparser.ParsingError as e:
+                self.fail("Failed to read test_migrations.conf config "
+                          "file. Got error: %s" % e)
+        else:
+            self.fail("Failed to find test_migrations.conf config "
+                      "file.")
+
+        self.engines = {}
+        for key, value in self.test_databases.items():
+            self.engines[key] = sqlalchemy.create_engine(value)
+
+        # We start each test case with a completely blank slate.
+ self._reset_databases() + + def tearDown(self): + # We destroy the test data store between each test case, + # and recreate it, which ensures that we have no side-effects + # from the tests + self._reset_databases() + super(BaseMigrationTestCase, self).tearDown() + + def execute_cmd(self, cmd=None): + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = process.communicate()[0] + LOG.debug(output) + self.assertEqual(0, process.returncode, + "Failed to run: %s\n%s" % (cmd, output)) + + def _reset_pg(self, conn_pieces): + (user, + password, + database, + host) = utils.get_db_connection_info(conn_pieces) + os.environ['PGPASSWORD'] = password + os.environ['PGUSER'] = user + # note(boris-42): We must create and drop database, we can't + # drop database which we have connected to, so for such + # operations there is a special database template1. + sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" + " '%(sql)s' -d template1") + + sql = ("drop database if exists %s;") % database + droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + self.execute_cmd(droptable) + + sql = ("create database %s;") % database + createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + self.execute_cmd(createtable) + + os.unsetenv('PGPASSWORD') + os.unsetenv('PGUSER') + + @_set_db_lock(lock_prefix='migration_tests-') + def _reset_databases(self): + for key, engine in self.engines.items(): + conn_string = self.test_databases[key] + conn_pieces = parse.urlparse(conn_string) + engine.dispose() + if conn_string.startswith('sqlite'): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... + elif conn_string.startswith('mysql'): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. + (user, password, database, host) = \ + utils.get_db_connection_info(conn_pieces) + sql = ("drop database if exists %(db)s; " + "create database %(db)s;") % {'db': database} + cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " + "-e \"%(sql)s\"") % {'user': user, 'password': password, + 'host': host, 'sql': sql} + self.execute_cmd(cmd) + elif conn_string.startswith('postgresql'): + self._reset_pg(conn_pieces) + + +class WalkVersionsMixin(object): + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. + + # Place the database under version control + self.migration_api.version_control(engine, self.REPOSITORY, + self.INIT_VERSION) + self.assertEqual(self.INIT_VERSION, + self.migration_api.db_version(engine, + self.REPOSITORY)) + + LOG.debug('latest version is %s' % self.REPOSITORY.latest) + versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) + + for version in versions: + # upgrade -> downgrade -> upgrade + self._migrate_up(engine, version, with_data=True) + if snake_walk: + downgraded = self._migrate_down( + engine, version - 1, with_data=True) + if downgraded: + self._migrate_up(engine, version) + + if downgrade: + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. 
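+            # (Illustration: with INIT_VERSION = 0 and a latest version of 3,
+            # this loop tries downgrades to versions 2, 1 and 0, re-upgrading
+            # after each step when snake_walk is set.)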
+ for version in reversed(versions): + # downgrade -> upgrade -> downgrade + downgraded = self._migrate_down(engine, version - 1) + + if snake_walk and downgraded: + self._migrate_up(engine, version) + self._migrate_down(engine, version - 1) + + def _migrate_down(self, engine, version, with_data=False): + try: + self.migration_api.downgrade(engine, self.REPOSITORY, version) + except NotImplementedError: + # NOTE(sirp): some migrations, namely release-level + # migrations, don't support a downgrade. + return False + + self.assertEqual( + version, self.migration_api.db_version(engine, self.REPOSITORY)) + + # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' + # version). So if we have any downgrade checks, they need to be run for + # the previous (higher numbered) migration. + if with_data: + post_downgrade = getattr( + self, "_post_downgrade_%03d" % (version + 1), None) + if post_downgrade: + post_downgrade(engine) + + return True + + def _migrate_up(self, engine, version, with_data=False): + """migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _pre_upgrade_### and + _check_### functions in the main test. + """ + # NOTE(sdague): try block is here because it's impossible to debug + # where a failed data migration happens otherwise + try: + if with_data: + data = None + pre_upgrade = getattr( + self, "_pre_upgrade_%03d" % version, None) + if pre_upgrade: + data = pre_upgrade(engine) + + self.migration_api.upgrade(engine, self.REPOSITORY, version) + self.assertEqual(version, + self.migration_api.db_version(engine, + self.REPOSITORY)) + if with_data: + check = getattr(self, "_check_%03d" % version, None) + if check: + check(engine, data) + except Exception: + LOG.error(_LE("Failed to migrate to version %(version)s " + "on engine %(engine)s") % {'version': version, + 'engine': engine}) + raise diff --git a/ec2api/openstack/common/db/sqlalchemy/utils.py b/ec2api/openstack/common/db/sqlalchemy/utils.py new file mode 100644 index 00000000..7a9721e0 --- /dev/null +++ b/ec2api/openstack/common/db/sqlalchemy/utils.py @@ -0,0 +1,655 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import logging
+import re
+
+import sqlalchemy
+from sqlalchemy import Boolean
+from sqlalchemy import CheckConstraint
+from sqlalchemy import Column
+from sqlalchemy.engine import reflection
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import func
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import or_
+from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy.types import NullType
+
+from ec2api.openstack.common import context as request_context
+from ec2api.openstack.common.db.sqlalchemy import models
+from ec2api.openstack.common.gettextutils import _, _LI, _LW
+from ec2api.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+
+_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
+
+
+def sanitize_db_url(url):
+    match = _DBURL_REGEX.match(url)
+    if match:
+        return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
+    return url
+
+
+class InvalidSortKey(Exception):
+    message = _("Sort key supplied was not valid.")
+
+
+# copy from glance/db/sqlalchemy/api.py
+def paginate_query(query, model, limit, sort_keys, marker=None,
+                   sort_dir=None, sort_dirs=None):
+    """Returns a query with sorting / pagination criteria added.
+
+    Pagination works by requiring a unique sort_key, specified by sort_keys.
+    (If sort_keys is not unique, then we risk looping through values.)
+    We use the last row in the previous page as the 'marker' for pagination.
+    So we must return values that follow the passed marker in the sort order.
+    With a single-valued sort_key, this would be easy: sort_key > X.
+    With a compound sort_key (k1, k2, k3) we must do this to repeat
+    the lexicographical ordering:
+    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
+
+    We also have to cope with different sort directions.
+
+    Typically, the id of the last row is used as the client-facing pagination
+    marker; the actual marker object must then be fetched from the db and
+    passed in to us as marker.
+
+    :param query: the query object to which we should add paging/sorting
+    :param model: the ORM model class
+    :param limit: maximum number of items to return
+    :param marker: the last item of the previous page; we return the next
+                   results after this value.
+    :param sort_keys: array of attributes by which results should be sorted
+    :param sort_dir: direction in which results should be sorted (asc, desc)
+    :param sort_dirs: per-column array of sort_dirs, corresponding to
+                      sort_keys
+
+    :rtype: sqlalchemy.orm.query.Query
+    :return: The query with sorting/pagination added.
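+
+    For example (``models.Instance`` and the marker value here are
+    illustrative only, not part of this module)::
+
+        query = paginate_query(query, models.Instance, limit=100,
+                               sort_keys=['created_at', 'id'],
+                               marker=last_row_of_previous_page,
+                               sort_dir='asc')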
+    """
+
+    if 'id' not in sort_keys:
+        # TODO(justinsb): If this ever gives a false-positive, check
+        # the actual primary key, rather than assuming it is 'id'
+        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
+
+    assert(not (sort_dir and sort_dirs))
+
+    # Default the sort direction to ascending
+    if sort_dirs is None and sort_dir is None:
+        sort_dir = 'asc'
+
+    # Ensure a per-column sort direction
+    if sort_dirs is None:
+        sort_dirs = [sort_dir for _sort_key in sort_keys]
+
+    assert(len(sort_dirs) == len(sort_keys))
+
+    # Add sorting
+    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
+        try:
+            sort_dir_func = {
+                'asc': sqlalchemy.asc,
+                'desc': sqlalchemy.desc,
+            }[current_sort_dir]
+        except KeyError:
+            raise ValueError(_("Unknown sort direction, "
+                               "must be 'desc' or 'asc'"))
+        try:
+            sort_key_attr = getattr(model, current_sort_key)
+        except AttributeError:
+            raise InvalidSortKey()
+        query = query.order_by(sort_dir_func(sort_key_attr))
+
+    # Add pagination
+    if marker is not None:
+        marker_values = []
+        for sort_key in sort_keys:
+            v = getattr(marker, sort_key)
+            marker_values.append(v)
+
+        # Build up an array of sort criteria as in the docstring
+        criteria_list = []
+        for i in range(len(sort_keys)):
+            crit_attrs = []
+            for j in range(i):
+                model_attr = getattr(model, sort_keys[j])
+                crit_attrs.append((model_attr == marker_values[j]))
+
+            model_attr = getattr(model, sort_keys[i])
+            if sort_dirs[i] == 'desc':
+                crit_attrs.append((model_attr < marker_values[i]))
+            else:
+                crit_attrs.append((model_attr > marker_values[i]))
+
+            criteria = sqlalchemy.sql.and_(*crit_attrs)
+            criteria_list.append(criteria)
+
+        f = sqlalchemy.sql.or_(*criteria_list)
+        query = query.filter(f)
+
+    if limit is not None:
+        query = query.limit(limit)
+
+    return query
+
+
+def _read_deleted_filter(query, db_model, read_deleted):
+    if 'deleted' not in db_model.__table__.columns:
+        raise ValueError(_("There is no `deleted` column in `%s` table. "
+                           "The project doesn't use the soft-delete "
+                           "feature.") % db_model.__name__)
+
+    default_deleted_value = db_model.__table__.c.deleted.default.arg
+    if read_deleted == 'no':
+        query = query.filter(db_model.deleted == default_deleted_value)
+    elif read_deleted == 'yes':
+        pass  # omit the filter to include deleted and active
+    elif read_deleted == 'only':
+        query = query.filter(db_model.deleted != default_deleted_value)
+    else:
+        raise ValueError(_("Unrecognized read_deleted value '%s'")
+                         % read_deleted)
+    return query
+
+
+def _project_filter(query, db_model, context, project_only):
+    if project_only and 'project_id' not in db_model.__table__.columns:
+        raise ValueError(_("There is no `project_id` column in `%s` table.")
+                         % db_model.__name__)
+
+    if request_context.is_user_context(context) and project_only:
+        if project_only == 'allow_none':
+            is_none = None
+            query = query.filter(or_(db_model.project_id == context.project_id,
+                                     db_model.project_id == is_none))
+        else:
+            query = query.filter(db_model.project_id == context.project_id)
+
+    return query
+
+
+def model_query(context, model, session, args=None, project_only=False,
+                read_deleted=None):
+    """Query helper that accounts for context's `read_deleted` field.
+
+    :param context: context to query under
+
+    :param model: Model to query. Must be a subclass of ModelBase.
+    :type model: models.ModelBase
+
+    :param session: The session to use.
+    :type session: sqlalchemy.orm.session.Session
+
+    :param args: Arguments to query. If None - model is used.
+    :type args: tuple
+
+    :param project_only: If present and context is user-type, then restrict
+                         query to match the context's project_id. If set to
+                         'allow_none', restriction includes project_id = None.
+    :type project_only: bool
+
+    :param read_deleted: If present, overrides context's read_deleted field.
+    :type read_deleted: bool
+
+    Usage:
+
+    .. code:: python
+
+        result = (utils.model_query(context, models.Instance, session=session)
+                       .filter_by(uuid=instance_uuid)
+                       .all())
+
+        query = utils.model_query(
+            context, Node,
+            session=session,
+            args=(func.count(Node.id), func.sum(Node.ram))
+        ).filter_by(project_id=project_id)
+
+    """
+
+    if not read_deleted:
+        if hasattr(context, 'read_deleted'):
+            # NOTE(viktors): some projects use `read_deleted` attribute in
+            # their contexts instead of `show_deleted`.
+            read_deleted = context.read_deleted
+        else:
+            read_deleted = context.show_deleted
+
+    if not issubclass(model, models.ModelBase):
+        raise TypeError(_("model should be a subclass of ModelBase"))
+
+    query = session.query(model) if not args else session.query(*args)
+    query = _read_deleted_filter(query, model, read_deleted)
+    query = _project_filter(query, model, context, project_only)
+
+    return query
+
+
+def get_table(engine, name):
+    """Returns an sqlalchemy table dynamically from the db.
+
+    Needed because the models don't work for us in migrations
+    as models will be far out of sync with the current data.
+
+    .. warning::
+
+       Do not use this method when creating ForeignKeys in database migrations
+       because sqlalchemy needs the same MetaData object to hold information
+       about the parent table and the reference table in the ForeignKey. This
+       method uses a unique MetaData object per table object so it won't work
+       with ForeignKey creation.
+    """
+    metadata = MetaData()
+    metadata.bind = engine
+    return Table(name, metadata, autoload=True)
+
+
+class InsertFromSelect(UpdateBase):
+    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
+    def __init__(self, table, select):
+        self.table = table
+        self.select = select
+
+
+@compiles(InsertFromSelect)
+def visit_insert_from_select(element, compiler, **kw):
+    """Form the `INSERT INTO table (SELECT ... )` statement."""
+    return "INSERT INTO %s %s" % (
+        compiler.process(element.table, asfrom=True),
+        compiler.process(element.select))
+
+
+class ColumnError(Exception):
+    """Error raised when no column or an invalid column is found."""
+
+
+def _get_not_supported_column(col_name_col_instance, column_name):
+    try:
+        column = col_name_col_instance[column_name]
+    except KeyError:
+        msg = _("Please specify column %s in col_name_col_instance "
+                "param. It is required because the column has a type "
+                "unsupported by sqlite.")
+        raise ColumnError(msg % column_name)
+
+    if not isinstance(column, Column):
+        msg = _("col_name_col_instance param has wrong type of "
+                "column instance for column %s. It should be an "
+                "instance of sqlalchemy.Column.")
+        raise ColumnError(msg % column_name)
+    return column
+
+
+def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
+                           **col_name_col_instance):
+    """Drop unique constraint from table.
+
+    DEPRECATED: this function is deprecated and will be removed from ec2api.db
+    in a few releases. Please use UniqueConstraint.drop() method directly for
+    sqlalchemy-migrate migration scripts.
+
+    This method drops the UC from the table and works for mysql, postgresql
+    and sqlite. In mysql and postgresql we are able to use the "alter table"
+    construct.
+    Sqlalchemy doesn't support some sqlite column types and replaces their
+    type with NullType in metadata. We process these columns and replace
+    NullType with the correct column type.
+
+    :param migrate_engine: sqlalchemy engine
+    :param table_name: name of the table that contains the unique constraint.
+    :param uc_name: name of the unique constraint that will be dropped.
+    :param columns: columns that are in the unique constraint.
+    :param col_name_col_instance: contains pairs column_name=column_instance.
+                        column_instance is an instance of Column. These params
+                        are required only for columns that have types
+                        unsupported by sqlite, for example BigInteger.
+    """
+
+    from migrate.changeset import UniqueConstraint
+
+    meta = MetaData()
+    meta.bind = migrate_engine
+    t = Table(table_name, meta, autoload=True)
+
+    if migrate_engine.name == "sqlite":
+        override_cols = [
+            _get_not_supported_column(col_name_col_instance, col.name)
+            for col in t.columns
+            if isinstance(col.type, NullType)
+        ]
+        for col in override_cols:
+            t.columns.replace(col)
+
+    uc = UniqueConstraint(*columns, table=t, name=uc_name)
+    uc.drop()
+
+
+def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
+                                          use_soft_delete, *uc_column_names):
+    """Drop all old rows having the same values for columns in uc_columns.
+
+    This method drops (or marks as `deleted`, if use_soft_delete is True)
+    old duplicate rows from the table with name `table_name`.
+
+    :param migrate_engine: Sqlalchemy engine
+    :param table_name: Table with duplicates
+    :param use_soft_delete: If True - values will be marked as `deleted`,
+                            if False - values will be removed from the table
+    :param uc_column_names: Unique constraint columns
+    """
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    table = Table(table_name, meta, autoload=True)
+    columns_for_group_by = [table.c[name] for name in uc_column_names]
+
+    columns_for_select = [func.max(table.c.id)]
+    columns_for_select.extend(columns_for_group_by)
+
+    duplicated_rows_select = sqlalchemy.sql.select(
+        columns_for_select, group_by=columns_for_group_by,
+        having=func.count(table.c.id) > 1)
+
+    for row in migrate_engine.execute(duplicated_rows_select):
+        # NOTE(boris-42): Do not remove the row that has the biggest ID.
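+        # The select above returns one row per group of duplicates,
+        # shaped as (max(id), uc_col_1, ..., uc_col_n), so row[0] is
+        # the id of the newest row in the group, which is the one kept.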
+        delete_condition = table.c.id != row[0]
+        is_none = None  # workaround for pyflakes
+        delete_condition &= table.c.deleted_at == is_none
+        for name in uc_column_names:
+            delete_condition &= table.c[name] == row[name]
+
+        rows_to_delete_select = sqlalchemy.sql.select(
+            [table.c.id]).where(delete_condition)
+        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
+            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
+                         "%(table)s") % dict(id=row[0], table=table_name))
+
+        if use_soft_delete:
+            delete_statement = table.update().\
+                where(delete_condition).\
+                values({
+                    'deleted': literal_column('id'),
+                    'updated_at': literal_column('updated_at'),
+                    'deleted_at': timeutils.utcnow()
+                })
+        else:
+            delete_statement = table.delete().where(delete_condition)
+        migrate_engine.execute(delete_statement)
+
+
+def _get_default_deleted_value(table):
+    if isinstance(table.c.id.type, Integer):
+        return 0
+    if isinstance(table.c.id.type, String):
+        return ""
+    raise ColumnError(_("Unsupported id columns type"))
+
+
+def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
+    table = get_table(migrate_engine, table_name)
+
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    real_indexes = insp.get_indexes(table_name)
+    existing_index_names = dict(
+        [(index['name'], index['column_names']) for index in real_indexes])
+
+    # NOTE(boris-42): Restore indexes on `deleted` column
+    for index in indexes:
+        if 'deleted' not in index['column_names']:
+            continue
+        name = index['name']
+        if name in existing_index_names:
+            column_names = [table.c[c] for c in existing_index_names[name]]
+            old_index = Index(name, *column_names, unique=index["unique"])
+            old_index.drop(migrate_engine)
+
+        column_names = [table.c[c] for c in index['column_names']]
+        new_index = Index(index["name"], *column_names, unique=index["unique"])
+        new_index.create(migrate_engine)
+
+
+def change_deleted_column_type_to_boolean(migrate_engine, table_name,
+                                          **col_name_col_instance):
+    if migrate_engine.name == "sqlite":
+        return _change_deleted_column_type_to_boolean_sqlite(
+            migrate_engine, table_name, **col_name_col_instance)
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    indexes = insp.get_indexes(table_name)
+
+    table = get_table(migrate_engine, table_name)
+
+    old_deleted = Column('old_deleted', Boolean, default=False)
+    old_deleted.create(table, populate_default=False)
+
+    table.update().\
+        where(table.c.deleted == table.c.id).\
+        values(old_deleted=True).\
+        execute()
+
+    table.c.deleted.drop()
+    table.c.old_deleted.alter(name="deleted")
+
+    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
+
+
+def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
+                                                  **col_name_col_instance):
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    table = get_table(migrate_engine, table_name)
+
+    columns = []
+    for column in table.columns:
+        column_copy = None
+        if column.name != "deleted":
+            if isinstance(column.type, NullType):
+                column_copy = _get_not_supported_column(col_name_col_instance,
+                                                        column.name)
+            else:
+                column_copy = column.copy()
+        else:
+            column_copy = Column('deleted', Boolean, default=0)
+        columns.append(column_copy)
+
+    constraints = [constraint.copy() for constraint in table.constraints]
+
+    meta = table.metadata
+    new_table = Table(table_name + "__tmp__", meta,
+                      *(columns + constraints))
+    new_table.create()
+
+    indexes = []
+    for index in insp.get_indexes(table_name):
+        column_names = [new_table.c[c] for c in index['column_names']]
+        indexes.append(Index(index["name"], *column_names,
+                             unique=index["unique"]))
+
+    c_select = []
+    for c in table.c:
+        if c.name != "deleted":
+            c_select.append(c)
+        else:
+            c_select.append(table.c.deleted == table.c.id)
+
+    ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
+    migrate_engine.execute(ins)
+
+    table.drop()
+    [index.create(migrate_engine) for index in indexes]
+
+    new_table.rename(table_name)
+    new_table.update().\
+        where(new_table.c.deleted == new_table.c.id).\
+        values(deleted=True).\
+        execute()
+
+
+def change_deleted_column_type_to_id_type(migrate_engine, table_name,
+                                          **col_name_col_instance):
+    if migrate_engine.name == "sqlite":
+        return _change_deleted_column_type_to_id_type_sqlite(
+            migrate_engine, table_name, **col_name_col_instance)
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    indexes = insp.get_indexes(table_name)
+
+    table = get_table(migrate_engine, table_name)
+
+    new_deleted = Column('new_deleted', table.c.id.type,
+                         default=_get_default_deleted_value(table))
+    new_deleted.create(table, populate_default=True)
+
+    deleted = True  # workaround for pyflakes
+    table.update().\
+        where(table.c.deleted == deleted).\
+        values(new_deleted=table.c.id).\
+        execute()
+    table.c.deleted.drop()
+    table.c.new_deleted.alter(name="deleted")
+
+    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
+
+
+def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
+                                                  **col_name_col_instance):
+    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
+    #                 constraints in a sqlite DB and our `deleted` column has
+    #                 2 check constraints. So there is only one way to remove
+    #                 these constraints:
+    #                 1) Create a new table with the same columns, constraints
+    #                    and indexes (except the deleted column).
+    #                 2) Copy all data from the old to the new table.
+    #                 3) Drop the old table.
+    #                 4) Rename the new table to the old table's name.
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    meta = MetaData(bind=migrate_engine)
+    table = Table(table_name, meta, autoload=True)
+    default_deleted_value = _get_default_deleted_value(table)
+
+    columns = []
+    for column in table.columns:
+        column_copy = None
+        if column.name != "deleted":
+            if isinstance(column.type, NullType):
+                column_copy = _get_not_supported_column(col_name_col_instance,
+                                                        column.name)
+            else:
+                column_copy = column.copy()
+        else:
+            column_copy = Column('deleted', table.c.id.type,
+                                 default=default_deleted_value)
+        columns.append(column_copy)
+
+    def is_deleted_column_constraint(constraint):
+        # NOTE(boris-42): There is no other way to check whether a
+        #                 CheckConstraint is associated with the deleted
+        #                 column.
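+        # The constraint text is matched against the two forms sqlite
+        # renders for a Boolean `deleted` column: the literal
+        # "deleted in (0, 1)" and the bound-parameter form
+        # "deleted IN (:deleted_1, :deleted_2)".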
+        if not isinstance(constraint, CheckConstraint):
+            return False
+        sqltext = str(constraint.sqltext)
+        return (sqltext.endswith("deleted in (0, 1)") or
+                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
+
+    constraints = []
+    for constraint in table.constraints:
+        if not is_deleted_column_constraint(constraint):
+            constraints.append(constraint.copy())
+
+    new_table = Table(table_name + "__tmp__", meta,
+                      *(columns + constraints))
+    new_table.create()
+
+    indexes = []
+    for index in insp.get_indexes(table_name):
+        column_names = [new_table.c[c] for c in index['column_names']]
+        indexes.append(Index(index["name"], *column_names,
+                             unique=index["unique"]))
+
+    ins = InsertFromSelect(new_table, table.select())
+    migrate_engine.execute(ins)
+
+    table.drop()
+    [index.create(migrate_engine) for index in indexes]
+
+    new_table.rename(table_name)
+    deleted = True  # workaround for pyflakes
+    new_table.update().\
+        where(new_table.c.deleted == deleted).\
+        values(deleted=new_table.c.id).\
+        execute()
+
+    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
+    deleted = False  # workaround for pyflakes
+    new_table.update().\
+        where(new_table.c.deleted == deleted).\
+        values(deleted=default_deleted_value).\
+        execute()
+
+
+def get_connect_string(backend, database, user=None, passwd=None):
+    """Get a database connection string.
+
+    Try to get a connection with a very specific set of values; if we get
+    one, then we'll run the tests, otherwise they are skipped.
+    """
+    args = {'backend': backend,
+            'user': user,
+            'passwd': passwd,
+            'database': database}
+    if backend == 'sqlite':
+        template = '%(backend)s:///%(database)s'
+    else:
+        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
+    return template % args
+
+
+def is_backend_avail(backend, database, user=None, passwd=None):
+    try:
+        connect_uri = get_connect_string(backend=backend,
+                                         database=database,
+                                         user=user,
+                                         passwd=passwd)
+        engine = sqlalchemy.create_engine(connect_uri)
+        connection = engine.connect()
+    except Exception:
+        # intentionally catch all to handle exceptions even if we don't
+        # have any backend code loaded.
+        return False
+    else:
+        connection.close()
+        engine.dispose()
+        return True
+
+
+def get_db_connection_info(conn_pieces):
+    database = conn_pieces.path.strip('/')
+    loc_pieces = conn_pieces.netloc.split('@')
+    host = loc_pieces[1]
+
+    auth_pieces = loc_pieces[0].split(':')
+    user = auth_pieces[0]
+    password = ""
+    if len(auth_pieces) > 1:
+        password = auth_pieces[1].strip()
+
+    return (user, password, database, host)
diff --git a/ec2api/openstack/common/eventlet_backdoor.py b/ec2api/openstack/common/eventlet_backdoor.py
new file mode 100644
index 00000000..bc4de0bc
--- /dev/null
+++ b/ec2api/openstack/common/eventlet_backdoor.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2012 OpenStack Foundation.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
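+
+# A rough sketch of how backdoor_port values (parsed by
+# _parse_port_range() and _listen() below) behave; the port numbers
+# here are purely illustrative:
+#
+#   backdoor_port = 0           listen on a random free tcp port
+#   backdoor_port = 3075        listen on 3075, or not at all if in use
+#   backdoor_port = 3075:3080   listen on the first unused port in range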
+
+from __future__ import print_function
+
+import errno
+import gc
+import os
+import pprint
+import socket
+import sys
+import traceback
+
+import eventlet
+import eventlet.backdoor
+import greenlet
+from oslo.config import cfg
+
+from ec2api.openstack.common.gettextutils import _LI
+from ec2api.openstack.common import log as logging
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers. The "
+    "chosen port is displayed in the service's log file.")
+eventlet_backdoor_opts = [
+    cfg.StrOpt('backdoor_port',
+               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
+]
+
+CONF = cfg.CONF
+CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
+
+
+def _dont_use_this():
+    print("Don't use this, just disconnect instead")
+
+
+def _find_objects(t):
+    return [o for o in gc.get_objects() if isinstance(o, t)]
+
+
+def _print_greenthreads():
+    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
+        print(i, gt)
+        traceback.print_stack(gt.gr_frame)
+        print()
+
+
+def _print_nativethreads():
+    for threadId, stack in sys._current_frames().items():
+        print(threadId)
+        traceback.print_stack(stack)
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range, ex,
+                                               help_for_backdoor_port)
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
+                    try_port >= end_port):
+                raise
+            try_port += 1
+
+
+def initialize_if_enabled():
+    backdoor_locals = {
+        'exit': _dont_use_this,  # So we don't exit the entire process
+        'quit': _dont_use_this,  # So we don't exit the entire process
+        'fo': _find_objects,
+        'pgt': _print_greenthreads,
+        'pnt': _print_nativethreads,
+    }
+
+    if CONF.backdoor_port is None:
+        return None
+
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
+    # NOTE(johannes): The standard sys.displayhook will print the value of
+    # the last expression and set it to __builtin__._, which overwrites
+    # the __builtin__._ that gettext sets. Let's switch to using pprint
+    # since it won't interact poorly with gettext, and it's easier to
+    # read the output too.
+    def displayhook(val):
+        if val is not None:
+            pprint.pprint(val)
+    sys.displayhook = displayhook
+
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen(). In any case, pull the port number out here.
+ port = sock.getsockname()[1] + LOG.info( + _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()} + ) + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/ec2api/openstack/common/excutils.py b/ec2api/openstack/common/excutils.py new file mode 100644 index 00000000..abb6f9ed --- /dev/null +++ b/ec2api/openstack/common/excutils.py @@ -0,0 +1,113 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import logging +import sys +import time +import traceback + +import six + +from ec2api.openstack.common.gettextutils import _LE + + +class save_and_reraise_exception(object): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + + In some cases the caller may not want to re-raise the exception, and + for those circumstances this context provides a reraise flag that + can be used to suppress the exception. For example:: + + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False + + If another exception occurs and reraise flag is False, + the saved exception will not be logged. 
+
+    If the caller wants to raise a new exception during exception handling,
+    he/she sets reraise to False initially, with the ability to set it back
+    to True if needed::
+
+        except Exception:
+            with save_and_reraise_exception(reraise=False) as ctxt:
+                [if statements to determine whether to raise a new exception]
+                # Not raising a new exception, so reraise
+                ctxt.reraise = True
+    """
+    def __init__(self, reraise=True):
+        self.reraise = reraise
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            if self.reraise:
+                logging.error(_LE('Original exception being dropped: %s'),
+                              traceback.format_exception(self.type_,
+                                                         self.value,
+                                                         self.tb))
+            return False
+        if self.reraise:
+            six.reraise(self.type_, self.value, self.tb)
+
+
+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                this_exc_message = six.u(str(exc))
+                if this_exc_message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        this_exc_message != last_exc_message):
+                    logging.exception(
+                        _LE('Unexpected exception occurred %d time(s)... '
+                            'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = this_exc_message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
+    return inner_func
diff --git a/ec2api/openstack/common/gettextutils.py b/ec2api/openstack/common/gettextutils.py
new file mode 100644
index 00000000..57ee7cd5
--- /dev/null
+++ b/ec2api/openstack/common/gettextutils.py
@@ -0,0 +1,479 @@
+# Copyright 2012 Red Hat, Inc.
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+gettext for openstack-common modules.
+
+Usual usage in an openstack.common module:
+
+    from ec2api.openstack.common.gettextutils import _
+"""
+
+import copy
+import gettext
+import locale
+from logging import handlers
+import os
+
+from babel import localedata
+import six
+
+_AVAILABLE_LANGUAGES = {}
+
+# FIXME(dhellmann): Remove this when moving to oslo.i18n.
+USE_LAZY = False
+
+
+class TranslatorFactory(object):
+    """Create translator functions
+    """
+
+    def __init__(self, domain, localedir=None):
+        """Establish a set of translation functions for the domain.
+
+        :param domain: Name of translation domain,
+                       specifying a message catalog.
+        :type domain: str
+        :param localedir: Directory with translation catalogs.
+ :type localedir: str + """ + self.domain = domain + if localedir is None: + localedir = os.environ.get(domain.upper() + '_LOCALEDIR') + self.localedir = localedir + + def _make_translation_func(self, domain=None): + """Return a new translation function ready for use. + + Takes into account whether or not lazy translation is being + done. + + The domain can be specified to override the default from the + factory, but the localedir from the factory is always used + because we assume the log-level translation catalogs are + installed in the same directory as the main application + catalog. + + """ + if domain is None: + domain = self.domain + t = gettext.translation(domain, + localedir=self.localedir, + fallback=True) + # Use the appropriate method of the translation object based + # on the python version. + m = t.gettext if six.PY3 else t.ugettext + + def f(msg): + """oslo.i18n.gettextutils translation function.""" + if USE_LAZY: + return Message(msg, domain=domain) + return m(msg) + return f + + @property + def primary(self): + "The default translation function." + return self._make_translation_func() + + def _make_log_translation_func(self, level): + return self._make_translation_func(self.domain + '-log-' + level) + + @property + def log_info(self): + "Translate info-level log messages." + return self._make_log_translation_func('info') + + @property + def log_warning(self): + "Translate warning-level log messages." + return self._make_log_translation_func('warning') + + @property + def log_error(self): + "Translate error-level log messages." + return self._make_log_translation_func('error') + + @property + def log_critical(self): + "Translate critical-level log messages." + return self._make_log_translation_func('critical') + + +# NOTE(dhellmann): When this module moves out of the incubator into +# oslo.i18n, these global variables can be moved to an integration +# module within each application. + +# Create the global translation functions. +_translators = TranslatorFactory('ec2api') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + +# NOTE(dhellmann): End of globals that will move to the application's +# integration module. + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True + + +def install(domain): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + Note that to enable lazy translation, enable_lazy must be + called. 
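+
+    A minimal usage sketch (the domain name is illustrative)::
+
+        install('ec2api')
+        print(_('some translatable message'))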
+ + :param domain: the translation domain + """ + from six import moves + tf = TranslatorFactory(domain) + moves.builtins.__dict__['_'] = tf.primary + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, + domain='ec2api', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. 
+ + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + # Merge the dictionaries + # Copy each item in case one does not support deep copy. + params = {} + if isinstance(self.params, dict): + for key, val in self.params.items(): + params[key] = self._copy_param(val) + for key, val in other.items(): + params[key] = self._copy_param(val) + else: + params = self._copy_param(other) + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except Exception: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + if six.PY2: + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. + + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale_, alias) in six.iteritems(aliases): + if locale_ in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. 
+ + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. 
+        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
+        self.locale = locale
+
+    def setFormatter(self, fmt):
+        self.target.setFormatter(fmt)
+
+    def emit(self, record):
+        # We save the message from the original record to restore it
+        # after translation, so other handlers are not affected by this
+        original_msg = record.msg
+        original_args = record.args
+
+        try:
+            self._translate_and_log_record(record)
+        finally:
+            record.msg = original_msg
+            record.args = original_args
+
+    def _translate_and_log_record(self, record):
+        record.msg = translate(record.msg, self.locale)
+
+        # In addition to translating the message, we also need to translate
+        # arguments that were passed to the log method that were not part
+        # of the main message e.g., log.info(_('Some message %s'), this_one)
+        record.args = _translate_args(record.args, self.locale)
+
+        self.target.emit(record)
diff --git a/ec2api/openstack/common/importutils.py b/ec2api/openstack/common/importutils.py
new file mode 100644
index 00000000..25a88e7d
--- /dev/null
+++ b/ec2api/openstack/common/importutils.py
@@ -0,0 +1,73 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+    """Returns a class from a string including module and class."""
+    mod_str, _sep, class_str = import_str.rpartition('.')
+    __import__(mod_str)
+    try:
+        return getattr(sys.modules[mod_str], class_str)
+    except AttributeError:
+        raise ImportError('Class %s cannot be found (%s)' %
+                          (class_str,
+                           traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+    """Import a class and return an instance of it."""
+    return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+    """Tries to import object from default namespace.
+
+    Imports a class and returns an instance of it, first by trying
+    to find the class in the default namespace, then falling back to
+    the full path if it is not found in the default namespace.
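+
+    For example (the names here are purely illustrative)::
+
+        # tries 'ec2api.api.cloud.CloudController' first, then falls
+        # back to importing 'cloud.CloudController' exactly as given
+        obj = import_object_ns('ec2api.api', 'cloud.CloudController')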
+ """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def import_versioned_module(version, submodule=None): + module = 'ec2api.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return import_module(module) + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/ec2api/openstack/common/jsonutils.py b/ec2api/openstack/common/jsonutils.py new file mode 100644 index 00000000..2c7a6cfd --- /dev/null +++ b/ec2api/openstack/common/jsonutils.py @@ -0,0 +1,190 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. +''' + + +import codecs +import datetime +import functools +import inspect +import itertools +import sys + +if sys.version_info < (2, 7): + # On Python <= 2.6, json module is not C boosted, so try to use + # simplejson module if available + try: + import simplejson as json + except ImportError: + import json +else: + import json + +import six +import six.moves.xmlrpc_client as xmlrpclib + +from ec2api.openstack.common import gettextutils +from ec2api.openstack.common import importutils +from ec2api.openstack.common import strutils +from ec2api.openstack.common import timeutils + +netaddr = importutils.try_import("netaddr") + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (six.string_types + six.integer_types + + (type(None), bool, float)) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. 
+
+    Therefore, convert_instances=True is lossy ... be aware.
+
+    """
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value
+
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
+
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
+    if type(value) == itertools.count:
+        return six.text_type(value)
+
+    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
+    #              tests that raise an exception in a mocked method that
+    #              has a @wrap_exception with a notifier will fail. If
+    #              we up the dependency to 0.5.4 (when it is released) we
+    #              can remove this workaround.
+    if getattr(value, '__module__', None) == 'mox':
+        return 'mock'
+
+    if level > max_depth:
+        return '?'
+
+    # The try block may not be necessary after the class check above,
+    # but just in case ...
+    try:
+        recursive = functools.partial(to_primitive,
+                                      convert_instances=convert_instances,
+                                      convert_datetime=convert_datetime,
+                                      level=level,
+                                      max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in six.iteritems(value))
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
+
+        # It's not clear why xmlrpclib created their own DateTime type, but
+        # for our purposes, make it a datetime type which is explicitly
+        # handled
+        if isinstance(value, xmlrpclib.DateTime):
+            value = datetime.datetime(*tuple(value.timetuple())[:6])
+
+        if convert_datetime and isinstance(value, datetime.datetime):
+            return timeutils.strtime(value)
+        elif isinstance(value, gettextutils.Message):
+            return value.data
+        elif hasattr(value, 'iteritems'):
+            return recursive(dict(value.iteritems()), level=level + 1)
+        elif hasattr(value, '__iter__'):
+            return recursive(list(value))
+        elif convert_instances and hasattr(value, '__dict__'):
+            # Likely an instance of something. Watch for cycles.
+            # Ignore class member vars.
+            return recursive(value.__dict__, level=level + 1)
+        elif netaddr and isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
+        else:
+            if any(test(value) for test in _nasty_type_tests):
+                return six.text_type(value)
+            return value
+    except TypeError:
+        # Class objects are tricky since they may define __iter__
+        # but it isn't callable as list().
+        return six.text_type(value)
+
+
+def dumps(value, default=to_primitive, **kwargs):
+    return json.dumps(value, default=default, **kwargs)
+
+
+def dump(obj, fp, *args, **kwargs):
+    return json.dump(obj, fp, *args, **kwargs)
+
+
+def loads(s, encoding='utf-8', **kwargs):
+    return json.loads(strutils.safe_decode(s, encoding), **kwargs)
+
+
+def load(fp, encoding='utf-8', **kwargs):
+    return json.load(codecs.getreader(encoding)(fp), **kwargs)
+
+
+try:
+    import anyjson
+except ImportError:
+    pass
+else:
+    anyjson._modules.append((__name__, 'dumps', TypeError,
+                             'loads', ValueError, 'load'))
+    anyjson.force_implementation(__name__)
diff --git a/ec2api/openstack/common/local.py b/ec2api/openstack/common/local.py
new file mode 100644
index 00000000..0819d5b9
--- /dev/null
+++ b/ec2api/openstack/common/local.py
@@ -0,0 +1,45 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Local storage of variables using weak references""" + +import threading +import weakref + + +class WeakLocal(threading.local): + def __getattribute__(self, attr): + rval = super(WeakLocal, self).__getattribute__(attr) + if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return super(WeakLocal, self).__setattr__(attr, value) + + +# NOTE(mikal): the name "store" should be deprecated in the future +store = WeakLocal() + +# A "weak" store uses weak references and allows an object to fall out of scope +# when it falls out of scope in the code that uses the thread local storage. A +# "strong" store will hold a reference to the object so that it never falls out +# of scope. +weak_store = WeakLocal() +strong_store = threading.local() diff --git a/ec2api/openstack/common/log.py b/ec2api/openstack/common/log.py new file mode 100644 index 00000000..73bfd2e9 --- /dev/null +++ b/ec2api/openstack/common/log.py @@ -0,0 +1,689 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""OpenStack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. 
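+
+A minimal usage sketch (module path as laid out in this patch; `context` is
+assumed to be a RequestContext-like object from the calling service):
+
+    from ec2api.openstack.common import log as logging
+
+    LOG = logging.getLogger(__name__)
+    LOG.info('something happened', context=context)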
+ +""" + +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import sys +import traceback + +from oslo.config import cfg +import six +from six import moves + +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import importutils +from ec2api.openstack.common import jsonutils +from ec2api.openstack.common import local + + +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config-append', + metavar='PATH', + deprecated_name='log-config', + help='The name of a logging configuration file. This file ' + 'is appended to any existing logging configuration ' + 'files. For details about logging configuration files, ' + 'see the Python logging module documentation.'), + cfg.StrOpt('log-format', + metavar='FORMAT', + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s .'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths.'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and will change in J to honor RFC5424.'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Enables or disables syslog rfc5424 format ' + 'for logging. If enabled, prefixes the MSG part of the ' + 'syslog message with APP-NAME (RFC5424). 
The ' + 'format without the APP-NAME is deprecated in I, ' + 'and will be removed in J.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='Syslog facility to receive log lines.') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error.') +] + +DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', + 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', + 'oslo.messaging=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN', + 'urllib3.connectionpool=WARN', 'websocket=WARN'] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', + help='Format string to use for log messages with context.'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='Format string to use for log messages without context.'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='Data to append to log format when level is DEBUG.'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='Prefix each line of exception output with this format.'), + cfg.ListOpt('default_log_levels', + default=DEFAULT_LOG_LEVELS, + help='List of logger=LEVEL pairs.'), + cfg.BoolOpt('publish_errors', + default=False, + help='Enables or disables publication of error events.'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='Enables or disables fatal status of deprecations.'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. + cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='The format for an instance that is passed with the log ' + 'message.'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='The format for an instance UUID that is passed with the ' + 'log message.'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. 
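+# AUDIT maps to numeric level 21 (one above INFO), so audit records are
+# emitted whenever INFO-level logging is enabled.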
+logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + return None + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + if six.PY3: + # In Python 3, the code fails because the 'manager' attribute + # cannot be found when using a LoggerAdapter as the + # underlying logger. Work around this issue. + self._logger.manager = self._logger.logger.manager + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + self._deprecated_messages_sent = dict() + + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + """Call this method when a deprecated feature is used. + + If the system is configured for fatal deprecations then the message + is logged at the 'critical' level and :class:`DeprecatedConfig` will + be raised. + + Otherwise, the message will be logged (once) at the 'warn' level. + + :raises: :class:`DeprecatedConfig` if the system is configured for + fatal deprecations. + + """ + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + + # Using a list because a tuple with dict can't be stored in a set. + sent_args = self._deprecated_messages_sent.setdefault(msg, list()) + + if args in sent_args: + # Already logged this message, so don't log it again. 
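+            # (Deduplication is per message and exact args tuple.)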
+            return
+
+        sent_args.append(args)
+        self.warn(stdmsg, *args, **kwargs)
+
+    def process(self, msg, kwargs):
+        # NOTE(mrodden): catch any Message/other object and
+        #                coerce to unicode before they can get
+        #                to the python logging and possibly
+        #                cause string encoding trouble
+        if not isinstance(msg, six.string_types):
+            msg = six.text_type(msg)
+
+        if 'extra' not in kwargs:
+            kwargs['extra'] = {}
+        extra = kwargs['extra']
+
+        context = kwargs.pop('context', None)
+        if not context:
+            context = getattr(local.store, 'context', None)
+        if context:
+            extra.update(_dictify_context(context))
+
+        instance = kwargs.pop('instance', None)
+        instance_uuid = (extra.get('instance_uuid') or
+                         kwargs.pop('instance_uuid', None))
+        instance_extra = ''
+        if instance:
+            instance_extra = CONF.instance_format % instance
+        elif instance_uuid:
+            instance_extra = (CONF.instance_uuid_format
+                              % {'uuid': instance_uuid})
+        extra['instance'] = instance_extra
+
+        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
+
+        extra['project'] = self.project
+        extra['version'] = self.version
+        extra['extra'] = extra.copy()
+        return msg, kwargs
+
+
+class JSONFormatter(logging.Formatter):
+    def __init__(self, fmt=None, datefmt=None):
+        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
+        #                since logging.config.fileConfig passes it.
+        self.datefmt = datefmt
+
+    def formatException(self, ei, strip_newlines=True):
+        lines = traceback.format_exception(*ei)
+        if strip_newlines:
+            lines = [moves.filter(
+                lambda x: x,
+                line.rstrip().splitlines()) for line in lines]
+            lines = list(itertools.chain(*lines))
+        return lines
+
+    def format(self, record):
+        message = {'message': record.getMessage(),
+                   'asctime': self.formatTime(record, self.datefmt),
+                   'name': record.name,
+                   'msg': record.msg,
+                   'args': record.args,
+                   'levelname': record.levelname,
+                   'levelno': record.levelno,
+                   'pathname': record.pathname,
+                   'filename': record.filename,
+                   'module': record.module,
+                   'lineno': record.lineno,
+                   'funcname': record.funcName,
+                   'created': record.created,
+                   'msecs': record.msecs,
+                   'relative_created': record.relativeCreated,
+                   'thread': record.thread,
+                   'thread_name': record.threadName,
+                   'process_name': record.processName,
+                   'process': record.process,
+                   'traceback': None}
+
+        if hasattr(record, 'extra'):
+            message['extra'] = record.extra
+
+        if record.exc_info:
+            message['traceback'] = self.formatException(record.exc_info)
+
+        return jsonutils.dumps(message)
+
+
+def _create_logging_excepthook(product_name):
+    def logging_excepthook(exc_type, value, tb):
+        extra = {'exc_info': (exc_type, value, tb)}
+        getLogger(product_name).critical(
+            "".join(traceback.format_exception_only(exc_type, value)),
+            **extra)
+    return logging_excepthook
+
+
+class LogConfigError(Exception):
+
+    message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+    def __init__(self, log_config, err_msg):
+        self.log_config = log_config
+        self.err_msg = err_msg
+
+    def __str__(self):
+        return self.message % dict(log_config=self.log_config,
+                                   err_msg=self.err_msg)
+
+
+def _load_log_config(log_config_append):
+    try:
+        logging.config.fileConfig(log_config_append,
+                                  disable_existing_loggers=False)
+    except (moves.configparser.Error, KeyError) as exc:
+        raise LogConfigError(log_config_append, six.text_type(exc))
+
+
+def setup(product_name, version='unknown'):
+    """Setup logging."""
+    if CONF.log_config_append:
+        _load_log_config(CONF.log_config_append)
+    else:
+        _setup_logging_from_conf(product_name, version)
+    sys.excepthook = _create_logging_excepthook(product_name)
+
+
+def set_defaults(logging_context_format_string=None,
+                 default_log_levels=None):
+    # Just in case the caller is not setting the
+    # default_log_level. This is insurance because
+    # we introduced the default_log_level parameter
+    # later in a backwards-incompatible change
+    if default_log_levels is not None:
+        cfg.set_defaults(
+            log_opts,
+            default_log_levels=default_log_levels)
+    if logging_context_format_string is not None:
+        cfg.set_defaults(
+            log_opts,
+            logging_context_format_string=logging_context_format_string)
+
+
+def _find_facility_from_conf():
+    facility_names = logging.handlers.SysLogHandler.facility_names
+    facility = getattr(logging.handlers.SysLogHandler,
+                       CONF.syslog_log_facility,
+                       None)
+
+    if facility is None and CONF.syslog_log_facility in facility_names:
+        facility = facility_names.get(CONF.syslog_log_facility)
+
+    if facility is None:
+        valid_facilities = facility_names.keys()
+        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
+                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
+                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
+                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
+                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
+        valid_facilities.extend(consts)
+        raise TypeError(_('syslog facility must be one of: %s') %
+                        ', '.join("'%s'" % fac
+                                  for fac in valid_facilities))
+
+    return facility
+
+
+class RFCSysLogHandler(logging.handlers.SysLogHandler):
+    def __init__(self, *args, **kwargs):
+        self.binary_name = _get_binary_name()
+        # Do not use super() unless type(logging.handlers.SysLogHandler)
+        # is 'type' (Python 2.7).
+        # Use old style calls, if the type is 'classobj' (Python 2.6)
+        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
+
+    def format(self, record):
+        # Do not use super() unless type(logging.handlers.SysLogHandler)
+        # is 'type' (Python 2.7).
+        # Use old style calls, if the type is 'classobj' (Python 2.6)
+        msg = logging.handlers.SysLogHandler.format(self, record)
+        msg = self.binary_name + ' ' + msg
+        return msg
+
+
+def _setup_logging_from_conf(project, version):
+    log_root = getLogger(None).logger
+    for handler in log_root.handlers:
+        log_root.removeHandler(handler)
+
+    if CONF.use_syslog:
+        facility = _find_facility_from_conf()
+        # TODO(bogdando) use the format provided by RFCSysLogHandler
+        #                after existing syslog format deprecation in J
+        if CONF.use_syslog_rfc_format:
+            syslog = RFCSysLogHandler(address='/dev/log',
+                                      facility=facility)
+        else:
+            syslog = logging.handlers.SysLogHandler(address='/dev/log',
+                                                    facility=facility)
+        log_root.addHandler(syslog)
+
+    logpath = _get_log_file_path()
+    if logpath:
+        filelog = logging.handlers.WatchedFileHandler(logpath)
+        log_root.addHandler(filelog)
+
+    if CONF.use_stderr:
+        streamlog = ColorHandler()
+        log_root.addHandler(streamlog)
+
+    elif not logpath:
+        # pass sys.stdout as a positional argument
+        # python2.6 calls the argument strm, in 2.7 it's stream
+        streamlog = logging.StreamHandler(sys.stdout)
+        log_root.addHandler(streamlog)
+
+    if CONF.publish_errors:
+        try:
+            handler = importutils.import_object(
+                "ec2api.openstack.common.log_handler.PublishErrorsHandler",
+                logging.ERROR)
+        except ImportError:
+            handler = importutils.import_object(
+                "oslo.messaging.notify.log_handler.PublishErrorsHandler",
+                logging.ERROR)
+        log_root.addHandler(handler)
+
+    datefmt = CONF.log_date_format
+    for handler in log_root.handlers:
+        # NOTE(alaski): CONF.log_format overrides everything currently.
This + # should be deprecated in favor of context aware formatting. + if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') + else: + handler.setFormatter(ContextFormatter(project=project, + version=version, + datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + logger = logging.getLogger(mod) + # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name + # to integer code. + if sys.version_info < (2, 7): + level = logging.getLevelName(level_name) + logger.setLevel(level) + else: + logger.setLevel(level_name) + + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. + """ + return LazyAdapter(name, version) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg.rstrip()) + + +class ContextFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. + + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + If available, uses the context value stored in TLS - local.store.context + + """ + + def __init__(self, *args, **kwargs): + """Initialize ContextFormatter instance + + Takes additional keyword arguments which can be used in the message + format string. 
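+        The values are exposed to format strings as %(project)s and
+        %(version)s.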
+ + :keyword project: project name + :type project: string + :keyword version: project version + :type version: string + + """ + + self.project = kwargs.pop('project', 'unknown') + self.version = kwargs.pop('version', 'unknown') + + logging.Formatter.__init__(self, *args, **kwargs) + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + + # store project info + record.project = self.project + record.version = self.version + + # store request info + context = getattr(local.store, 'context', None) + if context: + d = _dictify_context(context) + for k, v in d.items(): + setattr(record, k, v) + + # NOTE(sdague): default the fancier formatting params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color', 'user_identity'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id'): + fmt = CONF.logging_context_format_string + else: + fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + fmt += " " + CONF.logging_debug_format_suffix + + if sys.version_info < (3, 2): + self._fmt = fmt + else: + self._style = logging.PercentStyle(fmt) + self._fmt = self._style._fmt + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = moves.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/ec2api/openstack/common/loopingcall.py b/ec2api/openstack/common/loopingcall.py new file mode 100644 index 00000000..1b107184 --- /dev/null +++ b/ec2api/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys +import time + +from eventlet import event +from eventlet import greenthread + +from ec2api.openstack.common.gettextutils import _LE, _LW +from ec2api.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +# NOTE(zyluo): This lambda function was declared to avoid mocking collisions +# with time.time() called in the standard logging module +# during unittests. +_ts = lambda: time.time() + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCallBase. + + The poll-function passed to LoopingCallBase can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCallBase.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCallBase.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = _ts() + self.f(*self.args, **self.kw) + end = _ts() + if not self._running: + break + delay = end - start - interval + if delay > 0: + LOG.warn(_LW('task %(func_name)s run outlasted ' + 'interval by %(delay).2f sec'), + {'func_name': repr(self.f), 'delay': delay}) + greenthread.sleep(-delay if delay < 0 else 0) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. 
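+
+    Raising LoopingCallDone from the function stops the loop; the
+    exception's retvalue becomes the return value of wait().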
+ """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug('Dynamic looping call %(func_name)s sleeping ' + 'for %(idle).02f seconds', + {'func_name': repr(self.f), 'idle': idle}) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/ec2api/openstack/common/service.py b/ec2api/openstack/common/service.py new file mode 100644 index 00000000..8ea9803f --- /dev/null +++ b/ec2api/openstack/common/service.py @@ -0,0 +1,512 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo.config import cfg + +from ec2api.openstack.common import eventlet_backdoor +from ec2api.openstack.common.gettextutils import _LE, _LI, _LW +from ec2api.openstack.common import importutils +from ec2api.openstack.common import log as logging +from ec2api.openstack.common import systemd +from ec2api.openstack.common import threadgroup + + +rpc = importutils.try_import('ec2api.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. 
+ is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. + + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. + LOG.exception(_LE('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + systemd.notify_once() + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self, wait_interval=0.01): + """Constructor. + + :param wait_interval: The interval to sleep for between checks + of child process exit. 
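+
+        A pipe is shared with every child process; when the parent dies
+        unexpectedly the read end reaches EOF and the child exits too
+        (see _pipe_watcher).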
+ """ + self.children = {} + self.sigcaught = None + self.running = True + self.wait_interval = wait_interval + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_LI('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. + try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Child caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_LE('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.launch_service(service) + return launcher + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. 
+ if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_LI('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_LI('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_LI('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_LI('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_LW('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(self.wait_interval) + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + systemd.notify_once() + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + while True: + self.handle_signal() + self._respawn_children() + # No signal means that stop was called. Don't clean up here. + if not self.sigcaught: + return + + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + except eventlet.greenlet.GreenletExit: + LOG.info(_LI("Wait called after thread killed. 
Cleaning up.")) + + self.stop() + + def stop(self): + """Terminate child processes and wait on each.""" + self.running = False + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + + def start(self): + pass + + def stop(self): + self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=1): + if workers is None or workers == 1: + launcher = ServiceLauncher() + launcher.launch_service(service) + else: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + + return launcher diff --git a/ec2api/openstack/common/strutils.py b/ec2api/openstack/common/strutils.py new file mode 100644 index 00000000..d9beb811 --- /dev/null +++ b/ec2api/openstack/common/strutils.py @@ -0,0 +1,295 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import math +import re +import sys +import unicodedata + +import six + +from ec2api.openstack.common.gettextutils import _ + + +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), +} + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +# NOTE(flaper87): The following 3 globals are used by `mask_password` +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' + '.*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False, default=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else returns the value specified by 'default'. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, six.string_types): + subject = six.text_type(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return default + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming text/bytes string using `incoming` if they're not + already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. 
+ :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, six.text_type): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming text/bytes string using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, six.text_type): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + else: + return text + + +def string_to_bytes(text, unit_system='IEC', return_int=False): + """Converts a string into an float representation of bytes. + + The units supported for IEC :: + + Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) + KB, KiB, MB, MiB, GB, GiB, TB, TiB + + The units supported for SI :: + + kb(it), Mb(it), Gb(it), Tb(it) + kB, MB, GB, TB + + Note that the SI unit system does not support capital letter 'K' + + :param text: String input for bytes size conversion. + :param unit_system: Unit system for byte size conversion. + :param return_int: If True, returns integer representation of text + in bytes. (default: decimal) + :returns: Numerical representation of text in bytes. + :raises ValueError: If text has an invalid value. + + """ + try: + base, reg_ex = UNIT_SYSTEM_INFO[unit_system] + except KeyError: + msg = _('Invalid unit system: "%s"') % unit_system + raise ValueError(msg) + match = reg_ex.match(text) + if match: + magnitude = float(match.group(1)) + unit_prefix = match.group(2) + if match.group(3) in ['b', 'bit']: + magnitude /= 8 + else: + msg = _('Invalid string format: %s') % text + raise ValueError(msg) + if not unit_prefix: + res = magnitude + else: + res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) + if return_int: + return int(math.ceil(res)) + return res + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. + + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. 
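+
+    For example, to_slug(u'Hello, World!') returns u'hello-world'.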
+ + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. + if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message diff --git a/ec2api/openstack/common/systemd.py b/ec2api/openstack/common/systemd.py new file mode 100644 index 00000000..51e2e493 --- /dev/null +++ b/ec2api/openstack/common/systemd.py @@ -0,0 +1,106 @@ +# Copyright 2012-2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper module for systemd service readiness notification. +""" + +import os +import socket +import sys + +from ec2api.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def _abstractify(socket_name): + if socket_name.startswith('@'): + # abstract namespace socket + socket_name = '\0%s' % socket_name[1:] + return socket_name + + +def _sd_notify(unset_env, msg): + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + sock.connect(_abstractify(notify_socket)) + sock.sendall(msg) + if unset_env: + del os.environ['NOTIFY_SOCKET'] + except EnvironmentError: + LOG.debug("Systemd notification failed", exc_info=True) + finally: + sock.close() + + +def notify(): + """Send notification to Systemd that service is ready. 
+
+    For details see
+    http://www.freedesktop.org/software/systemd/man/sd_notify.html
+    """
+    _sd_notify(False, 'READY=1')
+
+
+def notify_once():
+    """Send notification once to Systemd that service is ready.
+
+    Systemd sets NOTIFY_SOCKET environment variable with the name of the
+    socket listening for notifications from services.
+    This method removes the NOTIFY_SOCKET environment variable to ensure
+    notification is sent only once.
+    """
+    _sd_notify(True, 'READY=1')
+
+
+def onready(notify_socket, timeout):
+    """Wait for systemd style notification on the socket.
+
+    :param notify_socket: local socket address
+    :type notify_socket: string
+    :param timeout: socket timeout
+    :type timeout: float
+    :returns: 0 service ready
+              1 service not ready
+              2 timeout occurred
+    """
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+    sock.settimeout(timeout)
+    sock.bind(_abstractify(notify_socket))
+    try:
+        msg = sock.recv(512)
+    except socket.timeout:
+        return 2
+    finally:
+        sock.close()
+    if 'READY=1' in msg:
+        return 0
+    else:
+        return 1
+
+
+if __name__ == '__main__':
+    # simple CLI for testing
+    if len(sys.argv) == 1:
+        notify()
+    elif len(sys.argv) >= 2:
+        timeout = float(sys.argv[1])
+        notify_socket = os.getenv('NOTIFY_SOCKET')
+        if notify_socket:
+            retval = onready(notify_socket, timeout)
+            sys.exit(retval)
diff --git a/ec2api/openstack/common/threadgroup.py b/ec2api/openstack/common/threadgroup.py
new file mode 100644
index 00000000..22571d5a
--- /dev/null
+++ b/ec2api/openstack/common/threadgroup.py
@@ -0,0 +1,147 @@
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import threading
+
+import eventlet
+from eventlet import greenpool
+
+from ec2api.openstack.common import log as logging
+from ec2api.openstack.common import loopingcall
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _thread_done(gt, *args, **kwargs):
+    """Callback function to be passed to GreenThread.link() when we spawn().
+    Calls the :class:`ThreadGroup` to notify it.
+
+    """
+    kwargs['group'].thread_done(kwargs['thread'])
+
+
+class Thread(object):
+    """Wrapper around a greenthread that holds a reference to the
+    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup`
+    when it is done so it can be removed from the threads list.
+    """
+    def __init__(self, thread, group):
+        self.thread = thread
+        self.thread.link(_thread_done, group=group, thread=self)
+
+    def stop(self):
+        self.thread.kill()
+
+    def wait(self):
+        return self.thread.wait()
+
+    def link(self, func, *args, **kwargs):
+        self.thread.link(func, *args, **kwargs)
+
+
+class ThreadGroup(object):
+    """The point of the ThreadGroup class is to:
+
+    * keep track of timers and greenthreads (making it easier to stop them
+      when need be).
+    * provide an easy API to add timers.
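+
+    A service typically calls add_thread() for long-running workers and
+    add_timer() or add_dynamic_timer() for periodic tasks.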
+ """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + return th + + def thread_done(self, thread): + self.threads.remove(thread) + + def _stop_threads(self): + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + # don't kill the current thread. + continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + def stop_timers(self): + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def stop(self, graceful=False): + """stop function has the option of graceful=True/False. + + * In case of graceful=True, wait for all threads to be finished. + Never kill threads. + * In case of graceful=False, kill threads immediately. + """ + self.stop_timers() + if graceful: + # In case of graceful=True, wait for all threads to be + # finished, never kill threads + self.wait() + else: + # In case of graceful=False(Default), kill threads + # immediately + self._stop_threads() + + def wait(self): + for x in self.timers: + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + continue + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/ec2api/openstack/common/timeutils.py b/ec2api/openstack/common/timeutils.py new file mode 100644 index 00000000..c48da95f --- /dev/null +++ b/ec2api/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, six.string_types): + after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns an iso8601 formatted date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. 
+ """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert utcnow.override_time is not None + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. + """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. + + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/ec2api/openstack/common/uuidutils.py b/ec2api/openstack/common/uuidutils.py new file mode 100644 index 00000000..234b880c --- /dev/null +++ b/ec2api/openstack/common/uuidutils.py @@ -0,0 +1,37 @@ +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. 
+ + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/ec2api/paths.py b/ec2api/paths.py new file mode 100644 index 00000000..aac44876 --- /dev/null +++ b/ec2api/paths.py @@ -0,0 +1,64 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +from oslo.config import cfg + +path_opts = [ + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the ec2api python module is installed'), + cfg.StrOpt('bindir', + default=os.path.join(sys.prefix, 'local', 'bin'), + help='Directory where ec2api binaries are installed'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining ec2api's state"), +] + +CONF = cfg.CONF +CONF.register_opts(path_opts) + + +def basedir_def(*args): + """Return an uninterpolated path relative to $pybasedir.""" + return os.path.join('$pybasedir', *args) + + +def bindir_def(*args): + """Return an uninterpolated path relative to $bindir.""" + return os.path.join('$bindir', *args) + + +def state_path_def(*args): + """Return an uninterpolated path relative to $state_path.""" + return os.path.join('$state_path', *args) + + +def basedir_rel(*args): + """Return a path relative to $pybasedir.""" + return os.path.join(CONF.pybasedir, *args) + + +def bindir_rel(*args): + """Return a path relative to $bindir.""" + return os.path.join(CONF.bindir, *args) + + +def state_path_rel(*args): + """Return a path relative to $state_path.""" + return os.path.join(CONF.state_path, *args) diff --git a/ec2api/service.py b/ec2api/service.py new file mode 100644 index 00000000..ecaa430b --- /dev/null +++ b/ec2api/service.py @@ -0,0 +1,163 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
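[Editor's note: the module that follows (ec2api/service.py) wires WSGIService into the oslo service machinery through serve() and wait(). For orientation, a console-script launcher built on it looks roughly like the sketch below; config.parse_args is an assumed helper name, so check ec2api/config.py in this patch for the actual entry point.]

import sys

from ec2api import config   # parse_args is assumed; see ec2api/config.py
from ec2api import service


def main():
    # parse config files / CLI options before touching CONF-backed opts
    config.parse_args(sys.argv)
    # 'ec2api' must match an app name in etc/ec2api/api-paste.ini
    server = service.WSGIService('ec2api')
    service.serve(server, workers=server.workers)
    service.wait()


if __name__ == '__main__':
    main()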
+ +"""Generic Node base class for all workers that run on hosts.""" + +import multiprocessing + +from oslo.config import cfg + +from ec2api import exception +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import importutils +from ec2api.openstack.common import log as logging +from ec2api.openstack.common import service +from ec2api import wsgi + +LOG = logging.getLogger(__name__) + +service_opts = [ + cfg.BoolOpt('use_ssl', + default=False, + help='Enable ssl connections or not'), + cfg.StrOpt('ec2api_listen', + default="0.0.0.0", + help='The IP address on which the EC2 API will listen.'), + cfg.IntOpt('ec2api_listen_port', + default=8788, + help='The port on which the EC2 API will listen.'), + cfg.IntOpt('ec2api_workers', + help='Number of workers for EC2 API service. The default will ' + 'be equal to the number of CPUs available.'), + cfg.IntOpt('service_down_time', + default=60, + help='Maximum time since last check-in for up service'), + ] + +CONF = cfg.CONF +CONF.register_opts(service_opts) + + +class WSGIService(object): + """Provides ability to launch API from a 'paste' configuration.""" + + def __init__(self, name, loader=None, use_ssl=False, max_url_len=None): + """Initialize, but do not start the WSGI server. + + :param name: The name of the WSGI server given to the loader. + :param loader: Loads the WSGI application using the given name. + :returns: None + + """ + self.name = name + self.manager = self._get_manager() + self.loader = loader or wsgi.Loader() + self.app = self.loader.load_app(name) + self.host = getattr(CONF, 'ec2api_listen', "0.0.0.0") + self.port = getattr(CONF, 'ec2api_listen_port', 0) + self.workers = (getattr(CONF, 'ec2api_workers', None) or + self.cpu_count()) + if self.workers and self.workers < 1: + worker_name = '%s_workers' % name + msg = (_("%(worker_name)s value of %(workers)s is invalid, " + "must be greater than 0") % + {'worker_name': worker_name, + 'workers': str(self.workers)}) + raise exception.InvalidInput(msg) + self.use_ssl = use_ssl + self.server = wsgi.Server(name, + self.app, + host=self.host, + port=self.port, + use_ssl=self.use_ssl, + max_url_len=max_url_len) + # Pull back actual port used + self.port = self.server.port + + def cpu_count(self): + try: + return multiprocessing.cpu_count() + except NotImplementedError: + return 1 + + def _get_manager(self): + """Initialize a Manager object appropriate for this service. + + Use the service name to look up a Manager subclass from the + configuration and initialize an instance. If no class name + is configured, just return None. + + :returns: a Manager instance, or None. + + """ + fl = '%s_manager' % self.name + if fl not in CONF: + return None + + manager_class_name = CONF.get(fl, None) + if not manager_class_name: + return None + + manager_class = importutils.import_class(manager_class_name) + return manager_class() + + def start(self): + """Start serving this service using loaded configuration. + + Also, retrieve updated port number in case '0' was passed in, which + indicates a random port should be used. + + :returns: None + + """ + if self.manager: + self.manager.init_host() + self.manager.pre_start_hook() + self.server.start() + if self.manager: + self.manager.post_start_hook() + + def stop(self): + """Stop serving this API. + + :returns: None + + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. 
+ + :returns: None + + """ + self.server.wait() + + +# NOTE(vish): the global launcher is to maintain the existing +# functionality of calling service.serve + +# service.wait +_launcher = None + + +def serve(server, workers=None): + global _launcher + if _launcher: + raise RuntimeError(_('serve() can only be called once')) + + _launcher = service.launch(server, workers=workers) + + +def wait(): + _launcher.wait() diff --git a/ec2api/tests/__init__.py b/ec2api/tests/__init__.py new file mode 100644 index 00000000..831705e9 --- /dev/null +++ b/ec2api/tests/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`ec2api.tests` -- EC2api Unittests +===================================================== + +.. automodule:: ec2api.tests + :platform: Unix +""" + +# See http://code.google.com/p/python-nose/issues/detail?id=373 +# The code below enables nosetests to work with i18n _() blocks +import __builtin__ +setattr(__builtin__, '_', lambda x: x) diff --git a/ec2api/tests/fakes_request_response.py b/ec2api/tests/fakes_request_response.py new file mode 100644 index 00000000..23aca215 --- /dev/null +++ b/ec2api/tests/fakes_request_response.py @@ -0,0 +1,312 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
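[Editor's note: the fixture module that follows pairs raw EC2 query parameters in dotted notation (DOTTED_FAKE_PARAMS) with the nested structures they decode to (DICT_FAKE_PARAMS). The real decoding lives in the API request machinery; purely to illustrate the convention the fixtures encode (this helper is not part of the patch), the mapping works roughly like this:]

import re


def _to_snake(name):
    # CamelCase query keys become snake_case dict keys,
    # e.g. 'FakeElemKey1' -> 'fake_elem_key_1'
    return re.sub('([a-z])([A-Z0-9])', r'\1_\2', name).lower()


def _coerce(value):
    # best-effort typing of query string values
    if value in ('True', 'False'):
        return value == 'True'
    try:
        return int(value)
    except ValueError:
        return value


def unflatten(params):
    """Fold dotted EC2 params into nested dicts and 1-indexed lists."""
    result = {}
    for key, value in sorted(params.items()):
        node = result
        parts = key.split('.')
        for i, part in enumerate(parts[:-1]):
            if part.isdigit():
                idx = int(part) - 1      # EC2 list indices start at 1
                while len(node) <= idx:
                    node.append({})
                node = node[idx]
            else:
                nested = [] if parts[i + 1].isdigit() else {}
                node = node.setdefault(_to_snake(part), nested)
        node[_to_snake(parts[-1])] = _coerce(value)
    return result

# under these rules, unflatten(DOTTED_FAKE_PARAMS) yields DICT_FAKE_PARAMS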
+ + +from ec2api.tests import tools + +XML_RESULT_TEMPLATE = ''' +<%(action)sResponse + xmlns="http://ec2.amazonaws.com/doc/%(api_version)s/"> + %(request_id)s + %(data)s +''' +XML_ERROR_TEMPLATE = ''' + + + %(code)s%(message)s + + %(request_id)s +''' + +XML_FAKE_RESULT = ''' + + + true + false + 1234 + fake + + + fake + + + fake + + + + + + + + + fake + + + + + + + + + + fake + + + fake + + + + + + +''' +DICT_FAKE_RESULT_DATA = { + 'fakeInfo': { + 'fakeNone': None, + 'fakeTrue': True, + 'fakeFalse': False, + 'fakeInt': 1234, + 'fakeStr': 'fake', + 'fakeSet': [{'fakeData': 'fake'}, + {'fakeData': 'fake'}], + }, + 'fakeEmptySet': [], + 'fakeComplexSet': [ + {'fakeSubSet': [{'fakeData': 'fake'}, + {'fakeData': None}]}, + {'fakeSubSet': [{'fakeData': 'fake'}, + {'fakeData': 'fake'}]}, + ], +} +DICT_FAKE_RESULT = { + 'FakeActionResponse': tools.update_dict( + DICT_FAKE_RESULT_DATA, + {'requestId': None}) +} + +XML_SINGLE_RESULT = ''' + + req-8a80bb71-1e1d-49be-819f-fba429b0ddf1 + pending + + vol-00000001 + 1 + + 2014-06-04T19:55:55.448117 + + snap-00000001 + +''' +DICT_SINGLE_RESULT = { + 'CreateSnapshotResponse': { + 'status': 'pending', + 'description': None, + 'volumeId': 'vol-00000001', + 'volumeSize': 1, + 'progress': None, + 'startTime': '2014-06-04T19:55:55.448117', + 'ownerId': None, + 'snapshotId': 'snap-00000001', + 'requestId': 'req-8a80bb71-1e1d-49be-819f-fba429b0ddf1', + } +} + +XML_RESULT_SET = ''' + + req-1fc541a8-477d-4928-a90e-4448ea57ba51 + + + + 77dcabaee8ea4a8fbae697ddc09afdaf + true + aki-00000001 + available + + None (cirros-0.3.2-x86_64-uec-kernel) + instance-store + /dev/sda1 + kernel + cirros-0.3.2-x86_64-uec-kernel + + + + 77dcabaee8ea4a8fbae697ddc09afdaf + true + ari-00000002 + available + + None (cirros-0.3.2-x86_64-uec-ramdisk) + instance-store + /dev/sda1 + ramdisk + cirros-0.3.2-x86_64-uec-ramdisk + + + cirros-0.3.2-x86_64-uec + 77dcabaee8ea4a8fbae697ddc09afdaf + true + ami-00000003 + available + instance-store + + None (cirros-0.3.2-x86_64-uec) + aki-00000001 + ari-00000002 + /dev/sda1 + machine + + + + + 77dcabaee8ea4a8fbae697ddc09afdaf + true + ami-00000004 + available + + None (Fedora-x86_64-20-20131211.1-sda) + instance-store + /dev/sda1 + machine + Fedora-x86_64-20-20131211.1-sda + + + +''' +DICT_RESULT_SET = { + 'DescribeImagesResponse': { + 'imagesSet': [{ + 'description': None, + 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', + 'isPublic': True, + 'imageId': 'aki-00000001', + 'imageState': 'available', + 'architecture': None, + 'imageLocation': 'None (cirros-0.3.2-x86_64-uec-kernel)', + 'rootDeviceType': 'instance-store', + 'rootDeviceName': '/dev/sda1', + 'imageType': 'kernel', + 'name': 'cirros-0.3.2-x86_64-uec-kernel', + }, + { + 'description': None, + 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', + 'isPublic': True, + 'imageId': 'ari-00000002', + 'imageState': 'available', + 'architecture': None, + 'imageLocation': 'None (cirros-0.3.2-x86_64-uec-ramdisk)', + 'rootDeviceType': 'instance-store', + 'rootDeviceName': '/dev/sda1', + 'imageType': 'ramdisk', + 'name': 'cirros-0.3.2-x86_64-uec-ramdisk', + }, + { + 'name': 'cirros-0.3.2-x86_64-uec', + 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', + 'isPublic': True, + 'imageId': 'ami-00000003', + 'imageState': 'available', + 'rootDeviceType': 'instance-store', + 'architecture': None, + 'imageLocation': 'None (cirros-0.3.2-x86_64-uec)', + 'kernelId': 'aki-00000001', + 'ramdiskId': 'ari-00000002', + 'rootDeviceName': '/dev/sda1', + 'imageType': 'machine', + 'description': None, + }, + { + 
'description': None, + 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', + 'isPublic': True, + 'imageId': 'ami-00000004', + 'imageState': 'available', + 'architecture': None, + 'imageLocation': 'None (Fedora-x86_64-20-20131211.1-sda)', + 'rootDeviceType': 'instance-store', + 'rootDeviceName': '/dev/sda1', + 'imageType': 'machine', + 'name': 'Fedora-x86_64-20-20131211.1-sda', + }], + 'requestId': 'req-1fc541a8-477d-4928-a90e-4448ea57ba51', + } +} + +XML_EMPTY_RESULT_SET = ''' + + a25fa489-f97f-428a-9d30-9fcb1e9b9b65 + + +''' +DICT_EMPTY_RESULT_SET = { + 'DescribeVolumesResponse': { + 'requestId': 'a25fa489-f97f-428a-9d30-9fcb1e9b9b65', + 'volumeSet': [], + } +} + +XML_ERROR = ''' +InvalidInstanceID.NotFound +Instance i-00000001 could not be found. +req-89eb083f-3c44-46e7-bc37-2c050ed7a9ce +''' +DICT_ERROR = { + 'Response': { + 'RequestID': 'req-89eb083f-3c44-46e7-bc37-2c050ed7a9ce', + 'Errors': { + 'Error': { + 'Code': 'InvalidInstanceID.NotFound', + 'Message': 'Instance i-00000001 could not be found.', + } + } + } +} + +XML_SILENT_OPERATIN_RESULT = ''' + + req-8a80bb71-1e1d-49be-819f-fba429b0ddf1 + true + +''' + +DOTTED_FAKE_PARAMS = { + 'FakeStr': 'fake', + 'FakeInt': '1234', + 'FakeBool': 'False', + 'FakeDict.FakeKey': 'fake', + 'FakeList.1.FakeElemKey': 'fake', + 'FakeList.2.FakeElemKey': 'fake', + 'FakeComplexList.1.FakeElemKey.1.FakeSubElemKey': 'fake', + 'FakeComplexList.1.FakeElemKey.2.FakeSubElemKey': 'fake', + 'FakeComplexList.1.FakeElemKey1': 'fake', + 'FakeComplexList.2.FakeElemKey.1.FakeSubElemKey': 'fake', + 'FakeComplexList.2.FakeElemKey.2.FakeSubElemKey': 'fake', + 'FakeComplexList.2.FakeElemKey1': 'fake', +} +DICT_FAKE_PARAMS = { + 'fake_str': 'fake', + 'fake_int': 1234, + 'fake_bool': False, + 'fake_dict': {'fake_key': 'fake'}, + 'fake_list': [{'fake_elem_key': 'fake'}, + {'fake_elem_key': 'fake'}], + 'fake_complex_list': [ + {'fake_elem_key': [{'fake_sub_elem_key': 'fake'}, + {'fake_sub_elem_key': 'fake'}], + 'fake_elem_key_1': 'fake'}, + {'fake_elem_key': [{'fake_sub_elem_key': 'fake'}, + {'fake_sub_elem_key': 'fake'}], + 'fake_elem_key_1': 'fake'}], +} diff --git a/ec2api/tests/matchers.py b/ec2api/tests/matchers.py new file mode 100644 index 00000000..dc99e208 --- /dev/null +++ b/ec2api/tests/matchers.py @@ -0,0 +1,451 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Matcher classes to be used inside of the testtools assertThat framework.""" + +import pprint + +from lxml import etree +from testtools import content + + +class DictKeysMismatch(object): + def __init__(self, d1only, d2only): + self.d1only = d1only + self.d2only = d2only + + def describe(self): + return ('Keys in d1 and not d2: %(d1only)s.' + ' Keys in d2 and not d1: %(d2only)s' % self.__dict__) + + def get_details(self): + return {} + + +class DictMismatch(object): + def __init__(self, key, d1_value, d2_value): + self.key = key + self.d1_value = d1_value + self.d2_value = d2_value + + def describe(self): + return ("Dictionaries do not match at %(key)s." 
+ " d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__) + + def get_details(self): + return {} + + +class DictMatches(object): + + def __init__(self, d1, approx_equal=False, tolerance=0.001): + self.d1 = d1 + self.approx_equal = approx_equal + self.tolerance = tolerance + + def __str__(self): + return 'DictMatches(%s)' % (pprint.pformat(self.d1)) + + # Useful assertions + def match(self, d2): + """Assert two dicts are equivalent. + + This is a 'deep' match in the sense that it handles nested + dictionaries appropriately. + + NOTE: + + If you don't care (or don't know) a given value, you can specify + the string DONTCARE as the value. This will cause that dict-item + to be skipped. + + """ + + d1keys = set(self.d1.keys()) + d2keys = set(d2.keys()) + if d1keys != d2keys: + d1only = d1keys - d2keys + d2only = d2keys - d1keys + return DictKeysMismatch(d1only, d2only) + + for key in d1keys: + d1value = self.d1[key] + d2value = d2[key] + try: + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= self.tolerance + except (ValueError, TypeError): + # If both values aren't convertible to float, just ignore + # ValueError if arg is a str, TypeError if it's something else + # (like None) + within_tolerance = False + + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): + matcher = DictMatches(d1value) + did_match = matcher.match(d2value) + if did_match is not None: + return did_match + elif 'DONTCARE' in (d1value, d2value): + continue + elif self.approx_equal and within_tolerance: + continue + elif d1value != d2value: + return DictMismatch(key, d1value, d2value) + + +class ListLengthMismatch(object): + def __init__(self, len1, len2): + self.len1 = len1 + self.len2 = len2 + + def describe(self): + return ('Length mismatch: len(L1)=%(len1)d != ' + 'len(L2)=%(len2)d' % self.__dict__) + + def get_details(self): + return {} + + +class DictListMatches(object): + + def __init__(self, l1, approx_equal=False, tolerance=0.001): + self.l1 = l1 + self.approx_equal = approx_equal + self.tolerance = tolerance + + def __str__(self): + return 'DictListMatches(%s)' % (pprint.pformat(self.l1)) + + # Useful assertions + def match(self, l2): + """Assert a list of dicts are equivalent.""" + + l1count = len(self.l1) + l2count = len(l2) + if l1count != l2count: + return ListLengthMismatch(l1count, l2count) + + for d1, d2 in zip(self.l1, l2): + matcher = DictMatches(d2, + approx_equal=self.approx_equal, + tolerance=self.tolerance) + did_match = matcher.match(d1) + if did_match: + return did_match + + +class SubDictMismatch(object): + def __init__(self, + key=None, + sub_value=None, + super_value=None, + keys=False): + self.key = key + self.sub_value = sub_value + self.super_value = super_value + self.keys = keys + + def describe(self): + if self.keys: + return "Keys between dictionaries did not match" + else: + return("Dictionaries do not match at %s. 
d1: %s d2: %s" + % (self.key, + self.super_value, + self.sub_value)) + + def get_details(self): + return {} + + +class IsSubDictOf(object): + + def __init__(self, super_dict): + self.super_dict = super_dict + + def __str__(self): + return 'IsSubDictOf(%s)' % (self.super_dict) + + def match(self, sub_dict): + """Assert a sub_dict is subset of super_dict.""" + if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())): + return SubDictMismatch(keys=True) + for k, sub_value in sub_dict.items(): + super_value = self.super_dict[k] + if isinstance(sub_value, dict): + matcher = IsSubDictOf(super_value) + did_match = matcher.match(sub_value) + if did_match is not None: + return did_match + elif 'DONTCARE' in (sub_value, super_value): + continue + else: + if sub_value != super_value: + return SubDictMismatch(k, sub_value, super_value) + + +class FunctionCallMatcher(object): + + def __init__(self, expected_func_calls): + self.expected_func_calls = expected_func_calls + self.actual_func_calls = [] + + def call(self, *args, **kwargs): + func_call = {'args': args, 'kwargs': kwargs} + self.actual_func_calls.append(func_call) + + def match(self): + dict_list_matcher = DictListMatches(self.expected_func_calls) + return dict_list_matcher.match(self.actual_func_calls) + + +class XMLMismatch(object): + """Superclass for XML mismatch.""" + + def __init__(self, state): + self.path = str(state) + self.expected = state.expected + self.actual = state.actual + + def describe(self): + return "%(path)s: XML does not match" % self.__dict__ + + def get_details(self): + return { + 'expected': content.text_content(self.expected), + 'actual': content.text_content(self.actual), + } + + +class XMLTagMismatch(XMLMismatch): + """XML tags don't match.""" + + def __init__(self, state, idx, expected_tag, actual_tag): + super(XMLTagMismatch, self).__init__(state) + self.idx = idx + self.expected_tag = expected_tag + self.actual_tag = actual_tag + + def describe(self): + return ("%(path)s: XML tag mismatch at index %(idx)d: " + "expected tag <%(expected_tag)s>; " + "actual tag <%(actual_tag)s>" % self.__dict__) + + +class XMLAttrKeysMismatch(XMLMismatch): + """XML attribute keys don't match.""" + + def __init__(self, state, expected_only, actual_only): + super(XMLAttrKeysMismatch, self).__init__(state) + self.expected_only = ', '.join(sorted(expected_only)) + self.actual_only = ', '.join(sorted(actual_only)) + + def describe(self): + return ("%(path)s: XML attributes mismatch: " + "keys only in expected: %(expected_only)s; " + "keys only in actual: %(actual_only)s" % self.__dict__) + + +class XMLAttrValueMismatch(XMLMismatch): + """XML attribute values don't match.""" + + def __init__(self, state, key, expected_value, actual_value): + super(XMLAttrValueMismatch, self).__init__(state) + self.key = key + self.expected_value = expected_value + self.actual_value = actual_value + + def describe(self): + return ("%(path)s: XML attribute value mismatch: " + "expected value of attribute %(key)s: %(expected_value)r; " + "actual value: %(actual_value)r" % self.__dict__) + + +class XMLTextValueMismatch(XMLMismatch): + """XML text values don't match.""" + + def __init__(self, state, expected_text, actual_text): + super(XMLTextValueMismatch, self).__init__(state) + self.expected_text = expected_text + self.actual_text = actual_text + + def describe(self): + return ("%(path)s: XML text value mismatch: " + "expected text value: %(expected_text)r; " + "actual value: %(actual_text)r" % self.__dict__) + + +class 
XMLUnexpectedChild(XMLMismatch): + """Unexpected child present in XML.""" + + def __init__(self, state, tag, idx): + super(XMLUnexpectedChild, self).__init__(state) + self.tag = tag + self.idx = idx + + def describe(self): + return ("%(path)s: XML unexpected child element <%(tag)s> " + "present at index %(idx)d" % self.__dict__) + + +class XMLExpectedChild(XMLMismatch): + """Expected child not present in XML.""" + + def __init__(self, state, tag, idx): + super(XMLExpectedChild, self).__init__(state) + self.tag = tag + self.idx = idx + + def describe(self): + return ("%(path)s: XML expected child element <%(tag)s> " + "not present at index %(idx)d" % self.__dict__) + + +class XMLMatchState(object): + """Maintain some state for matching. + + Tracks the XML node path and saves the expected and actual full + XML text, for use by the XMLMismatch subclasses. + """ + + def __init__(self, expected, actual): + self.path = [] + self.expected = expected + self.actual = actual + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, exc_tb): + self.path.pop() + return False + + def __str__(self): + return '/' + '/'.join(self.path) + + def node(self, tag, idx): + """Adds tag and index to the path; they will be popped off + + when the corresponding 'with' statement exits. + + :param tag: The element tag + :param idx: If not None, the integer index of the element + within its parent. Not included in the path + element if None. + """ + + if idx is not None: + self.path.append("%s[%d]" % (tag, idx)) + else: + self.path.append(tag) + return self + + +class XMLMatches(object): + """Compare XML strings. More complete than string comparison.""" + + def __init__(self, expected): + self.expected_xml = expected + self.expected = etree.fromstring(expected) + + def __str__(self): + return 'XMLMatches(%r)' % self.expected_xml + + def match(self, actual_xml): + actual = etree.fromstring(actual_xml) + + state = XMLMatchState(self.expected_xml, actual_xml) + result = self._compare_node(self.expected, actual, state, None) + + if result is False: + return XMLMismatch(state) + elif result is not True: + return result + + def _compare_node(self, expected, actual, state, idx): + """Recursively compares nodes within the XML tree.""" + + # Start by comparing the tags + if expected.tag != actual.tag: + return XMLTagMismatch(state, idx, expected.tag, actual.tag) + + with state.node(expected.tag, idx): + # Compare the attribute keys + expected_attrs = set(expected.attrib.keys()) + actual_attrs = set(actual.attrib.keys()) + if expected_attrs != actual_attrs: + expected_only = expected_attrs - actual_attrs + actual_only = actual_attrs - expected_attrs + return XMLAttrKeysMismatch(state, expected_only, actual_only) + + # Compare the attribute values + for key in expected_attrs: + expected_value = expected.attrib[key] + actual_value = actual.attrib[key] + + if 'DONTCARE' in (expected_value, actual_value): + continue + elif expected_value != actual_value: + return XMLAttrValueMismatch(state, key, expected_value, + actual_value) + + # Compare the contents of the node + if len(expected) == 0 and len(actual) == 0: + # No children, compare text values + if ('DONTCARE' not in (expected.text, actual.text) and + expected.text != actual.text): + return XMLTextValueMismatch(state, expected.text, + actual.text) + else: + expected_idx = 0 + actual_idx = 0 + while (expected_idx < len(expected) and + actual_idx < len(actual)): + # Ignore comments and processing instructions + # TODO(Vek): may interpret PIs in the future, to + # 
allow for, say, arbitrary ordering of some + # elements + if (expected[expected_idx].tag in + (etree.Comment, etree.ProcessingInstruction)): + expected_idx += 1 + continue + + # Compare the nodes + result = self._compare_node(expected[expected_idx], + actual[actual_idx], state, + actual_idx) + if result is not True: + return result + + # Step on to comparing the next nodes... + expected_idx += 1 + actual_idx += 1 + + # Make sure we consumed all nodes in actual + if actual_idx < len(actual): + return XMLUnexpectedChild(state, actual[actual_idx].tag, + actual_idx) + + # Make sure we consumed all nodes in expected + if expected_idx < len(expected): + for node in expected[expected_idx:]: + if (node.tag in + (etree.Comment, etree.ProcessingInstruction)): + continue + + return XMLExpectedChild(state, node.tag, actual_idx) + + # The nodes match + return True diff --git a/ec2api/tests/test_api_init.py b/ec2api/tests/test_api_init.py new file mode 100644 index 00000000..9bf23922 --- /dev/null +++ b/ec2api/tests/test_api_init.py @@ -0,0 +1,129 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import uuid + +import mock +from oslotest import base as test_base + +from ec2api import api +from ec2api.api import apirequest +from ec2api.api import cloud +from ec2api import exception +from ec2api.tests import fakes_request_response as fakes +from ec2api.tests import matchers +from ec2api import wsgi + + +class ApiInitTestCase(test_base.BaseTestCase): + + fake_context_class = collections.namedtuple('FakeRequestContext', + ['request_id']) + setattr(fake_context_class, 'to_dict', fake_context_class._asdict) + + def setUp(self): + super(ApiInitTestCase, self).setUp() + requester_patcher = mock.patch('ec2api.api.ec2client.EC2Requester') + self.requester_class = requester_patcher.start() + self.requester = self.requester_class.return_value + self.addCleanup(requester_patcher.stop) + + controller_patcher = mock.patch('ec2api.api.cloud.CloudController') + self.controller_class = controller_patcher.start() + self.controller = self.controller_class.return_value + self.addCleanup(controller_patcher.stop) + + self.fake_context = self.fake_context_class(str(uuid.uuid4())) + + ec2_request = apirequest.APIRequest('FakeAction', 'fake_v1', + {'Param': 'fake_param'}) + self.environ = {'REQUEST_METHOD': 'FAKE', + 'ec2.request': ec2_request, + 'ec2api.context': self.fake_context} + self.request = wsgi.Request(self.environ) + self.application = api.Executor() + + def test_execute(self): + self.controller.fake_action.return_value = {'fakeTag': 'fake_data'} + + res = self.request.send(self.application) + + self.assertEqual(200, res.status_code) + self.assertEqual('text/xml', res.content_type) + expected_xml = fakes.XML_RESULT_TEMPLATE % { + 'action': 'FakeAction', + 'api_version': 'fake_v1', + 'request_id': self.fake_context.request_id, + 'data': 'fake_data'} + self.assertThat(res.body, matchers.XMLMatches(expected_xml)) + 
self.controller.fake_action.assert_called_once_with(self.fake_context, + param='fake_param') + + def test_execute_error(self): + def do_check(ex, status, code, message): + self.controller.reset_mock() + self.controller.fake_action.side_effect = ex + + res = self.request.send(self.application) + + self.assertEqual(status, res.status_code) + self.assertEqual('text/xml', res.content_type) + expected_xml = fakes.XML_ERROR_TEMPLATE % { + 'code': code, + 'message': message, + 'request_id': self.fake_context.request_id} + self.assertThat(res.body, matchers.XMLMatches(expected_xml)) + self.controller.fake_action.assert_called_once_with( + self.fake_context, param='fake_param') + + do_check(exception.EC2Exception('fake_msg'), 500, + 'EC2Exception', 'Unknown error occurred.') + do_check(KeyError('fake_msg'), 500, + 'KeyError', 'Unknown error occurred.') + do_check(exception.InvalidVpcIDNotFound('fake_msg'), 400, + 'InvalidVpcID.NotFound', 'fake_msg') + + def test_execute_proxy(self): + self.controller_class.return_value = mock.create_autospec( + cloud.CloudController, instance=True) + # NOTE(ft): recreate APIRequest to use mock with autospec + ec2_request = apirequest.APIRequest('FakeAction', 'fake_v1', + {'Param': 'fake_param'}) + self.environ['ec2.request'] = ec2_request + self.environ['QUERY_STRING'] = 'Version=fake_v1&Action=FakeAction' + self.requester.request.return_value = ({'status': 200, + 'content-type': 'fake_type'}, + 'fake_data') + + res = self.request.send(self.application) + + self.requester_class.assert_called_once_with('fake_v1', 'FAKE') + self.requester.request.assert_called_once_with(self.fake_context, + 'FakeAction', + {'Param': 'fake_param'}) + self.assertEqual(200, res.status_code) + self.assertEqual('fake_type', res.content_type) + self.assertEqual('fake_data', res.body) + + def test_execute_proxy_error(self): + self.controller.fake_action.side_effect = exception.EC2ServerError( + {'status': 400, 'content-type': 'fake_type'}, + 'fake_content') + + res = self.request.send(self.application) + + self.assertEqual(400, res.status_code) + self.assertEqual('fake_type', res.content_type) + self.assertEqual('fake_content', res.body) diff --git a/ec2api/tests/test_tools.py b/ec2api/tests/test_tools.py new file mode 100644 index 00000000..9ec3bb96 --- /dev/null +++ b/ec2api/tests/test_tools.py @@ -0,0 +1,45 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
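[Editor's note: the matcher classes defined above in ec2api/tests/matchers.py follow the testtools matcher protocol, where match() returns None on success and a mismatch object with describe()/get_details() otherwise, so they plug straight into assertThat. A minimal illustration of the DONTCARE wildcard, not part of the patch's own suite:]

import testtools

from ec2api.tests import matchers


class MatchersExampleTestCase(testtools.TestCase):

    def test_dict_matches_with_dontcare(self):
        expected = {'imageId': 'ami-00000003',
                    'imageState': 'DONTCARE',   # this value is skipped
                    'isPublic': True}
        actual = {'imageId': 'ami-00000003',
                  'imageState': 'available',
                  'isPublic': True}
        # match() returns None here, so assertThat passes; on failure it
        # would report the mismatch's describe() text
        self.assertThat(actual, matchers.DictMatches(expected))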
+ + +import testtools + +from ec2api.tests import tools + + +class TestToolsTestCase(testtools.TestCase): + + def test_update_dict(self): + d1 = {'a': 1, 'b': 2} + d2 = {'b': 22, 'c': 33} + res = tools.update_dict(d1, {}) + self.assertEqual({'a': 1, 'b': 2}, res) + res = tools.update_dict(d1, d2) + self.assertEqual({'a': 1, 'b': 22, 'c': 33}, res) + self.assertEqual({'a': 1, 'b': 2}, d1) + + def test_purge_dict(self): + d1 = {'a': 1, 'b': 2, 'c': 3} + res = tools.purge_dict(d1, ()) + self.assertEqual({'a': 1, 'b': 2, 'c': 3}, res) + res = tools.purge_dict(d1, ('b', 'c')) + self.assertEqual({'a': 1}, res) + self.assertEqual({'a': 1, 'b': 2, 'c': 3}, d1) + + def test_patch_dict(self): + d1 = {'a': 1, 'b': 2, 'c': 3} + d2 = {'c': 33, 'd': 44, 'e': 55} + res = tools.patch_dict(d1, d2, ('b', 'e')) + self.assertEqual({'a': 1, 'c': 33, 'd': 44}, res) + self.assertEqual({'a': 1, 'b': 2, 'c': 3}, d1) diff --git a/ec2api/tests/tools.py b/ec2api/tests/tools.py new file mode 100644 index 00000000..e68361ef --- /dev/null +++ b/ec2api/tests/tools.py @@ -0,0 +1,38 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import copy + + +def update_dict(dict1, dict2): + """Get a copy of union of two dicts.""" + res = copy.deepcopy(dict1) + res.update(dict2) + return res + + +def purge_dict(dict1, trash_keys): + """Get a copy of dict, removed keys.""" + res = copy.deepcopy(dict1) + for key in trash_keys: + res.pop(key, None) + return res + + +def patch_dict(dict1, dict2, trash_iter): + """Get a copy of union of two dicts, removed keys.""" + res = update_dict(dict1, dict2) + res = purge_dict(res, trash_iter) + return res diff --git a/ec2api/utils.py b/ec2api/utils.py new file mode 100644 index 00000000..2e79b630 --- /dev/null +++ b/ec2api/utils.py @@ -0,0 +1,49 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
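[Editor's note: the utils module that follows defines a tempdir context manager around tempfile.mkdtemp, with the directory rooted at CONF.tempdir when set. Typical use is a sketch like:]

import os

from ec2api import utils

# scratch space that is cleaned up even if the block raises
with utils.tempdir() as tmp:
    scratch = os.path.join(tmp, 'scratch.dat')
    with open(scratch, 'w') as f:
        f.write('temporary data')
# at this point tmp has been removed (rmtree in the finally clause)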
+ + +"""Utilities and helper functions.""" + +import contextlib +import shutil +import tempfile + +from oslo.config import cfg + +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import log as logging + +utils_opts = [ + cfg.StrOpt('tempdir', + help='Explicitly specify the temporary working directory'), +] +CONF = cfg.CONF +CONF.register_opts(utils_opts) + +LOG = logging.getLogger(__name__) + + +@contextlib.contextmanager +def tempdir(**kwargs): + argdict = kwargs.copy() + if 'dir' not in argdict: + argdict['dir'] = CONF.tempdir + tmpdir = tempfile.mkdtemp(**argdict) + try: + yield tmpdir + finally: + try: + shutil.rmtree(tmpdir) + except OSError as e: + LOG.error(_('Could not remove tmpdir: %s'), str(e)) diff --git a/ec2api/version.py b/ec2api/version.py new file mode 100644 index 00000000..1b1ea8b9 --- /dev/null +++ b/ec2api/version.py @@ -0,0 +1,17 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +version_info = pbr.version.VersionInfo('ec2-api') diff --git a/ec2api/wsgi.py b/ec2api/wsgi.py new file mode 100644 index 00000000..8898dbcc --- /dev/null +++ b/ec2api/wsgi.py @@ -0,0 +1,501 @@ +# Copyright 2014 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Utility methods for working with WSGI servers.""" + +from __future__ import print_function + +import os.path +import socket +import ssl +import sys + +import eventlet.wsgi +import greenlet +from oslo.config import cfg +from paste import deploy +import routes.middleware +import webob.dec +import webob.exc + +from ec2api import exception +from ec2api.openstack.common import excutils +from ec2api.openstack.common.gettextutils import _ +from ec2api.openstack.common import log as logging + +wsgi_opts = [ + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for ec2api'), + cfg.StrOpt('wsgi_log_format', + default='%(client_ip)s "%(request_line)s" status: %(status_code)s' + ' len: %(body_length)s time: %(wall_seconds).7f', + help='A python format string that is used as the template to ' + 'generate log lines. 
The following values can be formatted ' + 'into it: client_ip, date_time, request_line, status_code, ' + 'body_length, wall_seconds.'), + cfg.StrOpt('ssl_ca_file', + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + help="SSL certificate of API server"), + cfg.StrOpt('ssl_key_file', + help="SSL private key of API server"), + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X."), + cfg.IntOpt('wsgi_default_pool_size', + default=1000, + help="Size of the pool of greenthreads used by wsgi"), + cfg.IntOpt('max_header_line', + default=16384, + help="Maximum line size of message headers to be accepted. " + "max_header_line may need to be increased when using " + "large tokens (typically those generated by the " + "Keystone v3 API with big service catalogs)."), + ] +CONF = cfg.CONF +CONF.register_opts(wsgi_opts) + +LOG = logging.getLogger(__name__) + + +class Server(object): + """Server class to manage a WSGI server, serving a WSGI application.""" + + default_pool_size = CONF.wsgi_default_pool_size + + def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, + protocol=eventlet.wsgi.HttpProtocol, backlog=128, + use_ssl=False, max_url_len=None): + """Initialize, but do not start, a WSGI server. + + :param name: Pretty name for logging. + :param app: The WSGI application to serve. + :param host: IP address to serve the application. + :param port: Port number to server the application. + :param pool_size: Maximum number of eventlets to spawn concurrently. + :param backlog: Maximum number of queued connections. + :param max_url_len: Maximum length of permitted URLs. + :returns: None + :raises: ec2api.exception.InvalidInput + """ + # Allow operators to customize http requests max header line size. + eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line + self.name = name + self.app = app + self._server = None + self._protocol = protocol + self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) + self._logger = logging.getLogger("ec2api.wsgi.server") + self._wsgi_logger = logging.WritableLogger(self._logger) + self._use_ssl = use_ssl + self._max_url_len = max_url_len + + if backlog < 1: + raise exception.InvalidInput( + reason='The backlog must be more than 1') + + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + family = socket.AF_INET + + try: + self._socket = eventlet.listen(bind_addr, family, backlog=backlog) + except EnvironmentError: + LOG.error(_("Could not bind to %(host)s:%(port)s"), + {'host': host, 'port': port}) + raise + + (self.host, self.port) = self._socket.getsockname()[0:2] + LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__) + + def start(self): + """Start serving a WSGI application. 
+ + :returns: None + """ + if self._use_ssl: + try: + ca_file = CONF.ssl_ca_file + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError( + _("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError( + _("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError( + _("Unable to find key_file : %s") % key_file) + + if self._use_ssl and (not cert_file or not key_file): + raise RuntimeError( + _("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + self._socket = eventlet.wrap_ssl(self._socket, + **ssl_kwargs) + + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + self._socket.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to start %(name)s on %(host)s" + ":%(port)s with SSL support") % self.__dict__) + + wsgi_kwargs = { + 'func': eventlet.wsgi.server, + 'sock': self._socket, + 'site': self.app, + 'protocol': self._protocol, + 'custom_pool': self._pool, + 'log': self._wsgi_logger, + 'log_format': CONF.wsgi_log_format, + 'debug': False + } + + if self._max_url_len: + wsgi_kwargs['url_length_limit'] = self._max_url_len + + self._server = eventlet.spawn(**wsgi_kwargs) + + def stop(self): + """Stop this server. + + This is not a very nice action, as currently the method by which a + server is stopped is by killing its eventlet. + + :returns: None + + """ + LOG.info(_("Stopping WSGI server.")) + + if self._server is not None: + # Resize pool to stop new requests from being processed + self._pool.resize(0) + self._server.kill() + + def wait(self): + """Block, until the server has stopped. + + Waits on the server's eventlet to finish, then returns. + + :returns: None + + """ + try: + if self._server is not None: + self._server.wait() + except greenlet.GreenletExit: + LOG.info(_("WSGI server has stopped.")) + + +class Request(webob.Request): + pass + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = ec2api.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import ec2api.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. 
+ + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(explanation='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Middleware(Application): + """Base WSGI middleware. + + These classes require an application to be + initialized that will be called next. By default the middleware will + simply call its wrapped app, or you can override __call__ to customize its + behavior. + + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = ec2api.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import ec2api.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Debug(Middleware): + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response. 
+ + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + print(('*' * 40) + ' REQUEST ENVIRON') + for key, value in req.environ.items(): + print(key, '=', value) + print() + resp = req.get_response(self.application) + + print(('*' * 40) + ' RESPONSE HEADERS') + for (key, value) in resp.headers.iteritems(): + print(key, '=', value) + print() + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Iterator that prints the contents of a wrapper string.""" + print(('*' * 40) + ' BODY') + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print() + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be an object that can route + the request to the action-specific method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, '/svrlist', controller=sc, action='list') + + # Actions are all implicitly defined + mapper.resource('server', 'servers', controller=sc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch the request to the appropriate controller. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. Either returns 404 + or the routed WSGI app's response. + + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] + return app + + +class Loader(object): + """Used to load WSGI applications from paste configurations.""" + + def __init__(self, config_path=None): + """Initialize the loader, and attempt to find the config. + + :param config_path: Full or relative path to the paste config. + :returns: None + + """ + self.config_path = None + + config_path = config_path or CONF.api_paste_config + if not os.path.isabs(config_path): + self.config_path = CONF.find_file(config_path) + elif os.path.exists(config_path): + self.config_path = config_path + + if not self.config_path: + raise exception.ConfigNotFound(path=config_path) + + def load_app(self, name): + """Return the paste URLMap wrapped WSGI application. + + :param name: Name of the application to load. + :returns: Paste URLMap object wrapping the requested application. 
+ :raises: `ec2api.exception.PasteAppNotFound` + + """ + try: + LOG.debug(_("Loading app %(name)s from %(path)s") % + {'name': name, 'path': self.config_path}) + return deploy.loadapp("config:%s" % self.config_path, name=name) + except LookupError as err: + LOG.error(err) + raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/etc/ec2api/api-paste.ini b/etc/ec2api/api-paste.ini new file mode 100644 index 00000000..a5ec79a1 --- /dev/null +++ b/etc/ec2api/api-paste.ini @@ -0,0 +1,39 @@ +####### +# EC2 # +####### + +[composite:ec2api] +use = egg:Paste#urlmap +/services/Cloud: ec2apicloud + +[composite:ec2apicloud] +use = call:ec2api.api.auth:pipeline_factory +keystone = ec2apifaultwrap logrequest ec2apikeystoneauth cloudrequest validator ec2apiexecutor + +[filter:ec2apifaultwrap] +paste.filter_factory = ec2api.api:FaultWrapper.factory + +[filter:logrequest] +paste.filter_factory = ec2api.api:RequestLogging.factory + +[filter:ec2apikeystoneauth] +paste.filter_factory = ec2api.api:EC2KeystoneAuth.factory + +[filter:cloudrequest] +paste.filter_factory = ec2api.api:Requestify.factory + +[filter:validator] +paste.filter_factory = ec2api.api:Validator.factory + +[app:ec2apiexecutor] +paste.app_factory = ec2api.api:Executor.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = ec2api.api.auth:EC2KeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory diff --git a/etc/ec2api/ec2api.conf.sample b/etc/ec2api/ec2api.conf.sample new file mode 100644 index 00000000..f3cce468 --- /dev/null +++ b/etc/ec2api/ec2api.conf.sample @@ -0,0 +1,717 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions + +# Qpid broker hostname. (string value) +#qpid_hostname=nova + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). 
(string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=nova + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. (string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=nova + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. 
(string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in ec2api.exception +# + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in ec2api.paths +# + +# Directory where the ec2api python module is installed +# (string value) +#pybasedir=/home/apavlov/progmatic/ec2-api + +# Directory where ec2api binaries are installed (string value) +#bindir=/usr/local/bin + +# Top-level directory for maintaining ec2api's state (string +# value) +#state_path=$pybasedir + + +# +# Options defined in ec2api.service +# + +# Enable ssl connections or not (boolean value) +#use_ssl=false + +# The IP address on which the EC2 API will listen. (string +# value) +#ec2api_listen=0.0.0.0 + +# The port on which the EC2 API will listen. (integer value) +#ec2api_listen_port=8788 + +# Number of workers for EC2 API service. The default will be +# equal to the number of CPUs available. (integer value) +#ec2api_workers= + +# Maximum time since last check-in for up service (integer +# value) +#service_down_time=60 + + +# +# Options defined in ec2api.utils +# + +# Explicitly specify the temporary working directory (string +# value) +#tempdir= + + +# +# Options defined in ec2api.wsgi +# + +# File name for the paste.deploy config for ec2api (string +# value) +#api_paste_config=api-paste.ini + +# A python format string that is used as the template to +# generate log lines. The following values can be formatted +# into it: client_ip, date_time, request_line, status_code, +# body_length, wall_seconds. (string value) +#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# SSL certificate of API server (string value) +#ssl_cert_file= + +# SSL private key of API server (string value) +#ssl_key_file= + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Size of the pool of greenthreads used by wsgi (integer +# value) +#wsgi_default_pool_size=1000 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + + +# +# Options defined in ec2api.api +# + +# The IP address of the EC2 API server (string value) +#base_ec2_host=nova + +# The port of the EC2 API server (integer value) +#base_ec2_port=8773 + +# The protocol to use when connecting to the EC2 API server +# (http, https) (string value) +#base_ec2_scheme=http + +# The path prefix used to call the ec2 API server (string +# value) +#base_ec2_path=/services/Cloud + +# URL to get token from ec2 request. (string value) +#keystone_url=http://localhost:5000/v2.0 + +# Time in seconds before ec2 timestamp expires (integer value) +#ec2_timestamp_expiry=300 + + +# +# Options defined in ec2api.api.auth +# + +# whether to use per-user rate limiting for the api. (boolean +# value) +#api_rate_limit=false + +# Treat X-Forwarded-For as the canonical remote address. 
Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in ec2api.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
+
+
+#
+# Options defined in ec2api.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424.
(boolean +# value) +#use_syslog=false + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +[database] + +# +# Options defined in ec2api.db.api +# + +# Enable the experimental use of thread pooling for all DB API +# calls (boolean value) +# Deprecated group/name - [DEFAULT]/dbapi_use_tpool +#use_tpool=false + + +# +# Options defined in ec2api.novadb.sqlalchemy.api +# + +# The SQLAlchemy connection string used to connect to the nova +# database (string value) +#connection_nova= + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + + +# +# Options defined in ec2api.openstack.common.db.options +# + +# The file name to use with SQLite (string value) +#sqlite_db=ec2api.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=true + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=false + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=true + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + + +[keystone_authtoken] + +# +# Options defined in keystoneclient.middleware.auth_token +# + +# Prefix to prepend at the beginning of the path. Deprecated, +# use identity_uri. (string value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint. Deprecated, +# use identity_uri. (string value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint (http or https). +# Deprecated, use identity_uri. (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +#auth_uri= + +# Complete admin Identity API endpoint. This should specify +# the unversioned root endpoint e.g. https://localhost:35357/ +# (string value) +#identity_uri= + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# This option is deprecated and may be removed in a future +# release. Single shared secret with the Keystone +# configuration used for bootstrapping a Keystone +# installation, or otherwise bypassing the normal +# authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token= + +# Keystone account username (string value) +#admin_user= + +# Keystone account password (string value) +#admin_password= + +# Keystone service account tenant name to validate user tokens +# (string value) +#admin_tenant_name=admin + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. 
(boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=10 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the +# Keystone server. (boolean value) +#check_revocations_for_cached=false + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + diff --git a/install.sh b/install.sh new file mode 100755 index 00000000..6fb4dbcc --- /dev/null +++ b/install.sh @@ -0,0 +1,247 @@ +#!/bin/bash -e + +#Parameters to configure +SERVICE_USERNAME=ec2api +SERVICE_PASSWORD=ec2api +SERVICE_TENANT=service +CONNECTION="mysql://ec2api:ec2api@127.0.0.1/ec2api?charset=utf8" +LOG_DIR=/var/log/ec2api +CONF_DIR=/etc/ec2api +SIGNING_DIR=/var/cache/ec2api + +#Check for environment +if [[ -z "$OS_AUTH_URL" || -z "$OS_USERNAME" || -z "$OS_PASSWORD" || -z "$OS_TENANT_NAME" ]]; then + echo "Please set OS_AUTH_URL, OS_USERNAME, OS_PASSWORD and OS_TENANT_NAME" + exit 1 +fi + + + +#### utilities functions merged from devstack to check required parameter is not empty +# Prints line number and "message" in error format +# err $LINENO "message" +function err() { + local exitcode=$? 
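+    # Save the caller's xtrace setting first so it can be restored once the
+    # message has been printed ("set +o" prints the commands that recreate
+    # the current shell option state).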
+ errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} +# Prints backtrace info +# filename:lineno:function +function backtrace { + local level=$1 + local deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + + +# Prints line number and "message" then exits +# die $LINENO "message" +function die() { + local exitcode=$? + set +o xtrace + local line=$1; shift + if [ $exitcode == 0 ]; then + exitcode=1 + fi + backtrace 2 + err $line "$*" + exit $exitcode +} + + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set $LINENO env-var "message" +function die_if_not_set() { + local exitcode=$? + FXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + die $line "$*" + fi + $FXTRACE +} + +# Test if the named environment variable is set and not zero length +# is_set env-var +function is_set() { + local var=\$"$1" + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this +} + +####################################### + +get_data() { + local match_column=$(($1 + 1)) + local regex="$2" + local output_column=$(($3 + 1)) + shift 3 + + output=$("$@" | \ + awk -F'|' \ + "! /^\+/ && \$${match_column} ~ \"^ *${regex} *\$\" \ + { print \$${output_column} }") + + echo "$output" +} + +get_id () { + get_data 1 id 2 "$@" +} + +get_user() { + local username=$1 + + local user_id=$(get_data 2 $username 1 keystone user-list) + + if [ -n "$user_id" ]; then + echo "Found existing $username user" >&2 + echo $user_id + else + echo "Creating $username user..." >&2 + get_id keystone user-create --name=$username \ + --pass="$SERVICE_PASSWORD" \ + --tenant $SERVICE_TENANT \ + --email=$username@example.com + fi +} + +add_role() { + local user_id=$1 + local tenant=$2 + local role_id=$3 + local username=$4 + + user_roles=$(keystone user-role-list \ + --user_id $user_id\ + --tenant $tenant 2>/dev/null) + die_if_not_set $LINENO user_roles "Fail to get user_roles for tenant($tenant) and user_id($user_id)" + existing_role=$(get_data 1 $role_id 1 echo "$user_roles") + if [ -n "$existing_role" ] + then + echo "User $username already has role $role_id" >&2 + return + fi + keystone user-role-add --tenant $tenant \ + --user_id $user_id \ + --role_id $role_id +} + + +# Determines if the given option is present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} + +# Set an option in an INI file +# iniset config-file section option value +function iniset() { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + if ! sudo grep -q "^\[$section\]" "$file"; then + # Add section at the end + sudo bash -c "echo -e \"\n[$section]\" >>\"$file\"" + fi + if ! 
ini_has_option "$file" "$section" "$option"; then
+        # Add it
+        sudo sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" "$file"
+    else
+        # Replace it
+        sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file"
+    fi
+}
+
+
+#create keystone user with admin privileges
+ADMIN_ROLE=$(get_data 2 admin 1 keystone role-list)
+die_if_not_set $LINENO ADMIN_ROLE "Failed to get ADMIN_ROLE via 'keystone role-list'"
+SERVICE_TENANT_ID=$(get_data 2 service 1 keystone tenant-list)
+die_if_not_set $LINENO SERVICE_TENANT_ID "Failed to get service tenant via 'keystone tenant-list'"
+
+echo ADMIN_ROLE $ADMIN_ROLE
+echo SERVICE_TENANT $SERVICE_TENANT
+
+SERVICE_USERID=$(get_user $SERVICE_USERNAME)
+die_if_not_set $LINENO SERVICE_USERID "Failed to get user for $SERVICE_USERNAME"
+echo SERVICE_USERID $SERVICE_USERID
+add_role $SERVICE_USERID $SERVICE_TENANT $ADMIN_ROLE $SERVICE_USERNAME
+
+#create log dir
+echo Creating log dir
+sudo install -d $LOG_DIR
+
+CONF_FILE=$CONF_DIR/ec2api.conf
+APIPASTE_FILE=$CONF_DIR/api-paste.ini
+#copy conf files (do not overwrite existing ones)
+echo Creating configs
+sudo mkdir -p /etc/ec2api > /dev/null
+if [ ! -s $CONF_FILE ]; then
+    sudo cp etc/ec2api/ec2api.conf.sample $CONF_FILE
+fi
+if [ ! -s $APIPASTE_FILE ]; then
+    sudo cp etc/ec2api/api-paste.ini $APIPASTE_FILE
+fi
+
+AUTH_HOST=${OS_AUTH_URL#*//}
+AUTH_HOST=${AUTH_HOST%:*}
+AUTH_CACHE_DIR=${AUTH_CACHE_DIR:-/var/cache/ec2api}
+AUTH_PORT=`keystone catalog|grep -A 9 identity|grep adminURL|awk '{print $4}'`
+AUTH_PORT=${AUTH_PORT##*:}
+AUTH_PORT=${AUTH_PORT%%/*}
+AUTH_PROTO=${OS_AUTH_URL%%:*}
+PUBLIC_URL=${OS_AUTH_URL%:*}:8788/
+
+#update default config with some values
+iniset $CONF_FILE DEFAULT api_paste_config $APIPASTE_FILE
+iniset $CONF_FILE DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
+iniset $CONF_FILE DEFAULT verbose True
+iniset $CONF_FILE DEFAULT keystone_url "$OS_AUTH_URL"
+iniset $CONF_FILE database connection "$CONNECTION"
+
+iniset $CONF_FILE keystone_authtoken signing_dir $SIGNING_DIR
+iniset $CONF_FILE keystone_authtoken auth_host $AUTH_HOST
+iniset $CONF_FILE keystone_authtoken admin_user $SERVICE_USERNAME
+iniset $CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+iniset $CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT
+iniset $CONF_FILE keystone_authtoken auth_protocol $AUTH_PROTO
+iniset $CONF_FILE keystone_authtoken auth_port $AUTH_PORT
+
+
+#init cache dir
+echo Creating signing dir
+sudo mkdir -p $AUTH_CACHE_DIR
+sudo chown $USER $AUTH_CACHE_DIR
+sudo rm -f $AUTH_CACHE_DIR/*
+
+#install it
+echo Installing package
+sudo python setup.py develop
+sudo rm -rf build ec2_api.egg-info
+
+#recreate database
+echo Setting up database
+sudo bin/ec2api-db-setup deb
diff --git a/openstack-common.conf b/openstack-common.conf
new file mode 100644
index 00000000..be1f81c7
--- /dev/null
+++ b/openstack-common.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+
+# The list of modules to copy from openstack-common
+modules=db,db.sqlalchemy,eventlet_backdoor,gettextutils,excutils,jsonutils,local,timeutils,service,uuidutils
+
+# The base module to hold the copy of openstack.common
+base=ec2api
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..3038d1a7
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,26 @@
+anyjson>=0.3.3
+argparse
+Babel>=1.3
+eventlet>=0.13.0
+greenlet>=0.3.2
+httplib2>=0.7.5
+iso8601>=0.1.9
+jsonschema>=2.0.0,<3.0.0
+lxml>=2.3
+oslo.config>=1.4.0.0a2 +oslotest +paramiko>=1.13.0 +Paste +PasteDeploy>=1.5.0 +pbr>=0.6,!=0.7,<1.0 +pyasn1 +python-keystoneclient>=0.9.0 +Routes>=1.12.3,!=2.0 +six>=1.7.0 +SQLAlchemy>=0.8.4,!=0.9.5,<=0.9.99 +sqlalchemy-migrate>=0.9.1 +stevedore>=0.14 +suds>=0.4 +WebOb>=1.2.3 + + diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 00000000..91d96c22 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Ec2api's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " --unittests-only Run unit tests only, exclude functional tests." + echo " -p, --flake8 Just run flake8" + echo " -P, --no-flake8 Don't run static code checks" + echo " -h, --help Print this usage message" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) let always_venv=1; let never_venv=0;; + -N|--no-virtual-env) let always_venv=0; let never_venv=1;; + -p|--flake8) let just_flake8=1;; + -P|--no-flake8) let no_flake8=1;; + -f|--force) let force=1;; + -u|--update) update=1;; + --unittests-only) noseopts="$noseopts --exclude-dir=ec2api/tests/functional";; + -c|--coverage) noseopts="$noseopts --with-coverage --cover-package=ec2api";; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseopts= +noseargs= +wrapper="" +just_flake8=0 +no_flake8=0 +update=0 + +export NOSE_WITH_OPENSTACK=1 +export NOSE_OPENSTACK_COLOR=1 +export NOSE_OPENSTACK_RED=0.05 +export NOSE_OPENSTACK_YELLOW=0.025 +export NOSE_OPENSTACK_SHOW_ELAPSED=1 +export NOSE_OPENSTACK_STDOUT=1 + +for arg in "$@"; do + process_option $arg +done + +function run_tests { + # Cleanup *pyc + ${wrapper} find . -type f -name "*.pyc" -delete + # Just run the test suites in current environment + ${wrapper} rm -f tests.sqlite + ${wrapper} $NOSETESTS +} + +function run_flake8 { + echo "Running flake8 ..." + if [ $never_venv -eq 1 ]; then + echo "**WARNING**:" >&2 + echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2 + fi + + ${wrapper} flake8 +} + + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? 
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + exit +fi + +run_tests || exit + +if [ -z "$noseargs" ]; then + if [ $no_flake8 -eq 0 ]; then + run_flake8 + fi +fi diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..1596d748 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,69 @@ +[metadata] +name = ec2-api +version = 2014.2.1 +summary = OpenStack Ec2api Service +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 2.6 + +[files] +packages = + ec2api +scripts = + bin/ec2api-db-setup + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +console_scripts = + ec2-api=ec2api.cmd.api:main + ec2-api-manage=ec2api.cmd.manage:main + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + +[compile_catalog] +directory = ec2api/locale +domain = ec2api + +[update_catalog] +domain = ec2api +output_dir = ec2api/locale +input_file = ec2api/locale/ec2api.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = ec2api/locale/ec2api.pot + +[nosetests] +# NOTE(jkoelker) To run the test suite under nose install the following +# coverage http://pypi.python.org/pypi/coverage +# tissue http://pypi.python.org/pypi/tissue (pep8 checker) +# openstack-nose https://github.com/jkoelker/openstack-nose +verbosity=2 +tests=ec2api/tests +cover-package = ec2api +cover-html = true +cover-erase = true diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..73637574 --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. 
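+# (Importing multiprocessing eagerly, as done below, ensures its atexit
+# hook is registered up front -- a commonly used workaround.)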
+# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 00000000..998bedbe --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,13 @@ +coverage>=3.6 +discover +feedparser +fixtures>=0.3.14 +hacking>=0.9.2,<0.10 +mox>=0.5.3 +mock>=1.0 +psycopg2 +pylint==0.25.2 +python-subunit>=0.0.18 +sphinx>=1.1.2,!=1.2.0,<1.3 +testrepository>=0.0.18 +testtools>=0.9.34 diff --git a/tools/config/README b/tools/config/README new file mode 100644 index 00000000..0d5bd574 --- /dev/null +++ b/tools/config/README @@ -0,0 +1,20 @@ +This generate_sample.sh tool is used to generate etc/nova/nova.conf.sample + +Run it from the top-level working directory i.e. + + $> ./tools/config/generate_sample.sh -b ./ -p nova -o etc/nova + +Watch out for warnings about modules like libvirt, qpid and zmq not +being found - these warnings are significant because they result +in options not appearing in the generated config file. + + +The analyze_opts.py tool is used to find options which appear in +/etc/nova/nova.conf but not in etc/nova/nova.conf.sample +This helps identify options in the nova.conf file which are not used by nova. +The tool also identifies any options which are set to the default value. + +Run it from the top-level working directory i.e. + + $> ./tools/config/analyze_opts.py + diff --git a/tools/config/analyze_opts.py b/tools/config/analyze_opts.py new file mode 100755 index 00000000..8dd0fbf6 --- /dev/null +++ b/tools/config/analyze_opts.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# Copyright (c) 2012, Cloudscaling +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
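+# Example invocation (the paths shown are just the argparse defaults
+# defined further down in this file):
+#
+#   ./tools/config/analyze_opts.py -c /etc/nova/nova.conf \
+#       -s ./etc/nova/nova.conf.sample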
+''' +find_unused_options.py + +Compare the nova.conf file with the nova.conf.sample file to find any unused +options or default values in nova.conf +''' + +from __future__ import print_function + +import argparse +import os +import sys + +sys.path.append(os.getcwd()) +from oslo.config import iniparser + + +class PropertyCollecter(iniparser.BaseParser): + def __init__(self): + super(PropertyCollecter, self).__init__() + self.key_value_pairs = {} + + def assignment(self, key, value): + self.key_value_pairs[key] = value + + def new_section(self, section): + pass + + @classmethod + def collect_properties(cls, lineiter, sample_format=False): + def clean_sample(f): + for line in f: + if line.startswith("#") and not line.startswith("# "): + line = line[1:] + yield line + pc = cls() + if sample_format: + lineiter = clean_sample(lineiter) + pc.parse(lineiter) + return pc.key_value_pairs + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='''Compare the nova.conf + file with the nova.conf.sample file to find any unused options or + default values in nova.conf''') + + parser.add_argument('-c', action='store', + default='/etc/nova/nova.conf', + help='path to nova.conf\ + (defaults to /etc/nova/nova.conf)') + parser.add_argument('-s', default='./etc/nova/nova.conf.sample', + help='path to nova.conf.sample\ + (defaults to ./etc/nova/nova.conf.sample') + options = parser.parse_args() + + conf_file_options = PropertyCollecter.collect_properties(open(options.c)) + sample_conf_file_options = PropertyCollecter.collect_properties( + open(options.s), sample_format=True) + + for k, v in sorted(conf_file_options.items()): + if k not in sample_conf_file_options: + print("Unused:", k) + for k, v in sorted(conf_file_options.items()): + if k in sample_conf_file_options and v == sample_conf_file_options[k]: + print("Default valued:", k) diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh new file mode 100755 index 00000000..48add478 --- /dev/null +++ b/tools/config/check_uptodate.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +PROJECT_NAME=${PROJECT_NAME:-nova} +CFGFILE_NAME=${PROJECT_NAME}.conf.sample + +if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then + CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME} +elif [ -e etc/${CFGFILE_NAME} ]; then + CFGFILE=etc/${CFGFILE_NAME} +else + echo "${0##*/}: can not find config file" + exit 1 +fi + +TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX` +trap "rm -rf $TEMPDIR" EXIT + +tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR} + +if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE} +then + echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date." + echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh." + exit 1 +fi diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh new file mode 100755 index 00000000..94d6f3ec --- /dev/null +++ b/tools/config/generate_sample.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash + +print_hint() { + echo "Try \`${0##*/} --help' for more information." >&2 +} + +PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \ + --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@") + +if [ $? 
!= 0 ] ; then print_hint ; exit 1 ; fi + +eval set -- "$PARSED_OPTIONS" + +while true; do + case "$1" in + -h|--help) + echo "${0##*/} [options]" + echo "" + echo "options:" + echo "-h, --help show brief help" + echo "-b, --base-dir=DIR project base directory" + echo "-p, --package-name=NAME project package name" + echo "-o, --output-dir=DIR file output directory" + echo "-m, --module=MOD extra python module to interrogate for options" + echo "-l, --library=LIB extra library that registers options for discovery" + exit 0 + ;; + -b|--base-dir) + shift + BASEDIR=`echo $1 | sed -e 's/\/*$//g'` + shift + ;; + -p|--package-name) + shift + PACKAGENAME=`echo $1` + shift + ;; + -o|--output-dir) + shift + OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'` + shift + ;; + -m|--module) + shift + MODULES="$MODULES -m $1" + shift + ;; + -l|--library) + shift + LIBRARIES="$LIBRARIES -l $1" + shift + ;; + --) + break + ;; + esac +done + +BASEDIR=${BASEDIR:-`pwd`} +if ! [ -d $BASEDIR ] +then + echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1 +elif [[ $BASEDIR != /* ]] +then + BASEDIR=$(cd "$BASEDIR" && pwd) +fi + +PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}} +TARGETDIR=$BASEDIR/$PACKAGENAME +if ! [ -d $TARGETDIR ] +then + echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1 +fi + +OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc} +# NOTE(bnemec): Some projects put their sample config in etc/, +# some in etc/$PACKAGENAME/ +if [ -d $OUTPUTDIR/$PACKAGENAME ] +then + OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME +elif ! [ -d $OUTPUTDIR ] +then + echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2 + exit 1 +fi + +BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'` +find $TARGETDIR -type f -name "*.pyc" -delete +FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \ + -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u) + +RC_FILE="`dirname $0`/oslo.config.generator.rc" +if test -r "$RC_FILE" +then + source "$RC_FILE" +fi + +for mod in ${NOVA_CONFIG_GENERATOR_EXTRA_MODULES}; do + MODULES="$MODULES -m $mod" +done + +for lib in ${NOVA_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do + LIBRARIES="$LIBRARIES -l $lib" +done + +export EVENTLET_NO_GREENDNS=yes + +OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs) +[ "$OS_VARS" ] && eval "unset \$OS_VARS" +DEFAULT_MODULEPATH=nova.openstack.common.config.generator +MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH} +OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample +python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE + +# Hook to allow projects to append custom config file snippets +CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null) +for CONCAT_FILE in $CONCAT_FILES; do + cat $CONCAT_FILE >> $OUTPUTFILE +done diff --git a/tools/config/oslo.config.generator.rc b/tools/config/oslo.config.generator.rc new file mode 100644 index 00000000..f13b70f1 --- /dev/null +++ b/tools/config/oslo.config.generator.rc @@ -0,0 +1,2 @@ +NOVA_CONFIG_GENERATOR_EXTRA_LIBRARIES=oslo.messaging +NOVA_CONFIG_GENERATOR_EXTRA_MODULES=keystoneclient.middleware.auth_token diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py new file mode 100755 index 00000000..e997993a --- /dev/null +++ b/tools/db/schema_diff.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utility for diff'ing two versions of the DB schema. + +Each release cycle the plan is to compact all of the migrations from that +release into a single file. This is a manual and, unfortunately, error-prone +process. To ensure that the schema doesn't change, this tool can be used to +diff the compacted DB schema to the original, uncompacted form. + + +The schema versions are specified by providing a git ref (a branch name or +commit hash) and a SQLAlchemy-Migrate version number: +Run like: + + ./tools/db/schema_diff.py mysql master:latest my_branch:82 +""" +import datetime +import glob +import os +import subprocess +import sys + + +### Dump + + +def dump_db(db_driver, db_name, migration_version, dump_filename): + db_driver.create(db_name) + try: + migrate(db_driver, db_name, migration_version) + db_driver.dump(db_name, dump_filename) + finally: + db_driver.drop(db_name) + + +### Diff + + +def diff_files(filename1, filename2): + pipeline = ['diff -U 3 %(filename1)s %(filename2)s' % locals()] + + # Use colordiff if available + if subprocess.call(['which', 'colordiff']) == 0: + pipeline.append('colordiff') + + pipeline.append('less -R') + + cmd = ' | '.join(pipeline) + subprocess.check_call(cmd, shell=True) + + +### Database + + +class MySQL(object): + def create(self, name): + subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name]) + + def drop(self, name): + subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name]) + + def dump(self, name, dump_filename): + subprocess.check_call( + 'mysqldump -u root %(name)s > %(dump_filename)s' % locals(), + shell=True) + + def url(self, name): + return 'mysql://root@localhost/%s' % name + + +class Postgres(object): + def create(self, name): + subprocess.check_call(['createdb', name]) + + def drop(self, name): + subprocess.check_call(['dropdb', name]) + + def dump(self, name, dump_filename): + subprocess.check_call( + 'pg_dump %(name)s > %(dump_filename)s' % locals(), + shell=True) + + def url(self, name): + return 'postgres://localhost/%s' % name + + +def _get_db_driver_class(db_type): + if db_type == "mysql": + return MySQL + elif db_type == "postgres": + return Postgres + else: + raise Exception(_("database %s not supported") % db_type) + + +### Migrate + + +MIGRATE_REPO = os.path.join(os.getcwd(), "ec2api/db/sqlalchemy/migrate_repo") + + +def migrate(db_driver, db_name, migration_version): + earliest_version = _migrate_get_earliest_version() + + # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of + # migration numbers. 
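+    # (i.e. version_control the fresh database at one step below the
+    # earliest migration file that exists, then upgrade from there)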
+    _migrate_cmd(
+        db_driver, db_name, 'version_control', str(earliest_version - 1))
+
+    upgrade_cmd = ['upgrade']
+    if migration_version != 'latest':
+        upgrade_cmd.append(str(migration_version))
+
+    _migrate_cmd(db_driver, db_name, *upgrade_cmd)
+
+
+def _migrate_cmd(db_driver, db_name, *cmd):
+    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
+
+    args = ['python', manage_py]
+    args += cmd
+    args += ['--repository=%s' % MIGRATE_REPO,
+             '--url=%s' % db_driver.url(db_name)]
+
+    subprocess.check_call(args)
+
+
+def _migrate_get_earliest_version():
+    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
+
+    versions = []
+    for path in glob.iglob(versions_glob):
+        filename = os.path.basename(path)
+        prefix = filename.split('_', 1)[0]
+        try:
+            version = int(prefix)
+        except ValueError:
+            # Skip files whose prefix is not a migration number rather than
+            # appending a stale value from the previous iteration.
+            continue
+        versions.append(version)
+
+    versions.sort()
+    return versions[0]
+
+
+### Git
+
+
+def git_current_branch_name():
+    ref_name = git_symbolic_ref('HEAD', quiet=True)
+    current_branch_name = ref_name.replace('refs/heads/', '')
+    return current_branch_name
+
+
+def git_symbolic_ref(ref, quiet=False):
+    args = ['git', 'symbolic-ref', ref]
+    if quiet:
+        args.append('-q')
+    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+    stdout, stderr = proc.communicate()
+    return stdout.strip()
+
+
+def git_checkout(branch_name):
+    subprocess.check_call(['git', 'checkout', branch_name])
+
+
+def git_has_uncommitted_changes():
+    return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
+
+
+### Command
+
+
+def die(msg):
+    print >> sys.stderr, "ERROR: %s" % msg
+    sys.exit(1)
+
+
+def usage(msg=None):
+    if msg:
+        print >> sys.stderr, "ERROR: %s" % msg
+
+    prog = "schema_diff.py"
+    args = ["<db-type>", "<orig-branch:orig-version>",
+            "<new-branch:new-version>"]
+
+    print >> sys.stderr, "usage: %s %s" % (prog, ' '.join(args))
+    sys.exit(1)
+
+
+def parse_options():
+    try:
+        db_type = sys.argv[1]
+    except IndexError:
+        usage("must specify DB type")
+
+    try:
+        orig_branch, orig_version = sys.argv[2].split(':')
+    except (IndexError, ValueError):
+        usage('original branch and version required (e.g. master:82)')
+
+    try:
+        new_branch, new_version = sys.argv[3].split(':')
+    except (IndexError, ValueError):
+        usage('new branch and version required (e.g. master:82)')
+
+    return db_type, orig_branch, orig_version, new_branch, new_version
+
+
+def main():
+    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
+
+    ORIG_DB = 'orig_db_%s' % timestamp
+    NEW_DB = 'new_db_%s' % timestamp
+
+    ORIG_DUMP = ORIG_DB + ".dump"
+    NEW_DUMP = NEW_DB + ".dump"
+
+    options = parse_options()
+    db_type, orig_branch, orig_version, new_branch, new_version = options
+
+    # Since we're going to be switching branches, ensure user doesn't have
+    # any uncommitted changes
+    if git_has_uncommitted_changes():
+        die("You have uncommitted changes. 
Please commit them before running " + "this command.") + + db_driver = _get_db_driver_class(db_type)() + + users_branch = git_current_branch_name() + git_checkout(orig_branch) + + try: + # Dump Original Schema + dump_db(db_driver, ORIG_DB, orig_version, ORIG_DUMP) + + # Dump New Schema + git_checkout(new_branch) + dump_db(db_driver, NEW_DB, new_version, NEW_DUMP) + + diff_files(ORIG_DUMP, NEW_DUMP) + finally: + git_checkout(users_branch) + + if os.path.exists(ORIG_DUMP): + os.unlink(ORIG_DUMP) + + if os.path.exists(NEW_DUMP): + os.unlink(NEW_DUMP) + + +if __name__ == "__main__": + main() diff --git a/tools/enable-pre-commit-hook.sh b/tools/enable-pre-commit-hook.sh new file mode 100755 index 00000000..d97c634a --- /dev/null +++ b/tools/enable-pre-commit-hook.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +PRE_COMMIT_SCRIPT=.git/hooks/pre-commit + +make_hook() { + echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT + chmod +x $PRE_COMMIT_SCRIPT + + if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then + echo "pre-commit hook was created successfully" + else + echo "unable to create pre-commit hook" + fi +} + +# NOTE(jk0): Make sure we are in ec2-api's root directory before adding the hook. +if [ ! -d ".git" ]; then + echo "unable to find .git; moving up a directory" + cd .. + if [ -d ".git" ]; then + make_hook + else + echo "still unable to find .git; hook not created" + fi +else + make_hook +fi + diff --git a/tools/install_venv.py b/tools/install_venv.py new file mode 100644 index 00000000..5ae0b80d --- /dev/null +++ b/tools/install_venv.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +import install_venv_common as install_venv + + +def print_help(venv, root): + help = """ + EC2 development environment setup is complete. + + EC2 development uses virtualenv to track and manage Python dependencies + while in development and testing. 
+ + To activate the EC2 virtualenv for the extent of your current shell + session you can run: + + $ source %s/bin/activate + + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: + + $ %s/tools/with_venv.sh + + Also, make test will automatically use the virtualenv. + """ + print help % (venv, root) + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + if os.environ.get('tools_path'): + root = os.environ['tools_path'] + venv = os.path.join(root, '.venv') + if os.environ.get('venv'): + venv = os.environ['venv'] + + pip_requires = os.path.join(root, 'requirements.txt') + test_requires = os.path.join(root, 'test-requirements.txt') + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'ec2-api' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + options = install.parse_args(argv) + install.check_python_version() + install.check_dependencies() + install.create_virtualenv(no_site_packages=options.no_site_packages) + install.install_dependencies() + install.post_process() + print_help(venv, root) + +if __name__ == '__main__': + main(sys.argv) diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py new file mode 100644 index 00000000..b755a88e --- /dev/null +++ b/tools/install_venv_common.py @@ -0,0 +1,213 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides methods needed by installation script for OpenStack development +virtual environments. + +Since this script is used to bootstrap a virtualenv from the system's Python +environment, it should be kept strictly compatible with Python 2.6. + +Synced in from openstack-common +""" + +from __future__ import print_function + +import optparse +import os +import subprocess +import sys + + +class InstallVenv(object): + + def __init__(self, root, venv, requirements, + test_requirements, py_version, + project): + self.root = root + self.venv = venv + self.requirements = requirements + self.test_requirements = test_requirements + self.py_version = py_version + self.project = project + + def die(self, message, *args): + print(message % args, file=sys.stderr) + sys.exit(1) + + def check_python_version(self): + if sys.version_info < (2, 6): + self.die("Need Python Version >= 2.6") + + def run_command_with_code(self, cmd, redirect_output=True, + check_exit_code=True): + """Runs a command in an out-of-process shell. + + Returns the output of that command. Working directory is self.root. 
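+
+        (Strictly speaking it returns an (output, returncode) tuple; the
+        run_command wrapper below keeps only the output element.)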
+ """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + else: + return Distro( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. + + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print('Creating venv...', end=' ') + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print('done.') + else: + print("venv already exists...") + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print('Installing dependencies with pip (this can take a while)...') + + # First things first, make sure our venv has the latest pip and + # setuptools and pbr + self.pip_install('pip>=1.4') + self.pip_install('setuptools') + self.pip_install('pbr') + + self.pip_install('-r', self.requirements, '-r', self.test_requirements) + + def post_process(self): + self.get_distro().post_process() + + def parse_args(self, argv): + """Parses command-line arguments.""" + parser = optparse.OptionParser() + parser.add_option('-n', '--no-site-packages', + action='store_true', + help="Do not inherit packages from global Python " + "install") + return parser.parse_args(argv[1:])[0] + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print('Installing virtualenv via easy_install...', end=' ') + if self.run_command(['easy_install', 'virtualenv']): + print('Succeeded') + return + else: + print('Failed') + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + def post_process(self): + """Any distribution-specific post-processing gets done here. + + In particular, this is useful for applying patches to code inside + the venv. + """ + pass + + +class Fedora(Distro): + """This covers all Fedora-based distributions. 
+ + +class Fedora(Distro): + """This covers all Fedora-based distributions. + + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def apply_patch(self, originalfile, patchfile): + self.run_command(['patch', '-N', originalfile, patchfile], + check_exit_code=False) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.die("Please install 'python-virtualenv'.") + + super(Fedora, self).install_virtualenv() + + def post_process(self): + """Workaround for a bug in eventlet. + + This currently affects RHEL6.1, but the fix can safely be + applied to all RHEL and Fedora distributions. + + This can be removed when the fix is applied upstream. + + EC2: https://bugs.launchpad.net/nova/+bug/884915 + Upstream: https://bitbucket.org/eventlet/eventlet/issue/89 + RHEL: https://bugzilla.redhat.com/958868 + """ + + if os.path.exists('contrib/redhat-eventlet.patch'): + # Install the "patch" program if it's not there + if not self.check_pkg('patch'): + self.die("Please install 'patch'.") + + # Apply the eventlet patch + self.apply_patch(os.path.join(self.venv, 'lib', self.py_version, + 'site-packages', + 'eventlet/green/subprocess.py'), + 'contrib/redhat-eventlet.patch') diff --git a/tools/lintstack.py b/tools/lintstack.py new file mode 100755 index 00000000..3684b357 --- /dev/null +++ b/tools/lintstack.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012, AT&T Labs, Yun Mao +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""pylint error checking.""" + +import cStringIO as StringIO +import json +import re +import sys + +from pylint import lint +from pylint.reporters import text + +# Note(maoy): E1103 is an error code related to partial type inference +ignore_codes = ["E1103"] +# Note(maoy): this error message is the pattern of E0202; it should be +# ignored for ec2api.tests modules +ignore_messages = ["An attribute affected in ec2api.tests"] +# Note(maoy): we ignore all errors in openstack.common because that code is +# checked elsewhere. We also ignore ec2api.tests for now due to the high +# false positive rate. +ignore_modules = ["ec2api/openstack/common/", "ec2api/tests/"] + +KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" + + +class LintOutput(object): + + _cached_filename = None + _cached_content = None + + def __init__(self, filename, lineno, line_content, code, message, + lintoutput): + self.filename = filename + self.lineno = lineno + self.line_content = line_content + self.code = code + self.message = message + self.lintoutput = lintoutput
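+ + # NOTE: with pylint's ParseableTextReporter (see run_pylint below), each + # reported line looks roughly like this illustrative example: + #   ec2api/wsgi.py:42: [E1101, Server.start] Instance of 'Server' has no 'x' member + # from_line() below splits such a line into filename, line number, error + # code and message.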
(.*)", line) + matched = m.groups() + filename, lineno, code, message = (matched[0], int(matched[1]), + matched[2], matched[-1]) + if cls._cached_filename != filename: + with open(filename) as f: + cls._cached_content = list(f.readlines()) + cls._cached_filename = filename + line_content = cls._cached_content[lineno - 1].rstrip() + return cls(filename, lineno, line_content, code, message, + line.rstrip()) + + @classmethod + def from_msg_to_dict(cls, msg): + """From the output of pylint msg, to a dict, where each key + is a unique error identifier, value is a list of LintOutput + """ + result = {} + for line in msg.splitlines(): + obj = cls.from_line(line) + if obj.is_ignored(): + continue + key = obj.key() + if key not in result: + result[key] = [] + result[key].append(obj) + return result + + def is_ignored(self): + if self.code in ignore_codes: + return True + if any(self.filename.startswith(name) for name in ignore_modules): + return True + if any(msg in self.message for msg in ignore_messages): + return True + return False + + def key(self): + if self.code in ["E1101", "E1103"]: + # These two types of errors are like Foo class has no member bar. + # We discard the source code so that the error will be ignored + # next time another Foo.bar is encountered. + return self.message, "" + return self.message, self.line_content.strip() + + def json(self): + return json.dumps(self.__dict__) + + def review_str(self): + return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" + "%(code)s: %(message)s" % self.__dict__) + + +class ErrorKeys(object): + + @classmethod + def print_json(cls, errors, output=sys.stdout): + print >>output, "# automatically generated by tools/lintstack.py" + for i in sorted(errors.keys()): + print >>output, json.dumps(i) + + @classmethod + def from_file(cls, filename): + keys = set() + for line in open(filename): + if line and line[0] != "#": + d = json.loads(line) + keys.add(tuple(d)) + return keys + + +def run_pylint(): + buff = StringIO.StringIO() + reporter = text.ParseableTextReporter(output=buff) + args = ["--include-ids=y", "-E", "ec2api"] + lint.Run(args, reporter=reporter, exit=False) + val = buff.getvalue() + buff.close() + return val + + +def generate_error_keys(msg=None): + print "Generating", KNOWN_PYLINT_EXCEPTIONS_FILE + if msg is None: + msg = run_pylint() + errors = LintOutput.from_msg_to_dict(msg) + with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: + ErrorKeys.print_json(errors, output=f) + + +def validate(newmsg=None): + print "Loading", KNOWN_PYLINT_EXCEPTIONS_FILE + known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) + if newmsg is None: + print "Running pylint. Be patient..." + newmsg = run_pylint() + errors = LintOutput.from_msg_to_dict(newmsg) + + print "Unique errors reported by pylint: was %d, now %d." \ + % (len(known), len(errors)) + passed = True + for err_key, err_list in errors.items(): + for err in err_list: + if err_key not in known: + print err.lintoutput + print + passed = False + if passed: + print "Congrats! pylint check passed." + redundant = known - set(errors.keys()) + if redundant: + print "Extra credit: some known pylint exceptions disappeared." + for i in sorted(redundant): + print json.dumps(i) + print "Consider regenerating the exception file if you will." + else: + print ("Please fix the errors above. 
If you believe they are false" + " positives, run 'tools/lintstack.py generate' to overwrite.") + sys.exit(1) + + +def usage(): + print """Usage: tools/lintstack.py [generate|validate] + To generate pylint_exceptions file: tools/lintstack.py generate + To validate the current commit: tools/lintstack.py + """ + + +def main(): + option = "validate" + if len(sys.argv) > 1: + option = sys.argv[1] + if option == "generate": + generate_error_keys() + elif option == "validate": + validate() + else: + usage() + + +if __name__ == "__main__": + main() diff --git a/tools/lintstack.sh b/tools/lintstack.sh new file mode 100755 index 00000000..d8591d03 --- /dev/null +++ b/tools/lintstack.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Copyright (c) 2012-2013, AT&T Labs, Yun Mao +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Use lintstack.py to compare pylint errors. +# We run pylint twice, once on HEAD, once on the code before the latest +# commit for review. +set -e +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +# Get the current branch name. +GITHEAD=`git rev-parse --abbrev-ref HEAD` +if [[ "$GITHEAD" == "HEAD" ]]; then + # In detached head mode, get revision number instead + GITHEAD=`git rev-parse HEAD` + echo "Currently we are at commit $GITHEAD" +else + echo "Currently we are at branch $GITHEAD" +fi + +cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py + +if git rev-parse HEAD^2 2>/dev/null; then + # The HEAD is a merge commit. Here, the patch to review is + # HEAD^2, the master branch is at HEAD^1, and the patch was + # written based on HEAD^2~1. + PREV_COMMIT=`git rev-parse HEAD^2~1` + git checkout HEAD~1 + # The git merge is necessary for reviews that are part of a patch + # series; otherwise it is a no-op, so it does no harm. + git merge $PREV_COMMIT +else + # The HEAD is not a merge commit. This won't happen on gerrit. + # Most likely you are running against your own patch locally. + # We assume the patch to examine is HEAD, and we compare it against + # HEAD~1. + git checkout HEAD~1 +fi + +# First generate tools/pylint_exceptions from HEAD~1 +$TOOLS_DIR/lintstack.head.py generate +# Then use that as a reference to compare against HEAD git checkout $GITHEAD +$TOOLS_DIR/lintstack.head.py +echo "Check passed. FYI: the pylint exceptions are:" +cat $TOOLS_DIR/pylint_exceptions
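+ +# In short: a pylint baseline (tools/pylint_exceptions) is generated from +# HEAD~1, and the run against HEAD fails only on errors that are not in +# that baseline (see tools/lintstack.py for the comparison).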
diff --git a/tools/patch_tox_venv.py b/tools/patch_tox_venv.py new file mode 100644 index 00000000..14e65266 --- /dev/null +++ b/tools/patch_tox_venv.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +import install_venv_common as install_venv  # noqa + + +def first_file(file_list): + for candidate in file_list: + if os.path.exists(candidate): + return candidate + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + venv = os.environ['VIRTUAL_ENV'] + + pip_requires = first_file([ + os.path.join(root, 'requirements.txt'), + os.path.join(root, 'tools', 'pip-requires'), + ]) + test_requires = first_file([ + os.path.join(root, 'test-requirements.txt'), + os.path.join(root, 'tools', 'test-requires'), + ]) + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'ec2-api' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + # NOTE(dprince): For tox we only run post_process (which patches files, etc.) + install.post_process() + +if __name__ == '__main__': + main(sys.argv) diff --git a/tools/regression_tester.py b/tools/regression_tester.py new file mode 100755 index 00000000..5f3d8e07 --- /dev/null +++ b/tools/regression_tester.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Tool for checking if a patch contains a regression test. + +By default this runs against the current patch, but it can be pointed at any +gerrit review specified by change number (uses 'git review -d'). + +Idea: take the tests from the patch under review and run them against the code +from the previous patch. If the new tests pass, there is no regression test; +if the new tests fail against the old code, then either +* the new tests depend on new code and cannot confirm that the regression test + is valid (false positive), or +* the new tests detect the bug being fixed (a valid regression test). +Due to the risk of false positives, the results need some human +interpretation. +""" + +import optparse +import string +import subprocess +import sys + + +def run(cmd, fail_ok=False): + print "running: %s" % cmd + obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + shell=True) + obj.wait() + if obj.returncode != 0 and not fail_ok: + print "The above command terminated with an error." + sys.exit(obj.returncode) + return obj.stdout.read()
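+ + +# A sketch of the check implemented in main() below (the real commands +# follow in the code): +#   git checkout HEAD^ ec2api          # bring back the old code +#   git checkout HEAD ec2api/tests     # keep the new tests +#   tox -epy27 <changed test modules>  # new tests failing against the old +#                                      # code suggests a valid regression test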
+ + +def main(): + usage = """ + Tool for checking if a patch includes a regression test. + + Usage: %prog [options]""" + parser = optparse.OptionParser(usage) + parser.add_option("-r", "--review", dest="review", + help="gerrit review number to test") + (options, args) = parser.parse_args() + if options.review: + original_branch = run("git rev-parse --abbrev-ref HEAD") + run("git review -d %s" % options.review) + else: + print ("no gerrit review number specified, running on latest commit " + "on current branch.") + + test_works = False + + # run new tests with old code + run("git checkout HEAD^ ec2api") + run("git checkout HEAD ec2api/tests") + + # identify which tests have changed + tests = run("git whatchanged --format=oneline -1 | grep \"ec2api/tests\" " + "| cut -f2").split() + test_list = [] + for test in tests: + test_list.append(string.replace(test[0:-3], '/', '.')) + + if test_list == []: + test_works = False + expect_failure = "" + else: + # run new tests, expect them to fail + expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)), + fail_ok=True) + if "FAILED (id=" in expect_failure: + test_works = True + + # cleanup + run("git checkout HEAD ec2api") + if options.review: + new_branch = run("git status | head -1 | cut -d ' ' -f 4") + run("git checkout %s" % original_branch) + run("git branch -D %s" % new_branch) + + print expect_failure + print "" + print "*******************************" + if test_works: + print "FOUND a regression test" + else: + print "NO regression test" + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tools/with_venv.sh b/tools/with_venv.sh new file mode 100755 index 00000000..94e05c12 --- /dev/null +++ b/tools/with_venv.sh @@ -0,0 +1,7 @@ +#!/bin/bash +tools_path=${tools_path:-$(dirname $0)} +venv_path=${venv_path:-${tools_path}} +venv_dir=${venv_name:-/../.venv} +TOOLS=${tools_path} +VENV=${venv:-${venv_path}/${venv_dir}} +source ${VENV}/bin/activate && "$@" diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..1270eed3 --- /dev/null +++ b/tox.ini @@ -0,0 +1,56 @@ +[tox] +minversion = 1.6 +envlist = py26,py27,py33,pep8 +skipsdist = True + +[testenv] +sitepackages = True +usedevelop = True +install_command = pip install -U {opts} {packages} +setenv = VIRTUAL_ENV={envdir} + LANG=en_US.UTF-8 + LANGUAGE=en_US:en + LC_ALL=C +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + python tools/patch_tox_venv.py + python setup.py testr --slowest --testr-args='{posargs}' + +[tox:jenkins] +sitepackages = True +downloadcache = ~/cache/pip + +[testenv:pep8] +sitepackages = False +commands = + flake8 {posargs} + +[testenv:pylint] +setenv = VIRTUAL_ENV={envdir} +commands = bash tools/lintstack.sh + +[testenv:cover] +# Do not run test_coverage_ext tests while gathering coverage, as those +# tests conflict with coverage. +setenv = VIRTUAL_ENV={envdir} +commands = + python tools/patch_tox_venv.py + python setup.py testr --coverage \ + --testr-args='^(?!.*test.*coverage).*$' + +[testenv:venv] +commands = {posargs} + +[flake8] +# E712 is ignored on purpose, since it is normal to use 'column == true' +# in sqlalchemy. +# H803 skipped on purpose per list discussion. +# TODO: Hacking 0.6 checks to fix: +# H102 Apache 2.0 license header not found + +ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H102,H303,H404,F403,F811,F841,H803 +exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools + +[hacking] +import_exceptions = ec2api.openstack.common.gettextutils._
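
A quick usage sketch for the tox environments defined above (assuming tox
>= 1.6 is installed and the commands are run from the repository root):

    $ tox -epep8            # flake8 style checks
    $ tox -epy27            # unit tests under Python 2.7
    $ tox -ecover           # unit tests with coverage collection
    $ tox -epylint          # pylint baseline check via tools/lintstack.sh
    $ tox -evenv -- <cmd>   # run an arbitrary command inside the venv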