update gce api to current OpenStack

- add devstack plugin
- update openstack common
- move unit tests to unit folder
- update infrastructural files

Change-Id: Id72006f70110dbd1762f42b582470ac5f3439f2a
Andrey Pavlov 2015-09-02 15:56:59 +03:00
parent 99d3eedf59
commit 75905790e5
110 changed files with 887 additions and 5321 deletions

View File

@@ -1,4 +1,7 @@
 [DEFAULT]
-test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ gceapi/tests $LISTOPT $IDOPTION
+test_command=OS_STDOUT_CAPTURE=1 \
+    OS_STDERR_CAPTURE=1 \
+    OS_LOG_CAPTURE=1 \
+    ${PYTHON:-python} -m subunit.run discover -t ./ gceapi/tests/unit $LISTOPT $IDOPTION
 test_id_option=--load-list $IDFILE
 test_list_option=--list

13
devstack/README.rst Normal file
View File

@@ -0,0 +1,13 @@
======================
 Enabling in Devstack
======================

1. Download DevStack

2. Add this repo as an external repository::

    > cat local.conf
    [[local|localrc]]
    enable_plugin gce-api https://github.com/stackforge/gce-api

3. Run ``stack.sh``

249
devstack/plugin.sh Executable file
View File

@@ -0,0 +1,249 @@
# lib/gce-api
# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
# install_gceapi
# configure_gceapi
# start_gceapi
# stop_gceapi
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace
# Defaults
# --------
# Set up default directories
GCEAPI_DIR=$DEST/gce-api
GCEAPI_CONF_DIR=${GCEAPI_CONF_DIR:-/etc/gceapi}
GCEAPI_CONF_FILE=${GCEAPI_CONF_DIR}/gceapi.conf
GCEAPI_DEBUG=${GCEAPI_DEBUG:-True}
GCEAPI_STATE_PATH=${GCEAPI_STATE_PATH:=$DATA_DIR/gceapi}
GCEAPI_SERVICE_PORT=${GCEAPI_SERVICE_PORT:-8787}
GCEAPI_RABBIT_VHOST=${GCEAPI_RABBIT_VHOST:-''}
GCEAPI_ADMIN_USER=${GCEAPI_ADMIN_USER:-gceapi}
GCEAPI_KEYSTONE_SIGNING_DIR=${GCEAPI_KEYSTONE_SIGNING_DIR:-/tmp/keystone-signing-gceapi}
# Support entry points installation of console scripts
if [[ -d $GCEAPI_DIR/bin ]]; then
    GCEAPI_BIN_DIR=$GCEAPI_DIR/bin
else
    GCEAPI_BIN_DIR=$(get_python_exec_prefix)
fi
function recreate_endpoint {
    local endpoint=$1
    local description=$2
    local port=$3

    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
        # Remove nova's gce service/endpoint
        local endpoint_id=$(openstack endpoint list \
            --column "ID" \
            --column "Region" \
            --column "Service Name" \
            | grep " $REGION_NAME " \
            | grep " $endpoint " | get_field 1)
        if [[ -n "$endpoint_id" ]]; then
            openstack endpoint delete $endpoint_id
        fi

        local service_id=$(openstack service list \
            -c "ID" -c "Name" \
            | grep " $endpoint " | get_field 1)
        if [[ -n "$service_id" ]]; then
            openstack service delete $service_id
        fi

        local service_id=$(openstack service create \
            $endpoint \
            --name "$endpoint" \
            --description="$description" \
            -f value -c id)
        openstack endpoint create \
            $service_id \
            --region "$REGION_NAME" \
            --publicurl "$SERVICE_PROTOCOL://$SERVICE_HOST:$port/" \
            --adminurl "$SERVICE_PROTOCOL://$SERVICE_HOST:$port/" \
            --internalurl "$SERVICE_PROTOCOL://$SERVICE_HOST:$port/"
    fi
}
# create_gceapi_accounts() - Set up common required gceapi accounts
#
# Tenant     User     Roles
# ------------------------------
# service    gceapi   admin
function create_gceapi_accounts() {
    if ! is_service_enabled key; then
        return
    fi

    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")

    GCEAPI_USER=$(openstack user create \
        $GCEAPI_ADMIN_USER \
        --password "$SERVICE_PASSWORD" \
        --project $SERVICE_TENANT \
        --email gceapi@example.com \
        | grep " id " | get_field 2)

    openstack role add \
        $ADMIN_ROLE \
        --project $SERVICE_TENANT \
        --user $GCEAPI_USER

    recreate_endpoint "gce" "GCE Compatibility Layer" $GCEAPI_SERVICE_PORT
}
function mkdir_chown_stack {
    if [[ ! -d "$1" ]]; then
        sudo mkdir -p "$1"
    fi
    sudo chown $STACK_USER "$1"
}
function configure_gceapi_rpc_backend() {
    # Configure the rpc service.
    iniset_rpc_backend gceapi $GCEAPI_CONF_FILE DEFAULT

    # TODO(ruhe): get rid of this ugly workaround.
    inicomment $GCEAPI_CONF_FILE DEFAULT rpc_backend

    # Set non-default rabbit virtual host if required.
    if [[ -n "$GCEAPI_RABBIT_VHOST" ]]; then
        iniset $GCEAPI_CONF_FILE DEFAULT rabbit_virtual_host $GCEAPI_RABBIT_VHOST
    fi
}
function configure_gceapi_networking {
    # Use the keyword 'public' if no gceapi external network was set.
    # If one was set but the network does not exist, the first available
    # external network will be selected.
    local ext_net=${GCEAPI_EXTERNAL_NETWORK:-'public'}
    # Configure networking options for gceapi
    if [[ -n "$ext_net" ]]; then
        iniset $GCEAPI_CONF_FILE DEFAULT public_network $ext_net
    fi

    if [[ ,${ENABLED_SERVICES} =~ ,"q-" ]]; then
        iniset $GCEAPI_CONF_FILE DEFAULT network_api quantum
    else
        iniset $GCEAPI_CONF_FILE DEFAULT network_api nova
    fi
}
# Entry points
# ------------

# configure_gceapi() - Set config files, create data dirs, etc.
function configure_gceapi {
    mkdir_chown_stack "$GCEAPI_CONF_DIR"

    # Generate gceapi configuration file and configure common parameters.
    touch $GCEAPI_CONF_FILE
    cp $GCEAPI_DIR/etc/gceapi/api-paste.ini $GCEAPI_CONF_DIR
    cleanup_gceapi

    iniset $GCEAPI_CONF_FILE DEFAULT debug $GCEAPI_DEBUG
    iniset $GCEAPI_CONF_FILE DEFAULT use_syslog $SYSLOG
    iniset $GCEAPI_CONF_FILE DEFAULT state_path $GCEAPI_STATE_PATH

    # gceapi API configuration
    # ------------------------
    iniset $GCEAPI_CONF_FILE DEFAULT region $REGION_NAME

    #iniset $GCEAPI_CONF_FILE DEFAULT admin_tenant_name $SERVICE_TENANT_NAME
    #iniset $GCEAPI_CONF_FILE DEFAULT admin_user $GCEAPI_ADMIN_USER
    #iniset $GCEAPI_CONF_FILE DEFAULT admin_password $SERVICE_PASSWORD
    #iniset $GCEAPI_CONF_FILE DEFAULT keystone_url "http://${KEYSTONE_AUTH_HOST}:35357/v2.0"

    configure_gceapi_rpc_backend

    # Configure the database.
    iniset $GCEAPI_CONF_FILE database connection `database_connection_url gceapi`

    configure_gceapi_networking
}
# init_gceapi() - Initialize databases, etc.
function init_gceapi() {
    # (Re)create gceapi database
    recreate_database gceapi utf8
    $GCEAPI_BIN_DIR/gce-api-manage --config-file $GCEAPI_CONF_FILE db_sync
}

# install_gceapi() - Collect source and prepare
function install_gceapi() {
    # TODO(ruhe): use setup_develop once gceapi requirements match global-requirements.txt.
    # Both functions (setup_develop and setup_package) are defined at:
    # http://git.openstack.org/cgit/openstack-dev/devstack/tree/functions-common
    setup_package $GCEAPI_DIR -e
}

# start_gceapi() - Start running processes, including screen
function start_gceapi() {
    screen_it gce-api "cd $GCEAPI_DIR && $GCEAPI_BIN_DIR/gce-api --config-file $GCEAPI_CONF_DIR/gceapi.conf"
}

# stop_gceapi() - Stop running processes
function stop_gceapi() {
    # Kill the gceapi screen window
    screen -S $SCREEN_NAME -p gce-api -X kill
}

function cleanup_gceapi() {
    # Clean up the keystone signing dir
    sudo rm -rf $GCEAPI_KEYSTONE_SIGNING_DIR
}
# main dispatcher
if [[ "$1" == "stack" && "$2" == "install" ]]; then
    echo_summary "Installing gce-api"
    install_gceapi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
    echo_summary "Configuring gce-api"
    configure_gceapi
    create_gceapi_accounts
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
    echo_summary "Initializing gce-api"
    init_gceapi
    start_gceapi
fi

if [[ "$1" == "unstack" ]]; then
    stop_gceapi
    cleanup_gceapi
fi

# Restore xtrace
$XTRACE

# Local variables:
# mode: shell-script
# End:

4
devstack/settings Normal file
View File

@@ -0,0 +1,4 @@
# Devstack settings
# We have to add gce-api to the enabled services for screen_it to work
enable_service gce-api

View File

@@ -0,0 +1,4 @@
To generate the sample gceapi.conf file, run the following
command from the top level of the gceapi directory:

    tox -egenconfig

View File

@@ -0,0 +1,10 @@
[DEFAULT]
output_file = etc/gceapi/gceapi.conf.sample
wrap_width = 79
namespace = gceapi
namespace = gceapi.api
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.service.service
namespace = oslo.db
namespace = oslo.concurrency

View File

@@ -1,284 +0,0 @@
[DEFAULT]
#
# Options defined in gceapi.auth
#
# whether to use per-user rate limiting for the api. (boolean
# value)
#api_rate_limit=false
# The strategy to use for auth: keystone only for now. (string
# value)
#auth_strategy=keystone
# Treat X-Forwarded-For as the canonical remote address. Only
# enable this if you have a sanitizing proxy. (boolean value)
#use_forwarded_for=false
#
# Options defined in gceapi.exception
#
# make exception message format errors fatal (boolean value)
#fatal_exception_format_errors=false
#
# Options defined in gceapi.paths
#
# Directory where the gceapi python module is installed
# (string value)
#pybasedir=/usr/lib/python/site-packages
# Directory where gceapi binaries are installed (string value)
#bindir=$pybasedir/bin
# Top-level directory for maintaining gceapi's state (string
# value)
#state_path=$pybasedir
#
# Options defined in gceapi.service
#
# Enable ssl connections or not (boolean value)
#use_ssl=false
# maximum time since last check-in for up service (integer
# value)
#service_down_time=60
# IP address for gce api to listen (string value)
#gce_listen=0.0.0.0
# port for gce api to listen (integer value)
#gce_listen_port=8787
#
# Options defined in gceapi.wsgi
#
# File name for the paste.deploy config for gceapi-api (string
# value)
#api_paste_config=api-paste.ini
# A python format string that is used as the template to
# generate log lines. The following values can be formatted
# into it: client_ip, date_time, request_line, status_code,
# body_length, wall_seconds. (string value)
#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
# CA certificate file to use to verify connecting clients
# (string value)
#ssl_ca_file=<None>
# SSL certificate of API server (string value)
#ssl_cert_file=<None>
# SSL private key of API server (string value)
#ssl_key_file=<None>
# Sets the value of TCP_KEEPIDLE in seconds for each server
# socket. Not supported on OS X. (integer value)
#tcp_keepidle=600
#
# Options defined in gceapi.api
#
# Name of network API. neutron(quantum) or nova (string value)
#network_api=neutron
# Keystone URL (string value)
#keystone_gce_url=http://127.0.0.1:5000/v2.0
# name of public network (string value)
#public_network=public
# Place of protocol files (string value)
#protocol_dir=<None>
# Region of this service
#region=RegionOne
#
# Options defined in gceapi.openstack.common.db.sqlalchemy.session
#
# The file name to use with SQLite (string value)
#sqlite_db=gceapi.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
#
# Options defined in gceapi.openstack.common.eventlet_backdoor
#
# Enable eventlet backdoor. Acceptable values are 0, <port>,
# and <start>:<end>, where 0 results in listening on a random
# tcp port number; <port> results in listening on the
# specified port number (and not enabling backdoor if that
# port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range
# of port numbers. The chosen port is displayed in the
# service's log file. (string value)
#backdoor_port=<None>
#
# Options defined in gceapi.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# Format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs (list value)
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
# Publish error events (boolean value)
#publish_errors=false
# Make deprecations fatal (boolean value)
#fatal_deprecations=false
# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s] "
# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
#log_dir=<None>
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and then will be changed in J to honor RFC5424
# (boolean value)
#use_syslog=false
# (Optional) Use syslog rfc5424 format for logging. If
# enabled, will add APP-NAME (RFC5424) before the MSG part of
# the syslog message. The old format without APP-NAME is
# deprecated in I, and will be removed in J. (boolean value)
#use_syslog_rfc_format=false
# Syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
[database]
#
# Options defined in gceapi.openstack.common.db.api
#
# The backend to use for db (string value)
#backend=sqlalchemy
#
# Options defined in gceapi.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the
# database (string value)
#connection=sqlite:////gceapi/openstack/common/db/$sqlite_db
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=
# Timeout before idle sql connections are reaped (integer
# value)
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
#min_pool_size=1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
#max_pool_size=<None>
# Maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
#max_retries=10
# Interval between retries of opening a sql connection
# (integer value)
#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
#max_overflow=<None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
#connection_debug=0
# Add python stack traces to SQL as comment strings (boolean
# value)
#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
#pool_timeout=<None>

View File

@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging

 from gceapi.api import addresses
 from gceapi.api import discovery
@@ -29,7 +30,6 @@ from gceapi.api import regions
 from gceapi.api import routes
 from gceapi.api import snapshots
 from gceapi.api import zones
-from gceapi.openstack.common import log as logging

 from gceapi import wsgi
 from gceapi import wsgi_ext as openstack_api

View File

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from oslo.config import cfg
+from oslo_config import cfg

 from gceapi.api import base_api
 from gceapi.api import clients
@@ -21,7 +21,7 @@ from gceapi.api import operation_util
 from gceapi.api import region_api
 from gceapi.api import scopes
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _

 CONF = cfg.CONF

View File

@@ -19,7 +19,7 @@ from gceapi.api import region_api
 from gceapi.api import scopes
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _


 class API(base_api.API):

View File

@@ -18,11 +18,11 @@ Classes in this layer aggregate functionality of OpenStack necessary
 and sufficient to handle supported GCE API requests
 """

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_utils import timeutils

 from gceapi import db
 from gceapi import exception
-from gceapi.openstack.common import timeutils

 FLAGS = cfg.CONF

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -15,11 +13,11 @@
 from keystoneclient.v2_0 import client as kc
 from novaclient import client as novaclient
-from novaclient import shell as novashell
-from oslo.config import cfg
+from novaclient import exceptions as nova_exception
+from oslo_config import cfg
+from oslo_log import log as logging

-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _, _LW

 logger = logging.getLogger(__name__)
@@ -43,26 +41,25 @@ except ImportError:
     logger.info(_('glanceclient not available'))

+# Nova API version with microversions support
+REQUIRED_NOVA_API_VERSION = '2.1'
+LEGACY_NOVA_API_VERSION = '2'
+# Nova API's 2.3 microversion provides additional EC2 compliant instance
+# properties
+REQUIRED_NOVA_API_MICROVERSION = '2.3'
+_nova_api_version = None
+

 def nova(context, service_type='compute'):
-    computeshell = novashell.OpenStackComputeShell()
-    extensions = computeshell._discover_extensions("1.1")
-
     args = {
-        'project_id': context.project_id,
         'auth_url': CONF.keystone_gce_url,
-        'service_type': service_type,
-        'username': None,
-        'api_key': None,
-        'extensions': extensions,
+        'auth_token': context.auth_token,
+        'bypass_url': url_for(context, service_type),
     }
-
-    client = novaclient.Client(1.1, **args)
-
-    management_url = get_endpoint(context, service_type)
-    client.client.auth_token = context.auth_token
-    client.client.management_url = management_url
-
-    return client
+    global _nova_api_version
+    if not _nova_api_version:
+        _nova_api_version = _get_nova_api_version(context)
+    return novaclient.Client(_nova_api_version, **args)


 def neutron(context):
@@ -73,7 +70,7 @@ def neutron(context):
         'auth_url': CONF.keystone_gce_url,
         'service_type': 'network',
         'token': context.auth_token,
-        'endpoint_url': get_endpoint(context, 'network'),
+        'endpoint_url': url_for(context, 'network'),
     }

     return neutronclient.Client(**args)
@@ -90,7 +87,7 @@ def glance(context):
     }

     return glanceclient.Client(
-        "1", endpoint=get_endpoint(context, 'image'), **args)
+        "1", endpoint=url_for(context, 'image'), **args)


 def cinder(context):
@@ -105,7 +102,7 @@ def cinder(context):
     }

     _cinder = cinderclient.Client('1', **args)
-    management_url = get_endpoint(context, 'volume')
+    management_url = url_for(context, 'volume')
     _cinder.client.auth_token = context.auth_token
     _cinder.client.management_url = management_url
@@ -113,33 +110,47 @@


 def keystone(context):
-    _keystone = kc.Client(
+    return kc.Client(
         token=context.auth_token,
-        project_id=context.project_id,
         tenant_id=context.project_id,
         auth_url=CONF.keystone_gce_url)
-    return _keystone
+
+
+def url_for(context, service_type):
+    service_catalog = context.service_catalog
+    if not service_catalog:
+        catalog = keystone(context).service_catalog.catalog
+        service_catalog = catalog['serviceCatalog']
+        context.service_catalog = service_catalog
+    return get_url_from_catalog(service_catalog, service_type)


-def get_endpoint_from_catalog(service_catalog, service_type):
+def get_url_from_catalog(service_catalog, service_type):
     for service in service_catalog:
-        if service["type"] != service_type:
+        if service['type'] != service_type:
             continue
-        for endpoint in service["endpoints"]:
-            if endpoint["region"] != CONF["region"]:
-                continue
-            return endpoint.get("publicURL")
-        return None
+        for endpoint in service['endpoints']:
+            if 'publicURL' in endpoint:
+                return endpoint['publicURL']
+            elif endpoint.get('interface') == 'public':
+                # NOTE(andrey-mp): keystone v3
+                return endpoint['url']
+            else:
+                return None
     return None


-def get_endpoint(context, service_type):
-    service_catalog = context.service_catalog
-    if not service_catalog:
-        catalog = keystone(context).service_catalog.catalog
-        service_catalog = catalog["serviceCatalog"]
-        context.service_catalog = service_catalog
-    return get_endpoint_from_catalog(service_catalog, service_type)
+def _get_nova_api_version(context):
+    try:
+        novaclient.Client(REQUIRED_NOVA_API_VERSION)
+    except nova_exception.UnsupportedVersion:
+        logger.warning(
+            _LW('Nova client does not support v2.1 Nova API, use v2 instead. '
+                'A lot of useful EC2 compliant instance properties '
+                'will be unavailable.'))
+        return LEGACY_NOVA_API_VERSION
+    # NOTE(ft): novaclient supports microversions, use the last required one
+    return REQUIRED_NOVA_API_MICROVERSION
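In the rewritten nova() above, the client version is negotiated once per process and cached in the module-level _nova_api_version. A minimal sketch of that probe in isolation (simplified to exactly the calls visible in this diff; not a drop-in for the module):

    from novaclient import client as novaclient
    from novaclient import exceptions as nova_exception

    def pick_nova_version():
        # Instantiating a client with an unsupported version string raises
        # UnsupportedVersion locally, without contacting the cloud.
        try:
            novaclient.Client('2.1')
        except nova_exception.UnsupportedVersion:
            return '2'    # old novaclient: fall back to the legacy v2 API
        return '2.3'      # microversion exposing extra instance properties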

View File

@@ -18,7 +18,9 @@ import os.path
 import re

 from webob import exc
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import timeutils

 from gceapi.api import clients
 from gceapi.api import operation_api
@@ -26,9 +28,7 @@ from gceapi.api import operation_util
 from gceapi.api import scopes
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
-from gceapi.openstack.common import timeutils
+from gceapi.i18n import _

 LOG = logging.getLogger(__name__)
 FLAGS = cfg.CONF
@@ -268,7 +268,7 @@
         """
         context = self._get_context(request)
-        public_url = clients.get_endpoint(context, "gceapi")
+        public_url = clients.url_for(context, "gceapi")
         if public_url:
             public_url = public_url.rstrip("/") + "/"\
                 + request.script_name.lstrip("/")

View File

@@ -18,10 +18,10 @@ import threading

 import webob

 from keystoneclient.v2_0 import client as keystone_client
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging

 from gceapi.api import clients
-from gceapi.openstack.common import log as logging
 from gceapi import wsgi_ext as openstack_wsgi

 LOG = logging.getLogger(__name__)
@@ -46,7 +46,7 @@ class Controller(object):
         keystone = keystone_client.Client(username=user, password=password,
             tenant_name=tenant, auth_url=FLAGS.keystone_gce_url)
         catalog = keystone.service_catalog.catalog["serviceCatalog"]
-        public_url = clients.get_endpoint_from_catalog(catalog, "gceapi")
+        public_url = clients.get_url_from_catalog(catalog, "gceapi")
         if not public_url:
             public_url = req.host_url
         public_url = public_url.rstrip("/")

View File

@@ -14,14 +14,15 @@

 import copy

+from oslo_log import log as logging
+
 from gceapi.api import base_api
 from gceapi.api import clients
 from gceapi.api import network_api
 from gceapi.api import operation_util
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _


 PROTOCOL_MAP = {
@@ -175,8 +176,8 @@ class API(base_api.API):
         if not too_complex_for_gce:
             sourceRanges = [cidr for cidr in grouped_rules] or ["0.0.0.0/0"]
             if common_rules:
-                allowed = [_build_gce_port_rule(proto, common_rules[proto])
-                           for proto in common_rules]
+                allowed = [_build_gce_port_rule(p, common_rules[p])
+                           for p in common_rules]
             firewall["sourceRanges"] = sourceRanges
             firewall["allowed"] = allowed

View File

@@ -23,7 +23,7 @@ from gceapi.api import operation_api
 from gceapi.api import operation_util
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _


 class API(base_api.API):

View File

@@ -12,12 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from oslo_log import log as logging
+
 from gceapi.api import base_api
 from gceapi.api import clients
 from gceapi.api import operation_util
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _

 LOG = logging.getLogger(__name__)

View File

@@ -15,6 +15,8 @@
 import random
 import string

+from oslo_log import log as logging
+
 from gceapi.api import base_api
 from gceapi.api import clients
 from gceapi.api import disk_api
@@ -29,8 +31,7 @@ from gceapi.api import project_api
 from gceapi.api import scopes
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _

 LOG = logging.getLogger(__name__)

View File

@@ -14,6 +14,8 @@

 import string

+from oslo_log import log as logging
+
 from gceapi.api import base_api
 from gceapi.api import clients
 from gceapi.api import disk_api
@@ -21,8 +23,7 @@ from gceapi.api import operation_api
 from gceapi.api import operation_util
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _

 LOG = logging.getLogger(__name__)

View File

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from oslo_log import log as logging
 import webob

 from gceapi.api import common as gce_common
@@ -22,8 +23,7 @@ from gceapi.api import operation_util
 from gceapi.api import scopes
 from gceapi.api import wsgi as gce_wsgi
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _

 logger = logging.getLogger(__name__)

View File

@@ -13,14 +13,14 @@
 # limitations under the License.

 import netaddr
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging

 from gceapi.api import base_api
 from gceapi.api import clients
 from gceapi.api import operation_util
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _

 CONF = cfg.CONF

View File

@@ -19,7 +19,7 @@ from gceapi.api import clients
 from gceapi.api import operation_util
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _


 class API(base_api.API):

View File

@@ -19,12 +19,12 @@ import uuid

 from keystoneclient import exceptions
 from keystoneclient.v2_0 import client as keystone_client
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import timeutils
 import webob

-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
-from gceapi.openstack.common import timeutils
+from gceapi.i18n import _
 from gceapi import wsgi_ext as openstack_wsgi

 FLAGS = cfg.CONF
@@ -76,7 +76,7 @@ class Controller(object):
             "</form>"\
             "</body></html>"


-class Client:
+class Client(object):
     auth_start_time = 0
     auth_token = None
     expires_in = 1
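The class Client: to class Client(object): change makes the class new-style under Python 2, which matters as soon as descriptors are involved; a generic illustration (not project code):

    class Legacy:                 # Python 2: old-style instance type
        pass

    class Modern(object):         # new-style: property(), super(), __slots__
        expires_in = 1

        @property
        def expired(self):
            # property() is only reliable on new-style classes in Python 2
            return self.expires_in <= 0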

View File

@@ -14,11 +14,12 @@

 import uuid

+from oslo_utils import timeutils
+
 from gceapi.api import base_api
 from gceapi.api import scopes
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import timeutils
+from gceapi.i18n import _


 class API(base_api.API):

View File

@@ -14,9 +14,10 @@

 import threading

+from oslo_utils import timeutils
+
 from gceapi.api import operation_api
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import timeutils
+from gceapi.i18n import _


 def init_operation(context, op_type, target_type, target_name, scope):

View File

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from oslo.config import cfg
+from oslo_config import cfg

 from gceapi.api import base_api
 from gceapi.api import clients

View File

@@ -21,7 +21,7 @@ from gceapi.api import operation_util
 from gceapi.api import project_api
 from gceapi.api import scopes
 from gceapi.api import wsgi as gce_wsgi
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _


 class Controller(gce_common.Controller):

View File

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from oslo.config import cfg
+from oslo_config import cfg

 from gceapi.api import base_api
 from gceapi.api import scopes

View File

@@ -21,7 +21,7 @@ from gceapi.api import network_api
 from gceapi.api import operation_util
 from gceapi.api import utils
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _


 ALL_IP_CIDR = "0.0.0.0/0"

View File

@@ -14,7 +14,7 @@

 from gceapi.api import base_api
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _


 NOT_SUPPORTED_MESSAGE = _("Routes are not supported with nova network")

View File

@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from oslo_log import log as logging
+from oslo_serialization import jsonutils
 import webob

 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import jsonutils
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _
 from gceapi import wsgi_ext as openstack_wsgi

 LOG = logging.getLogger(__name__)

View File

@@ -18,14 +18,14 @@ Common Auth Middleware.

 """

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
 import webob.dec
 import webob.exc

 from gceapi import context
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import jsonutils
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _
 from gceapi import wsgi

View File

@@ -1,19 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gceapi.openstack.common import gettextutils
gettextutils.install('gceapi')

View File

@@ -1,48 +1,37 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
+# Copyright 2014
+# The Cloudscaling Group, Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

 """
-Gceapi API Server
+GCEapi API Server
 """

-import eventlet
 import sys

-eventlet.patcher.monkey_patch(os=False)
-
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging

 from gceapi import config
-from gceapi.openstack.common import log as logging
 from gceapi import service

 CONF = cfg.CONF
-CONF.import_opt('use_ssl', 'gceapi.service')


 def main():
     config.parse_args(sys.argv)
-    logging.setup('gceapi')
-    server = service.WSGIService(
-        'gce', use_ssl=CONF.use_ssl, max_url_len=16384)
+    logging.setup(CONF, 'gceapi')
+    server = service.WSGIService('gceapi', max_url_len=16384)
     service.serve(server)
     service.wait()

View File

@@ -1,29 +1,31 @@
-# Copyright 2014
-# The Cloudscaling Group, Inc.
+# Copyright 2013 Cloudscaling Group, Inc
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.

 """
 CLI interface for GCE API management.
 """

+import os
 import sys

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log

+from gceapi import config
 from gceapi.db import migration
-from gceapi.openstack.common import log
-from gceapi import version
+from gceapi.i18n import _

 CONF = cfg.CONF
@@ -35,9 +37,9 @@ def do_db_version():

 def do_db_sync():
-    """
-    Place a database under migration control and upgrade,
-    creating first if necessary.
+    """Place a database under migration control and upgrade,
+    creating if necessary.
     """
     migration.db_sync(CONF.command.version)
@@ -61,13 +63,20 @@ command_opt = cfg.SubCommandOpt('command',

 def main():
     CONF.register_cli_opt(command_opt)
     try:
-        default_config_files = cfg.find_config_files('gceapi')
-        CONF(sys.argv[1:], project='gceapi', prog='gce-api-manage',
-             version=version.version_info.version_string(),
-             default_config_files=default_config_files)
-        log.setup("gceapi")
-    except RuntimeError as e:
-        sys.exit("ERROR: %s" % e)
+        config.parse_args(sys.argv)
+        log.setup(CONF, "gceapi")
+    except cfg.ConfigFilesNotFoundError:
+        cfgfile = CONF.config_file[-1] if CONF.config_file else None
+        if cfgfile and not os.access(cfgfile, os.R_OK):
+            st = os.stat(cfgfile)
+            print(_("Could not read %s. Re-running with sudo") % cfgfile)
+            try:
+                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
+            except Exception:
+                print(_('sudo failed, continuing as if nothing happened'))
+
+        print(_('Please re-run gce-api-manage as root.'))
+        return(2)

     try:
         CONF.command.func()

View File

@@ -1,34 +1,49 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2012 Red Hat, Inc.
+# Copyright 2014
+# The Cloudscaling Group, Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_db import options
+from oslo_log import log

-from gceapi.openstack.common.db.sqlalchemy import session as db_session
 from gceapi import paths
 from gceapi import version

-_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
+CONF = cfg.CONF
+
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('gceapi.sqlite')
+
+_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
+                       'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
+                       'oslo.messaging=INFO', 'iso8601=WARN',
+                       'requests.packages.urllib3.connectionpool=WARN',
+                       'urllib3.connectionpool=WARN', 'websocket=WARN',
+                       'keystonemiddleware=WARN', 'routes.middleware=WARN',
+                       'stevedore=WARN', 'keystoneclient.auth=WARN']
+
+_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
+                                   '%(levelname)s %(name)s [%(request_id)s '
+                                   '%(user_identity)s] %(instance)s'
+                                   '%(message)s')


 def parse_args(argv, default_config_files=None):
-    db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
-                            sqlite_db='gceapi.sqlite')
+    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
+    log.register_options(CONF)
+    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
+                         sqlite_db='gceapi.sqlite')
+
     cfg.CONF(argv[1:],
              project='gceapi',
              version=version.version_info.version_string(),
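A sketch of how a service entry point is expected to call this, based only on what the diff shows (parse_args registering the oslo.log options and oslo.db defaults before evaluating the CLI); the config file path is illustrative:

    from oslo_log import log as logging

    from gceapi import config

    # parse_args() must run first so that logging.setup() finds its
    # options already registered on CONF.
    config.parse_args(['gce-api', '--config-file', '/etc/gceapi/gceapi.conf'])
    logging.setup(config.CONF, 'gceapi')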

View File

@@ -21,11 +21,12 @@

 import uuid

+from oslo_log import log as logging
+from oslo_utils import timeutils
+
 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _
 from gceapi.openstack.common import local
-from gceapi.openstack.common import log as logging
-from gceapi.openstack.common import timeutils

 LOG = logging.getLogger(__name__)

View File

@@ -27,11 +27,56 @@ functions from gceapi.db namespace, not the gceapi.db.api namespace.

 """

-from gceapi.openstack.common.db import api as db_api
+from eventlet import tpool
+from oslo_config import cfg
+from oslo_db import api as db_api
+from oslo_log import log as logging
+
+
+tpool_opts = [
+    cfg.BoolOpt('use_tpool',
+                default=False,
+                deprecated_name='dbapi_use_tpool',
+                deprecated_group='DEFAULT',
+                help='Enable the experimental use of thread pooling for '
+                     'all DB API calls'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(tpool_opts, 'database')

 _BACKEND_MAPPING = {'sqlalchemy': 'gceapi.db.sqlalchemy.api'}
-IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
+
+
+class GCEDBAPI(object):
+    """gce's DB API wrapper class.
+
+    This wraps the oslo DB API with an option to be able to use eventlet's
+    thread pooling. Since the CONF variable may not be loaded at the time
+    this class is instantiated, we must look at it on the first DB API call.
+    """
+
+    def __init__(self):
+        self.__db_api = None
+
+    @property
+    def _db_api(self):
+        if not self.__db_api:
+            gce_db_api = db_api.DBAPI(CONF.database.backend,
+                                      backend_mapping=_BACKEND_MAPPING)
+            if CONF.database.use_tpool:
+                self.__db_api = tpool.Proxy(gce_db_api)
+            else:
+                self.__db_api = gce_db_api
+        return self.__db_api
+
+    def __getattr__(self, key):
+        return getattr(self._db_api, key)
+
+
+IMPL = GCEDBAPI()
+LOG = logging.getLogger(__name__)


 def add_item(context, kind, data):
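A usage sketch of the lazy wrapper above (the context argument stands for an existing request context; the use_tpool setting is mentioned only to illustrate the switch):

    from gceapi import db

    # Attribute access is forwarded through GCEDBAPI.__getattr__; the real
    # backend (optionally wrapped in eventlet's tpool.Proxy when
    # [database] use_tpool = True) is built on the first call, after
    # CONF.database has been loaded.
    def save_item(context):
        return db.add_item(context, 'instance', {'id': 'i-1'})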

View File

@@ -1,23 +1,23 @@
-# Copyright 2014
-# The Cloudscaling Group, Inc.
+# Copyright 2013 Cloudscaling Group, Inc
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.

 """Database setup and migration commands."""

-from oslo.config import cfg
+from oslo_config import cfg

 from gceapi import exception
-from gceapi.openstack.common.gettextutils import _
+from gceapi.i18n import _

 CONF = cfg.CONF
@@ -60,14 +60,17 @@ IMPL = LazyPluggable('backend',
                     config_group='database',
                     sqlalchemy='gceapi.db.sqlalchemy.migration')

-INIT_VERSION = 0
-

 def db_sync(version=None):
     """Migrate the database to `version` or the most recent version."""
-    return IMPL.db_sync(INIT_VERSION, version=version)
+    return IMPL.db_sync(version=version)


 def db_version():
     """Display the current database version."""
-    return IMPL.db_version(INIT_VERSION)
+    return IMPL.db_version()
+
+
+def db_initial_version():
+    """The starting version for the database."""
+    return IMPL.db_initial_version()

View File

@@ -1,13 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -18,18 +18,34 @@ import ast
 import functools
 import sys

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_db.sqlalchemy import session as db_session

 import gceapi.context
 from gceapi.db.sqlalchemy import models
-from gceapi.openstack.common.db.sqlalchemy import session as db_session

 CONF = cfg.CONF
-CONF.import_opt('connection',
-                'gceapi.openstack.common.db.sqlalchemy.session',
-                group='database')
-
-get_session = db_session.get_session
+
+_MASTER_FACADE = None
+
+
+def _create_facade_lazily():
+    global _MASTER_FACADE
+
+    if _MASTER_FACADE is None:
+        _MASTER_FACADE = db_session.EngineFacade.from_config(CONF)
+    return _MASTER_FACADE
+
+
+def get_engine():
+    facade = _create_facade_lazily()
+    return facade.get_engine()
+
+
+def get_session(**kwargs):
+    facade = _create_facade_lazily()
+    return facade.get_session(**kwargs)


 def get_backend():
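Callers then obtain sessions through the shared facade in the usual oslo.db pattern; a hedged usage sketch (MyModel is illustrative, not a model defined in this diff):

    from gceapi.db.sqlalchemy import api as db_api

    session = db_api.get_session()
    with session.begin():
        # all statements in this block share one transaction and the
        # single lazily created engine behind the facade
        session.add(MyModel(id='i-1'))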

View File

@@ -1,13 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -1,13 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -12,19 +12,75 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 import os

-from gceapi.openstack.common.db.sqlalchemy import migration
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
+import sqlalchemy
+
+from gceapi.db.sqlalchemy import api as db_session
+from gceapi import exception
+from gceapi.i18n import _
+
+INIT_VERSION = 0
+_REPOSITORY = None
+
+get_engine = db_session.get_engine


-def db_sync(init_version, version=None):
-    return migration.db_sync(_get_repo_path(), version, init_version)
+def db_sync(version=None):
+    if version is not None:
+        try:
+            version = int(version)
+        except ValueError:
+            raise exception.GceapiException(_("version should be an integer"))
+
+    current_version = db_version()
+    repository = _find_migrate_repo()
+    if version is None or version > current_version:
+        return versioning_api.upgrade(get_engine(), repository, version)
+    else:
+        return versioning_api.downgrade(get_engine(), repository,
+                                        version)


-def db_version(init_version):
-    return migration.db_version(_get_repo_path(), init_version)
+def db_version():
+    repository = _find_migrate_repo()
+    try:
+        return versioning_api.db_version(get_engine(), repository)
+    except versioning_exceptions.DatabaseNotControlledError:
+        meta = sqlalchemy.MetaData()
+        engine = get_engine()
+        meta.reflect(bind=engine)
+        tables = meta.tables
+        if len(tables) == 0:
+            db_version_control(INIT_VERSION)
+            return versioning_api.db_version(get_engine(), repository)
+        else:
+            # Some pre-Essex DB's may not be version controlled.
+            # Require them to upgrade using Essex first.
+            raise exception.GceapiException(
+                _("Upgrade DB using Essex release first."))


-def _get_repo_path():
-    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
-                        'migrate_repo')
+def db_initial_version():
+    return INIT_VERSION
+
+
+def db_version_control(version=None):
+    repository = _find_migrate_repo()
+    versioning_api.version_control(get_engine(), repository, version)
+    return version
+
+
+def _find_migrate_repo():
+    """Get the path for the migrate repository."""
+    global _REPOSITORY
+    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'migrate_repo')
+    assert os.path.exists(path)
+    if _REPOSITORY is None:
+        _REPOSITORY = Repository(path)
+    return _REPOSITORY
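A hedged sketch of how these entry points behave when driven from gce-api-manage (values illustrative):

    from gceapi.db.sqlalchemy import migration

    migration.db_version()        # 0 on a fresh, empty database: it is
                                  # placed under version control first
    migration.db_sync()           # version=None: upgrade to the latest
    migration.db_sync(version=3)  # downgrades when 3 < current version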

View File

@@ -16,11 +16,10 @@
 SQLAlchemy models for gceapi data.
 """

+from oslo_db.sqlalchemy import models
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy import Column, Index, PrimaryKeyConstraint, String, Text

-from gceapi.openstack.common.db.sqlalchemy import models

 BASE = declarative_base()

View File

@@ -26,11 +26,12 @@ SHOULD include dedicated exception logging.

 import sys

-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from gceapi.i18n import _
 import webob.exc

-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging

 LOG = logging.getLogger(__name__)

46
gceapi/i18n.py Normal file
View File

@@ -0,0 +1,46 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n
DOMAIN = 'gce-api'
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def translate(value, user_locale):
return oslo_i18n.translate(value, user_locale)
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
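A short, hypothetical illustration (not part of this change) of how the exported names are meant to be consumed; `LOG` and `instance_id` are assumed to exist in the caller::

    from gceapi.i18n import _, _LW

    LOG.warning(_LW("instance %s not found, skipping"), instance_id)
    raise ValueError(_("version should be an integer"))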


@@ -0,0 +1,45 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
try:
import oslo_i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
_translators = oslo_i18n.TranslatorFactory(domain='gceapi')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
except ImportError:
# NOTE(dims): Support for cases where a project wants to use
# code from oslo-incubator, but is not ready to be internationalized
# (like tempest)
_ = _LI = _LW = _LE = _LC = lambda x: x


@@ -1,57 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Multiple DB API backend support.
Supported configuration options:
The following parameter is in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""
from oslo.config import cfg
from gceapi.openstack.common import importutils
db_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')
class DBAPI(object):
def __init__(self, backend_mapping=None):
if backend_mapping is None:
backend_mapping = {}
backend_name = CONF.database.backend
# Import the untranslated name if we don't have a
# mapping.
backend_path = backend_mapping.get(backend_name, backend_name)
backend_mod = importutils.import_module(backend_path)
self.__backend = backend_mod.get_backend()
def __getattr__(self, key):
return getattr(self.__backend, key)
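A minimal sketch (not part of this change) of the contract described above; the backend module path in the mapping is illustrative::

    # A backend module exposes get_backend(); DBAPI proxies attribute
    # access to whatever object that function returns.
    _BACKEND_MAPPING = {'sqlalchemy': 'gceapi.db.sqlalchemy.api'}  # assumed path

    IMPL = DBAPI(backend_mapping=_BACKEND_MAPPING)

    def item_get(context, item_id):
        # Dispatched through DBAPI.__getattr__ to the configured backend.
        return IMPL.item_get(context, item_id)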


@@ -1,54 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from gceapi.openstack.common.gettextutils import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=[], inner_exception=None):
self.columns = columns
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message))
class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass


@@ -1,265 +0,0 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from gceapi.openstack.common.db import exception
from gceapi.openstack.common.db.sqlalchemy import session as db_session
from gceapi.openstack.common.gettextutils import _
get_engine = db_session.get_engine
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
raises NotImplemented error
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite's ability to alter tables is very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E.g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
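A hypothetical usage sketch: the patch must be applied before sqlalchemy-migrate touches any SQLite table, for example right before a sync (the repository path is illustrative)::

    patch_migrate()                    # monkey-patch sqlalchemy-migrate once
    db_sync('/path/to/migrate_repo')   # then run migrations as usual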
def db_sync(abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(abs_path, init_version)
repository = _find_migrate_repo(abs_path)
_db_schema_sanity_check()
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def _db_schema_sanity_check():
engine = get_engine()
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE "%%utf8%%"')
table_names = [res[0] for res in engine.execute(onlyutf8_sql,
engine.url.database)]
if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names))
def db_version(abs_path, init_version):
"""Show the current version of the repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
else:
raise exception.DbMigrationError(
message=_(
"The database is not under version control, but has "
"tables. Please stamp the current version of the schema "
"manually."))
def db_version_control(abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
return Repository(abs_path)


@@ -1,117 +0,0 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from gceapi.openstack.common.db.sqlalchemy import session as sa
from gceapi.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
"""Save this object."""
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of code should be look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
self._i = iter(columns)
return self
def next(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)


@@ -1,187 +0,0 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import argparse
import os
import random
import string
from six import moves
import sqlalchemy
from gceapi.openstack.common.db import exception as exc
SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
def _gen_credentials(*names):
"""Generate credentials."""
auth_dict = {}
for name in names:
val = ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10))
auth_dict[name] = val
return auth_dict
def _get_engine(uri=SQL_CONNECTION):
"""Engine creation
By default the uri is SQL_CONNECTION, which holds the admin credentials.
Call the function without arguments to get an admin connection; an admin
connection is required to create the temporary user and database for each
particular test. Otherwise, pass an existing connection URI to recreate
the connection to the temporary database.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
raise exc.DBConnectionError(msg % SQL_CONNECTION)
def create_database(engine):
"""Provide temporary user and database for each particular test."""
driver = engine.name
auth = _gen_credentials('database', 'user', 'passwd')
sqls = {
'mysql': [
"drop database if exists %(database)s;",
"grant all on %(database)s.* to '%(user)s'@'localhost'"
" identified by '%(passwd)s';",
"create database %(database)s;",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
"create user %(user)s with password '%(passwd)s';",
"create database %(database)s owner %(user)s;",
]
}
if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database']
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(engine, sql_query, driver)
params = auth.copy()
params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
def drop_database(engine, current_uri):
"""Drop temporary database and user after each particular test."""
engine = _get_engine(current_uri)
admin_engine = _get_engine()
driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username}
if driver == 'sqlite':
try:
os.remove(auth['database'])
except OSError:
pass
return
sqls = {
'mysql': [
"drop database if exists %(database)s;",
"drop user '%(user)s'@'localhost';",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
]
}
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(admin_engine, sql_query, driver)
def main():
"""Controller to handle commands
::create: Create test user and database with random names.
::drop: Drop user and database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Under normal circumstances is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test '
'databases and users.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases and users.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')
args = parser.parse_args()
engine = _get_engine()
which = args.which
if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)
if __name__ == "__main__":
main()
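Besides the CLI entry point above, the helpers can be driven directly; a hypothetical sketch, assuming OS_TEST_DBAPI_ADMIN_CONNECTION holds admin credentials (otherwise the in-memory sqlite default applies)::

    admin_engine = _get_engine()              # admin connection
    test_uri = create_database(admin_engine)  # temporary user and database
    try:
        engine = _get_engine(test_uri)        # connect to the throwaway DB
        # ... run migrations and tests against `engine` ...
    finally:
        drop_database(admin_engine, test_uri)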


@@ -1,867 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
Example::
session.set_defaults(
sql_connection="sqlite:///var/lib/gceapi/sqlite.db",
sqlite_db="/var/lib/gceapi/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples::
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. Database Errors like IntegrityError will be raised in
session's __exit__ handler, and any try/except within the context managed
by session will not be triggered. And catching other non-database errors in
the session will not trigger the ROLLBACK, so exception handlers should
always be outside the session, unless the developer wants to do a partial
commit on purpose. If the connection is dropped before this is possible,
the database will implicitly roll back the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
::
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so::
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement::
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
Note: create_duplicate_foo is a trivially simple example of catching an
exception while using "with session.begin". Here we create two duplicate
instances with the same primary key and must catch the exception outside
of the context managed by a single session:
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = get_session()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
::
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example::
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted::
model.soft_delete() and query.soft_delete().
model.soft_delete() method works with single already fetched entry.
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use query.soft_delete(). Some examples::
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
::
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method::
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
::
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import os.path
import re
import time
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from gceapi.openstack.common.db import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging
from gceapi.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='gceapi.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
This function raises a DBDuplicateEntry exception if the integrity
error wraps a unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
    # where `t` is the table name and `c1`, `c2` are the
    # columns in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
for pattern in _DUP_KEY_RE_DB[engine_name]:
match = pattern.match(integrity_error.message)
if match:
break
else:
return
columns = match.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
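A hypothetical illustration of the `uniq_` naming convention parsed above (table name first, then the columns, all joined with '0'; the constraint name is invented)::

    name = "uniq_instances0hostname0project_id"
    assert name[len("uniq_"):].split("0")[1:] == ["hostname", "project_id"]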
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
@functools.wraps(f)
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
def get_engine(sqlite_fk=False, slave_engine=False,
mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
mysql_traditional_mode=mysql_traditional_mode)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
"""Set engine mode to 'traditional'.
Required to prevent silent truncates at insert or update operations
under MySQL. By default MySQL truncates an inserted string if it is
longer than the declared field, issuing only a warning. That is
fraught with data corruption.
"""
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False,
mysql_traditional_mode=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback)
if mysql_traditional_mode:
sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
else:
LOG.warning(_("This application has not enabled MySQL traditional"
" mode, which means silent data corruption may"
" occur. Please encourage the application"
" developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for filename, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if filename.endswith('session.py') and method == '_do_query':
continue
if filename.endswith('api.py') and method == 'wrapper':
continue
if filename.endswith('utils.py') and method == '_inner':
continue
if filename.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if filename.endswith('db/api.py'):
continue
# only trace inside gceapi
index = filename.rfind('gceapi')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (filename[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername


@@ -1,269 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import subprocess
import lockfile
from six import moves
import sqlalchemy
import sqlalchemy.exc
from gceapi.openstack.common.db.sqlalchemy import utils
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging
from gceapi.openstack.common.py3kcompat import urlutils
from gceapi.openstack.common import test
LOG = logging.getLogger(__name__)
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='mysql',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='postgres',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
path = lock_path or os.environ.get("GCEAPI_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug(_('Got lock "%s"') % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug(_('Lock released "%s"') % f.__name__)
return wrapper
return decorator
class BaseMigrationTestCase(test.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.test_databases = {}
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = moves.configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
except moves.configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = process.communicate()[0]
LOG.debug(output)
self.assertEqual(0, process.returncode,
"Failed to run: %s\n%s" % (cmd, output))
def _reset_pg(self, conn_pieces):
(user,
password,
database,
host) = utils.get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %s;") % database
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(droptable)
sql = ("create database %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
@_set_db_lock(lock_prefix='migration_tests-')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlutils.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
utils.get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
self.migration_api.version_control(engine, self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(engine,
self.REPOSITORY))
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
downgraded = self._migrate_down(
engine, version - 1, with_data=True)
if downgraded:
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine, version - 1)
if snake_walk and downgraded:
self._migrate_up(engine, version)
self._migrate_down(engine, version - 1)
def _migrate_down(self, engine, version, with_data=False):
try:
self.migration_api.downgrade(engine, self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(
version, self.migration_api.db_version(engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self.migration_api.upgrade(engine, self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
raise
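To make the _pre_upgrade_### / _check_### / _post_downgrade_### convention concrete, here is a minimal sketch of a consuming test; the class names, migration number, table, and column names are all hypothetical, not taken from this repo:

class TestMigrations(WalkVersionsMixin, BaseMigrationTestCase):
    def _pre_upgrade_002(self, engine):
        # Seed data before migration 002 runs.
        items = get_table(engine, 'items')
        engine.execute(items.insert().values(id=1, name='seed'))
        return {'id': 1}

    def _check_002(self, engine, data):
        # Verify schema and data after migration 002.
        items = get_table(engine, 'items')
        row = engine.execute(items.select()).first()
        self.assertEqual(data['id'], row['id'])

    def _post_downgrade_002(self, engine):
        # Verify state after downgrading back below 002.
        self.assertNotIn('extra_column', get_table(engine, 'items').c)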

View File

@@ -1,548 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging
from gceapi.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
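For illustration, credentials in the netloc are masked while the rest of the URL is preserved (a quick, made-up example):

url = 'mysql://gceapi:secret@localhost/gceapi'
sanitize_db_url(url)  # -> 'mysql://****:****@localhost/gceapi'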
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
:param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming it is 'id'
LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(len(sort_keys)):
crit_attrs = []
for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
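A usage sketch, assuming a SQLAlchemy session and a hypothetical Instance model with created_at and id columns (none of these names come from this module):

# Resolve the client-supplied marker id to a row, then fetch one page.
marker = session.query(Instance).get(marker_id) if marker_id else None
page = paginate_query(session.query(Instance), Instance, limit=50,
                      sort_keys=['created_at', 'id'],
                      marker=marker, sort_dir='desc').all()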
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
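For instance, the construct can copy a filtered set of rows between two tables in a single statement (table names are illustrative):

# Compiles to: INSERT INTO backup_table (SELECT ... WHERE deleted = 0)
src = get_table(migrate_engine, 'source_table')
dst = get_table(migrate_engine, 'backup_table')
migrate_engine.execute(
    InsertFromSelect(dst, src.select().where(src.c.deleted == 0)))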
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
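Called from a migration script this looks roughly as follows (the table, constraint, and column names are made up):

def upgrade(migrate_engine):
    # Remove the old single-column uniqueness rule on all three backends.
    drop_unique_constraint(migrate_engine, 'instances',
                           'uniq_instances0uuid', 'uuid')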
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
This method drops (or marks as `deleted` if use_soft_delete is True) old
duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
# NOTE(boris-42): sqlalchemy-migrate can't drop column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
# NOTE(boris-42): There is no other way to check whether a
# CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
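A sketch of how a migration would typically pair these two helpers (the table name is illustrative):

def upgrade(migrate_engine):
    # Soft-delete marker becomes id-typed: deleted == id for deleted rows.
    change_deleted_column_type_to_id_type(migrate_engine, 'instances')

def downgrade(migrate_engine):
    change_deleted_column_type_to_boolean(migrate_engine, 'instances')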
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
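For example, the two templates expand as follows (values made up):

get_connect_string('sqlite', 'test.db')
# -> 'sqlite:///test.db'
get_connect_string('mysql', 'openstack_citest', 'openstack_citest', 'secret')
# -> 'mysql://openstack_citest:secret@localhost/openstack_citest'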
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
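A quick illustration with urlutils.urlparse, as the callers above use it (values made up):

pieces = urlutils.urlparse('mysql://tester:pw@dbhost/gceapi')
get_db_connection_info(pieces)
# -> ('tester', 'pw', 'gceapi', 'dbhost')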

View File

@@ -16,21 +16,21 @@
 from __future__ import print_function
+import copy
 import errno
 import gc
+import logging
 import os
 import pprint
 import socket
 import sys
 import traceback
-import eventlet
 import eventlet.backdoor
 import greenlet
-from oslo.config import cfg
+from oslo_config import cfg
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.openstack.common._i18n import _LI
 help_for_backdoor_port = (
     "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
@@ -41,7 +41,6 @@ help_for_backdoor_port = (
     "chosen port is displayed in the service's log file.")
 eventlet_backdoor_opts = [
     cfg.StrOpt('backdoor_port',
-               default=None,
                help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
 ]
@@ -50,6 +49,12 @@ CONF.register_opts(eventlet_backdoor_opts)
 LOG = logging.getLogger(__name__)
+def list_opts():
+    """Entry point for oslo-config-generator.
+    """
+    return [(None, copy.deepcopy(eventlet_backdoor_opts))]
 class EventletBackdoorConfigValueError(Exception):
     def __init__(self, port_range, help_msg, ex):
         msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
@@ -137,8 +142,10 @@ def initialize_if_enabled():
     # In the case of backdoor port being zero, a port number is assigned by
     # listen(). In any case, pull the port number out here.
     port = sock.getsockname()[1]
-    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
-             {'port': port, 'pid': os.getpid()})
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
+        {'port': port, 'pid': os.getpid()}
+    )
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port

View File

@@ -1,99 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
import six
from gceapi.openstack.common.gettextutils import _
class save_and_reraise_exception(object):
"""Save current exception, run some code and then re-raise.
In some cases the exception context can be cleared, resulting in an
attempt to re-raise None after an exception handler is run. This
can happen when eventlet switches greenthreads, or when code running
inside an exception handler raises and catches an exception. In both
cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
can be used to suppress the exception. For example::
except Exception:
with save_and_reraise_exception() as ctxt:
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
def __enter__(self):
self.type_, self.value, self.tb, = sys.exc_info()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
six.reraise(self.type_, self.value, self.tb)
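A fuller version of the docstring's example, as it would appear in calling code (do_risky_work, cleanup, and should_be_reraised are hypothetical):

try:
    do_risky_work()
except Exception:
    with save_and_reraise_exception() as ctxt:
        cleanup()                     # runs with the exception state saved
        if not should_be_reraised():
            ctxt.reraise = False      # suppress the original exception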
def forever_retry_uncaught_exceptions(infunc):
def inner_func(*args, **kwargs):
last_log_time = 0
last_exc_message = None
exc_count = 0
while True:
try:
return infunc(*args, **kwargs)
except Exception as exc:
this_exc_message = six.u(str(exc))
if this_exc_message == last_exc_message:
exc_count += 1
else:
exc_count = 1
# Do not log any more frequently than once a minute unless
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = this_exc_message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
time.sleep(1)
return inner_func
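Typical use is as a decorator on a long-running loop that must survive unexpected errors (a sketch; the names are illustrative):

@forever_retry_uncaught_exceptions
def run_worker():
    while True:
        process_next_event()  # uncaught exceptions are logged, then retried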

View File

@@ -1,440 +0,0 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from gceapi.openstack.common.gettextutils import _
"""
import copy
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('gceapi'.upper() + '_LOCALEDIR')
_t = gettext.translation('gceapi', localedir=_localedir, fallback=True)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='gceapi')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
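A sketch of wiring this up at process start for this project's domain:

from gceapi.openstack.common import gettextutils
gettextutils.install('gceapi', lazy=True)  # _() now builds Message objects
# Translation is deferred until each Message is rendered for a locale.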
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='gceapi', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)

View File

@@ -1,66 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and returns an instance of it, first by trying
to find the class in a default namespace, then falling back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
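The four helpers side by side (the dotted paths are illustrative, not real modules):

cls = import_class('mypkg.drivers.Driver')       # the class object itself
drv = import_object('mypkg.drivers.Driver', 42)  # an instance; args pass through
mod = import_module('mypkg.utils')               # the module object
netaddr = try_import('netaddr')                  # the module, or None if absent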

View File

@@ -1,182 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
# however the function and object call signatures
# remained the same. This whole try/except block should
# be removed and replaced with a call to six.moves once
# six 1.4.2 is released. See http://bit.ly/1bqrVzu
import xmlrpc.client as xmlrpclib
import six
from gceapi.openstack.common import gettextutils
from gceapi.openstack.common import importutils
from gceapi.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
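A short illustration of the wrappers: to_primitive() lets dumps() handle values that json.dumps alone would reject:

import datetime
payload = {'when': datetime.datetime(2015, 9, 2, 15, 56),
           'tags': set(['a', 'b'])}
dumps(payload)  # datetime goes through timeutils.strtime(), the set via __iter__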

View File

@@ -1,657 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import importutils
from gceapi.openstack.common import jsonutils
from gceapi.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and then will be changed in J to honor RFC5424'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Use syslog rfc5424 format for logging. '
'If enabled, will add APP-NAME (RFC5424) before the '
'MSG part of the syslog message. The old format '
'without APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='Publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
super(RFCSysLogHandler, self).__init__(*args, **kwargs)
def format(self, record):
msg = super(RFCSysLogHandler, self).format(record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"gceapi.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
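
For orientation, a minimal usage sketch of the vendored log module above, which the rest of this change migrates away from toward oslo_log. The import path matches this tree; the messages are illustrative:

from gceapi.openstack.common import log as logging

logging.setup('gceapi')            # install handlers/formatters from CONF
LOG = logging.getLogger(__name__)  # returns a cached ContextAdapter
LOG.info('service started')
LOG.audit('auditable event')       # AUDIT level comes from BaseLoggerAdapter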


@@ -1,67 +0,0 @@
#
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Python2/Python3 compatibility layer for OpenStack
"""
import six
if six.PY3:
# python3
import urllib.error
import urllib.parse
import urllib.request
urlencode = urllib.parse.urlencode
urljoin = urllib.parse.urljoin
quote = urllib.parse.quote
quote_plus = urllib.parse.quote_plus
parse_qsl = urllib.parse.parse_qsl
unquote = urllib.parse.unquote
unquote_plus = urllib.parse.unquote_plus
urlparse = urllib.parse.urlparse
urlsplit = urllib.parse.urlsplit
urlunsplit = urllib.parse.urlunsplit
SplitResult = urllib.parse.SplitResult
urlopen = urllib.request.urlopen
URLError = urllib.error.URLError
pathname2url = urllib.request.pathname2url
else:
# python2
import urllib
import urllib2
import urlparse
urlencode = urllib.urlencode
quote = urllib.quote
quote_plus = urllib.quote_plus
unquote = urllib.unquote
unquote_plus = urllib.unquote_plus
parse = urlparse
parse_qsl = parse.parse_qsl
urljoin = parse.urljoin
urlparse = parse.urlparse
urlsplit = parse.urlsplit
urlunsplit = parse.urlunsplit
SplitResult = parse.SplitResult
urlopen = urllib2.urlopen
URLError = urllib2.URLError
pathname2url = urllib.pathname2url
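
This shim can be dropped because six.moves.urllib exposes the same names on both Python 2 and 3; a short equivalent-usage sketch (URL and query values are illustrative only):

from six.moves.urllib import parse as urlparse
from six.moves.urllib import request as urlrequest

query = urlparse.urlencode({'filter': 'name eq instance-1'})
url = urlparse.urljoin('http://localhost:8787/', 'compute/v1beta15/')
parts = urlparse.urlsplit(url)   # a SplitResult, as with the shim
# urlrequest.urlopen(url) would replace the shim's urlopen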


@@ -1,88 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities used in testing"""
import logging
import os
import tempfile
import fixtures
import testtools
_TRUE_VALUES = ('True', 'true', '1', 'yes')
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self._set_timeout()
self._fake_output()
self._fake_logs()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.tempdirs = []
def _set_timeout(self):
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
def _fake_output(self):
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
def _fake_logs(self):
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = logging.DEBUG
else:
level = logging.INFO
capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES
if capture_logs:
self.useFixture(
fixtures.FakeLogger(
format=_LOG_FORMAT,
level=level,
nuke_handlers=capture_logs,
)
)
else:
logging.basicConfig(format=_LOG_FORMAT, level=level)
def create_tempfiles(self, files, ext='.conf'):
tempfiles = []
for (basename, contents) in files:
if not os.path.isabs(basename):
(fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
else:
path = basename + ext
fd = os.open(path, os.O_CREAT | os.O_WRONLY)
tempfiles.append(path)
try:
os.write(fd, contents)
finally:
os.close(fd)
return tempfiles
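
A short sketch of how this base class was typically subclassed before its removal; the test name is illustrative and the module path is assumed from this tree's layout:

from gceapi.openstack.common import test


class TempfileTestCase(test.BaseTestCase):
    def test_create_tempfiles(self):
        # create_tempfiles() writes raw bytes via os.write()
        paths = self.create_tempfiles([('sample', b'key = value\n')])
        self.assertTrue(paths[0].endswith('.conf'))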


@@ -1,210 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(six.text_type(e))
except TypeError as e:
raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
else:
before = before.replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
else:
after = after.replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formatted date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
return total_seconds(delta)
def total_seconds(delta):
"""Return the total seconds of datetime.timedelta object.
Compute total seconds of datetime.timedelta, datetime.timedelta
doesn't have method total_seconds in Python2.6, calculate it manually.
"""
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:param dt: the time
:param window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon
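
For context, the time-override hooks above behave as follows; a minimal sketch against this (removed) module, whose oslo_utils.timeutils replacement elsewhere in this diff offers similar helpers:

import datetime

from gceapi.openstack.common import timeutils

timeutils.set_time_override(datetime.datetime(2015, 9, 2, 12, 0, 0))
assert timeutils.utcnow() == datetime.datetime(2015, 9, 2, 12, 0, 0)
timeutils.advance_time_seconds(30)  # shifts the frozen clock forward
assert timeutils.utcnow().second == 30
timeutils.clear_time_override()     # subsequent utcnow() is real again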


@@ -1,25 +1,21 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2012 Red Hat, Inc.
+# Copyright 2014
+# The Cloudscaling Group, Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import os
+import sys
 
-from oslo.config import cfg
+from oslo_config import cfg
 
 path_opts = [
     cfg.StrOpt('pybasedir',
@@ -27,7 +23,7 @@ path_opts = [
                                             '../')),
                help='Directory where the gceapi python module is installed'),
     cfg.StrOpt('bindir',
-               default='$pybasedir/bin',
+               default=os.path.join(sys.prefix, 'local', 'bin'),
                help='Directory where gceapi binaries are installed'),
     cfg.StrOpt('state_path',
                default='$pybasedir',


@@ -24,12 +24,12 @@ import sys
 import eventlet
 import greenlet
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import importutils
 
+from gceapi.i18n import _
 from gceapi.openstack.common import eventlet_backdoor
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import importutils
-from gceapi.openstack.common import log as logging
 from gceapi import wsgi
 
 LOG = logging.getLogger(__name__)


@@ -1,30 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`gceapi.tests` -- Gceapi Unittests
=====================================================
.. automodule:: gceapi.tests
:platform: Unix
"""
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)


@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,3 +15,16 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
+"""
+:mod:`gceapi.tests` -- Gceapi Unittests
+=====================================================
+
+.. automodule:: gceapi.tests
+   :platform: Unix
+"""
+
+# See http://code.google.com/p/python-nose/issues/detail?id=373
+# The code below enables nosetests to work with i18n _() blocks
+import __builtin__
+setattr(__builtin__, '_', lambda x: x)


@@ -20,18 +20,17 @@ from glanceclient import client as glanceclient
 from keystoneclient.v2_0 import client as kc
 from neutronclient.v2_0 import client as neutronclient
 from novaclient import client as novaclient
-from novaclient import shell as novashell
+from oslo_utils import timeutils
 
 import gceapi.api
-from gceapi.openstack.common import timeutils
-from gceapi import test
-from gceapi.tests.api import fake_cinder_client
-from gceapi.tests.api import fake_db
-from gceapi.tests.api import fake_glance_client
-from gceapi.tests.api import fake_keystone_client
-from gceapi.tests.api import fake_neutron_client
-from gceapi.tests.api import fake_nova_client
-from gceapi.tests.api import fake_request
+from gceapi.tests.unit.api import fake_cinder_client
+from gceapi.tests.unit.api import fake_db
+from gceapi.tests.unit.api import fake_glance_client
+from gceapi.tests.unit.api import fake_keystone_client
+from gceapi.tests.unit.api import fake_neutron_client
+from gceapi.tests.unit.api import fake_nova_client
+from gceapi.tests.unit.api import fake_request
+from gceapi.tests.unit import test
 
 COMMON_OPERATION = {
@@ -101,8 +100,6 @@ class GCEControllerTest(test.TestCase):
                        fake_glance_client.FakeGlanceClient)
         self.stubs.Set(cinderclient, "Client",
                        fake_cinder_client.FakeCinderClient)
-        self.stubs.Set(novashell.OpenStackComputeShell, '_discover_extensions',
-                       fake_nova_client.fake_discover_extensions)
         self.stubs.Set(novaclient, 'Client', fake_nova_client.FakeNovaClient)
         self.db_fixture = self.useFixture(fake_db.DBFixture(self.stubs))
         self.stubs.Set(


@@ -16,8 +16,8 @@ import copy
 from cinderclient import exceptions as exc
 
-from gceapi.tests.api import fake_request
-from gceapi.tests.api import utils
+from gceapi.tests.unit.api import fake_request
+from gceapi.tests.unit.api import utils
 
 FAKE_DISKS = [utils.FakeObject({


@@ -15,10 +15,10 @@
 import copy
 
 from glanceclient import exc as glance_exc
+from oslo_utils import timeutils
 
-from gceapi.openstack.common import timeutils
-from gceapi.tests.api import fake_request
-from gceapi.tests.api import utils
+from gceapi.tests.unit.api import fake_request
+from gceapi.tests.unit.api import utils
 
 _TIMESTAMP = timeutils.parse_isotime('2013-08-01T11:30:25')


@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from gceapi.tests.api import fake_request
-from gceapi.tests.api import utils
+from gceapi.tests.unit.api import fake_request
+from gceapi.tests.unit.api import utils
 
 FAKE_PROJECTS = [utils.FakeObject({


@@ -15,7 +15,7 @@
 import copy
 import uuid
 
-from gceapi.tests.api import fake_request
+from gceapi.tests.unit.api import fake_request
 
 FAKE_NETWORKS = {


@@ -19,9 +19,9 @@ import uuid
 from novaclient import client as novaclient
 
 from gceapi.api import base_api
-from gceapi.openstack.common.gettextutils import _
-from gceapi.tests.api import fake_request
-from gceapi.tests.api import utils
+from gceapi.i18n import _
+from gceapi.tests.unit.api import fake_request
+from gceapi.tests.unit.api import utils
 
 FAKE_DETAILED_ZONES = [utils.FakeObject({


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import addresses
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_ADDRESSES = [{
     "kind": "compute#address",


@@ -14,7 +14,7 @@
 import copy
 
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_DISK_1 = {


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import utils
-from gceapi import test
+from gceapi.tests.unit import test
 
 class FieldsTest(test.TestCase):


@@ -15,7 +15,7 @@
 import copy
 
 import gceapi.context
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 DEFAULT_FIREWALL = {


@@ -14,7 +14,7 @@
 import copy
 
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_IMAGE_1 = {
     "kind": "compute#image",


@@ -14,7 +14,7 @@
 import copy
 
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_INSTANCES = [{
     "kind": "compute#instance",


@@ -15,7 +15,7 @@
 import copy
 
 from gceapi.api import machine_types
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_FLAVORS = [{


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import networks
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_NETWORKS = [{


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import operations
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 FAKE_ADD_INSTANCE = {
     u'status': u'RUNNING',


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import projects
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_PROJECT = {


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import regions
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_REGIONS = [


@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 FAKE_LOCAL_ROUTE_1 = {
     u'priority': 1000,


@@ -15,7 +15,7 @@
 import copy
 
 from gceapi.api import snapshots
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_SNAPSHOTS = [{
     "kind": "compute#snapshot",


@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from gceapi.api import zones
-from gceapi.tests.api import common
+from gceapi.tests.unit.api import common
 
 EXPECTED_ZONES = [{


@@ -30,21 +30,16 @@ import collections
 import eventlet
 import fixtures
 import mox
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 import stubout
 import testtools
 
-from gceapi.openstack.common import log as logging
 from gceapi import paths
 
 CONF = cfg.CONF
-CONF.import_opt('connection',
-                'gceapi.openstack.common.db.sqlalchemy.session',
-                group='database')
-CONF.set_override('use_stderr', False)
-
-logging.setup('gceapi')
 LOG = logging.getLogger(__name__)
 
 eventlet.monkey_patch(os=False)


@@ -1,18 +1,16 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack Foundation
+# Copyright 2014
+# The Cloudscaling Group, Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import pbr.version


@@ -21,11 +21,12 @@
 import os.path
 import socket
-import sys
 
 import eventlet.wsgi
 import greenlet
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
 from paste import deploy
 import routes.middleware
 import ssl
@@ -33,9 +34,8 @@ import webob.dec
 import webob.exc
 
 from gceapi import exception
-from gceapi.openstack.common import excutils
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import log as logging
+from gceapi.i18n import _
 
 wsgi_opts = [
     cfg.StrOpt('api_paste_config',
@@ -95,7 +95,6 @@
         self._protocol = protocol
         self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
         self._logger = logging.getLogger("gceapi.%s.wsgi.server" % self.name)
-        self._wsgi_logger = logging.WritableLogger(self._logger)
         self._use_ssl = use_ssl
         self._max_url_len = max_url_len
@@ -186,7 +185,6 @@
             'site': self.app,
             'protocol': self._protocol,
             'custom_pool': self._pool,
-            'log': self._wsgi_logger,
             'log_format': CONF.wsgi_log_format
         }
@@ -357,42 +355,6 @@ class Middleware(Application):
         return self.process_response(response)
 
-
-class Debug(Middleware):
-    """Helper class for debugging a WSGI application.
-
-    Can be inserted into any WSGI application chain to get information
-    about the request and response.
-    """
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, req):
-        print(('*' * 40) + ' REQUEST ENVIRON')
-        for key, value in req.environ.items():
-            print(key, '=', value)
-        print()
-        resp = req.get_response(self.application)
-        print(('*' * 40) + ' RESPONSE HEADERS')
-        for (key, value) in resp.headers.iteritems():
-            print(key, '=', value)
-        print()
-        resp.app_iter = self.print_generator(resp.app_iter)
-        return resp
-
-    @staticmethod
-    def print_generator(app_iter):
-        """Iterator that prints the contents of a wrapper string."""
-        print ('*' * 40) + ' BODY'
-        for part in app_iter:
-            sys.stdout.write(part)
-            sys.stdout.flush()
-            yield part
-        print
-
 class Router(object):
     """WSGI middleware that maps incoming requests to WSGI apps."""


@@ -21,13 +21,13 @@ import re
 import routes
 import time
 
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
 import webob
 
 from gceapi import exception
-from gceapi.openstack.common import gettextutils
-from gceapi.openstack.common.gettextutils import _
-from gceapi.openstack.common import jsonutils
-from gceapi.openstack.common import log as logging
+from gceapi import i18n
+from gceapi.i18n import _
 from gceapi import wsgi
@@ -201,7 +201,7 @@
         if not self.accept_language:
             return None
         return self.accept_language.best_match(
-            gettextutils.get_available_languages('gceapi'))
+            i18n.get_available_languages('gceapi'))
 
 class ActionDispatcher(object):
@@ -714,7 +714,7 @@ class Fault(webob.exc.HTTPException):
         LOG.debug(_("Returning %(code)s to user: %(explanation)s"),
                   {'code': code, 'explanation': explanation})
 
-        explanation = gettextutils.translate(explanation, user_locale)
+        explanation = i18n.translate(explanation, user_locale)
         fault_data = {
             fault_name: {
                 'code': code,
@@ -779,11 +779,11 @@ class RateLimitFault(webob.exc.HTTPException):
         metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}}
 
         self.content['overLimit']['message'] = \
-            gettextutils.translate(
+            i18n.translate(
                 self.content['overLimit']['message'],
                 user_locale)
         self.content['overLimit']['details'] = \
-            gettextutils.translate(
+            i18n.translate(
                 self.content['overLimit']['details'],
                 user_locale)


@@ -251,4 +251,4 @@ sudo rm -rf build gce_api.egg-info
 
 #recreate database
 echo Setuping database
-sudo bin/gceapi-db-setup deb
+sudo tools/db/gceapi-db-setup deb

Some files were not shown because too many files have changed in this diff.